├── A2A ├── demo │ ├── ui │ │ ├── __init__.py │ │ ├── README.md │ │ ├── pages │ │ │ ├── __init__.py │ │ │ ├── task_list.py │ │ │ ├── conversation.py │ │ │ ├── event_list.py │ │ │ └── home.py │ │ ├── service │ │ │ ├── __init__.py │ │ │ ├── client │ │ │ │ ├── __init__.py │ │ │ │ └── client.py │ │ │ ├── server │ │ │ │ ├── __init__.py │ │ │ │ └── application_manager.py │ │ │ └── types.py │ │ ├── state │ │ │ ├── __init__.py │ │ │ ├── agent_state.py │ │ │ └── state.py │ │ ├── styles │ │ │ ├── __init__.py │ │ │ └── styles.py │ │ ├── utils │ │ │ ├── __init__.py │ │ │ └── agent_card.py │ │ ├── components │ │ │ ├── __init__.py │ │ │ ├── header.py │ │ │ ├── async_poller.js │ │ │ ├── dialog.py │ │ │ ├── poller.py │ │ │ ├── task_card.py │ │ │ ├── async_poller.py │ │ │ ├── api_key_dialog.py │ │ │ ├── event_viewer.py │ │ │ ├── page_scaffold.py │ │ │ ├── agent_list.py │ │ │ └── conversation_list.py │ │ └── pyproject.toml │ └── a2a_demo_arch.png ├── agents │ ├── google_adk │ │ ├── __init__.py │ │ ├── .python-version │ │ ├── pyproject.toml │ │ ├── README.md │ │ └── __main__.py │ ├── langgraph │ │ ├── __init__.py │ │ ├── .python-version │ │ ├── pyproject.toml │ │ └── __main__.py │ ├── marvin │ │ ├── __init__.py │ │ ├── .python-version │ │ ├── pyproject.toml │ │ └── README.md │ ├── mindsdb │ │ ├── __init__.py │ │ ├── .python-version │ │ ├── pyproject.toml │ │ ├── __main__.py │ │ └── README.md │ ├── crewai │ │ ├── .python-version │ │ ├── pyproject.toml │ │ ├── __main__.py │ │ └── README.md │ ├── llama_index_file_chat │ │ ├── __init__.py │ │ ├── .python-version │ │ ├── pyproject.toml │ │ └── __main__.py │ ├── ag2 │ │ ├── __init__.py │ │ ├── pyproject.toml │ │ └── __main__.py │ ├── semantickernel │ │ ├── pyproject.toml │ │ └── __main__.py │ └── README.md └── README.md ├── smolagents ├── multi_agents.py ├── skip_proxy.py ├── requirements.txt ├── env.py.example ├── code_execution.py ├── helloworld.py └── any_llm.py ├── langchain-mcp-adapters ├── __init__.py ├── clients │ ├── __init__.py │ ├── util.py │ ├── llm.py │ ├── mcpServers.json │ ├── single_server_client.py │ ├── multi_server_client.py │ ├── multi_server_client_wconfig.py │ ├── langgraph_client.py │ └── langgraph_client_wconfig.py ├── servers │ ├── resources │ │ └── zu.jpeg │ ├── math_server.py │ ├── weather_server.py │ └── versatile_server.py ├── requirements.txt └── .env.example ├── openai-agents ├── requirements.txt ├── .env.example ├── helloworld.py └── README.md ├── crewai └── helloworld │ ├── src │ └── helloworld │ │ ├── __init__.py │ │ ├── tools │ │ ├── __init__.py │ │ └── custom_tool.py │ │ ├── config │ │ ├── tasks.yaml │ │ └── agents.yaml │ │ ├── main.py │ │ └── crew.py │ ├── .gitignore │ ├── requirements.txt │ ├── knowledge │ └── user_preference.txt │ ├── .env.example │ ├── pyproject.toml │ └── README.md ├── langmem ├── requirements.txt ├── helloworld.py └── README.md ├── browser-use ├── requirements.txt ├── .env.example └── helloworld.py ├── google-adk ├── requirements.txt ├── multi_tool_agent │ ├── __init__.py │ ├── .env.example │ └── agent.py └── README.md ├── model_context_protocol ├── weather │ ├── .python-version │ ├── hello.py │ ├── pyproject.toml │ ├── README.md │ └── weather.py ├── README.md └── READIT.md ├── langchain-sandbox ├── .npmrc ├── requirements.txt ├── package.json ├── react.py ├── helloworld_stateful.py ├── helloworld.py ├── stateful_react.py ├── package-lock.json └── README.md ├── swarm ├── requirements.txt ├── env.py.example ├── helloworld.py └── README.md ├── langgraph-platform ├── agents │ ├── __init__.py │ ├── react_agent 
│ │ ├── graph_without_config.py │ │ ├── graph.py │ │ ├── configuration.py │ │ └── tools.py │ ├── utils.py │ └── supervisor │ │ ├── supervisor_prebuilt.py │ │ └── subagents.py ├── .env.example ├── requirements.txt ├── langgraph.json ├── pyproject.toml └── client.py ├── langgraph-codeact ├── requirements.txt └── helloworld.py ├── langgraph ├── requirements.txt ├── .env.example ├── config.py ├── README.md └── helloworld.py ├── agno ├── requirements.txt ├── screenshots │ ├── helloworld.png │ ├── with_tools.png │ ├── with_memory_and_reasoning.png │ └── with_knowledge_and_storage.png ├── helloworld.py ├── with_tools.py ├── with_knowledge_and_storage.py └── with_memory_and_reasoning.py ├── autogen ├── requirements.txt ├── env.py.example ├── helloworld.py ├── team.py └── README.md ├── langgraph_swarm ├── requirements.txt ├── .env.example ├── helloworld.py └── config.py ├── langchain ├── requirements.txt ├── .env.example ├── config.py └── README.md ├── langgraphjs ├── .env.example ├── package.json ├── .gitignore ├── helloworld_ollama.ts ├── helloworld_high_level.ts └── README.md ├── LICENSE └── README.md /A2A/demo/ui/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /A2A/demo/ui/README.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /smolagents/multi_agents.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /smolagents/skip_proxy.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /A2A/agents/google_adk/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /A2A/agents/langgraph/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /A2A/agents/marvin/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /A2A/agents/mindsdb/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /A2A/demo/ui/pages/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /A2A/demo/ui/service/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /A2A/demo/ui/state/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /A2A/demo/ui/styles/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /A2A/demo/ui/utils/__init__.py: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /A2A/demo/ui/components/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /A2A/demo/ui/service/client/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /A2A/demo/ui/service/server/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /langchain-mcp-adapters/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /A2A/agents/crewai/.python-version: -------------------------------------------------------------------------------- 1 | 3.12 2 | -------------------------------------------------------------------------------- /A2A/agents/marvin/.python-version: -------------------------------------------------------------------------------- 1 | 3.12 2 | -------------------------------------------------------------------------------- /A2A/agents/mindsdb/.python-version: -------------------------------------------------------------------------------- 1 | 3.12 2 | -------------------------------------------------------------------------------- /langchain-mcp-adapters/clients/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /openai-agents/requirements.txt: -------------------------------------------------------------------------------- 1 | openai-agents -------------------------------------------------------------------------------- /A2A/agents/google_adk/.python-version: -------------------------------------------------------------------------------- 1 | 3.12 2 | -------------------------------------------------------------------------------- /A2A/agents/langgraph/.python-version: -------------------------------------------------------------------------------- 1 | 3.12 2 | -------------------------------------------------------------------------------- /A2A/agents/llama_index_file_chat/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /crewai/helloworld/src/helloworld/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /langmem/requirements.txt: -------------------------------------------------------------------------------- 1 | langmem 2 | httpx[socks] -------------------------------------------------------------------------------- /browser-use/requirements.txt: -------------------------------------------------------------------------------- 1 | browser-use>=0.1.40 2 | -------------------------------------------------------------------------------- /crewai/helloworld/src/helloworld/tools/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /google-adk/requirements.txt: -------------------------------------------------------------------------------- 1 | google-adk 2 | aiohttp -------------------------------------------------------------------------------- /A2A/agents/llama_index_file_chat/.python-version: -------------------------------------------------------------------------------- 1 | 3.12 2 | -------------------------------------------------------------------------------- /google-adk/multi_tool_agent/__init__.py: -------------------------------------------------------------------------------- 1 | from . import agent -------------------------------------------------------------------------------- /model_context_protocol/weather/.python-version: -------------------------------------------------------------------------------- 1 | 3.10 2 | -------------------------------------------------------------------------------- /langchain-sandbox/.npmrc: -------------------------------------------------------------------------------- 1 | @jsr:registry=https://npm.jsr.io 2 | -------------------------------------------------------------------------------- /swarm/requirements.txt: -------------------------------------------------------------------------------- 1 | git+ssh://git@github.com/openai/swarm.git -------------------------------------------------------------------------------- /crewai/helloworld/.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | __pycache__/ 3 | .DS_Store 4 | -------------------------------------------------------------------------------- /langchain-sandbox/requirements.txt: -------------------------------------------------------------------------------- 1 | langchain 2 | langchain-sandbox -------------------------------------------------------------------------------- /A2A/agents/ag2/__init__.py: -------------------------------------------------------------------------------- 1 | # AG2 MCP Youtube Agent for A2A Protocol 2 | -------------------------------------------------------------------------------- /langgraph-platform/agents/__init__.py: -------------------------------------------------------------------------------- 1 | # Agents package for assistants demo -------------------------------------------------------------------------------- /langgraph-codeact/requirements.txt: -------------------------------------------------------------------------------- 1 | langchain 2 | langchain-openai 3 | langgraph-codeact -------------------------------------------------------------------------------- /langgraph/requirements.txt: -------------------------------------------------------------------------------- 1 | langgraph>=0.2.69 2 | litellm 3 | httpx[socks] 4 | python-dotenv -------------------------------------------------------------------------------- /openai-agents/.env.example: -------------------------------------------------------------------------------- 1 | # OpenAI API configurations 2 | OPENAI_API_KEY=your_openai_api_key -------------------------------------------------------------------------------- /smolagents/requirements.txt: -------------------------------------------------------------------------------- 1 | smolagents>=1.7.0 2 | smolagents[transformers] 3 | litellm 4 | -------------------------------------------------------------------------------- /swarm/env.py.example: -------------------------------------------------------------------------------- 1 | import os 2 | 
3 | os.environ["OPENAI_API_KEY"] = "your-api-key-here" 4 | -------------------------------------------------------------------------------- /browser-use/.env.example: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY=your-api-key-here 2 | OPENAI_API_BASE=your-api-base-here 3 | -------------------------------------------------------------------------------- /crewai/helloworld/requirements.txt: -------------------------------------------------------------------------------- 1 | crewai 2 | crewai[tools] 3 | httpx[socks]>=0.24.1 4 | socksio>=1.0.0 -------------------------------------------------------------------------------- /agno/requirements.txt: -------------------------------------------------------------------------------- 1 | agno 2 | yfinance 3 | pylance 4 | lancedb 5 | tantivy 6 | openai 7 | sqlalchemy 8 | -------------------------------------------------------------------------------- /autogen/requirements.txt: -------------------------------------------------------------------------------- 1 | autogen-agentchat>=0.4.5 2 | autogen-ext[openai,web-surfer] 3 | autogenstudio 4 | playwright -------------------------------------------------------------------------------- /A2A/demo/a2a_demo_arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ababdotai/awesome-agent-quickstart/HEAD/A2A/demo/a2a_demo_arch.png -------------------------------------------------------------------------------- /google-adk/multi_tool_agent/.env.example: -------------------------------------------------------------------------------- 1 | GOOGLE_GENAI_USE_VERTEXAI=FALSE 2 | GOOGLE_API_KEY=PASTE_YOUR_ACTUAL_API_KEY_HERE -------------------------------------------------------------------------------- /agno/screenshots/helloworld.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ababdotai/awesome-agent-quickstart/HEAD/agno/screenshots/helloworld.png -------------------------------------------------------------------------------- /agno/screenshots/with_tools.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ababdotai/awesome-agent-quickstart/HEAD/agno/screenshots/with_tools.png -------------------------------------------------------------------------------- /langgraph_swarm/requirements.txt: -------------------------------------------------------------------------------- 1 | langgraph>=0.3.2 2 | langgraph-prebuilt>=0.1.1 3 | langgraph-swarm>=0.0.4 4 | langchain-openai>=0.2.14 -------------------------------------------------------------------------------- /model_context_protocol/weather/hello.py: -------------------------------------------------------------------------------- 1 | def main(): 2 | print("Hello from weather!") 3 | 4 | 5 | if __name__ == "__main__": 6 | main() 7 | -------------------------------------------------------------------------------- /langchain-sandbox/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "dependencies": { 3 | "@langchain/pyodide-sandbox": "npm:@jsr/langchain__pyodide-sandbox@^0.0.4" 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /agno/screenshots/with_memory_and_reasoning.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ababdotai/awesome-agent-quickstart/HEAD/agno/screenshots/with_memory_and_reasoning.png -------------------------------------------------------------------------------- /autogen/env.py.example: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | os.environ["OPENAI_API_KEY"] = "your-api-key-here" 4 | os.environ["OPENAI_API_BASE"] = "https://api.openai.com/v1" 5 | -------------------------------------------------------------------------------- /smolagents/env.py.example: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | os.environ["OPENAI_API_KEY"] = "your-api-key-here" 4 | os.environ["OPENAI_API_BASE"] = "https://api.openai.com/v1" 5 | -------------------------------------------------------------------------------- /agno/screenshots/with_knowledge_and_storage.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ababdotai/awesome-agent-quickstart/HEAD/agno/screenshots/with_knowledge_and_storage.png -------------------------------------------------------------------------------- /langchain-mcp-adapters/servers/resources/zu.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ababdotai/awesome-agent-quickstart/HEAD/langchain-mcp-adapters/servers/resources/zu.jpeg -------------------------------------------------------------------------------- /langchain-mcp-adapters/clients/util.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | def get_servers_dir(): 4 | return os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "servers") -------------------------------------------------------------------------------- /langchain-mcp-adapters/requirements.txt: -------------------------------------------------------------------------------- 1 | socksio 2 | fastmcp 3 | langchain-mcp-adapters>=0.1.4 4 | langgraph>=0.3.21 5 | langchain-openai 6 | langchain-anthropic 7 | langchain_deepseek -------------------------------------------------------------------------------- /crewai/helloworld/knowledge/user_preference.txt: -------------------------------------------------------------------------------- 1 | User name is John Doe. 2 | User is an AI Engineer. 3 | User is interested in AI Agents. 4 | User is based in San Francisco, California. 
5 | -------------------------------------------------------------------------------- /langchain/requirements.txt: -------------------------------------------------------------------------------- 1 | langchain>=0.1.0 2 | langchain-core>=0.1.0 3 | langchain-community>=0.0.10 4 | langchain-openai>=0.0.2 5 | duckduckgo-search>=4.1.1 6 | python-dotenv>=1.0.0 7 | pydantic>=2.0.0 -------------------------------------------------------------------------------- /crewai/helloworld/.env.example: -------------------------------------------------------------------------------- 1 | # LLM Model configurations 2 | MODEL=gpt-4o-mini 3 | 4 | # OpenAI API configurations 5 | OPENAI_API_KEY=your-api-key-here 6 | OPENAI_API_BASE=https://api.openai.com/v1 # Optional: custom API endpoint -------------------------------------------------------------------------------- /langchain/.env.example: -------------------------------------------------------------------------------- 1 | # LLM Model configurations 2 | DEFAULT_MODEL=gpt-3.5-turbo 3 | DEFAULT_TEMPERATURE=0.7 4 | 5 | # OpenAI API configurations 6 | OPENAI_API_KEY=your-api-key-here 7 | OPENAI_API_BASE=https://api.openai.com/v1 # Optional: for API proxies -------------------------------------------------------------------------------- /langgraph/.env.example: -------------------------------------------------------------------------------- 1 | # LLM Model configurations 2 | DEFAULT_MODEL=gpt-4o-mini 3 | DEFAULT_TEMPERATURE=0.7 4 | 5 | # OpenAI API configurations 6 | OPENAI_API_KEY=your-api-key-here 7 | OPENAI_API_BASE=https://api.openai.com/v1 # Optional: custom API endpoint -------------------------------------------------------------------------------- /langgraph_swarm/.env.example: -------------------------------------------------------------------------------- 1 | # LLM Model configurations 2 | DEFAULT_MODEL=gpt-4o-mini 3 | DEFAULT_TEMPERATURE=0.7 4 | 5 | # OpenAI API configurations 6 | OPENAI_API_KEY=your-api-key-here 7 | OPENAI_API_BASE=https://api.openai.com/v1 # Optional: custom API endpoint -------------------------------------------------------------------------------- /model_context_protocol/weather/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "weather" 3 | version = "0.1.0" 4 | description = "Add your description here" 5 | readme = "README.md" 6 | requires-python = ">=3.10" 7 | dependencies = [ 8 | "httpx>=0.28.1", 9 | "mcp[cli]>=1.2.1", 10 | ] 11 | -------------------------------------------------------------------------------- /agno/helloworld.py: -------------------------------------------------------------------------------- 1 | from agno.agent import Agent 2 | from agno.models.openai import OpenAIChat 3 | 4 | agent = Agent( 5 | model=OpenAIChat(id="gpt-4o"), 6 | description="Share 15 minute healthy recipes.", 7 | markdown=True, 8 | ) 9 | agent.print_response("Share a breakfast recipe.", stream=True) -------------------------------------------------------------------------------- /langgraph-platform/.env.example: -------------------------------------------------------------------------------- 1 | # To separate your traces from other application 2 | LANGSMITH_API_KEY=... 3 | LANGSMITH_PROJECT=... 4 | LANGSMITH_TRACING=... 5 | 6 | ## LLM choice: 7 | OPENAI_API_KEY=... 8 | ANTHROPIC_API_KEY=... 9 | 10 | TAVILY_API_KEY=... 11 | 12 | DEPLOYMENT_URL=... 13 | GRAPH_ID=... 
-------------------------------------------------------------------------------- /openai-agents/helloworld.py: -------------------------------------------------------------------------------- 1 | from agents import Agent, Runner 2 | from dotenv import load_dotenv 3 | 4 | load_dotenv() 5 | 6 | agent = Agent(name="Assistant", instructions="You are a helpful assistant") 7 | 8 | result = Runner.run_sync(agent, "Write a haiku about recursion in programming.") 9 | print(result.final_output) -------------------------------------------------------------------------------- /langchain-mcp-adapters/clients/llm.py: -------------------------------------------------------------------------------- 1 | from dotenv import load_dotenv 2 | load_dotenv() 3 | 4 | from langchain_openai import ChatOpenAI 5 | openai_model = ChatOpenAI(model="gpt-4o") 6 | 7 | from langchain_deepseek import ChatDeepSeek 8 | deepseek_model = ChatDeepSeek(model="deepseek-chat", temperature=0.0, streaming=True) 9 | -------------------------------------------------------------------------------- /langchain-mcp-adapters/.env.example: -------------------------------------------------------------------------------- 1 | # OpenAI API configurations 2 | OPENAI_API_KEY=your_openai_api_key 3 | OPENAI_API_BASE=your_openai_api_base 4 | 5 | DEEPSEEK_API_KEY=your_deepseek_api_key 6 | 7 | NO_PROXY=localhost,127.0.0.1 8 | 9 | # MCP server configurations 10 | MCP_SERVER_PORT=8000 11 | MCP_SERVER_LOG_LEVEL=INFO -------------------------------------------------------------------------------- /langgraph-platform/requirements.txt: -------------------------------------------------------------------------------- 1 | dotenv 2 | ipykernel 3 | jupyter 4 | langchain 5 | langchain-core 6 | langchain-anthropic 7 | langchain-community 8 | langchain-openai 9 | langchain-tavily 10 | langgraph 11 | langgraph-cli[inmem] 12 | langgraph-sdk 13 | langgraph-supervisor 14 | pydantic 15 | python-dotenv 16 | typing-extensions 17 | yfinance -------------------------------------------------------------------------------- /langgraphjs/.env.example: -------------------------------------------------------------------------------- 1 | # Model configurations 2 | MODEL_NAME=claude-3-5-sonnet-20241022 3 | TEMPERATURE=0.7 4 | 5 | # Anthropic API configurations 6 | ANTHROPIC_API_KEY=your-api-key-here 7 | ANTHROPIC_API_BASE=your-api-base-here 8 | 9 | # Optional: LangSmith for observability 10 | LANGSMITH_API_KEY=your-langsmith-key-here 11 | LANGSMITH_TRACING=true -------------------------------------------------------------------------------- /langgraph-platform/langgraph.json: -------------------------------------------------------------------------------- 1 | { 2 | "dependencies": ["."], 3 | "graphs": { 4 | "react_agent_no_config": "./agents/react_agent/graph_without_config.py:make_graph", 5 | "react_agent": "./agents/react_agent/graph.py:make_graph", 6 | "supervisor_prebuilt": "./agents/supervisor/supervisor_prebuilt.py:make_supervisor_graph" 7 | }, 8 | "env": ".env" 9 | } -------------------------------------------------------------------------------- /A2A/demo/ui/utils/agent_card.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | from common.types import AgentCard 4 | 5 | 6 | def get_agent_card(remote_agent_address: str) -> AgentCard: 7 | """Get the agent card.""" 8 | agent_card = requests.get( 9 | f'http://{remote_agent_address}/.well-known/agent.json' 10 | ) 11 | return AgentCard(**agent_card.json()) 12 | 
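# Usage sketch (hypothetical address; assumes an A2A server is running and,
# as above, serving its card at the spec's well-known path):
#   card = get_agent_card('localhost:10002')
#   print(card.name, card.url)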
-------------------------------------------------------------------------------- /langchain-mcp-adapters/clients/mcpServers.json: -------------------------------------------------------------------------------- 1 | { 2 | "mcpServers": { 3 | "math": { 4 | "command": "python", 5 | "args": ["$SERVERS_DIR/math_server.py"], 6 | "transport": "stdio" 7 | }, 8 | "weather": { 9 | "url": "http://localhost:$MCP_SERVER_PORT/mcp", 10 | "transport": "streamable_http" 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /A2A/agents/marvin/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "a2a-samples-marvin" 3 | version = "0.1.0" 4 | description = "Currency conversion using A2A and Marvin" 5 | readme = "README.md" 6 | requires-python = ">=3.12" 7 | dependencies = ["marvin>=3.0.0", "a2a-samples"] 8 | 9 | [tool.uv.sources] 10 | a2a-samples = { workspace = true } 11 | 12 | [tool.ruff.lint] 13 | extend-select = ["I", "UP"] 14 | -------------------------------------------------------------------------------- /browser-use/helloworld.py: -------------------------------------------------------------------------------- 1 | from langchain_openai import ChatOpenAI 2 | from browser_use import Agent 3 | import asyncio 4 | from dotenv import load_dotenv 5 | load_dotenv() 6 | 7 | async def main(): 8 | agent = Agent( 9 | task="Compare the price of gpt-4o and DeepSeek-V3", 10 | llm=ChatOpenAI(model="gpt-4o"), 11 | ) 12 | await agent.run() 13 | 14 | asyncio.run(main()) -------------------------------------------------------------------------------- /A2A/agents/ag2/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "a2a-samples-mcp" 3 | version = "0.1.0" 4 | description = "MCP agent using A2A and AG2" 5 | readme = "README.md" 6 | requires-python = ">=3.12" 7 | dependencies = [ 8 | "ag2>=0.8.6", 9 | "ag2[mcp, openai]>=0.8.6", 10 | "google-genai>=1.10.0", 11 | "a2a-samples", 12 | ] 13 | 14 | [tool.uv.sources] 15 | a2a-samples = { workspace = true } -------------------------------------------------------------------------------- /A2A/agents/crewai/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "a2a-samples-image-gen" 3 | version = "0.1.0" 4 | description = "Generate or modify images using A2A and CrewAI" 5 | readme = "README.md" 6 | requires-python = ">=3.12" 7 | dependencies = [ 8 | "crewai[tools]>=0.95.0", 9 | "google-genai>=1.9.0", 10 | "a2a-samples", 11 | ] 12 | 13 | [tool.uv.sources] 14 | a2a-samples = { workspace = true } -------------------------------------------------------------------------------- /agno/with_tools.py: -------------------------------------------------------------------------------- 1 | from agno.agent import Agent 2 | from agno.models.openai import OpenAIChat 3 | from agno.tools.yfinance import YFinanceTools 4 | 5 | agent = Agent( 6 | model=OpenAIChat(id="gpt-4o"), 7 | tools=[YFinanceTools(stock_price=True)], 8 | instructions="Use tables to display data. 
Don't include any other text.", 9 | markdown=True, 10 | ) 11 | agent.print_response("What is the stock price of Apple?", stream=True) -------------------------------------------------------------------------------- /langchain-mcp-adapters/servers/math_server.py: -------------------------------------------------------------------------------- 1 | # math_server.py 2 | from mcp.server.fastmcp import FastMCP 3 | 4 | mcp = FastMCP("Math") 5 | 6 | @mcp.tool() 7 | def add(a: int, b: int) -> int: 8 | """Add two numbers""" 9 | return a + b 10 | 11 | @mcp.tool() 12 | def multiply(a: int, b: int) -> int: 13 | """Multiply two numbers""" 14 | return a * b 15 | 16 | if __name__ == "__main__": 17 | mcp.run(transport="stdio") -------------------------------------------------------------------------------- /smolagents/code_execution.py: -------------------------------------------------------------------------------- 1 | """ 2 | This example demonstrates how to use the CodeAgent to execute code. 3 | """ 4 | from smolagents import CodeAgent, VisitWebpageTool, HfApiModel 5 | 6 | agent = CodeAgent( 7 | tools = [VisitWebpageTool()], 8 | model=HfApiModel(), 9 | additional_authorized_imports=["requests", "markdownify"], 10 | use_e2b_executor=True 11 | ) 12 | 13 | agent.run("What was Abraham Lincoln's preferred pet?") -------------------------------------------------------------------------------- /A2A/agents/llama_index_file_chat/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "a2a-samples-file-chat" 3 | version = "0.1.0" 4 | description = "Add your description here" 5 | readme = "README.md" 6 | requires-python = ">=3.12" 7 | dependencies = [ 8 | "a2a-samples", 9 | "llama-cloud-services>=0.6.12", 10 | "llama-index-core>=0.12.30", 11 | "llama-index-llms-google-genai>=0.1.7", 12 | ] 13 | 14 | [tool.uv.sources] 15 | a2a-samples = { workspace = true } 16 | -------------------------------------------------------------------------------- /A2A/demo/ui/state/agent_state.py: -------------------------------------------------------------------------------- 1 | import mesop as me 2 | 3 | 4 | @me.stateclass 5 | class AgentState: 6 | """Agents List State""" 7 | 8 | agent_dialog_open: bool = False 9 | agent_address: str = '' 10 | agent_name: str = '' 11 | agent_description: str = '' 12 | input_modes: list[str] 13 | output_modes: list[str] 14 | stream_supported: bool = False 15 | push_notifications_supported: bool = False 16 | error: str = '' 17 | agent_framework_type: str = '' 18 | -------------------------------------------------------------------------------- /langchain-mcp-adapters/servers/weather_server.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | load_dotenv() 4 | 5 | from mcp.server.fastmcp import FastMCP 6 | 7 | mcp = FastMCP("Weather", port=os.getenv("MCP_SERVER_PORT"), log_level=os.getenv("MCP_SERVER_LOG_LEVEL")) 8 | 9 | @mcp.tool() 10 | async def get_weather(location: str) -> str: 11 | """Get weather for location.""" 12 | return "It's always sunny in New York" 13 | 14 | if __name__ == "__main__": 15 | mcp.run(transport="streamable-http") -------------------------------------------------------------------------------- /A2A/agents/mindsdb/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "a2a-data-agent" 3 | version = "0.1.0" 4 | description = "Sample enterprise data agent." 
5 | readme = "README.md" 6 | requires-python = ">=3.12" 7 | dependencies = [ 8 | "a2a-samples", 9 | "aiohttp", 10 | "python-dotenv" 11 | ] 12 | 13 | [tool.hatch.build.targets.wheel] 14 | packages = ["."] 15 | 16 | [tool.uv.sources] 17 | a2a-samples = { workspace = true } 18 | 19 | [build-system] 20 | requires = ["hatchling"] 21 | build-backend = "hatchling.build" 22 | -------------------------------------------------------------------------------- /A2A/demo/ui/pages/task_list.py: -------------------------------------------------------------------------------- 1 | from components.header import header 2 | from components.page_scaffold import page_frame, page_scaffold 3 | from components.task_card import task_card 4 | from state.state import AppState 5 | 6 | 7 | def task_list_page(app_state: AppState): 8 | """Task List Page""" 9 | with page_scaffold(): # pylint: disable=not-context-manager 10 | with page_frame(): 11 | with header('Task List', 'task'): 12 | pass 13 | task_card(app_state.task_list) 14 | -------------------------------------------------------------------------------- /A2A/agents/semantickernel/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "a2a-semantic-kernel" 3 | version = "0.1.0" 4 | description = "Leverage Semantic Kernel Agents using the A2A protocol." 5 | readme = "README.md" 6 | requires-python = ">=3.10" 7 | dependencies = [ 8 | "semantic-kernel>=1.28.0", 9 | "a2a-samples", 10 | ] 11 | 12 | [tool.hatch.build.targets.wheel] 13 | packages = ["."] 14 | 15 | [tool.uv.sources] 16 | a2a-samples = { workspace = true } 17 | 18 | [build-system] 19 | requires = ["hatchling"] 20 | build-backend = "hatchling.build" 21 | -------------------------------------------------------------------------------- /smolagents/helloworld.py: -------------------------------------------------------------------------------- 1 | """ 2 | A minimal example of using smolagents to create a simple agent. 
3 | This example demonstrates the basic concepts of smolagents including: 4 | - Defining a tool 5 | - Choosing a model 6 | - Creating a code agent 7 | """ 8 | from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel 9 | from env import * 10 | 11 | model = HfApiModel() 12 | agent = CodeAgent(tools=[DuckDuckGoSearchTool()], model=model) 13 | 14 | agent.run("How many seconds would it take for a leopard at full speed to run through Pont des Arts?") -------------------------------------------------------------------------------- /A2A/demo/ui/pages/conversation.py: -------------------------------------------------------------------------------- 1 | import mesop as me 2 | 3 | from components.conversation import conversation 4 | from components.header import header 5 | from components.page_scaffold import page_frame, page_scaffold 6 | from state.state import AppState 7 | 8 | 9 | def conversation_page(app_state: AppState): 10 | """Conversation Page""" 11 | state = me.state(AppState) 12 | with page_scaffold(): # pylint: disable=not-context-manager 13 | with page_frame(): 14 | with header('Conversation', 'chat'): 15 | pass 16 | conversation() 17 | -------------------------------------------------------------------------------- /swarm/helloworld.py: -------------------------------------------------------------------------------- 1 | from swarm import Swarm, Agent 2 | from env import * 3 | 4 | client = Swarm() 5 | 6 | def transfer_to_agent_b(): 7 | return agent_b 8 | 9 | 10 | agent_a = Agent( 11 | name="Agent A", 12 | instructions="You are a helpful agent.", 13 | functions=[transfer_to_agent_b], 14 | ) 15 | 16 | agent_b = Agent( 17 | name="Agent B", 18 | instructions="Only speak in Haikus.", 19 | ) 20 | 21 | response = client.run( 22 | agent=agent_a, 23 | messages=[{"role": "user", "content": "I want to talk to agent B."}], 24 | ) 25 | 26 | print(response.messages[-1]["content"]) -------------------------------------------------------------------------------- /A2A/demo/ui/pages/event_list.py: -------------------------------------------------------------------------------- 1 | import mesop as me 2 | 3 | from components.event_viewer import event_list 4 | from components.header import header 5 | from components.page_scaffold import page_frame, page_scaffold 6 | from state.agent_state import AgentState 7 | from state.state import AppState 8 | 9 | 10 | def event_list_page(app_state: AppState): 11 | """Event List Page""" 12 | state = me.state(AgentState) 13 | with page_scaffold(): # pylint: disable=not-context-manager 14 | with page_frame(): 15 | with header('Event List', 'list'): 16 | pass 17 | event_list() 18 | -------------------------------------------------------------------------------- /A2A/agents/google_adk/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "a2a-sample-agent-adk" 3 | version = "0.1.0" 4 | description = "Sample Google ADK-based Expense Reimbursement agent hosted as an A2A server." 
5 | readme = "README.md" 6 | requires-python = ">=3.12" 7 | dependencies = [ 8 | "a2a-samples", 9 | "click>=8.1.8", 10 | "google-adk>=0.0.3", 11 | "google-genai>=1.9.0", 12 | "python-dotenv>=1.1.0", 13 | ] 14 | 15 | [tool.hatch.build.targets.wheel] 16 | packages = ["."] 17 | 18 | [tool.uv.sources] 19 | a2a-samples = { workspace = true } 20 | 21 | [build-system] 22 | requires = ["hatchling"] 23 | build-backend = "hatchling.build" 24 | -------------------------------------------------------------------------------- /crewai/helloworld/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "helloworld" 3 | version = "0.1.0" 4 | description = "helloworld using crewAI" 5 | authors = [{ name = "Your Name", email = "you@example.com" }] 6 | requires-python = ">=3.10,<3.13" 7 | dependencies = [ 8 | "crewai[tools]>=0.100.1,<1.0.0" 9 | ] 10 | 11 | [project.scripts] 12 | helloworld = "helloworld.main:run" 13 | run_crew = "helloworld.main:run" 14 | train = "helloworld.main:train" 15 | replay = "helloworld.main:replay" 16 | test = "helloworld.main:test" 17 | 18 | [build-system] 19 | requires = ["hatchling"] 20 | build-backend = "hatchling.build" 21 | 22 | [tool.crewai] 23 | type = "crew" 24 | -------------------------------------------------------------------------------- /A2A/agents/langgraph/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "a2a-sample-agent-langgraph" 3 | version = "0.1.0" 4 | description = "Sample LangGraph currency agent with A2A Protocol" 5 | readme = "README.md" 6 | requires-python = ">=3.12" 7 | dependencies = [ 8 | "a2a-samples", 9 | "click>=8.1.8", 10 | "httpx>=0.28.1", 11 | "langchain-google-genai>=2.0.10", 12 | "langgraph>=0.3.18", 13 | "pydantic>=2.10.6", 14 | "python-dotenv>=1.1.0", 15 | ] 16 | 17 | [tool.hatch.build.targets.wheel] 18 | packages = ["."] 19 | 20 | [tool.uv.sources] 21 | a2a-samples = { workspace = true } 22 | 23 | [build-system] 24 | requires = ["hatchling"] 25 | build-backend = "hatchling.build" 26 | -------------------------------------------------------------------------------- /A2A/demo/ui/components/header.py: -------------------------------------------------------------------------------- 1 | import mesop as me 2 | 3 | from .poller import polling_buttons 4 | 5 | 6 | @me.content_component 7 | def header(title: str, icon: str): 8 | """Header component""" 9 | with me.box( 10 | style=me.Style( 11 | display='flex', 12 | justify_content='space-between', 13 | ) 14 | ): 15 | with me.box( 16 | style=me.Style(display='flex', flex_direction='row', gap=5) 17 | ): 18 | me.icon(icon=icon) 19 | me.text( 20 | title, 21 | type='headline-5', 22 | style=me.Style(font_family='Google Sans'), 23 | ) 24 | me.slot() 25 | polling_buttons() 26 | -------------------------------------------------------------------------------- /autogen/helloworld.py: -------------------------------------------------------------------------------- 1 | """ 2 | This is a simple example of using autogen to create an assistant agent. 
3 | """ 4 | import asyncio 5 | from autogen_agentchat.agents import AssistantAgent 6 | from autogen_ext.models.openai import OpenAIChatCompletionClient 7 | from env import * 8 | 9 | async def main() -> None: 10 | agent = AssistantAgent("assistant", OpenAIChatCompletionClient(model="gpt-4o", base_url=os.environ["OPENAI_API_BASE"])) 11 | result = await agent.run(task="Introduce yourself!") 12 | for msg in result.messages: 13 | tokens_num = 0 if msg.models_usage is None else msg.models_usage.prompt_tokens + msg.models_usage.completion_tokens 14 | print(f"{msg.source}: {msg.content} ({tokens_num} tokens)") 15 | 16 | asyncio.run(main()) -------------------------------------------------------------------------------- /langgraphjs/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "langgraph-quickstart", 3 | "version": "1.0.0", 4 | "description": "A simple example of using LangGraph.js", 5 | "main": "helloworld.ts", 6 | "type": "module", 7 | "scripts": { 8 | "start": "tsx helloworld_high_level.ts", 9 | "start-low": "tsx helloworld_low_level.ts", 10 | "start-ollama": "tsx helloworld_ollama.ts" 11 | }, 12 | "dependencies": { 13 | "@langchain/anthropic": "^0.3.13", 14 | "@langchain/community": "^0.3.29", 15 | "@langchain/core": "^0.3.39", 16 | "@langchain/langgraph": "^0.2.45", 17 | "@langchain/ollama": "^0.2.0", 18 | "dotenv": "^16.4.7", 19 | "zod": "^3.24.1" 20 | }, 21 | "devDependencies": { 22 | "tsx": "^4.7.1", 23 | "typescript": "^5.4.2" 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /crewai/helloworld/src/helloworld/tools/custom_tool.py: -------------------------------------------------------------------------------- 1 | from crewai.tools import BaseTool 2 | from typing import Type 3 | from pydantic import BaseModel, Field 4 | 5 | 6 | class MyCustomToolInput(BaseModel): 7 | """Input schema for MyCustomTool.""" 8 | argument: str = Field(..., description="Description of the argument.") 9 | 10 | class MyCustomTool(BaseTool): 11 | name: str = "Name of my tool" 12 | description: str = ( 13 | "Clear description for what this tool is useful for, your agent will need this information to use it." 14 | ) 15 | args_schema: Type[BaseModel] = MyCustomToolInput 16 | 17 | def _run(self, argument: str) -> str: 18 | # Implementation goes here 19 | return "this is an example of a tool output, ignore it and move along." 20 | -------------------------------------------------------------------------------- /crewai/helloworld/src/helloworld/config/tasks.yaml: -------------------------------------------------------------------------------- 1 | research_task: 2 | description: > 3 | Conduct thorough research about {topic} 4 | Make sure you find any interesting and relevant information given 5 | the current year is {current_year}. 6 | expected_output: > 7 | A list with 10 bullet points of the most relevant information about {topic} 8 | agent: researcher 9 | 10 | reporting_task: 11 | description: > 12 | Review the context you got and expand each topic into a full section for a report. 13 | Make sure the report is detailed and contains any and all relevant information. 14 | expected_output: > 15 | A fully fledged report with the main topics, each with a full section of information. 
16 | Formatted as markdown without '```' 17 | agent: reporting_analyst 18 | -------------------------------------------------------------------------------- /crewai/helloworld/src/helloworld/config/agents.yaml: -------------------------------------------------------------------------------- 1 | researcher: 2 | role: > 3 | {topic} Senior Data Researcher 4 | goal: > 5 | Uncover cutting-edge developments in {topic} 6 | backstory: > 7 | You're a seasoned researcher with a knack for uncovering the latest 8 | developments in {topic}. Known for your ability to find the most relevant 9 | information and present it in a clear and concise manner. 10 | 11 | reporting_analyst: 12 | role: > 13 | {topic} Reporting Analyst 14 | goal: > 15 | Create detailed reports based on {topic} data analysis and research findings 16 | backstory: > 17 | You're a meticulous analyst with a keen eye for detail. You're known for 18 | your ability to turn complex data into clear and concise reports, making 19 | it easy for others to understand and act on the information you provide. -------------------------------------------------------------------------------- /A2A/demo/ui/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "a2a-python-example-ui" 3 | version = "0.1.0" 4 | description = "Agent2Agent example UI" 5 | readme = "README.md" 6 | requires-python = ">=3.13" 7 | dependencies = [ 8 | "asyncio>=3.4.3", 9 | "httpx>=0.28.1", 10 | "httpx-sse>=0.4.0", 11 | "pydantic>=2.10.6", 12 | "fastapi>=0.115.0", 13 | "uvicorn>=0.34.0", 14 | "mesop>=1.0.0", 15 | "a2a-samples", 16 | "pandas>=2.2.0", 17 | "google-genai>=1.9.0", 18 | "google-adk>=0.0.3", 19 | ] 20 | 21 | [tool.hatch.build.targets.wheel] 22 | packages = ["a2a_ui"] 23 | 24 | [tool.uv.sources] 25 | a2a_ui = { workspace = true } 26 | a2a_samples = { path = "../../samples/python", editable = true } 27 | 28 | [build-system] 29 | requires = ["hatchling"] 30 | build-backend = "hatchling.build" 31 | 32 | [dependency-groups] 33 | dev = [ 34 | "ruff>=0.11.2", 35 | ] 36 | -------------------------------------------------------------------------------- /langgraph-platform/pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=61"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "assistants-demo" 7 | version = "0.1.0" 8 | description = "LangGraph assistants demo project" 9 | readme = "README.md" 10 | requires-python = ">=3.8" 11 | dependencies = [ 12 | "dotenv", 13 | "ipykernel", 14 | "jupyter", 15 | "langchain", 16 | "langchain-core", 17 | "langchain-anthropic", 18 | "langchain-community", 19 | "langchain-openai", 20 | "langchain-tavily", 21 | "langgraph", 22 | "langgraph-cli[inmem]", 23 | "langgraph-sdk", 24 | "langgraph-supervisor", 25 | "pydantic", 26 | "python-dotenv", 27 | "typing-extensions", 28 | "yfinance", 29 | ] 30 | 31 | [tool.setuptools.packages.find] 32 | where = ["."] 33 | include = ["agents*"] 34 | 35 | [tool.setuptools.package-data] 36 | "*" = ["**/*"] -------------------------------------------------------------------------------- /A2A/demo/ui/styles/styles.py: -------------------------------------------------------------------------------- 1 | import mesop as me 2 | 3 | 4 | SIDENAV_MIN_WIDTH = 68 5 | SIDENAV_MAX_WIDTH = 200 6 | 7 | DEFAULT_MENU_STYLE = me.Style(align_content='left') 8 | 9 | _FANCY_TEXT_GRADIENT = me.Style( 10 | color='transparent', 11 | background=( 12 | 
'linear-gradient(72.83deg,#4285f4 11.63%,#9b72cb 40.43%,#d96570 68.07%)' 13 | ' text' 14 | ), 15 | ) 16 | 17 | MAIN_COLUMN_STYLE = me.Style( 18 | display='flex', 19 | flex_direction='column', 20 | height='100%', 21 | ) 22 | 23 | PAGE_BACKGROUND_STYLE = me.Style( 24 | background=me.theme_var('background'), 25 | height='100%', 26 | overflow_y='scroll', 27 | margin=me.Margin(bottom=20), 28 | ) 29 | 30 | PAGE_BACKGROUND_PADDING_STYLE = me.Style( 31 | background=me.theme_var('background'), 32 | padding=me.Padding(top=24, left=24, right=24, bottom=24), 33 | display='flex', 34 | flex_direction='column', 35 | ) 36 | -------------------------------------------------------------------------------- /langgraph-platform/agents/react_agent/graph_without_config.py: -------------------------------------------------------------------------------- 1 | from agents.react_agent.tools import basic_research_tool, get_todays_date 2 | from langgraph.prebuilt import create_react_agent 3 | from agents.utils import load_chat_model 4 | 5 | async def make_graph(): 6 | 7 | # Initialize our model and tools 8 | llm = load_chat_model("openai/gpt-4.1-mini") 9 | tools = [basic_research_tool, get_todays_date] 10 | prompt = """ 11 | You are a helpful AI assistant trained in creating engaging social media content! 12 | You have access to two tools: basic_research_tool and get_todays_date. Please call get_todays_date first, then 13 | perform any research needed before generating a social media post. 14 | """ 15 | 16 | # Build the prebuilt ReAct agent as an executable graph 17 | graph = create_react_agent( 18 | model=llm, 19 | tools=tools, 20 | prompt=prompt 21 | ) 22 | 23 | return graph -------------------------------------------------------------------------------- /langchain-sandbox/react.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from langchain_sandbox import PyodideSandboxTool 4 | from langgraph.prebuilt import create_react_agent 5 | from langchain_openai import ChatOpenAI 6 | 7 | from dotenv import load_dotenv 8 | 9 | load_dotenv() 10 | # Define the sandbox tool 11 | sandbox_tool = PyodideSandboxTool( 12 | # Allow Pyodide to install python packages that 13 | # might be required. 14 | allow_net=True, 15 | ) 16 | 17 | model = ChatOpenAI(model="gpt-4o-mini") 18 | 19 | # Create an agent with the sandbox tool 20 | agent = create_react_agent( 21 | model, [sandbox_tool] 22 | ) 23 | 24 | query = "Calculate the area of a circle with a radius of 5." 
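# Note (assumption based on the langchain-sandbox docs): PyodideSandboxTool
# executes the generated code in a Pyodide (WebAssembly) interpreter spawned
# through a local Deno process rather than in this Python process, which is
# why this directory carries a package.json/.npmrc for the JSR package;
# Deno must be installed for the sandbox to start.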
25 | 26 | 27 | async def run_agent(query: str): 28 | # Stream agent outputs 29 | async for chunk in agent.astream({"messages": query}): 30 | print(chunk) 31 | print("\n") 32 | 33 | 34 | if __name__ == "__main__": 35 | # Run the agent 36 | asyncio.run(run_agent(query)) -------------------------------------------------------------------------------- /langgraph-platform/agents/utils.py: -------------------------------------------------------------------------------- 1 | """Utility & helper functions.""" 2 | 3 | from langchain.chat_models import init_chat_model 4 | from langchain_core.language_models import BaseChatModel 5 | from langchain_core.messages import BaseMessage 6 | 7 | 8 | def get_message_text(msg: BaseMessage) -> str: 9 | """Get the text content of a message.""" 10 | content = msg.content 11 | if isinstance(content, str): 12 | return content 13 | elif isinstance(content, dict): 14 | return content.get("text", "") 15 | else: 16 | txts = [c if isinstance(c, str) else (c.get("text") or "") for c in content] 17 | return "".join(txts).strip() 18 | 19 | 20 | def load_chat_model(fully_specified_name: str) -> BaseChatModel: 21 | """Load a chat model from a fully specified name. 22 | 23 | Args: 24 | fully_specified_name (str): String in the format 'provider/model'. 25 | """ 26 | provider, model = fully_specified_name.split("/", maxsplit=1) 27 | return init_chat_model(model, model_provider=provider) 28 | -------------------------------------------------------------------------------- /A2A/demo/ui/components/async_poller.js: -------------------------------------------------------------------------------- 1 | import { 2 | LitElement, 3 | html, 4 | } from 'https://cdn.jsdelivr.net/gh/lit/dist@3/core/lit-core.min.js'; 5 | 6 | class AsyncPoller extends LitElement { 7 | static properties = { 8 | triggerEvent: {type: String}, 9 | action: {type: Object}, 10 | polling_interval: {type: Number}, 11 | }; 12 | 13 | render() { 14 | return html`
`; 15 | } 16 | 17 | firstUpdated() { 18 | if (this.polling_interval <= 0) { 19 | return; 20 | } 21 | if (this.action) { 22 | setTimeout(() => { 23 | this.runTimeout(this.action) 24 | }, this.polling_interval * 1000); 25 | } 26 | } 27 | 28 | runTimeout(action) { 29 | this.dispatchEvent( 30 | new MesopEvent(this.triggerEvent, { 31 | action: action, 32 | }), 33 | ); 34 | if (this.polling_interval > 0) { 35 | setTimeout(() => { 36 | this.runTimeout(action); 37 | }, this.polling_interval * 1000); 38 | } 39 | } 40 | } 41 | 42 | customElements.define('async-action-component', AsyncPoller); 43 | -------------------------------------------------------------------------------- LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 abab.ai 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /langchain-sandbox/helloworld_stateful.py: -------------------------------------------------------------------------------- 1 | from langchain_sandbox import PyodideSandbox 2 | import asyncio 3 | 4 | async def main(): 5 | sandbox = PyodideSandbox( 6 | # Create stateful sandbox 7 | stateful=True, 8 | # Allow Pyodide to install python packages that 9 | # might be required. 10 | allow_net=True, 11 | ) 12 | code = """\ 13 | import numpy as np 14 | x = np.array([1, 2, 3]) 15 | print(x) 16 | """ 17 | 18 | result = await sandbox.execute(code) 19 | 20 | # Pass previous result 21 | print(await sandbox.execute("float(x[0])", session_bytes=result.session_bytes, session_metadata=result.session_metadata)) 22 | 23 | # CodeExecutionResult( 24 | # result=1, 25 | # stdout=None, 26 | # stderr=None, 27 | # status='success', 28 | # execution_time=2.7027177810668945, 29 | # session_metadata={'created': '2025-05-15T21:27:57.120Z', 'lastModified': '2025-05-15T21:28:00.061Z', 'packages': ['numpy', 'dill']}, 30 | # session_bytes=b'\x80\x04\x95d\x01\x00...' 
31 | # ) 32 | 33 | if __name__ == "__main__": 34 | asyncio.run(main()) -------------------------------------------------------------------------------- /A2A/demo/ui/service/server/application_manager.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | 3 | from common.types import AgentCard, Message, Task 4 | from service.types import Conversation, Event 5 | 6 | 7 | class ApplicationManager(ABC): 8 | @abstractmethod 9 | def create_conversation(self) -> Conversation: 10 | pass 11 | 12 | @abstractmethod 13 | def sanitize_message(self, message: Message) -> Message: 14 | pass 15 | 16 | @abstractmethod 17 | async def process_message(self, message: Message): 18 | pass 19 | 20 | @abstractmethod 21 | def register_agent(self, url: str): 22 | pass 23 | 24 | @abstractmethod 25 | def get_pending_messages(self) -> list[str]: 26 | pass 27 | 28 | @property 29 | @abstractmethod 30 | def conversations(self) -> list[Conversation]: 31 | pass 32 | 33 | @property 34 | @abstractmethod 35 | def tasks(self) -> list[Task]: 36 | pass 37 | 38 | @property 39 | @abstractmethod 40 | def agents(self) -> list[AgentCard]: 41 | pass 42 | 43 | @property 44 | @abstractmethod 45 | def events(self) -> list[Event]: 46 | pass 47 | -------------------------------------------------------------------------------- /langmem/helloworld.py: -------------------------------------------------------------------------------- 1 | # Import core components 2 | from langgraph.prebuilt import create_react_agent 3 | from langgraph.store.memory import InMemoryStore 4 | from langmem import create_manage_memory_tool, create_search_memory_tool 5 | from dotenv import load_dotenv 6 | 7 | load_dotenv() 8 | 9 | 10 | # Set up storage 11 | store = InMemoryStore( 12 | index={ 13 | "dims": 1536, 14 | "embed": "openai:text-embedding-3-small", 15 | } 16 | ) 17 | 18 | # Create an agent with memory capabilities 19 | agent = create_react_agent( 20 | "openai:gpt-4o", 21 | tools=[ 22 | # Memory tools use LangGraph's BaseStore for persistence (4) 23 | create_manage_memory_tool(namespace=("memories",)), 24 | create_search_memory_tool(namespace=("memories",)), 25 | ], 26 | store=store, 27 | ) 28 | # Store a new memory 29 | agent.invoke( 30 | {"messages": [{"role": "user", "content": "Remember that I prefer dark mode."}]} 31 | ) 32 | 33 | # Retrieve the stored memory 34 | response = agent.invoke( 35 | {"messages": [{"role": "user", "content": "What are my lighting preferences?"}]} 36 | ) 37 | print(response["messages"][-1].content) 38 | # Output: "You've told me that you prefer dark mode." 
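# Memories are persisted in the InMemoryStore under the ("memories",) namespace.
# A minimal inspection sketch (assumes LangGraph's BaseStore.search API):
#   for item in store.search(("memories",)):
#       print(item.namespace, item.value)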
--------------------------------------------------------------------------------
/langgraph-platform/agents/supervisor/supervisor_prebuilt.py:
--------------------------------------------------------------------------------
1 | from langchain_core.runnables import RunnableConfig
2 | from agents.supervisor.supervisor_configuration import Configuration
3 | from agents.supervisor.subagents import create_subagents
4 | from agents.utils import load_chat_model
5 | 
6 | from langgraph_supervisor import create_supervisor
7 | 
8 | # Main graph construction
9 | async def make_supervisor_graph(config: RunnableConfig):
10 |     # Extract configuration values directly from the config
11 |     configurable = config.get("configurable", {})
12 |     supervisor_model = configurable.get("supervisor_model", "openai/gpt-4.1")
13 |     supervisor_system_prompt = configurable.get("supervisor_system_prompt", "You are a helpful supervisor agent.")
14 | 
15 |     # Create subagents using the new async function, passing configurable values
16 |     subagents = await create_subagents(configurable)
17 | 
18 |     # Create supervisor graph
19 |     supervisor_graph = create_supervisor(
20 |         agents=subagents,
21 |         model=load_chat_model(supervisor_model),
22 |         prompt=supervisor_system_prompt,
23 |         config_schema=Configuration
24 |     )
25 | 
26 |     compiled_graph = supervisor_graph.compile()
27 |     return compiled_graph
28 | 
--------------------------------------------------------------------------------
/langmem/README.md:
--------------------------------------------------------------------------------
1 | # LangGraph Memory Hello World Example
2 | 
3 | A minimal example demonstrating how to use LangMem to create a simple conversational agent that learns and adapts from its interactions over time.
4 | 
5 | ## Features
6 | 
7 | - Simple conversational agent implementation
8 | - Uses LangGraph's ReAct Agent
9 | - Uses LangMem to manage memory
10 | - Uses LangGraph's InMemoryStore for persistence
11 | 
12 | ## Prerequisites
13 | 
14 | - Python 3.12+
15 | - OpenAI API key or compatible API key for other LLMs
16 | 
17 | ## Installation
18 | 
19 | 1. Install dependencies:
20 | ```bash
21 | pip install -r requirements.txt
22 | ```
23 | 2. Set up environment:
24 | ```bash
25 | cp .env.example .env
26 | ```
27 | 
28 | Edit `.env` with your settings:
29 | ```bash
30 | # OpenAI API configurations
31 | OPENAI_API_KEY=your-api-key-here
32 | OPENAI_API_BASE= # Optional: custom API endpoint (e.g. for API proxies)
33 | ```
34 | 3. Run the script:
35 | ```bash
36 | python helloworld.py
37 | ```
38 | 
39 | 4. Expected output:
40 | ```
41 | Your lighting preference is for dark mode in applications and interfaces.
42 | ```
43 | 
44 | ## References
45 | 
46 | - [LangMem docs](https://docs.langchain.com/langmem)
47 | - [LangMem code](https://github.com/langchain-ai/langmem)
--------------------------------------------------------------------------------
/A2A/agents/google_adk/README.md:
--------------------------------------------------------------------------------
1 | ## ADK Agent
2 | 
3 | This sample uses the Agent Development Kit (ADK) to create a simple "Expense Reimbursement" agent that is hosted as an A2A server.
4 | 
5 | This agent takes text requests from the client and, if any details are missing, returns a webform for the client (or its user) to fill out. After the client fills out the form, the agent will complete the task.
6 | 
7 | ## Prerequisites
8 | 
9 | - Python 3.9 or higher
10 | - [UV](https://docs.astral.sh/uv/)
11 | - Access to an LLM and API Key
12 | 
13 | 
14 | ## Running the Sample
15 | 
16 | 1. Navigate to the samples directory:
17 | ```bash
18 | cd samples/python/agents/google_adk
19 | ```
20 | 2. Create an environment file with your API key:
21 | 
22 | ```bash
23 | echo "GOOGLE_API_KEY=your_api_key_here" > .env
24 | ```
25 | 
26 | 3. Run the agent:
27 | ```bash
28 | uv run .
29 | ```
30 | 
31 | 4. In a separate terminal, run the A2A client:
32 | ```
33 | # Connect to the agent (specify the agent URL with correct port)
34 | cd samples/python/hosts/cli
35 | uv run . --agent http://localhost:10002
36 | 
37 | # If you changed the port when starting the agent, use that port instead
38 | # uv run . --agent http://localhost:YOUR_PORT
39 | ```
40 | 
--------------------------------------------------------------------------------
/langchain-mcp-adapters/clients/single_server_client.py:
--------------------------------------------------------------------------------
1 | import os
2 | import asyncio
3 | from mcp import ClientSession, StdioServerParameters
4 | from mcp.client.stdio import stdio_client
5 | 
6 | from langchain_mcp_adapters.tools import load_mcp_tools
7 | from langgraph.prebuilt import create_react_agent
8 | from util import get_servers_dir
9 | from llm import deepseek_model as model
10 | 
11 | server_params = StdioServerParameters(
12 |     command="python",
13 |     # Make sure to update to the full absolute path to your math_server.py file
14 |     args=[os.path.join(get_servers_dir(), "math_server.py")],
15 | )
16 | 
17 | async def run_agent():
18 |     async with stdio_client(server_params) as (read, write):
19 |         async with ClientSession(read, write) as session:
20 |             # Initialize the connection
21 |             await session.initialize()
22 | 
23 |             # Get tools
24 |             tools = await load_mcp_tools(session)
25 | 
26 |             # Create and run the agent
27 |             agent = create_react_agent(model, tools)
28 |             agent_response = await agent.ainvoke({"messages": "what's (3 + 5) x 12?"})
29 |             return agent_response
30 | 
31 | if __name__ == "__main__":
32 |     result = asyncio.run(run_agent())
33 |     print(result["messages"][-1].content)
34 | 
--------------------------------------------------------------------------------
/langgraph_swarm/helloworld.py:
--------------------------------------------------------------------------------
1 | from langchain_openai import ChatOpenAI
2 | 
3 | from langgraph.checkpoint.memory import MemorySaver
4 | from langgraph.prebuilt import create_react_agent
5 | from langgraph_swarm import create_handoff_tool, create_swarm
6 | from config import *
7 | 
8 | model = ChatOpenAI(model="gpt-4o")
9 | 
10 | def add(a: int, b: int) -> int:
11 |     """Add two numbers"""
12 |     return a + b
13 | 
14 | alice = create_react_agent(
15 |     model,
16 |     [add, create_handoff_tool(agent_name="Bob")],
17 |     prompt="You are Alice, an addition expert.",
18 |     name="Alice",
19 | )
20 | 
21 | bob = create_react_agent(
22 |     model,
23 |     [create_handoff_tool(agent_name="Alice", description="Transfer to Alice, she can help with math")],
24 |     prompt="You are Bob, you speak like a pirate.",
25 |     name="Bob",
26 | )
27 | 
28 | checkpointer = MemorySaver()
29 | workflow = create_swarm(
30 |     [alice, bob],
31 |     default_active_agent="Alice"
32 | )
33 | app = workflow.compile(checkpointer=checkpointer)
34 | 
35 | config = {"configurable": {"thread_id": "1"}}
36 | turn_1 = app.invoke(
37 |     {"messages": [{"role": "user", "content": "i'd like to speak to Bob"}]},
38 |     config,
39 | )
40 | print(turn_1)
41 | turn_2 = app.invoke(
42 |     {"messages": [{"role": "user", "content": "what's 5 + 7?"}]},
43 |     config,
44 | )
45 | print(turn_2)
--------------------------------------------------------------------------------
/langchain/config.py:
--------------------------------------------------------------------------------
1 | """
2 | Configuration file for LLM parameters and environment settings.
3 | This file contains common parameters that can be reused across different examples.
4 | """
5 | 
6 | import os
7 | from typing import Optional
8 | from pathlib import Path
9 | from dotenv import load_dotenv
10 | 
11 | # Load environment variables from .env file
12 | env_path = Path(__file__).parent / '.env'
13 | load_dotenv(env_path)
14 | 
15 | # Model configurations
16 | DEFAULT_MODEL = os.getenv('DEFAULT_MODEL', 'gpt-4o-mini')
17 | DEFAULT_TEMPERATURE = float(os.getenv('DEFAULT_TEMPERATURE', '0.7'))
18 | 
19 | # API configurations
20 | API_KEY = os.getenv('OPENAI_API_KEY')
21 | API_BASE = os.getenv('OPENAI_API_BASE')
22 | 
23 | def get_api_key() -> Optional[str]:
24 |     """Get the API key from environment variable."""
25 |     if not API_KEY:
26 |         raise ValueError("Please set the OPENAI_API_KEY environment variable")
27 |     return API_KEY
28 | 
29 | def get_api_base() -> Optional[str]:
30 |     """Get the API base URL from environment variable."""
31 |     return API_BASE
32 | 
33 | def get_model_config(model: Optional[str] = None, temperature: Optional[float] = None) -> dict:
34 |     """Get model configuration with default values."""
35 |     return {
36 |         "model": model or DEFAULT_MODEL,
37 |         "temperature": temperature if temperature is not None else DEFAULT_TEMPERATURE,
38 |     }
--------------------------------------------------------------------------------
/langgraph/config.py:
--------------------------------------------------------------------------------
1 | """
2 | Configuration file for LLM parameters and environment settings.
3 | This file contains common parameters that can be reused across different examples.
4 | """
5 | 
6 | import os
7 | from typing import Optional
8 | from pathlib import Path
9 | from dotenv import load_dotenv
10 | 
11 | # Load environment variables from .env file
12 | env_path = Path(__file__).parent / '.env'
13 | load_dotenv(env_path)
14 | 
15 | # Model configurations
16 | DEFAULT_MODEL = os.getenv('DEFAULT_MODEL', 'gpt-4o-mini')
17 | DEFAULT_TEMPERATURE = float(os.getenv('DEFAULT_TEMPERATURE', '0.7'))
18 | 
19 | # API configurations
20 | API_KEY = os.getenv('OPENAI_API_KEY')
21 | API_BASE = os.getenv('OPENAI_API_BASE')
22 | 
23 | def get_api_key() -> Optional[str]:
24 |     """Get the API key from environment variable."""
25 |     if not API_KEY:
26 |         raise ValueError("Please set the OPENAI_API_KEY environment variable")
27 |     return API_KEY
28 | 
29 | def get_api_base() -> Optional[str]:
30 |     """Get the API base URL from environment variable."""
31 |     return API_BASE
32 | 
33 | def get_model_config(model: Optional[str] = None, temperature: Optional[float] = None) -> dict:
34 |     """Get model configuration with default values."""
35 |     return {
36 |         "model": model or DEFAULT_MODEL,
37 |         "temperature": temperature if temperature is not None else DEFAULT_TEMPERATURE,
38 |     }
--------------------------------------------------------------------------------
/langchain-mcp-adapters/clients/multi_server_client.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import os
3 | from langchain_mcp_adapters.client import MultiServerMCPClient
4 | from langgraph.prebuilt import create_react_agent
5 | from util import get_servers_dir
6 | from llm import deepseek_model as model
7 | 
8 | async def run_agent():
9 |     client = MultiServerMCPClient(
10 |         {
11 |             "math": {
12 |                 "command": "python",
13 |                 # Make sure to update to the full absolute path to your math_server.py file
14 |                 "args": [os.path.join(get_servers_dir(), "math_server.py")],
15 |                 "transport": "stdio",
16 |             },
17 |             "weather": {
18 |                 # make sure your weather server is running on the port set in MCP_SERVER_PORT
19 |                 "url": f"http://localhost:{os.getenv('MCP_SERVER_PORT')}/mcp",
20 |                 "transport": "streamable_http",
21 |             }
22 |         }
23 |     )
24 |     tools = await client.get_tools()
25 |     agent = create_react_agent(model, tools)
26 |     math_response = await agent.ainvoke({"messages": "what's (3 + 5) x 12?"})
27 |     print(math_response["messages"][-1].content)
28 |     weather_response = await agent.ainvoke({"messages": "what is the weather in nyc?"})
29 |     print(weather_response["messages"][-1].content)
30 | 
31 | if __name__ == "__main__":
32 |     asyncio.run(run_agent())
33 | 
--------------------------------------------------------------------------------
/langgraph_swarm/config.py:
--------------------------------------------------------------------------------
1 | """
2 | Configuration file for LLM parameters and environment settings.
3 | This file contains common parameters that can be reused across different examples.
4 | """
5 | 
6 | import os
7 | from typing import Optional
8 | from pathlib import Path
9 | from dotenv import load_dotenv
10 | 
11 | # Load environment variables from .env file
12 | env_path = Path(__file__).parent / '.env'
13 | load_dotenv(env_path)
14 | 
15 | # Model configurations
16 | DEFAULT_MODEL = os.getenv('DEFAULT_MODEL', 'gpt-4o-mini')
17 | DEFAULT_TEMPERATURE = float(os.getenv('DEFAULT_TEMPERATURE', '0.7'))
18 | 
19 | # API configurations
20 | API_KEY = os.getenv('OPENAI_API_KEY')
21 | API_BASE = os.getenv('OPENAI_API_BASE')
22 | 
23 | def get_api_key() -> Optional[str]:
24 |     """Get the API key from environment variable."""
25 |     if not API_KEY:
26 |         raise ValueError("Please set the OPENAI_API_KEY environment variable")
27 |     return API_KEY
28 | 
29 | def get_api_base() -> Optional[str]:
30 |     """Get the API base URL from environment variable."""
31 |     return API_BASE
32 | 
33 | def get_model_config(model: Optional[str] = None, temperature: Optional[float] = None) -> dict:
34 |     """Get model configuration with default values."""
35 |     return {
36 |         "model": model or DEFAULT_MODEL,
37 |         "temperature": temperature if temperature is not None else DEFAULT_TEMPERATURE,
38 |     }
--------------------------------------------------------------------------------
/langchain-sandbox/helloworld.py:
--------------------------------------------------------------------------------
1 | from langchain_sandbox import PyodideSandbox
2 | import asyncio
3 | 
4 | async def main():
5 |     # Create a sandbox instance
6 |     sandbox = PyodideSandbox(
7 |         # Allow Pyodide to install python packages that
8 |         # might be required.
9 |         allow_net=True,
10 |     )
11 |     code = """\
12 | import numpy as np
13 | x = np.array([1, 2, 3])
14 | print(x)
15 | """
16 | 
17 |     # Execute Python code
18 |     response = await sandbox.execute(code)
19 |     print(response)
20 | 
21 |     # Check if execution was successful
22 |     if response.stderr:
23 |         return f"Error during execution: {response.stderr}", {}
24 | 
25 |     # Get the output from stdout
26 |     output = (
27 |         response.stdout
28 |         if response.stdout
29 |         else ""
30 | )
31 | result = response.result
32 |
33 | print(output)
34 | print(result)
35 |
36 | # CodeExecutionResult(
37 | # result=None,
38 | # stdout='[1 2 3]',
39 | # stderr=None,
40 | # status='success',
41 | # execution_time=2.8578367233276367,
42 | # session_metadata={'created': '2025-05-15T21:26:37.204Z', 'lastModified': '2025-05-15T21:26:37.831Z', 'packages': ['numpy']},
43 | # session_bytes=None
44 | # )
45 |
46 | if __name__ == "__main__":
47 | asyncio.run(main())
--------------------------------------------------------------------------------
/langchain-mcp-adapters/clients/multi_server_client_wconfig.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import json
3 | from pathlib import Path
4 | from langchain_mcp_adapters.client import MultiServerMCPClient
5 | from langgraph.prebuilt import create_react_agent
6 | from llm import deepseek_model as model
7 | from util import get_servers_dir
8 |
9 | async def run_agent():
10 | # read mcpServers.json
11 | config_path = Path(__file__).parent / "mcpServers.json"
12 | with open(config_path, "r") as f:
13 | server_config = json.load(f)
14 |     # substitute the $SERVERS_DIR and $MCP_SERVER_PORT placeholders
15 | server_config["mcpServers"]["math"]["args"][0] = server_config["mcpServers"]["math"]["args"][0].replace("$SERVERS_DIR", get_servers_dir())
16 | server_config["mcpServers"]["weather"]["url"] = server_config["mcpServers"]["weather"]["url"].replace("$MCP_SERVER_PORT", "8001")
17 |
18 | print(server_config["mcpServers"])
19 |
20 | client = MultiServerMCPClient(server_config["mcpServers"])
21 | tools = await client.get_tools()
22 | agent = create_react_agent(model, tools)
23 | math_response = await agent.ainvoke({"messages": "what's (3 + 5) x 12?"})
24 | print(math_response["messages"][-1].content)
25 | weather_response = await agent.ainvoke({"messages": "what is the weather in nyc?"})
26 | print(weather_response["messages"][-1].content)
27 |
28 | if __name__ == "__main__":
29 | asyncio.run(run_agent())
30 |
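
For context, the `mcpServers.json` read above plausibly mirrors the dict passed to `MultiServerMCPClient` in `multi_server_client.py`, with `$SERVERS_DIR` and `$MCP_SERVER_PORT` as the placeholders this script substitutes; a sketch (the exact file contents are assumed):

```json
{
  "mcpServers": {
    "math": {
      "command": "python",
      "args": ["$SERVERS_DIR/math_server.py"],
      "transport": "stdio"
    },
    "weather": {
      "url": "http://localhost:$MCP_SERVER_PORT/mcp",
      "transport": "streamable_http"
    }
  }
}
```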
--------------------------------------------------------------------------------
/openai-agents/README.md:
--------------------------------------------------------------------------------
1 | # OpenAI Agents SDK
2 |
3 | The OpenAI Agents SDK is a lightweight yet powerful framework for building multi-agent workflows.
4 |
5 | ### Core concepts:
6 |
7 | 1. [**Agents**](https://openai.github.io/openai-agents-python/agents): LLMs configured with instructions, tools, guardrails, and handoffs
8 | 2. [**Handoffs**](https://openai.github.io/openai-agents-python/handoffs/): A specialized tool call used by the Agents SDK for transferring control between agents
9 | 3. [**Guardrails**](https://openai.github.io/openai-agents-python/guardrails/): Configurable safety checks for input and output validation
10 | 4. [**Tracing**](https://openai.github.io/openai-agents-python/tracing/): Built-in tracking of agent runs, allowing you to view, debug and optimize your workflows
11 |
12 | ## Quickstart
13 |
14 | 1. Copy `.env.example` to `.env`, and set up your config:
15 |
16 | ```shell
17 | # OpenAI API configurations
18 | OPENAI_API_KEY=your_openai_api_key
19 | ```
20 |
21 | 2. Install the dependencies:
22 |
23 | ```shell
24 | pip install -r requirements.txt
25 | ```
26 |
27 | For voice support, install with the optional `voice` group: `pip install 'openai-agents[voice]'`.
28 |
29 | 3. Run the hello world example:
30 |
31 | ```shell
32 | python helloworld.py
33 | ```
34 |
35 | 4. Example output:
36 |
37 | ```shell
38 | Code within the code,
39 | Functions calling themselves,
40 | Infinite loop's dance.
41 | ```
42 |
43 |
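For reference, a hello world along these lines is all it takes with the Agents SDK; a minimal sketch consistent with the output above (the exact contents of `helloworld.py` are assumed):

```python
from agents import Agent, Runner

# An Agent is an LLM configured with instructions (plus optional tools,
# guardrails, and handoffs).
agent = Agent(name="Assistant", instructions="You only respond in haikus.")

# Runner drives the agent loop synchronously until a final output is produced.
result = Runner.run_sync(agent, "Write a haiku about recursion in programming.")
print(result.final_output)
```
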
--------------------------------------------------------------------------------
/langgraph-platform/agents/react_agent/graph.py:
--------------------------------------------------------------------------------
1 | """Define a Reasoning and Action agent using the LangGraph prebuilt react agent.
2 |
3 | Add configuration and implement using a make_graph function to rebuild the graph at runtime.
4 | """
5 | from agents.react_agent.tools import get_tools
6 | from langgraph.prebuilt import create_react_agent
7 | from agents.utils import load_chat_model
8 |
9 | from agents.react_agent.configuration import Configuration
10 | from langchain_core.runnables import RunnableConfig
11 |
12 |
13 |
14 | async def make_graph(config: RunnableConfig):
15 |
16 |     # Get values from the config or fall back to defaults
17 | configurable = config.get("configurable", {})
18 |
19 | # get values from configuration
20 | llm = configurable.get("model", "openai/gpt-4.1")
21 | selected_tools = configurable.get("selected_tools", ["get_todays_date"])
22 | prompt = configurable.get("system_prompt", "You are a helpful assistant.")
23 |
24 | # specify the name for use in supervisor architecture
25 | name = configurable.get("name", "react_agent")
26 |
27 | # Compile the builder into an executable graph
28 | # You can customize this by adding interrupt points for state updates
29 | graph = create_react_agent(
30 | model=load_chat_model(llm),
31 | tools=get_tools(selected_tools),
32 | prompt=prompt,
33 | config_schema=Configuration,
34 | name=name
35 | )
36 |
37 | return graph
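
For local testing outside the platform, `make_graph` can be awaited directly with a handcrafted config; a minimal sketch (assumes an OpenAI key is set and reuses only the "configurable" keys this module reads):

```python
import asyncio

from agents.react_agent.graph import make_graph


async def main():
    # Same "configurable" keys that make_graph extracts above.
    graph = await make_graph(
        {"configurable": {"model": "openai/gpt-4.1-mini", "selected_tools": ["get_todays_date"]}}
    )
    result = await graph.ainvoke(
        {"messages": [{"role": "user", "content": "What is today's date?"}]}
    )
    print(result["messages"][-1].content)


asyncio.run(main())
```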
--------------------------------------------------------------------------------
/langgraph-platform/agents/react_agent/configuration.py:
--------------------------------------------------------------------------------
1 | """Define the configurable parameters for the agent."""
2 |
3 | from typing import Annotated, Literal
4 | from pydantic import BaseModel, Field
5 |
6 |
7 | class Configuration(BaseModel):
8 | """The configuration for the agent."""
9 |
10 | system_prompt: str = Field(
11 | default="You are a helpful AI assistant.",
12 | description="The system prompt to use for the agent's interactions. "
13 | "This prompt sets the context and behavior for the agent."
14 | )
15 |
16 | model: Annotated[
17 | Literal[
18 | "anthropic/claude-sonnet-4-20250514",
19 | "anthropic/claude-3-5-sonnet-latest",
20 | "openai/gpt-4.1",
21 | "openai/gpt-4.1-mini"
22 | ],
23 | {"__template_metadata__": {"kind": "llm"}},
24 | ] = Field(
25 | default="anthropic/claude-3-5-sonnet-latest",
26 | description="The name of the language model to use for the agent's main interactions. "
27 | "Should be in the form: provider/model-name."
28 | )
29 |
30 | selected_tools: list[Literal["finance_research", "advanced_research_tool", "basic_research_tool", "get_todays_date"]] = Field(
31 |         default=["get_todays_date"],
32 | description="The list of tools to use for the agent's interactions. "
33 | "This list should contain the names of the tools to use."
34 | )
--------------------------------------------------------------------------------
/autogen/team.py:
--------------------------------------------------------------------------------
1 | """
2 | This is a simple example of using autogen to create a team of agents:
3 | - A web surfer agent that can search the web for information
4 | - An assistant agent that can answer questions
5 | - A user proxy agent that can interact with the user
6 |
7 | The team is a round robin group chat that will continue to run until the user terminates the conversation.
8 | """
9 | # pip install -U autogen-agentchat autogen-ext[openai,web-surfer]
10 | # playwright install
11 | import asyncio
12 | from autogen_agentchat.agents import AssistantAgent, UserProxyAgent
13 | from autogen_agentchat.conditions import TextMentionTermination
14 | from autogen_agentchat.teams import RoundRobinGroupChat
15 | from autogen_agentchat.ui import Console
16 | from autogen_ext.models.openai import OpenAIChatCompletionClient
17 | from autogen_ext.agents.web_surfer import MultimodalWebSurfer
18 | from env import *
19 |
20 | async def main() -> None:
21 | model_client = OpenAIChatCompletionClient(model="gpt-4o", base_url=os.environ["OPENAI_API_BASE"])
22 | assistant = AssistantAgent("assistant", model_client)
23 | web_surfer = MultimodalWebSurfer("web_surfer", model_client)
24 | user_proxy = UserProxyAgent("user_proxy")
25 | termination = TextMentionTermination("exit") # Type 'exit' to end the conversation.
26 | team = RoundRobinGroupChat([web_surfer, assistant, user_proxy], termination_condition=termination)
27 | await Console(team.run_stream(task="Find information about AutoGen and write a short summary."))
28 |
29 | asyncio.run(main())
--------------------------------------------------------------------------------
/model_context_protocol/weather/README.md:
--------------------------------------------------------------------------------
1 | 1. Install uv
2 | ```shell
3 | # macOS/Linux
4 | curl -LsSf https://astral.sh/uv/install.sh | sh
5 | # Windows
6 | powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex"
7 | ```
8 |
9 | 2. Install dependencies
10 | ```shell
11 | uv sync
12 | ```
13 |
14 | 3. Add the server to client
15 | - Claude Desktop
16 |
17 | Open the `claude_desktop_config.json` file:
18 | ```shell
19 | # macOS/Linux
20 | code ~/Library/Application\ Support/Claude/claude_desktop_config.json
21 | # Windows
22 | code $env:AppData\Claude\claude_desktop_config.json
23 | ```
24 |
25 | Add the server to the `mcpServers` object:
26 | ```json
27 | # macOS/Linux
28 | {
29 | "mcpServers": {
30 | "weather": {
31 | "command": "/ABSOLUTE/PATH/TO/PARENT/FOLDER/uv",
32 | "args": [
33 | "--directory",
34 | "/ABSOLUTE/PATH/TO/PARENT/FOLDER/weather",
35 | "run",
36 | "weather.py"
37 | ]
38 | }
39 | }
40 | }
41 | # Windows
42 | {
43 | "mcpServers": {
44 | "weather": {
45 | "command": "C:\\ABSOLUTE\\PATH\\TO\\PARENT\\FOLDER\\uv",
46 | "args": [
47 | "--directory",
48 | "C:\\ABSOLUTE\\PATH\\TO\\PARENT\\FOLDER\\weather",
49 | "run",
50 | "weather.py"
51 | ]
52 | }
53 | }
54 | }
55 | ```
56 |
57 |
58 | 4. Test the server
59 | - Claude Desktop: Ask "What is the weather in San Francisco?" or "What are the active weather alerts in Texas?"; Claude should ask for your permission to invoke the weather server.
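
For orientation, `weather.py` follows the standard FastMCP server pattern from the MCP quickstart; a sketch of its likely shape (tool names inferred from the prompts above, bodies elided):

```python
from mcp.server.fastmcp import FastMCP

mcp = FastMCP("weather")


@mcp.tool()
async def get_alerts(state: str) -> str:
    """Get active weather alerts for a US state (two-letter code, e.g. TX)."""
    ...  # fetch and format alerts from the NWS API


@mcp.tool()
async def get_forecast(latitude: float, longitude: float) -> str:
    """Get the forecast for a location."""
    ...  # fetch and format the forecast from the NWS API


if __name__ == "__main__":
    # Claude Desktop launches this server over stdio per the config above.
    mcp.run(transport="stdio")
```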
--------------------------------------------------------------------------------
/A2A/demo/ui/components/dialog.py:
--------------------------------------------------------------------------------
1 | import mesop as me
2 |
3 |
4 | @me.content_component
5 | def dialog(is_open: bool):
6 | with me.box(
7 | style=me.Style(
8 | background='rgba(0,0,0,0.4)',
9 | display='block' if is_open else 'none',
10 | height='100%',
11 | overflow_x='auto',
12 | overflow_y='auto',
13 | position='fixed',
14 | width='100%',
15 | z_index=1000,
16 | )
17 | ):
18 | with me.box(
19 | style=me.Style(
20 | align_items='center',
21 | display='grid',
22 | height='100vh',
23 | justify_items='center',
24 | )
25 | ):
26 | with me.box(
27 | style=me.Style(
28 | background=me.theme_var('background'),
29 | border_radius=20,
30 | box_sizing='content-box',
31 | box_shadow=(
32 | '0 3px 1px -2px #0003, 0 2px 2px #00000024, 0 1px 5px #0000001f'
33 | ),
34 | margin=me.Margin.symmetric(vertical='0', horizontal='auto'),
35 | padding=me.Padding.all(20),
36 | )
37 | ):
38 | me.slot()
39 |
40 |
41 | @me.content_component
42 | def dialog_actions():
43 | with me.box(
44 | style=me.Style(
45 | display='flex', justify_content='end', margin=me.Margin(top=20)
46 | )
47 | ):
48 | me.slot()
49 |
--------------------------------------------------------------------------------
/A2A/demo/ui/components/poller.py:
--------------------------------------------------------------------------------
1 | import mesop as me
2 |
3 | from state.host_agent_service import UpdateAppState
4 | from state.state import AppState
5 |
6 |
7 | @me.content_component
8 | def polling_buttons():
9 | """Polling buttons component"""
10 | state = me.state(AppState)
11 | with me.box(
12 | style=me.Style(
13 | display='flex',
14 | justify_content='end',
15 | )
16 | ):
17 | me.button_toggle(
18 | value=[str(state.polling_interval)],
19 | buttons=[
20 | me.ButtonToggleButton(label='1s', value='1'),
21 | me.ButtonToggleButton(label='5s', value='5'),
22 | me.ButtonToggleButton(label='30s', value='30'),
23 | me.ButtonToggleButton(label='Disable', value='0'),
24 | ],
25 | multiple=False,
26 | hide_selection_indicator=True,
27 | disabled=False,
28 | on_change=on_change,
29 | style=me.Style(
30 | margin=me.Margin(bottom=20),
31 | ),
32 | )
33 | with me.content_button(
34 | type='raised',
35 | on_click=force_refresh,
36 | ):
37 | me.icon('refresh')
38 | me.slot()
39 |
40 |
41 | def on_change(e: me.ButtonToggleChangeEvent):
42 | state = me.state(AppState)
43 | state.polling_interval = int(e.value)
44 |
45 |
46 | async def force_refresh(e: me.ClickEvent):
47 | """Refresh app state event handler"""
48 | yield
49 | app_state = me.state(AppState)
50 | await UpdateAppState(app_state, app_state.current_conversation_id)
51 | yield
52 |
--------------------------------------------------------------------------------
/A2A/demo/ui/components/task_card.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | import mesop as me
4 | import pandas as pd
5 |
6 | from state.state import ContentPart, SessionTask, StateTask
7 |
8 |
9 | def message_string(content: ContentPart) -> str:
10 | if isinstance(content, str):
11 | return content
12 | return json.dumps(content)
13 |
14 |
15 | @me.component
16 | def task_card(tasks: list[SessionTask]):
17 | """Task card component"""
18 | columns = ['Conversation ID', 'Task ID', 'Description', 'Status', 'Output']
19 | df_data = {c: [] for c in columns}
20 | for task in tasks:
21 | df_data['Conversation ID'].append(task.session_id)
22 | df_data['Task ID'].append(task.task.task_id)
23 | df_data['Description'].append(
24 | '\n'.join(message_string(x[0]) for x in task.task.message.content)
25 | )
26 | df_data['Status'].append(task.task.state)
27 | df_data['Output'].append(flatten_artifacts(task.task))
28 |     df = pd.DataFrame(df_data, columns=columns)
29 | with me.box(
30 | style=me.Style(
31 | display='flex',
32 | justify_content='space-between',
33 | )
34 | ):
35 | me.table(
36 | df,
37 | header=me.TableHeader(sticky=True),
38 | columns={c: me.TableColumn(sticky=True) for c in columns},
39 | )
40 |
41 |
42 | def flatten_artifacts(task: StateTask) -> str:
43 | parts = []
44 | for a in task.artifacts:
45 | for p in a:
46 | if p[1] == 'text/plain' or p[1] == 'application/json':
47 | parts.append(message_string(p[0]))
48 | else:
49 | parts.append(p[1])
50 |
51 | return '\n'.join(parts)
52 |
--------------------------------------------------------------------------------
/A2A/demo/ui/components/async_poller.py:
--------------------------------------------------------------------------------
1 | from collections.abc import Callable
2 | from dataclasses import asdict, dataclass
3 | from typing import Any
4 |
5 | import mesop.labs as mel
6 |
7 | from state.state import AppState
8 |
9 |
10 | @dataclass
11 | class AsyncAction:
12 | value: AppState
13 | duration_seconds: int
14 |
15 |
16 | @mel.web_component(path='./async_poller.js')
17 | def async_poller(
18 | *,
19 | trigger_event: Callable[[mel.WebEvent], Any],
20 | action: AsyncAction | None = None,
21 | key: str | None = None,
22 | ):
23 | """Creates an invisible component that will delay state changes asynchronously.
24 |
25 |     This implementation is currently limited, since we basically just pass the
26 |     key around. Ideally we would also pass in some kind of value to update when
27 |     the timeout expires.
28 | 
29 |     The main benefit of this component is for cases such as status messages that
30 |     may appear and disappear after some duration. The primary example here is the
31 |     example snackbar widget, which currently blocks the UI when using the
32 |     sleep-and-yield approach.
33 | 
34 |     The other benefit of this component is that it works generically (rather
35 |     than, say, implementing a custom snackbar widget as a web component).
36 |
37 | Returns:
38 | The web component that was created.
39 | """
40 | return mel.insert_web_component(
41 | name='async-action-component',
42 | key=key,
43 | events={
44 | 'triggerEvent': trigger_event,
45 | },
46 | properties={
47 | 'polling_interval': action.duration_seconds if action else 1,
48 | 'action': asdict(action) if action else {},
49 | },
50 | )
51 |
--------------------------------------------------------------------------------
/agno/with_knowledge_and_storage.py:
--------------------------------------------------------------------------------
1 | from agno.agent import Agent
2 | from agno.embedder.openai import OpenAIEmbedder
3 | from agno.models.openai import OpenAIChat
4 | from agno.knowledge.url import UrlKnowledge
5 | from agno.storage.sqlite import SqliteStorage
6 | from agno.vectordb.lancedb import LanceDb, SearchType
7 |
8 | # Load Agno documentation in a knowledge base
9 | # You can also use `https://docs.agno.com/llms-full.txt` for the full documentation
10 | knowledge = UrlKnowledge(
11 | urls=["https://docs.agno.com/introduction.md"],
12 | vector_db=LanceDb(
13 | uri="tmp/lancedb",
14 | table_name="agno_docs",
15 | search_type=SearchType.hybrid,
16 | # Use OpenAI for embeddings
17 | embedder=OpenAIEmbedder(id="text-embedding-3-small", dimensions=1536),
18 | ),
19 | )
20 |
21 | # Store agent sessions in a SQLite database
22 | storage = SqliteStorage(table_name="agent_sessions", db_file="tmp/agent.db")
23 |
24 | agent = Agent(
25 | name="Agno Assist",
26 | model=OpenAIChat(id="gpt-4o"),
27 | instructions=[
28 | "Search your knowledge before answering the question.",
29 | "Only include the output in your response. No other text.",
30 | ],
31 | knowledge=knowledge,
32 | storage=storage,
33 | add_datetime_to_instructions=True,
34 | # Add the chat history to the messages
35 | add_history_to_messages=True,
36 | # Number of history runs
37 | num_history_runs=3,
38 | markdown=True,
39 | )
40 |
41 | if __name__ == "__main__":
42 | # Load the knowledge base, comment out after first run
43 | # Set recreate to True to recreate the knowledge base if needed
44 | agent.knowledge.load(recreate=False)
45 | agent.print_response("What is Agno?", stream=True)
--------------------------------------------------------------------------------
/agno/with_memory_and_reasoning.py:
--------------------------------------------------------------------------------
1 | from agno.agent import Agent
2 | from agno.memory.v2.db.sqlite import SqliteMemoryDb
3 | from agno.memory.v2.memory import Memory
4 | from agno.models.openai import OpenAIChat
5 | from agno.tools.reasoning import ReasoningTools
6 | from agno.tools.yfinance import YFinanceTools
7 |
8 | memory = Memory(
9 | # Use any model for creating and managing memories
10 | model=OpenAIChat(id="gpt-4o"),
11 | # Store memories in a SQLite database
12 | db=SqliteMemoryDb(table_name="user_memories", db_file="tmp/agent.db"),
13 |     # Deletion is disabled by default; we enable it here
14 | delete_memories=True,
15 | clear_memories=True,
16 | )
17 |
18 | agent = Agent(
19 | model=OpenAIChat(id="gpt-4o"),
20 | tools=[
21 | ReasoningTools(add_instructions=True),
22 | YFinanceTools(stock_price=True, analyst_recommendations=True, company_info=True, company_news=True),
23 | ],
24 | # User ID for storing memories, `default` if not provided
25 | user_id="ava",
26 | instructions=[
27 | "Use tables to display data.",
28 | "Include sources in your response.",
29 | "Only include the report in your response. No other text.",
30 | ],
31 | memory=memory,
32 | # Let the Agent manage its memories
33 | enable_agentic_memory=True,
34 | markdown=True,
35 | )
36 |
37 | if __name__ == "__main__":
38 | # This will create a memory that "ava's" favorite stocks are NVIDIA and TSLA
39 | agent.print_response(
40 | "My favorite stocks are NVIDIA and TSLA",
41 | stream=True,
42 | show_full_reasoning=True,
43 | stream_intermediate_steps=True,
44 | )
45 | # This will use the memory to answer the question
46 | agent.print_response(
47 | "Can you compare my favorite stocks?",
48 | stream=True,
49 | show_full_reasoning=True,
50 | stream_intermediate_steps=True,
51 | )
--------------------------------------------------------------------------------
/A2A/demo/ui/components/api_key_dialog.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import mesop as me
4 |
5 | from state.host_agent_service import UpdateApiKey
6 | from state.state import AppState
7 |
8 | from .dialog import dialog, dialog_actions
9 |
10 |
11 | def on_api_key_change(e: me.InputBlurEvent):
12 | """Save API key to app state when input changes"""
13 | state = me.state(AppState)
14 | state.api_key = e.value
15 |
16 |
17 | async def save_api_key(e: me.ClickEvent):
18 | """Save API key and close dialog"""
19 | yield # Yield to allow UI update
20 |
21 | state = me.state(AppState)
22 |
23 | # Validate API key is not empty
24 | if not state.api_key.strip():
25 | return
26 |
27 | # Set the environment variable for current process
28 | os.environ['GOOGLE_API_KEY'] = state.api_key
29 |
30 | # Update the API key in the server
31 | await UpdateApiKey(state.api_key)
32 |
33 | state.api_key_dialog_open = False
34 |
35 | yield
36 |
37 |
38 | @me.component
39 | def api_key_dialog():
40 | """Dialog for API key input"""
41 | state = me.state(AppState)
42 |
43 | with dialog(state.api_key_dialog_open):
44 | with me.box(
45 | style=me.Style(display='flex', flex_direction='column', gap=12)
46 | ):
47 | me.text(
48 | 'Google API Key Required',
49 | type='headline-4',
50 | style=me.Style(margin=me.Margin(bottom=10)),
51 | )
52 | me.text(
53 | 'Please enter your Google API Key to use the application.',
54 | style=me.Style(margin=me.Margin(bottom=20)),
55 | )
56 | me.input(
57 | label='Google API Key',
58 | value=state.api_key,
59 | on_blur=on_api_key_change,
60 | type='password',
61 | style=me.Style(width='100%'),
62 | )
63 |
64 | with dialog_actions():
65 | me.button('Save', on_click=save_api_key)
66 |
--------------------------------------------------------------------------------
/A2A/demo/ui/components/event_viewer.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | import mesop as me
4 | import pandas as pd
5 |
6 | from state.host_agent_service import GetEvents, convert_event_to_state
7 |
8 |
9 | def flatten_content(content: list[tuple[str, str]]) -> str:
10 | parts = []
11 | for p in content:
12 | if p[1] == 'text/plain' or p[1] == 'application/json':
13 | parts.append(p[0])
14 | else:
15 | parts.append(p[1])
16 |
17 | return '\n'.join(parts)
18 |
19 |
20 | @me.component
21 | def event_list():
22 | """Events list component"""
23 | df_data = {
24 | 'Conversation ID': [],
25 | 'Actor': [],
26 | 'Role': [],
27 | 'Id': [],
28 | 'Content': [],
29 | }
30 | events = asyncio.run(GetEvents())
31 | for e in events:
32 | event = convert_event_to_state(e)
33 | df_data['Conversation ID'].append(event.conversation_id)
34 | df_data['Role'].append(event.role)
35 | df_data['Id'].append(event.id)
36 | df_data['Content'].append(flatten_content(event.content))
37 | df_data['Actor'].append(event.actor)
38 | if not df_data['Conversation ID']:
39 | me.text('No events found')
40 | return
41 | df = pd.DataFrame(
42 |         df_data,
43 | columns=['Conversation ID', 'Actor', 'Role', 'Id', 'Content'],
44 | )
45 | with me.box(
46 | style=me.Style(
47 | display='flex',
48 | justify_content='space-between',
49 | flex_direction='column',
50 | )
51 | ):
52 | me.table(
53 | df,
54 | header=me.TableHeader(sticky=True),
55 | columns={
56 | 'Conversation ID': me.TableColumn(sticky=True),
57 | 'Actor': me.TableColumn(sticky=True),
58 | 'Role': me.TableColumn(sticky=True),
59 | 'Id': me.TableColumn(sticky=True),
60 | 'Content': me.TableColumn(sticky=True),
61 | },
62 | )
63 |
--------------------------------------------------------------------------------
/crewai/helloworld/src/helloworld/main.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | Example of a simple crew with a sequential process.
4 | """
5 | import sys
6 | import warnings
7 |
8 | from datetime import datetime
9 |
10 | from helloworld.crew import Helloworld
11 |
12 | warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")
13 |
14 | # This main file is intended to be a way for you to run your
15 | # crew locally, so refrain from adding unnecessary logic into this file.
16 | # Replace these with the inputs you want to test with; they will automatically
17 | # be interpolated into the task and agent descriptions.
18 |
19 | def run():
20 | """
21 | Run the crew.
22 | """
23 | inputs = {
24 | 'topic': 'AI LLMs',
25 | 'current_year': str(datetime.now().year)
26 | }
27 |
28 | try:
29 | Helloworld().crew().kickoff(inputs=inputs)
30 | except Exception as e:
31 | raise Exception(f"An error occurred while running the crew: {e}")
32 |
33 |
34 | def train():
35 | """
36 | Train the crew for a given number of iterations.
37 | """
38 | inputs = {
39 | "topic": "AI LLMs"
40 | }
41 | try:
42 | Helloworld().crew().train(n_iterations=int(sys.argv[1]), filename=sys.argv[2], inputs=inputs)
43 |
44 | except Exception as e:
45 | raise Exception(f"An error occurred while training the crew: {e}")
46 |
47 | def replay():
48 | """
49 | Replay the crew execution from a specific task.
50 | """
51 | try:
52 | Helloworld().crew().replay(task_id=sys.argv[1])
53 |
54 | except Exception as e:
55 | raise Exception(f"An error occurred while replaying the crew: {e}")
56 |
57 | def test():
58 | """
59 | Test the crew execution and returns the results.
60 | """
61 | inputs = {
62 | "topic": "AI LLMs"
63 | }
64 | try:
65 | Helloworld().crew().test(n_iterations=int(sys.argv[1]), openai_model_name=sys.argv[2], inputs=inputs)
66 |
67 | except Exception as e:
68 | raise Exception(f"An error occurred while testing the crew: {e}")
69 |
--------------------------------------------------------------------------------
/model_context_protocol/README.md:
--------------------------------------------------------------------------------
1 | # Model Context Protocol Examples
2 |
3 | Welcome to the Model Context Protocol (MCP) examples! This directory contains ready-to-run examples demonstrating how to extend Claude's capabilities with custom MCP servers.
4 |
5 | ## ⚡ Quick Start (5-Minute Setup)
6 |
7 | Let's create your first MCP server! We'll start with a simple weather service example.
8 |
9 | 1. Install uv (Fast Python Package Installer):
10 | ```shell
11 | # macOS/Linux
12 | curl -LsSf https://astral.sh/uv/install.sh | sh
13 | # Windows
14 | powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex"
15 | ```
16 |
17 | 2. Install dependencies:
18 | ```shell
19 | cd weather
20 | python -m venv .venv
21 | source .venv/bin/activate # On Windows: .venv\Scripts\activate
22 | uv sync
23 | ```
24 |
25 | 3. Configure Claude Desktop:
26 |
27 | Open the configuration file:
28 | ```shell
29 | # macOS/Linux
30 | code ~/Library/Application\ Support/Claude/claude_desktop_config.json
31 | # Windows
32 | code $env:AppData\Claude\claude_desktop_config.json
33 | ```
34 |
35 | Add the server configuration:
36 | ```json
37 | # macOS/Linux
38 | {
39 | "mcpServers": {
40 | "weather": {
41 | "command": "/ABSOLUTE/PATH/TO/PARENT/FOLDER/uv",
42 | "args": [
43 | "--directory",
44 | "/ABSOLUTE/PATH/TO/PARENT/FOLDER/weather",
45 | "run",
46 | "weather.py"
47 | ]
48 | }
49 | }
50 | }
51 | # Windows
52 | {
53 | "mcpServers": {
54 | "weather": {
55 | "command": "C:\\ABSOLUTE\\PATH\\TO\\PARENT\\FOLDER\\uv",
56 | "args": [
57 | "--directory",
58 | "C:\\ABSOLUTE\\PATH\\TO\\PARENT\\FOLDER\\weather",
59 | "run",
60 | "weather.py"
61 | ]
62 | }
63 | }
64 | }
65 | ```
66 |
67 | 4. Test the server
68 | - Claude Desktop: Ask "What is the weather in San Francisco?" or "What are the active weather alerts in Texas?"; Claude should ask for your permission to invoke the weather server.
--------------------------------------------------------------------------------
/A2A/demo/ui/components/page_scaffold.py:
--------------------------------------------------------------------------------
1 | import mesop as me
2 | import mesop.labs as mel
3 |
4 | from state.host_agent_service import UpdateAppState
5 | from state.state import AppState
6 | from styles.styles import (
7 | MAIN_COLUMN_STYLE,
8 | PAGE_BACKGROUND_PADDING_STYLE,
9 | PAGE_BACKGROUND_STYLE,
10 | SIDENAV_MAX_WIDTH,
11 | SIDENAV_MIN_WIDTH,
12 | )
13 |
14 | from .async_poller import AsyncAction, async_poller
15 | from .side_nav import sidenav
16 |
17 |
18 | async def refresh_app_state(e: mel.WebEvent): # pylint: disable=unused-argument
19 | """Refresh app state event handler"""
20 | yield
21 | app_state = me.state(AppState)
22 | await UpdateAppState(app_state, app_state.current_conversation_id)
23 | yield
24 |
25 |
26 | @me.content_component
27 | def page_scaffold():
28 | """Page scaffold component"""
29 | app_state = me.state(AppState)
30 | action = (
31 | AsyncAction(
32 | value=app_state, duration_seconds=app_state.polling_interval
33 | )
34 | if app_state
35 | else None
36 | )
37 | async_poller(action=action, trigger_event=refresh_app_state)
38 |
39 | sidenav('')
40 |
41 | with me.box(
42 | style=me.Style(
43 | display='flex',
44 | flex_direction='column',
45 | height='100%',
46 | margin=me.Margin(
47 | left=SIDENAV_MAX_WIDTH
48 | if app_state.sidenav_open
49 | else SIDENAV_MIN_WIDTH,
50 | ),
51 | ),
52 | ):
53 | with me.box(
54 | style=me.Style(
55 | background=me.theme_var('background'),
56 | height='100%',
57 | overflow_y='scroll',
58 | margin=me.Margin(bottom=20),
59 | )
60 | ):
61 | me.slot()
62 |
63 |
64 | @me.content_component
65 | def page_frame():
66 | """Page Frame"""
67 | with me.box(style=MAIN_COLUMN_STYLE):
68 | with me.box(style=PAGE_BACKGROUND_STYLE):
69 | with me.box(style=PAGE_BACKGROUND_PADDING_STYLE):
70 | me.slot()
71 |
--------------------------------------------------------------------------------
/A2A/demo/ui/pages/home.py:
--------------------------------------------------------------------------------
1 | import mesop as me
2 |
3 | from components.conversation_list import conversation_list
4 | from components.header import header
5 | from state.state import AppState
6 |
7 |
8 | @me.stateclass
9 | class PageState:
10 | """Local Page State"""
11 |
12 | temp_name: str = ''
13 |
14 |
15 | def on_blur_set_name(e: me.InputBlurEvent):
16 | """Input handler"""
17 | state = me.state(PageState)
18 | state.temp_name = e.value
19 |
20 |
21 | def on_enter_change_name(e: me.components.input.input.InputEnterEvent): # pylint: disable=unused-argument
22 | """Change name button handler"""
23 | state = me.state(PageState)
24 | app_state = me.state(AppState)
25 | app_state.name = state.temp_name
26 | app_state.greeting = '' # reset greeting
27 | yield
28 |
29 |
30 | def on_click_change_name(e: me.ClickEvent): # pylint: disable=unused-argument
31 | """Change name button handler"""
32 | state = me.state(PageState)
33 | app_state = me.state(AppState)
34 | app_state.name = state.temp_name
35 | app_state.greeting = '' # reset greeting
36 | yield
37 |
38 |
39 | def home_page_content(app_state: AppState):
40 | """Home Page"""
41 | with me.box(
42 | style=me.Style(
43 | display='flex',
44 | flex_direction='column',
45 | height='100%',
46 | ),
47 | ):
48 | with me.box(
49 | style=me.Style(
50 | background=me.theme_var('background'),
51 | height='100%',
52 | margin=me.Margin(bottom=20),
53 | )
54 | ):
55 | with me.box(
56 | style=me.Style(
57 | background=me.theme_var('background'),
58 | padding=me.Padding(top=24, left=24, right=24, bottom=24),
59 | display='flex',
60 | flex_direction='column',
61 | width='100%',
62 | )
63 | ):
64 | with header('Conversations', 'message'):
65 | pass
66 | conversation_list(app_state.conversations)
67 |
--------------------------------------------------------------------------------
/langgraph/README.md:
--------------------------------------------------------------------------------
1 | # LangGraph Hello World Example
2 |
3 | A minimal example demonstrating how to use LangGraph to create a simple conversational agent. This example shows the basic concepts of LangGraph including creating nodes, building a graph, handling state, and running an agent.
4 |
5 | ## Features
6 |
7 | - Simple conversational agent implementation
8 | - Basic graph structure with two nodes
9 | - Error handling and state management
10 | - Model-agnostic design using LiteLLM
11 |
12 | ## Prerequisites
13 |
14 | - Python 3.9+
15 | - OpenAI API key or compatible API key for other LLMs
16 |
17 | ## Installation
18 |
19 | 1. Install dependencies:
20 | ```bash
21 | pip install -r requirements.txt
22 | ```
23 | 2. Set up environment:
24 | ```bash
25 | cp .env.example .env
26 | ```
27 |
28 | Edit `.env` with your settings:
29 | ```ini
30 | # LLM Model configurations
31 | DEFAULT_MODEL=gpt-4o-mini
32 | DEFAULT_TEMPERATURE=0.7
33 |
34 | # OpenAI API configurations
35 | OPENAI_API_KEY=your-api-key-here
36 | OPENAI_API_BASE=https://api.openai.com/v1 # Optional: custom API endpoint (e.g. for API proxies)
37 | ```
38 | 3. Run the script:
39 | ```bash
40 | python helloworld.py
41 | ```
42 |
43 | 4. Expected output:
44 | ```
45 | User: Tell me a short joke
46 | Assistant: Why did the scarecrow win an award? Because he was outstanding in his field!
47 | ```
48 |
49 | ## Code Structure
50 |
51 | - `helloworld.py`: Main implementation file
52 | - `AgentState`: Defines the state structure
53 | - `get_llm_response()`: Node for getting LLM responses
54 | - `format_response()`: Node for formatting and updating state
55 |   - `build_graph()`: Creates and configures the graph (see the sketch below)
56 | - `main()`: Entry point and execution logic
57 |
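A `build_graph()` matching the structure described above could be as small as this sketch (node names taken from the list; the exact wiring is assumed):

```python
from langgraph.graph import END, StateGraph


def build_graph():
    # Wire the two nodes into a linear graph over AgentState.
    builder = StateGraph(AgentState)
    builder.add_node("get_llm_response", get_llm_response)
    builder.add_node("format_response", format_response)
    builder.set_entry_point("get_llm_response")
    builder.add_edge("get_llm_response", "format_response")
    builder.add_edge("format_response", END)
    return builder.compile()
```
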
58 | ## Extending the Example
59 |
60 | You can extend this example by:
61 | - Adding more nodes to the graph
62 | - Implementing more complex state management
63 | - Adding different types of interactions
64 | - Integrating with other LLM providers
65 |
66 | ## Error Handling
67 |
68 | The example includes basic error handling:
69 | - Checks for required environment variables
70 | - Handles LLM API errors gracefully
71 | - Maintains state consistency
--------------------------------------------------------------------------------
/langgraph-platform/client.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from langgraph_sdk import get_client
3 |
4 | # Replace the URL with your API URL after running `langgraph up` (locally) or with your LangGraph Platform deployment URL (remotely)
5 | client = get_client(url="http://localhost:8123")
6 |
7 | openai_assistant = None
8 |
9 |
10 | async def create_assistant():
11 | print("Creating assistant")
12 | assistant = await client.assistants.create(
13 | "react_agent", config={"configurable": {
14 | "model_name": "openai/gpt-4.1-mini",
15 | "system_prompt": "You are a helpful assistant."
16 | }},
17 | name="Open AI Assistant"
18 | )
19 | return assistant
20 |
21 |
22 | async def update_assistant(assistant):
23 | print("Updating assistant")
24 | assistant_v2 = await client.assistants.update(
25 | assistant["assistant_id"],
26 | config={
27 | "configurable": {
28 | "model_name": "openai/gpt-4.1",
29 | "system_prompt": "You are a funny assistant!",
30 | }
31 | },
32 | )
33 | return assistant_v2
34 |
35 |
36 | async def run_assistant(assistant):
37 | print("Running assistant")
38 | thread = await client.threads.create()
39 | input = {"messages": [{"role": "user", "content": "who made you?"}]}
40 | async for event in client.runs.stream(
41 | thread["thread_id"],
42 | # this is where we specify the assistant id to use
43 | assistant["assistant_id"],
44 | input=input,
45 | stream_mode="updates",
46 | ):
47 | print(f"Receiving event of type: {event.event}")
48 | print(event.data)
49 | print("\n\n")
50 |
51 |
52 | async def main():
53 | global openai_assistant
54 | if openai_assistant is None:
55 | openai_assistant = await create_assistant()
56 | await run_assistant(openai_assistant)
57 | openai_assistant = await update_assistant(openai_assistant)
58 | await run_assistant(openai_assistant)
59 | await client.assistants.set_latest(openai_assistant['assistant_id'], 1)
60 | await run_assistant(openai_assistant)
61 |
62 | if __name__ == "__main__":
63 | asyncio.run(main())
64 |
--------------------------------------------------------------------------------
/langchain-sandbox/stateful_react.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from langgraph.prebuilt import create_react_agent
3 | from langgraph.prebuilt.chat_agent_executor import AgentState
4 | from langgraph.checkpoint.memory import InMemorySaver
5 | from langchain_sandbox import PyodideSandboxTool, PyodideSandbox
6 | from dotenv import load_dotenv
7 |
8 | load_dotenv()
9 |
10 | class State(AgentState):
11 | # important: add session_bytes & session_metadata keys to your graph state schema -
12 | # these keys are required to store the session data between tool invocations.
13 | # `session_bytes` contains pickled session state. It should not be unpickled
14 | # and is only meant to be used by the sandbox itself
15 | session_bytes: bytes
16 | session_metadata: dict
17 |
18 | tool = PyodideSandboxTool(
19 | # Create stateful sandbox
20 | stateful=True,
21 | # Allow Pyodide to install python packages that
22 | # might be required.
23 | allow_net=True
24 | )
25 | agent = create_react_agent(
26 | "gpt-4o-mini",
27 | tools=[tool],
28 | checkpointer=InMemorySaver(),
29 | state_schema=State
30 | )
31 |
32 | async def run_agent():
33 | async for typ, chunk in agent.astream(
34 | {
35 | "messages": [
36 | {"role": "user", "content": "what's 5 + 7? save result as 'a'"}
37 | ],
38 | # Important: set session_bytes & session_metadata for Input State
39 | "session_bytes": None,
40 | "session_metadata": None
41 | },
42 | stream_mode=["messages"],
43 | config={"configurable": {"thread_id": "123"}},
44 | ):
45 | if typ == "messages":
46 | print(chunk[0].content, end="")
47 |
48 | print("\n")
49 | print("-"*100)
50 | print("\n")
51 |
52 | async for typ, chunk in agent.astream(
53 | {"messages": [{"role": "user", "content": "what's the sine of 'a'?"}]},
54 | stream_mode=["messages"],
55 | config={"configurable": {"thread_id": "123"}},
56 | ):
57 | if typ == "messages":
58 | print(chunk[0].content, end="")
59 | print("\n")
60 |
61 | if __name__ == "__main__":
62 | asyncio.run(run_agent())
--------------------------------------------------------------------------------
/A2A/agents/mindsdb/__main__.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | import click
4 |
5 | from agent import MindsDBAgent
6 | from common.server import A2AServer
7 | from common.types import (
8 | AgentCapabilities,
9 | AgentCard,
10 | AgentSkill,
11 | MissingAPIKeyError,
12 | )
13 | from task_manager import AgentTaskManager
14 |
15 |
16 | logging.basicConfig(level=logging.INFO)
17 | logger = logging.getLogger(__name__)
18 |
19 |
20 | @click.command()
21 | @click.option('--host', default='localhost')
22 | @click.option('--port', default=10006)
23 | def main(host, port):
24 | try:
25 | capabilities = AgentCapabilities(streaming=True)
26 | skill = AgentSkill(
27 | id='chat_with_your_data',
28 | name='Chat with your data',
29 | description='Interact with your databases and tables through natural language queries using MindsDB.',
30 | tags=['database', 'sql', 'mindsdb', 'data analysis'],
31 | examples=[
32 | 'What TABLES are in my database?',
33 | 'What are some good queries to run on my data?',
34 | ],
35 | )
36 | agent_card = AgentCard(
37 | name='MindsDB Data Chat Agent',
38 | description="An agent that allows you to interact with your data through natural language queries using MindsDB's capabilities. Query and analyze your databases conversationally.",
39 | url=f'http://{host}:{port}/',
40 | version='1.0.0',
41 | defaultInputModes=MindsDBAgent.SUPPORTED_CONTENT_TYPES,
42 | defaultOutputModes=MindsDBAgent.SUPPORTED_CONTENT_TYPES,
43 | capabilities=capabilities,
44 | skills=[skill],
45 | )
46 | server = A2AServer(
47 | agent_card=agent_card,
48 | task_manager=AgentTaskManager(agent=MindsDBAgent()),
49 | host=host,
50 | port=port,
51 | )
52 | server.start()
53 | except MissingAPIKeyError as e:
54 | logger.error(f'Error: {e}')
55 | exit(1)
56 | except Exception as e:
57 | logger.error(f'An error occurred during server startup: {e}')
58 | exit(1)
59 |
60 |
61 | if __name__ == '__main__':
62 | main()
63 |
--------------------------------------------------------------------------------
/crewai/helloworld/src/helloworld/crew.py:
--------------------------------------------------------------------------------
1 | from crewai import Agent, Crew, Process, Task
2 | from crewai.project import CrewBase, agent, crew, task
3 |
4 | # If you want to run a snippet of code before or after the crew starts,
5 | # you can use the @before_kickoff and @after_kickoff decorators
6 | # https://docs.crewai.com/concepts/crews#example-crew-class-with-decorators
7 |
8 | @CrewBase
9 | class Helloworld():
10 | """Helloworld crew"""
11 |
12 | # Learn more about YAML configuration files here:
13 | # Agents: https://docs.crewai.com/concepts/agents#yaml-configuration-recommended
14 | # Tasks: https://docs.crewai.com/concepts/tasks#yaml-configuration-recommended
15 | agents_config = 'config/agents.yaml'
16 | tasks_config = 'config/tasks.yaml'
17 |
18 | # If you would like to add tools to your agents, you can learn more about it here:
19 | # https://docs.crewai.com/concepts/agents#agent-tools
20 | @agent
21 | def researcher(self) -> Agent:
22 | return Agent(
23 | config=self.agents_config['researcher'],
24 | verbose=True
25 | )
26 |
27 | @agent
28 | def reporting_analyst(self) -> Agent:
29 | return Agent(
30 | config=self.agents_config['reporting_analyst'],
31 | verbose=True
32 | )
33 |
34 | # To learn more about structured task outputs,
35 | # task dependencies, and task callbacks, check out the documentation:
36 | # https://docs.crewai.com/concepts/tasks#overview-of-a-task
37 | @task
38 | def research_task(self) -> Task:
39 | return Task(
40 | config=self.tasks_config['research_task'],
41 | )
42 |
43 | @task
44 | def reporting_task(self) -> Task:
45 | return Task(
46 | config=self.tasks_config['reporting_task'],
47 | output_file='report.md'
48 | )
49 |
50 | @crew
51 | def crew(self) -> Crew:
52 | """Creates the Helloworld crew"""
53 | # To learn how to add knowledge sources to your crew, check out the documentation:
54 | # https://docs.crewai.com/concepts/knowledge#what-is-knowledge
55 |
56 | return Crew(
57 | agents=self.agents, # Automatically created by the @agent decorator
58 | tasks=self.tasks, # Automatically created by the @task decorator
59 | process=Process.sequential,
60 | verbose=True,
61 | # process=Process.hierarchical, # In case you wanna use that instead https://docs.crewai.com/how-to/Hierarchical/
62 | )
63 |
--------------------------------------------------------------------------------
/A2A/agents/README.md:
--------------------------------------------------------------------------------
1 | ## Sample Agents
2 |
3 | All the agents in this directory are samples built on different frameworks highlighting different capabilities. Each agent runs as a standalone A2A server.
4 |
5 | Each agent can be run as its own A2A server by following the instructions in its README. By default, each runs on a separate port on localhost (use command-line arguments to override).
6 |
7 | To interact with the servers, use an A2AClient in a host app (such as the CLI). See [Host Apps](/samples/python/hosts/README.md) for details; a minimal client sketch follows the list below.
8 |
9 | * [**Google ADK**](/samples/python/agents/google_adk/README.md)
10 | Sample agent to (mock) fill out expense reports. Showcases multi-turn interactions and returning/replying to webforms through A2A.
11 |
12 | * [**AG2 MCP Agent with A2A Protocol**](/samples/python/agents/ag2/README.md)
13 | Demonstrates an MCP-enabled agent built with [AG2](https://github.com/ag2ai/ag2) that is exposed through the A2A protocol.
14 |
15 | * [**LangGraph**](/samples/python/agents/langgraph/README.md)
16 | Sample agent which can convert currency using tools. Showcases multi-turn interactions, tool usage, and streaming updates.
17 |
18 | * [**CrewAI**](/samples/python/agents/crewai/README.md)
19 | Sample agent which can generate images. Showcases multi-turn interactions and sending images through A2A.
20 |
21 | * [**LlamaIndex**](/samples/python/agents/llama_index_file_chat/README.md)
22 | Sample agent which can parse a file and then chat with the user using the parsed content as context. Showcases multi-turn interactions, file upload and parsing, and streaming updates.
23 |
24 | * [**Marvin Contact Extractor Agent**](/samples/python/agents/marvin/README.md)
25 | Demonstrates an agent using the [Marvin](https://github.com/prefecthq/marvin) framework to extract structured contact information from text, integrated with the Agent2Agent (A2A) protocol.
26 |
27 | * [**Enterprise Data Agent**](/samples/python/agents/mindsdb/README.md)
28 | Sample agent which can answer questions about data in any database, data warehouse, or app, powered by Gemini 2.5 Flash + MindsDB.
29 |
30 | * [**Semantic Kernel Agent**](/samples/python/agents/semantickernel/README.md)
31 | Demonstrates how to implement a travel agent built on [Semantic Kernel](https://github.com/microsoft/semantic-kernel/) and exposed through the A2A protocol.
32 |
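For orientation, talking to any of these servers from Python looks roughly like the sketch below. It assumes the `common.client` helpers (`A2ACardResolver`, `A2AClient`) used throughout these samples; the payload field names are illustrative, so treat this as a shape, not a spec.

```python
# Sketch only; assumes the samples' common.client helpers. Run inside an async function.
from uuid import uuid4

from common.client import A2AClient, A2ACardResolver

card = A2ACardResolver('http://localhost:10000').get_agent_card()
client = A2AClient(agent_card=card)

task = await client.send_task({
    'id': uuid4().hex,  # task id (payload shape is illustrative)
    'message': {
        'role': 'user',
        'parts': [{'type': 'text', 'text': 'What is the exchange rate between USD and GBP?'}],
    },
})
```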
--------------------------------------------------------------------------------
/google-adk/multi_tool_agent/agent.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | from zoneinfo import ZoneInfo
3 | from google.adk.agents import Agent
4 | from google.adk.tools import google_search
5 |
6 | def get_weather(city: str) -> dict:
7 | """Retrieves the current weather report for a specified city.
8 |
9 | Args:
10 | city (str): The name of the city for which to retrieve the weather report.
11 |
12 | Returns:
13 | dict: status and result or error msg.
14 | """
15 | if city.lower() == "new york":
16 | return {
17 | "status": "success",
18 | "report": (
19 | "The weather in New York is sunny with a temperature of 25 degrees"
20 | " Celsius (77 degrees Fahrenheit)."
21 | ),
22 | }
23 | else:
24 | return {
25 | "status": "error",
26 | "error_message": f"Weather information for '{city}' is not available.",
27 | }
28 |
29 |
30 | def get_current_time(city: str) -> dict:
31 | """Returns the current time in a specified city.
32 |
33 | Args:
34 | city (str): The name of the city for which to retrieve the current time.
35 |
36 | Returns:
37 | dict: status and result or error msg.
38 | """
39 |
40 | if city.lower() == "new york":
41 | tz_identifier = "America/New_York"
42 | else:
43 | return {
44 | "status": "error",
45 | "error_message": (
46 | f"Sorry, I don't have timezone information for {city}."
47 | ),
48 | }
49 |
50 | tz = ZoneInfo(tz_identifier)
51 | now = datetime.datetime.now(tz)
52 | report = (
53 | f'The current time in {city} is {now.strftime("%Y-%m-%d %H:%M:%S %Z%z")}'
54 | )
55 | return {"status": "success", "report": report}
56 |
57 |
58 | root_agent = Agent(
59 | name="weather_time_agent",
60 | model="gemini-2.5-flash",
61 | description=(
62 | "Agent to answer questions about the time and weather in a city."
63 | ),
64 | instruction=(
65 | "You are a helpful agent who can answer user questions about the time and weather in a city."
66 | ),
67 | tools=[get_weather, get_current_time],
68 | # tools=[get_weather, get_current_time, google_search], # ADK currently rejects mixing built-in tools (like google_search) with custom function tools in one agent. But WHY?
69 | )
--------------------------------------------------------------------------------
/A2A/agents/mindsdb/README.md:
--------------------------------------------------------------------------------
1 | ## MindsDB Enterprise Data Agent
2 |
3 | Powered by Gemini 2.5 Flash + MindsDB. This sample uses A2A to connect to, query, and analyze data across hundreds of federated data sources, including databases, data lakes, and SaaS applications.
4 |
5 | The agent takes natural language queries from users and translates them into appropriate SQL queries for MindsDB, handling data federation across multiple sources. It can:
6 |
7 | - Query data from various sources including databases, data lakes, and SaaS applications
8 | - Perform analytics across federated data sources
9 | - Handle natural language questions about your data
10 | - Return structured results from multiple data sources
11 |
12 |
13 | ## Prerequisites
14 |
15 | - Python 3.9 or higher
16 | - MindsDB account and API credentials (https://mdb.ai)
17 | - A MindsDB Mind (an AI model that can query data from a database); by default the sample uses the demo one: `Sales_Data_Expert_Demo_Mind`
18 |
19 | ## Environment Variables
20 |
21 | In mdb.ai, once you create a Mind (an AI model that can query data from a database), you can use it in the agent.
22 |
23 | Create a `.env` file in the project directory with the following variables:
24 |
25 | ```
26 | MINDS_API_KEY=your_mindsdb_api_key
27 | MIND_NAME=your_mindsdb_model_name
28 | ```
29 |
30 | - `MINDS_API_KEY`: Your MindsDB API key (required)
31 | - `MIND_NAME`: The name of the MindsDB Mind to use (required)
32 |
33 | ## Running the Sample
34 |
35 | 1. Navigate to the samples directory:
36 | ```bash
37 | cd samples/python/agents/mindsdb
38 | ```
39 |
40 | 2. Run the agent:
41 | ```bash
42 | uv run .
43 | ```
44 |
45 | 3. In a separate terminal, run the A2A client:
46 | ```bash
47 | # Connect to the agent (specify the agent URL with correct port)
48 | cd samples/python/hosts/cli
49 | uv run . --agent http://localhost:10006
50 |
51 | # If you changed the port when starting the agent, use that port instead
52 | # uv run . --agent http://localhost:YOUR_PORT
53 | ```
54 | 4. Ask a question to the agent about your data.
55 |
56 | ## Example Queries
57 |
58 | You can ask questions like:
59 |
60 | - "What percentage of prospects are executives?"
61 | - "What is the distribution of companies by size?"
62 |
63 | The agent will handle the complexity of joining and analyzing data across different sources.
64 |
--------------------------------------------------------------------------------
/langchain-sandbox/package-lock.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "langgraph_sandbox",
3 | "lockfileVersion": 3,
4 | "requires": true,
5 | "packages": {
6 | "": {
7 | "dependencies": {
8 | "@langchain/pyodide-sandbox": "npm:@jsr/langchain__pyodide-sandbox@^0.0.4"
9 | }
10 | },
11 | "node_modules/@jsr/std__cli": {
12 | "version": "1.0.19",
13 | "resolved": "https://npm.jsr.io/~/11/@jsr/std__cli/1.0.19.tgz",
14 | "integrity": "sha512-Dh5yzpdQVu6TAgzTjnWqD+pLgAquesmpuLHELBW8gsJKgCqy4DbiqfjU8lEk3gtImmfInaACGC1Sljt3YJJIyA=="
15 | },
16 | "node_modules/@jsr/std__path": {
17 | "version": "1.1.0",
18 | "resolved": "https://npm.jsr.io/~/11/@jsr/std__path/1.1.0.tgz",
19 | "integrity": "sha512-rnxGg/nJGfDbJO+xIJ9YEzLD7dCzjr3NHShf4dbnlt44WEYNwMjg+TcDO6F2NPHBnn/6iUFwbnNzysrZvyD1Og=="
20 | },
21 | "node_modules/@langchain/pyodide-sandbox": {
22 | "name": "@jsr/langchain__pyodide-sandbox",
23 | "version": "0.0.4",
24 | "resolved": "https://npm.jsr.io/~/11/@jsr/langchain__pyodide-sandbox/0.0.4.tgz",
25 | "integrity": "sha512-ej4YUbGmB72xW/pGBos82FZtcC3BTnTjQbGK86zmSerPJ/e53d13q4TM/3ALs1IxOwM1M87bomGaCkFgNSyO3g==",
26 | "dependencies": {
27 | "@jsr/std__cli": "^1.0.16",
28 | "@jsr/std__path": "^1.0.8",
29 | "pyodide": "^0.27.4"
30 | }
31 | },
32 | "node_modules/pyodide": {
33 | "version": "0.27.7",
34 | "resolved": "https://registry.npmjs.org/pyodide/-/pyodide-0.27.7.tgz",
35 | "integrity": "sha512-RUSVJlhQdfWfgO9hVHCiXoG+nVZQRS5D9FzgpLJ/VcgGBLSAKoPL8kTiOikxbHQm1kRISeWUBdulEgO26qpSRA==",
36 | "license": "MPL-2.0",
37 | "dependencies": {
38 | "ws": "^8.5.0"
39 | },
40 | "engines": {
41 | "node": ">=18.0.0"
42 | }
43 | },
44 | "node_modules/ws": {
45 | "version": "8.18.2",
46 | "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.2.tgz",
47 | "integrity": "sha512-DMricUmwGZUVr++AEAe2uiVM7UoO9MAVZMDu05UQOaUII0lp+zOzLLU4Xqh/JvTqklB1T4uELaaPBKyjE1r4fQ==",
48 | "license": "MIT",
49 | "engines": {
50 | "node": ">=10.0.0"
51 | },
52 | "peerDependencies": {
53 | "bufferutil": "^4.0.1",
54 | "utf-8-validate": ">=5.0.2"
55 | },
56 | "peerDependenciesMeta": {
57 | "bufferutil": {
58 | "optional": true
59 | },
60 | "utf-8-validate": {
61 | "optional": true
62 | }
63 | }
64 | }
65 | }
66 | }
67 |
--------------------------------------------------------------------------------
/langgraph-platform/agents/supervisor/subagents.py:
--------------------------------------------------------------------------------
1 | """Create all subagents using the make_graph pattern from react_agent."""
2 | from agents.supervisor.supervisor_configuration import Configuration
3 |
4 | from agents.react_agent.graph import make_graph
5 | from langchain_core.runnables import RunnableConfig
6 |
7 | # Load supervisor configuration
8 | supervisor_config = Configuration()
9 |
10 | async def create_subagents(configurable: dict | None = None):
11 | """Create all subagents using the make_graph pattern from react_agent."""
12 |
13 | # Use configurable values if provided, otherwise fall back to defaults
14 | if configurable is None:
15 | configurable = {}
16 |
17 | # Create finance research agent using make_graph
18 | finance_config = RunnableConfig(
19 | configurable={
20 | "model": configurable.get("finance_model", supervisor_config.finance_model),
21 | "system_prompt": configurable.get("finance_system_prompt", supervisor_config.finance_system_prompt),
22 | "selected_tools": configurable.get("finance_tools", supervisor_config.finance_tools),
23 | "name": "finance_research_agent"
24 | }
25 | )
26 | finance_research_agent = await make_graph(finance_config)
27 |
28 | # Create general research agent using make_graph
29 | research_config = RunnableConfig(
30 | configurable={
31 | "model": configurable.get("research_model", supervisor_config.research_model),
32 | "system_prompt": configurable.get("research_system_prompt", supervisor_config.research_system_prompt),
33 | "selected_tools": configurable.get("research_tools", supervisor_config.research_tools),
34 | "name": "general_research_agent"
35 | }
36 | )
37 | general_research_agent = await make_graph(research_config)
38 |
39 | # Create writing agent using make_graph
40 | writing_config = RunnableConfig(
41 | configurable={
42 | "model": configurable.get("writing_model", supervisor_config.writing_model),
43 | "system_prompt": configurable.get("writing_system_prompt", supervisor_config.writing_system_prompt),
44 | "selected_tools": configurable.get("writing_tools", supervisor_config.writing_tools),
45 | "name": "writing_agent"
46 | }
47 | )
48 | writing_agent = await make_graph(writing_config)
49 |
50 | return [finance_research_agent, general_research_agent, writing_agent]
51 |
52 |
53 |
54 |
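# --- Illustrative sketch (not part of the original file) ---
# One plausible way to wire these subagents into a supervisor graph, assuming
# the `langgraph-supervisor` package and LangChain's `init_chat_model` helper;
# the project's actual supervisor wiring may differ.
async def build_supervisor():
    from langchain.chat_models import init_chat_model
    from langgraph_supervisor import create_supervisor

    subagents = await create_subagents()
    workflow = create_supervisor(
        agents=subagents,
        model=init_chat_model("openai:gpt-4o-mini"),  # illustrative model choice
        prompt="Route each request to the best-suited subagent.",
    )
    return workflow.compile()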
--------------------------------------------------------------------------------
/A2A/agents/google_adk/__main__.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 |
4 | import click
5 |
6 | from agent import ReimbursementAgent
7 | from common.server import A2AServer
8 | from common.types import (
9 | AgentCapabilities,
10 | AgentCard,
11 | AgentSkill,
12 | MissingAPIKeyError,
13 | )
14 | from dotenv import load_dotenv
15 | from task_manager import AgentTaskManager
16 |
17 |
18 | load_dotenv()
19 |
20 | logging.basicConfig(level=logging.INFO)
21 | logger = logging.getLogger(__name__)
22 |
23 |
24 | @click.command()
25 | @click.option('--host', default='localhost')
26 | @click.option('--port', default=10002)
27 | def main(host, port):
28 | try:
29 | # Check for API key only if Vertex AI is not configured
30 | if os.getenv('GOOGLE_GENAI_USE_VERTEXAI') != 'TRUE':
31 | if not os.getenv('GOOGLE_API_KEY'):
32 | raise MissingAPIKeyError(
33 | 'GOOGLE_API_KEY environment variable not set and GOOGLE_GENAI_USE_VERTEXAI is not TRUE.'
34 | )
35 |
36 | capabilities = AgentCapabilities(streaming=True)
37 | skill = AgentSkill(
38 | id='process_reimbursement',
39 | name='Process Reimbursement Tool',
40 | description='Helps with the reimbursement process for users given the amount and purpose of the reimbursement.',
41 | tags=['reimbursement'],
42 | examples=[
43 | 'Can you reimburse me $20 for my lunch with the clients?'
44 | ],
45 | )
46 | agent_card = AgentCard(
47 | name='Reimbursement Agent',
48 | description='This agent handles the reimbursement process for the employees given the amount and purpose of the reimbursement.',
49 | url=f'http://{host}:{port}/',
50 | version='1.0.0',
51 | defaultInputModes=ReimbursementAgent.SUPPORTED_CONTENT_TYPES,
52 | defaultOutputModes=ReimbursementAgent.SUPPORTED_CONTENT_TYPES,
53 | capabilities=capabilities,
54 | skills=[skill],
55 | )
56 | server = A2AServer(
57 | agent_card=agent_card,
58 | task_manager=AgentTaskManager(agent=ReimbursementAgent()),
59 | host=host,
60 | port=port,
61 | )
62 | server.start()
63 | except MissingAPIKeyError as e:
64 | logger.error(f'Error: {e}')
65 | exit(1)
66 | except Exception as e:
67 | logger.error(f'An error occurred during server startup: {e}')
68 | exit(1)
69 |
70 |
71 | if __name__ == '__main__':
72 | main()
73 |
--------------------------------------------------------------------------------
/A2A/demo/ui/components/agent_list.py:
--------------------------------------------------------------------------------
1 | import mesop as me
2 | import pandas as pd
3 |
4 | from common.types import AgentCard
5 | from state.agent_state import AgentState
6 |
7 |
8 | @me.component
9 | def agents_list(
10 | agents: list[AgentCard],
11 | ):
12 | """Agents list component."""
13 | df_data = {
14 | 'Address': [],
15 | 'Name': [],
16 | 'Description': [],
17 | 'Organization': [],
18 | 'Input Modes': [],
19 | 'Output Modes': [],
20 | 'Streaming': [],
21 | }
22 | for agent_info in agents:
23 | df_data['Address'].append(agent_info.url)
24 | df_data['Name'].append(agent_info.name)
25 | df_data['Description'].append(agent_info.description)
26 | df_data['Organization'].append(
27 | agent_info.provider.organization if agent_info.provider else ''
28 | )
29 | df_data['Input Modes'].append(', '.join(agent_info.defaultInputModes))
30 | df_data['Output Modes'].append(', '.join(agent_info.defaultOutputModes))
31 | df_data['Streaming'].append(agent_info.capabilities.streaming)
32 | df = pd.DataFrame(
33 | df_data,
34 | columns=[
35 | 'Address',
36 | 'Name',
37 | 'Description',
38 | 'Organization',
39 | 'Input Modes',
40 | 'Output Modes',
41 | 'Streaming',
42 | ],
43 | )
44 | with me.box(
45 | style=me.Style(
46 | display='flex',
47 | justify_content='space-between',
48 | flex_direction='column',
49 | )
50 | ):
51 | me.table(
52 | df,
53 | header=me.TableHeader(sticky=True),
54 | columns={
55 | 'Address': me.TableColumn(sticky=True),
56 | 'Name': me.TableColumn(sticky=True),
57 | 'Description': me.TableColumn(sticky=True),
58 | },
59 | )
60 | with me.content_button(
61 | type='raised',
62 | on_click=add_agent,
63 | key='new_agent',
64 | style=me.Style(
65 | display='flex',
66 | flex_direction='row',
67 | gap=5,
68 | align_items='center',
69 | margin=me.Margin(top=10),
70 | ),
71 | ):
72 | me.icon(icon='upload')
73 |
74 |
75 | def add_agent(e: me.ClickEvent): # pylint: disable=unused-argument
76 | """Import agent button handler."""
77 | state = me.state(AgentState)
78 | state.agent_dialog_open = True
79 |
--------------------------------------------------------------------------------
/crewai/helloworld/README.md:
--------------------------------------------------------------------------------
1 | # Helloworld Crew
2 |
3 | Welcome to the Helloworld Crew project, powered by [crewAI](https://crewai.com). This template is designed to help you set up a multi-agent AI system with ease, leveraging the powerful and flexible framework provided by crewAI. Our goal is to enable your agents to collaborate effectively on complex tasks, maximizing their collective intelligence and capabilities.
4 |
5 | ## Installation
6 |
7 | Ensure you have Python >=3.10 <3.13 installed on your system. This project uses [UV](https://docs.astral.sh/uv/) for dependency management and package handling, offering a seamless setup and execution experience.
8 |
9 | First, if you haven't already, install uv:
10 |
11 | ```bash
12 | pip install uv
13 | ```
14 |
15 | Next, navigate to your project directory and install the dependencies:
16 |
17 | (Optional) Lock the dependencies and install them by using the CLI command:
18 | ```bash
19 | crewai install
20 | ```
21 | ### Customizing
22 |
23 | **Add your `OPENAI_API_KEY` into the `.env` file**
24 |
25 | - Modify `src/helloworld/config/agents.yaml` to define your agents
26 | - Modify `src/helloworld/config/tasks.yaml` to define your tasks
27 | - Modify `src/helloworld/crew.py` to add your own logic, tools and specific args (see the tool sketch after this list)
28 | - Modify `src/helloworld/main.py` to add custom inputs for your agents and tasks
29 |
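As a reference for the `crew.py` step above, a custom tool (like the stub in `src/helloworld/tools/custom_tool.py`) generally follows this shape. This is a minimal sketch assuming CrewAI's `BaseTool` interface; attach an instance to an agent via `tools=[MyCustomTool()]`:

```python
from typing import Type

from crewai.tools import BaseTool
from pydantic import BaseModel, Field


class MyToolInput(BaseModel):
    """Input schema for MyCustomTool."""
    argument: str = Field(..., description="Description of the argument.")


class MyCustomTool(BaseTool):
    name: str = "My custom tool"
    description: str = "What this tool does; agents read this to decide when to call it."
    args_schema: Type[BaseModel] = MyToolInput

    def _run(self, argument: str) -> str:
        # Replace with real tool logic
        return f"Processed {argument}"
```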
30 | ## Running the Project
31 |
32 | To kickstart your crew of AI agents and begin task execution, run this from the root folder of your project:
33 |
34 | ```bash
35 | $ crewai run
36 | ```
37 |
38 | This command initializes the helloworld Crew, assembling the agents and assigning them tasks as defined in your configuration.
39 |
40 | This example, unmodified, will create a `report.md` file in the root folder with the output of research on LLMs.
41 |
42 | ## Understanding Your Crew
43 |
44 | The helloworld Crew is composed of multiple AI agents, each with unique roles, goals, and tools. These agents collaborate on a series of tasks, defined in `config/tasks.yaml`, leveraging their collective skills to achieve complex objectives. The `config/agents.yaml` file outlines the capabilities and configurations of each agent in your crew.
45 |
46 | ## Support
47 |
48 | For support, questions, or feedback regarding the Helloworld Crew or crewAI:
49 | - Visit our [documentation](https://docs.crewai.com)
50 | - Reach out to us through our [GitHub repository](https://github.com/joaomdmoura/crewai)
51 | - [Join our Discord](https://discord.com/invite/X4JWnZnxPb)
52 | - [Chat with our docs](https://chatg.pt/DWjSBZn)
53 |
54 | Let's create wonders together with the power and simplicity of crewAI.
55 |
--------------------------------------------------------------------------------
/langchain-mcp-adapters/servers/versatile_server.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from fastmcp import Client
3 | from fastmcp import FastMCP
4 | from pathlib import Path
5 |
6 | # create an MCP server object named VersatileServer
7 | mcp = FastMCP(name='VersatileServer')
8 |
9 |
10 | # define the tool
11 | @mcp.tool()
12 | def hello_world(name: str) -> str:
13 | return f"Hello World. This is {name} 👋"
14 |
15 |
16 | # create prompt
17 | @mcp.prompt
18 | def explain_topic(topic: str) -> str:
19 | """Generates a query prompt for explanation of topic"""
20 | return f"Can you explain {topic} in a beginner friendly manner with simple wordings and no technical jargon. Include Concept & Examples."
21 |
22 |
23 | # basic resource
24 | @mcp.resource("resource://greeting")
25 | def greet() -> str:
26 | """Simple greet"""
27 | return "Hey This Is Zu👋"
28 |
29 |
30 | # Image resource with URL - protocol://host//path
31 | @mcp.resource("images://zu.jpeg", mime_type="image/jpeg") # defined uri -> returns in json output for resource calls
32 | def fetch_image_bytes() -> bytes:
33 | """Returns Zu's profile photo"""
34 | file_path = Path("resources/zu.jpeg").resolve() # resolved relative to the current working directory, so run from this script's folder
35 |
36 | if not file_path.exists():
37 | raise FileNotFoundError(f"Image file not found: {file_path}")
38 |
39 | return file_path.read_bytes()
40 |
41 |
42 | async def self_test():
43 | # run the MCP client in an asynchronous context
44 | async with Client(mcp) as client:
45 | # fetch all tools
46 | tools = await client.list_tools()
47 | print("Available tools:", [t.name for t in tools])
48 |
49 | # fetch all prompts
50 | prompts = await client.list_prompts()
51 | print("Available prompts:", [p.name for p in prompts])
52 |
53 | # fetch all resources
54 | resources = await client.list_resources()
55 | print("Available resources:", [r.uri for r in resources])
56 |
57 | # Provide the topic to explain_topic for testing and check results
58 | result = await client.get_prompt("explain_topic", {"topic": "machine learning"}) # change topic
59 | # add more prompts here for testing multiple prompts
60 |
61 | print("Generated prompt:", result.messages[0].content.text)
62 |
63 |
64 | if __name__ == "__main__":
65 | # `fastmcp run versatile_server.py` to start the server
66 | # `fastmcp dev versatile_server.py` to debug the server
67 | # mcp.run(transport="stdio")
68 |
69 | # `uv run versatile_server.py` to test the server
70 | # `python versatile_server.py` to test the server
71 | asyncio.run(self_test())
72 |
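# --- Illustrative sketch (not part of the original file) ---
# The self-test lists tools, prompts, and resources but never invokes the tool.
# Calling it looks roughly like this, assuming fastmcp's `Client.call_tool`:
async def call_hello_world():
    async with Client(mcp) as client:
        result = await client.call_tool("hello_world", {"name": "Zu"})
        print(result)  # textual result; the exact return type depends on the fastmcp version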
--------------------------------------------------------------------------------
/langchain/README.md:
--------------------------------------------------------------------------------
1 | # LangChain Hello World Example
2 |
3 | A minimal example demonstrating how to use LangChain to create a simple conversational agent. This example shows the basic concepts of LangChain including creating a conversational agent, using tools, handling conversations, and basic error handling.
4 |
5 | > Note: LangChain agents will continue to be supported, but it is recommended for new use cases to be built with LangGraph. LangGraph offers a more flexible and full-featured framework for building agents, including support for tool-calling, persistence of state, and human-in-the-loop workflows.
6 |
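Under the hood, `helloworld.py` follows the classic `initialize_agent` pattern. Here is a minimal sketch of that pattern; the tool, model, and memory choices below are illustrative and the shipped file may differ:

```python
from langchain.agents import AgentType, initialize_agent
from langchain.memory import ConversationBufferMemory
from langchain_community.tools import DuckDuckGoSearchRun
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini", temperature=0.7)
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

agent = initialize_agent(
    [DuckDuckGoSearchRun()],  # a web search tool
    llm,
    agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
    memory=memory,
    verbose=True,
)

print(agent.run(input="Hello"))
```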
7 | ## ⚡ Quick Start (5-Minute Setup)
8 |
9 | 1. Install dependencies:
10 | ```shell
11 | pip install -r requirements.txt
12 | ```
13 |
14 | 2. Set up environment:
15 | ```bash
16 | cp .env.example .env
17 | ```
18 |
19 | Edit `.env` with your settings:
20 | ```ini
21 | # LLM Model configurations
22 | DEFAULT_MODEL=gpt-4o-mini
23 | DEFAULT_TEMPERATURE=0.7
24 |
25 | # OpenAI API configurations
26 | OPENAI_API_KEY=your-api-key-here
27 | OPENAI_API_BASE=https://api.openai.com/v1 # Optional: custom API endpoint (e.g. for API proxies)
28 | ```
29 |
30 | 3. Run the example:
31 | ```shell
32 | python helloworld.py
33 | ```
34 |
35 | 4. Expected output:
36 | ````
37 | awesome-agent-quickstart/langchain/helloworld.py:66: LangChainDeprecationWarning: LangChain agents will continue to be supported, but it is recommended for new use cases to be built with LangGraph. LangGraph offers a more flexible and full-featured framework for building agents, including support for tool-calling, persistence of state, and human-in-the-loop workflows. For details, refer to the `LangGraph documentation `_ as well as guides for `Migrating from AgentExecutor `_ and LangGraph's `Pre-built ReAct agent `_.
38 | agent = initialize_agent(
39 | Agent initialized! Let's have a conversation.
40 | You can ask questions, and the agent will use web search if needed.
41 | Type 'exit' to end the conversation.
42 |
43 | You: Hello
44 | awesome-agent-quickstart/langchain/helloworld.py:100: LangChainDeprecationWarning: The method `Chain.run` was deprecated in langchain 0.1.0 and will be removed in 1.0. Use :meth:`~invoke` instead.
45 | response = agent.run(input=user_input)
46 |
47 |
48 | > Entering new AgentExecutor chain...
49 | ```json
50 | {
51 | "action": "Final Answer",
52 | "action_input": "Hello! How can I help you?"
53 | }
54 | ```
55 |
56 | > Finished chain.
57 |
58 | Assistant: Hello! How can I help you?
59 | ```
--------------------------------------------------------------------------------
/A2A/demo/ui/components/conversation_list.py:
--------------------------------------------------------------------------------
1 | import mesop as me
2 | import pandas as pd
3 |
4 | from state.host_agent_service import CreateConversation
5 | from state.state import AppState, StateConversation
6 |
7 |
8 | @me.component
9 | def conversation_list(conversations: list[StateConversation]):
10 | """Conversation list component"""
11 | df_data = {'ID': [], 'Name': [], 'Status': [], 'Messages': []}
12 | for conversation in conversations:
13 | df_data['ID'].append(conversation.conversation_id)
14 | df_data['Name'].append(conversation.conversation_name)
15 | df_data['Status'].append('Open' if conversation.is_active else 'Closed')
16 | df_data['Messages'].append(len(conversation.message_ids))
17 | df = pd.DataFrame(
18 | df_data, columns=['ID', 'Name', 'Status', 'Messages']
19 | )
20 | with me.box(
21 | style=me.Style(
22 | display='flex',
23 | justify_content='space-between',
24 | flex_direction='column',
25 | )
26 | ):
27 | me.table(
28 | df,
29 | on_click=on_click,
30 | header=me.TableHeader(sticky=True),
31 | columns={
32 | 'ID': me.TableColumn(sticky=True),
33 | 'Name': me.TableColumn(sticky=True),
34 | 'Status': me.TableColumn(sticky=True),
35 | 'Messages': me.TableColumn(sticky=True),
36 | },
37 | )
38 | with me.content_button(
39 | type='raised',
40 | on_click=add_conversation,
41 | key='new_conversation',
42 | style=me.Style(
43 | display='flex',
44 | flex_direction='row',
45 | gap=5,
46 | align_items='center',
47 | margin=me.Margin(top=10),
48 | ),
49 | ):
50 | me.icon(icon='add')
51 |
52 |
53 | async def add_conversation(e: me.ClickEvent): # pylint: disable=unused-argument
54 | """Add conversation button handler"""
55 | response = await CreateConversation()
56 | me.state(AppState).messages = []
57 | me.navigate(
58 | '/conversation',
59 | query_params={'conversation_id': response.conversation_id},
60 | )
61 | yield
62 |
63 |
64 | def on_click(e: me.TableClickEvent):
65 | state = me.state(AppState)
66 | conversation = state.conversations[e.row_index]
67 | state.current_conversation_id = conversation.conversation_id
68 | me.query_params.update({'conversation_id': conversation.conversation_id})
69 | me.navigate('/conversation', query_params=me.query_params)
70 | yield
71 |
--------------------------------------------------------------------------------
/smolagents/any_llm.py:
--------------------------------------------------------------------------------
1 | """
2 | This example demonstrates different ways to initialize models for your agents
3 | Choose one of the following inference types by setting chosen_inference:
4 | - "hf_api": Use Hugging Face Inference API (requires API token)
5 | - "transformers": Run models locally using transformers library
6 | - "ollama": Use local Ollama server
7 | - "litellm": Use LiteLLM to access various model providers
8 | """
9 |
10 | from typing import Optional
11 |
12 | from smolagents import HfApiModel, LiteLLMModel, TransformersModel, tool
13 | from smolagents.agents import CodeAgent, ToolCallingAgent
14 |
15 | available_inferences = ["hf_api", "transformers", "ollama", "litellm"]
16 | chosen_inference = "transformers"
17 |
18 | print(f"Chose model: '{chosen_inference}'")
19 |
20 | if chosen_inference == "hf_api":
21 | model = HfApiModel(model_id="meta-llama/Llama-3.3-70B-Instruct")
22 |
23 | elif chosen_inference == "transformers":
24 | # Use a small but capable local model
25 | model = TransformersModel(model_id="HuggingFaceTB/SmolLM2-1.7B-Instruct", device_map="auto", max_new_tokens=1000)
26 |
27 | elif chosen_inference == "ollama":
28 | model = LiteLLMModel(
29 | model_id="ollama_chat/llama3.2",
30 | api_base="http://localhost:11434", # Change if using remote server
31 | api_key="your-api-key", # Add your API key if required
32 | num_ctx=8192, # Increased context window for better performance
33 | )
34 |
35 | elif chosen_inference == "litellm":
36 | # LiteLLM supports many providers including OpenAI, Anthropic, etc.
37 | # Example: For Claude 3, use model_id='anthropic/claude-3-5-sonnet-latest'
38 | model = LiteLLMModel(model_id="gpt-4")
39 | else:
40 | raise ValueError(f"Unknown inference '{chosen_inference}'; choose one of {available_inferences}")
41 | # Example tool that agents can use
42 | @tool
43 | def get_weather(location: str, celsius: Optional[bool] = False) -> str:
44 | """
45 | A simple mock weather tool to demonstrate tool usage.
46 | Returns the same response regardless of input (for demo purposes).
47 |
48 | Args:
49 | location: Location to get weather for
50 | celsius: Whether to return temperature in Celsius (not implemented in this demo)
51 | """
52 | return "The weather is UNGODLY with torrential rains and temperatures below -10°C"
53 |
54 |
55 | # Demo using ToolCallingAgent - uses a structured approach to call tools
56 | agent = ToolCallingAgent(tools=[get_weather], model=model)
57 | print("ToolCallingAgent:", agent.run("What's the weather like in Paris?"))
58 |
59 | # Demo using CodeAgent - writes and executes Python code to solve tasks
60 | agent = CodeAgent(tools=[get_weather], model=model)
61 | print("CodeAgent:", agent.run("What's the weather like in Paris?"))
62 |
--------------------------------------------------------------------------------
/A2A/README.md:
--------------------------------------------------------------------------------
1 | # Google Agent2Agent Protocol
2 |
3 | ## Quickstart
4 |
5 | 1. Setup env for agents:
6 |
7 | > Get GOOGLE_API_KEY: https://aistudio.google.com/apikey
8 |
9 | - Crewai agent:
10 |
11 | ```bash
12 | cd agents/crewai
13 | echo "GOOGLE_API_KEY=your_api_key_here" > .env
14 | ```
15 |
16 | - Langgraph agent:
17 |
18 | ```bash
19 | cd agents/langgraph
20 | echo "GOOGLE_API_KEY=your_api_key_here" > .env
21 | ```
22 |
23 | 2. Start the agent servers:
24 |
25 | - Crewai agent:
26 | ```bash
27 | cd agents/crewai
28 | uv python pin 3.12
29 | uv venv
30 | source .venv/bin/activate
31 | uv run .
32 | INFO:__main__:Starting server on localhost:10001
33 | INFO: Started server process [53304]
34 | INFO: Waiting for application startup.
35 | INFO: Application startup complete.
36 | INFO: Uvicorn running on http://localhost:10001 (Press CTRL+C to quit)
37 | ```
38 |
39 | - Langgraph agent:
40 | ```bash
41 | cd agents/langgraph
42 | uv run .
43 | INFO:__main__:Starting server on localhost:10000
44 | INFO: Started server process [42606]
45 | INFO: Waiting for application startup.
46 | INFO: Application startup complete.
47 | INFO: Uvicorn running on http://localhost:10000 (Press CTRL+C to quit)
48 | ```
49 |
50 | 3. Start the GUI client:
51 | ```bash
52 | cd demo/ui
53 | echo "GOOGLE_API_KEY=your_api_key_here" >> .env
54 | # Otherwise, you will have to enter it directly in the UI every time
55 | uv run main.py
56 | INFO: Uvicorn running on http://0.0.0.0:12000 (Press CTRL+C to quit)
57 | WARNING: --reload-include and --reload-exclude have no effect unless watchfiles is installed.
58 | INFO: Started reloader process [42916] using StatReload
59 | INFO: Started server process [43176]
60 | INFO: Waiting for application startup.
61 | INFO: Application startup complete.
62 | INFO: 127.0.0.1:53827 - "GET / HTTP/1.1" 200 OK
63 | INFO: 127.0.0.1:53827 - "GET /zone.js/bundles/zone.umd.js HTTP/1.1" 200 OK
64 | ```
65 |
66 | 4. Open Chrome and navigate to:
67 | ```
68 | http://localhost:12000/
69 | ```
70 |
71 | 5. Click the _Remote Agents_ tab and enter the running agent's address (the placeholder text is only a hint; you must type the address yourself):
72 |
73 | ```
74 | localhost:10001
75 | ```
76 | Then click _Read_; the agent card (`agent.json`) will be fetched and displayed. Click _Save_ to register the agent.
77 |
78 | 6. Click _Home_, add a new conversation, then input:
79 | ```
80 | What remote agents do you have access to?
81 | ```
82 | The connected agents will be listed:
83 | ```
84 | I have access to the Currency Agent and the Image Generator Agent.
85 | ```
86 |
87 |
88 | ## References
89 | - Github Repository: https://github.com/google/A2A/
90 | - Official Website: https://google.github.io/A2A/
--------------------------------------------------------------------------------
/google-adk/README.md:
--------------------------------------------------------------------------------
1 | # Agent Development Kit
2 |
3 | Agent Development Kit (ADK) is a flexible and modular framework for developing and deploying AI agents. While optimized for Gemini and the Google ecosystem, ADK is model-agnostic, deployment-agnostic, and is built for compatibility with other frameworks. ADK was designed to make agent development feel more like software development, to make it easier for developers to create, deploy, and orchestrate agentic architectures that range from simple tasks to complex workflows.
4 |
5 | ## ✨ Key Features
6 | - **Rich Tool Ecosystem**: Utilize pre-built tools, custom functions, OpenAPI specs, or integrate existing tools to give agents diverse capabilities, all for tight integration with the Google ecosystem.
7 |
8 | - **Code-First Development**: Define agent logic, tools, and orchestration directly in Python for ultimate flexibility, testability, and versioning.
9 |
10 | - **Modular Multi-Agent Systems**: Design scalable applications by composing multiple specialized agents into flexible hierarchies.
11 |
12 | - **Deploy Anywhere**: Easily containerize and deploy agents on Cloud Run or scale seamlessly with Vertex AI Agent Engine.
13 |
14 | ## Quickstart
15 |
16 | 1. Copy `.env.example` to `.env`, and set up your config:
17 |
18 | ```shell
19 | GOOGLE_GENAI_USE_VERTEXAI=FALSE
20 | GOOGLE_API_KEY=PASTE_YOUR_ACTUAL_API_KEY_HERE
21 | ```
22 |
23 | 2. Install the dependencies:
24 |
25 | ```shell
26 | pip install -r requirements.txt
27 | ```
28 |
29 | 3. Run the code:
30 | ```shell
31 | adk web
32 | ```
33 |
34 | Example output:
35 |
36 | ```shell
37 |
38 | 22:41:00 with symbolk in awesome-agent-quickstart/google-adk on main [!?] using ☁️ default/veo-test-461409 via 🅒 agent took 1m 36.4s
39 | ➜ adk web
40 | INFO: Started server process [37497]
41 | INFO: Waiting for application startup.
42 |
43 | +-----------------------------------------------------------------------------+
44 | | ADK Web Server started |
45 | | |
46 | | For local testing, access at http://localhost:8000. |
47 | +-----------------------------------------------------------------------------+
48 |
49 | INFO: Application startup complete.
50 | INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)
51 | ```
52 |
53 | 4. Open the browser and go to `http://localhost:8000` (the port shown in the startup output) to see the development UI, select the agent, and talk to it:
54 | ```shell
55 | What's the time and weather in New York?
56 | ```
57 |
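If you prefer the terminal over the web UI, ADK also ships a CLI runner (assuming the standard `adk` CLI; check `adk --help` on your installed version):

```shell
adk run multi_tool_agent
```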
58 |
59 | # References
60 |
61 | - [ADK Doc](https://google.github.io/adk-docs/)
62 | - [GitHub Repository](https://github.com/google/adk-python)
--------------------------------------------------------------------------------
/A2A/agents/ag2/__main__.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 |
4 | import click
5 |
6 | from agents.ag2.agent import YoutubeMCPAgent
7 | from agents.ag2.task_manager import AgentTaskManager
8 | from common.server import A2AServer
9 | from common.types import (
10 | AgentCapabilities,
11 | AgentCard,
12 | AgentSkill,
13 | MissingAPIKeyError,
14 | )
15 | from dotenv import load_dotenv
16 |
17 |
18 | load_dotenv()
19 |
20 | logging.basicConfig(level=logging.INFO)
21 | logger = logging.getLogger(__name__)
22 |
23 |
24 | @click.command()
25 | @click.option('--host', 'host', default='localhost')
26 | @click.option('--port', 'port', default=10003)
27 | def main(host, port):
28 | """Starts the AG2 MCP Agent server."""
29 | try:
30 | if not os.getenv('OPENAI_API_KEY'):
31 | raise MissingAPIKeyError(
32 | 'OPENAI_API_KEY environment variable not set.'
33 | )
34 |
35 | capabilities = AgentCapabilities(streaming=True)
36 | skills = [
37 | AgentSkill(
38 | id='download_closed_captions',
39 | name='Download YouTube Closed Captions',
40 | description='Retrieve closed captions/transcripts from YouTube videos',
41 | tags=['youtube', 'captions', 'transcription', 'video'],
42 | examples=[
43 | 'Extract the transcript from this YouTube video: https://www.youtube.com/watch?v=dQw4w9WgXcQ',
44 | 'Download the captions for this YouTube tutorial',
45 | ],
46 | )
47 | ]
48 |
49 | agent_card = AgentCard(
50 | name='YouTube Captions Agent',
51 | description='AI agent that can extract closed captions and transcripts from YouTube videos. This agent provides raw transcription data that can be used for further processing.',
52 | url=f'http://{host}:{port}/',
53 | version='1.0.0',
54 | defaultInputModes=YoutubeMCPAgent.SUPPORTED_CONTENT_TYPES,
55 | defaultOutputModes=YoutubeMCPAgent.SUPPORTED_CONTENT_TYPES,
56 | capabilities=capabilities,
57 | skills=skills,
58 | )
59 |
60 | server = A2AServer(
61 | agent_card=agent_card,
62 | task_manager=AgentTaskManager(agent=YoutubeMCPAgent()),
63 | host=host,
64 | port=port,
65 | )
66 |
67 | logger.info(f'Starting AG2 Youtube MCP agent on {host}:{port}')
68 | server.start()
69 | except MissingAPIKeyError as e:
70 | logger.error(f'Error: {e}')
71 | exit(1)
72 | except Exception as e:
73 | logger.error(f'An error occurred during server startup: {e}')
74 | exit(1)
75 |
76 |
77 | if __name__ == '__main__':
78 | main()
79 |
--------------------------------------------------------------------------------
/autogen/README.md:
--------------------------------------------------------------------------------
1 | # AutoGen Examples
2 |
3 | Welcome to the AutoGen examples! This directory contains ready-to-run examples demonstrating how to use Microsoft AutoGen for building powerful multi-agent systems.
4 |
5 | ## ⚡ Quick Start (5-Minute Setup)
6 |
7 | Let's create your first AutoGen multi-agent system! We'll start with a simple example and then explore more advanced scenarios.
8 |
9 | 1. Install dependencies:
10 | ```bash
11 | cd autogen
12 | pip install -r requirements.txt
13 | ```
14 |
15 | 2. Set up environment:
16 | ```bash
17 | cp env.py.example env.py
18 | ```
19 |
20 | Edit `env.py` with your settings:
21 | ```python
22 | # OpenAI API configurations
23 | OPENAI_API_KEY = "your-api-key-here"
24 | OPENAI_API_BASE = "https://api.openai.com/v1"  # Optional: for API proxies
25 | ```
26 |
27 | 3. Run your first multi-agent conversation:
28 | ```bash
29 | python helloworld.py
30 | ```
31 |
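Before opening `helloworld.py`, here is roughly what a basic two-agent conversation looks like in the AutoGen 0.4 AgentChat style. Treat it as a sketch: the agent names, prompts, and model are illustrative and the shipped example may differ.

```python
import asyncio

from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.conditions import TextMentionTermination
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_ext.models.openai import OpenAIChatCompletionClient


async def main():
    model_client = OpenAIChatCompletionClient(model="gpt-4o-mini")
    writer = AssistantAgent(
        "writer", model_client=model_client,
        system_message="Draft short answers.",
    )
    critic = AssistantAgent(
        "critic", model_client=model_client,
        system_message="Review the draft and reply APPROVE when satisfied.",
    )
    # The agents take turns until the critic says APPROVE.
    team = RoundRobinGroupChat(
        [writer, critic],
        termination_condition=TextMentionTermination("APPROVE"),
    )
    result = await team.run(task="Write a one-line greeting.")
    print(result.messages[-1].content)


asyncio.run(main())
```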
32 | ## 🚀 Available Examples
33 |
34 | 1. `helloworld.py` - Basic two-agent conversation
35 | - Learn the fundamentals of AutoGen
36 | - See how agents interact with each other
37 | - Understand basic agent configuration
38 |
39 | 2. `team.py` - Multi-agent team collaboration
40 | - Create a team of specialized agents
41 | - Implement group chat and task delegation
42 | - Handle complex problem-solving scenarios
43 |
44 | ## 💡 Key Features
45 |
46 | - **Multi-Agent System**: Create multiple agents with different roles
47 | - **Agent Communication**: Enable natural conversations between agents
48 | - **Web Capabilities**: Agents can browse and analyze web content
49 | - **Flexible Configuration**: Easy to customize agent behaviors
50 | - **Error Recovery**: Built-in error handling and conversation recovery
51 |
52 | ## 🤝 Next Steps
53 | - Explore the examples in order of complexity
54 | - Read the comments in each example for detailed explanations
55 | - Try modifying the examples to understand the concepts better
56 | - Check out the [AutoGen documentation](https://microsoft.github.io/autogen/stable/reference/index.html) for more details
57 |
58 | ## 📚 Additional Resources
59 |
60 | - [AutoGen GitHub Repository](https://github.com/microsoft/autogen)
61 | - [AutoGen Official Documentation](https://microsoft.github.io/autogen/stable/reference/index.html)
62 |
63 | ## 🛠️ Troubleshooting
64 |
65 | 1. If you see API key errors:
66 | - Check if your API key is correctly set in `env.py`
67 | - Verify your API key has sufficient credits
68 |
69 | 2. If web browsing fails:
70 | - Ensure you installed with `pip install 'autogen-ext[web-surfer]'`
71 | - Check your internet connection
72 | - Verify the website is accessible
73 |
74 | 3. For model-specific errors:
75 | - Try using a different model from your config list
76 | - Check if your API key has access to the requested model
77 |
--------------------------------------------------------------------------------
/A2A/agents/semantickernel/__main__.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | import click
4 |
5 | from agents.semantickernel.task_manager import TaskManager
6 | from common.server import A2AServer
7 | from common.types import AgentCapabilities, AgentCard, AgentSkill
8 | from common.utils.push_notification_auth import PushNotificationSenderAuth
9 | from dotenv import load_dotenv
10 |
11 |
12 | logging.basicConfig(level=logging.INFO)
13 | logger = logging.getLogger(__name__)
14 |
15 | load_dotenv()
16 |
17 |
18 | @click.command()
19 | @click.option('--host', default='localhost')
20 | @click.option('--port', default=10020)
21 | def main(host, port):
22 | """Starts the Semantic Kernel Agent server using A2A."""
23 | # Build the agent card
24 | capabilities = AgentCapabilities(streaming=True, pushNotifications=True)
25 | skill_trip_planning = AgentSkill(
26 | id='trip_planning_sk',
27 | name='Semantic Kernel Trip Planning',
28 | description=(
29 | 'Handles comprehensive trip planning, including currency exchanges, itinerary creation, sightseeing, '
30 | 'dining recommendations, and event bookings using Frankfurter API for currency conversions.'
31 | ),
32 | tags=['trip', 'planning', 'travel', 'currency', 'semantic-kernel'],
33 | examples=[
34 | 'Plan a budget-friendly day trip to Seoul including currency exchange.',
35 | "What's the exchange rate and recommended itinerary for visiting Tokyo?",
36 | ],
37 | )
38 |
39 | agent_card = AgentCard(
40 | name='SK Travel Agent',
41 | description=(
42 | 'Semantic Kernel-based travel agent providing comprehensive trip planning services '
43 | 'including currency exchange and personalized activity planning.'
44 | ),
45 | url=f'http://{host}:{port}/',
46 | version='1.0.0',
47 | defaultInputModes=['text'],
48 | defaultOutputModes=['text'],
49 | capabilities=capabilities,
50 | skills=[skill_trip_planning],
51 | )
52 |
53 | # Prepare push notification system
54 | notification_sender_auth = PushNotificationSenderAuth()
55 | notification_sender_auth.generate_jwk()
56 |
57 | # Create the server
58 | task_manager = TaskManager(
59 | notification_sender_auth=notification_sender_auth
60 | )
61 | server = A2AServer(
62 | agent_card=agent_card, task_manager=task_manager, host=host, port=port
63 | )
64 | server.app.add_route(
65 | '/.well-known/jwks.json',
66 | notification_sender_auth.handle_jwks_endpoint,
67 | methods=['GET'],
68 | )
69 |
70 | logger.info(f'Starting the Semantic Kernel agent server on {host}:{port}')
71 | server.start()
72 |
73 |
74 | if __name__ == '__main__':
75 | main()
76 |
--------------------------------------------------------------------------------
/langgraphjs/.gitignore:
--------------------------------------------------------------------------------
1 | # Logs
2 | logs
3 | *.log
4 | npm-debug.log*
5 | yarn-debug.log*
6 | yarn-error.log*
7 | lerna-debug.log*
8 | .pnpm-debug.log*
9 |
10 | # Diagnostic reports (https://nodejs.org/api/report.html)
11 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
12 |
13 | # Runtime data
14 | pids
15 | *.pid
16 | *.seed
17 | *.pid.lock
18 |
19 | # Directory for instrumented libs generated by jscoverage/JSCover
20 | lib-cov
21 |
22 | # Coverage directory used by tools like istanbul
23 | coverage
24 | *.lcov
25 |
26 | # nyc test coverage
27 | .nyc_output
28 |
29 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
30 | .grunt
31 |
32 | # Bower dependency directory (https://bower.io/)
33 | bower_components
34 |
35 | # node-waf configuration
36 | .lock-wscript
37 |
38 | # Compiled binary addons (https://nodejs.org/api/addons.html)
39 | build/Release
40 |
41 | # Dependency directories
42 | node_modules/
43 | jspm_packages/
44 |
45 | # Snowpack dependency directory (https://snowpack.dev/)
46 | web_modules/
47 |
48 | # TypeScript cache
49 | *.tsbuildinfo
50 |
51 | # Optional npm cache directory
52 | .npm
53 |
54 | # Optional eslint cache
55 | .eslintcache
56 |
57 | # Optional stylelint cache
58 | .stylelintcache
59 |
60 | # Microbundle cache
61 | .rpt2_cache/
62 | .rts2_cache_cjs/
63 | .rts2_cache_es/
64 | .rts2_cache_umd/
65 |
66 | # Optional REPL history
67 | .node_repl_history
68 |
69 | # Output of 'npm pack'
70 | *.tgz
71 |
72 | # Yarn Integrity file
73 | .yarn-integrity
74 |
75 | # dotenv environment variable files
76 | .env
77 | .env.development.local
78 | .env.test.local
79 | .env.production.local
80 | .env.local
81 |
82 | # parcel-bundler cache (https://parceljs.org/)
83 | .cache
84 | .parcel-cache
85 |
86 | # Next.js build output
87 | .next
88 | out
89 |
90 | # Nuxt.js build / generate output
91 | .nuxt
92 | dist
93 |
94 | # Gatsby files
95 | .cache/
96 | # Comment in the public line in if your project uses Gatsby and not Next.js
97 | # https://nextjs.org/blog/next-9-1#public-directory-support
98 | # public
99 |
100 | # vuepress build output
101 | .vuepress/dist
102 |
103 | # vuepress v2.x temp and cache directory
104 | .temp
105 | .cache
106 |
107 | # vitepress build output
108 | **/.vitepress/dist
109 |
110 | # vitepress cache directory
111 | **/.vitepress/cache
112 |
113 | # Docusaurus cache and generated files
114 | .docusaurus
115 |
116 | # Serverless directories
117 | .serverless/
118 |
119 | # FuseBox cache
120 | .fusebox/
121 |
122 | # DynamoDB Local files
123 | .dynamodb/
124 |
125 | # TernJS port file
126 | .tern-port
127 |
128 | # Stores VSCode versions used for testing VSCode extensions
129 | .vscode-test
130 |
131 | # yarn v2
132 | .yarn/cache
133 | .yarn/unplugged
134 | .yarn/build-state.yml
135 | .yarn/install-state.gz
136 | .pnp.*
--------------------------------------------------------------------------------
/model_context_protocol/READIT.md:
--------------------------------------------------------------------------------
1 | # Model Context Protocol
2 |
3 | A protocol for seamless integration between LLM applications and external data sources
4 |
5 | [Documentation](https://modelcontextprotocol.io) | [Specification](https://spec.modelcontextprotocol.io) | [Discussions](https://github.com/orgs/modelcontextprotocol/discussions)
6 |
17 | The Model Context Protocol (MCP) is an open protocol that enables seamless integration between LLM applications and external data sources and tools. Whether you're building an AI-powered IDE, enhancing a chat interface, or creating custom AI workflows, MCP provides a standardized way to connect LLMs with the context they need.
18 |
19 | ## Getting Started
20 |
21 | - 📚 Read the [Documentation](https://modelcontextprotocol.io) for guides and tutorials
22 | - 🔍 Review the [Specification](https://spec.modelcontextprotocol.io) for protocol details
23 | - 💻 Use our SDKs to start building:
24 | - [TypeScript SDK](https://github.com/modelcontextprotocol/typescript-sdk)
25 | - [Python SDK](https://github.com/modelcontextprotocol/python-sdk)
26 | - [Kotlin SDK](https://github.com/modelcontextprotocol/kotlin-sdk)
27 |
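For a taste of the Python SDK, a minimal server exposing a single tool looks roughly like this (a sketch based on the python-sdk's `FastMCP` helper; see that repo's README for the current API):

```python
from mcp.server.fastmcp import FastMCP

# A named MCP server
mcp = FastMCP("Demo")

@mcp.tool()
def add(a: int, b: int) -> int:
    """Add two numbers."""
    return a + b

if __name__ == "__main__":
    mcp.run()  # stdio transport by default
```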
28 | ## Project Structure
29 |
30 | - [specification](https://github.com/modelcontextprotocol/specification) - Protocol specification and documentation
31 | - [typescript-sdk](https://github.com/modelcontextprotocol/typescript-sdk) - TypeScript implementation
32 | - [python-sdk](https://github.com/modelcontextprotocol/python-sdk) - Python implementation
33 | - [kotlin-sdk](https://github.com/modelcontextprotocol/kotlin-sdk) - Kotlin implementation
34 | - [docs](https://github.com/modelcontextprotocol/docs) - User documentation and guides
35 | - [create-python-server](https://github.com/modelcontextprotocol/create-python-server) - Python server template
36 | - [create-typescript-server](https://github.com/modelcontextprotocol/create-typescript-server) - TypeScript server template
37 | - [create-kotlin-server](https://github.com/modelcontextprotocol/kotlin-sdk/tree/main/samples/kotlin-mcp-server) - Kotlin sample server
38 | - [servers](https://github.com/modelcontextprotocol/servers) - List of maintained servers
39 |
40 | ## Contributing
41 |
42 | We welcome contributions of all kinds! Whether you want to fix bugs, improve documentation, or propose new features, please see our [contributing guide](CONTRIBUTING.md) to get started.
43 |
44 | Have questions? Join the discussion in our [community forum](https://github.com/orgs/modelcontextprotocol/discussions).
45 |
46 | ## About
47 |
48 | The Model Context Protocol is an open source project run by [Anthropic, PBC.](https://anthropic.com) and open to contributions from the entire community.
--------------------------------------------------------------------------------
/langchain-mcp-adapters/clients/langgraph_client.py:
--------------------------------------------------------------------------------
1 | import os
2 | import asyncio
3 | from contextlib import asynccontextmanager
4 | from typing import Annotated, Sequence, TypedDict
5 |
6 | from langchain_core.messages import BaseMessage
7 | from langchain_mcp_adapters.client import MultiServerMCPClient
8 | from langgraph.checkpoint.memory import MemorySaver
9 | from langgraph.graph import END, START, StateGraph
10 | from langgraph.graph.message import add_messages
11 | from langgraph.prebuilt import ToolNode, tools_condition
12 | from util import get_servers_dir
13 | from llm import deepseek_model as model
14 |
15 | class State(TypedDict):
16 | messages: Annotated[Sequence[BaseMessage], add_messages]
17 |
18 | # Make the graph with MCP context
19 | async def make_graph():
20 | mcp_client = MultiServerMCPClient(
21 | {
22 | "math": {
23 | "command": "python",
24 | # Make sure to update to the full absolute path to your math_server.py file
25 | "args": [os.path.join(get_servers_dir(), "math_server.py")],
26 | "transport": "stdio",
27 | },
28 | "weather": {
29 | # make sure you start your weather server on port 8000
30 | "url": f"http://localhost:{os.getenv('MCP_SERVER_PORT')}/mcp",
31 | "transport": "streamable_http",
32 | }
33 | }
34 | )
35 |
36 | mcp_tools = await mcp_client.get_tools()
37 | print(f"Available tools: {[tool.name for tool in mcp_tools]}")
38 |
39 | llm_with_tool = model.bind_tools(mcp_tools)
40 |
41 | def call_model(state: State):
42 | messages = state["messages"]
43 | response = llm_with_tool.invoke(messages)
44 | return {"messages": [response]}
45 |
46 | # Compile application and test
47 | graph_builder = StateGraph(State)
48 | graph_builder.add_node(call_model)
49 | graph_builder.add_node("tool", ToolNode(mcp_tools))
50 |
51 | graph_builder.add_edge(START, "call_model")
52 |
53 | # Decide whether to retrieve
54 | graph_builder.add_conditional_edges(
55 | "call_model",
56 | # Assess agent decision
57 | tools_condition,
58 | {
59 | # Translate the condition outputs to nodes in our graph
60 | "tools": "tool",
61 | END: END,
62 | },
63 | )
64 | graph_builder.add_edge("tool", "call_model")
65 |
66 | graph = graph_builder.compile()
67 | graph.name = "Tool Agent"
68 |
69 | return graph
70 |
71 | # Run the graph with question
72 | async def main():
73 | graph = await make_graph()
74 | result = await graph.ainvoke({"messages": "what is the weather in nyc?"})
75 | print(result["messages"][-1].content)
76 | result = await graph.ainvoke({"messages": "what's (3 + 5) x 12?"})
77 | print(result["messages"][-1].content)
78 |
79 | asyncio.run(main())
80 |
--------------------------------------------------------------------------------
/A2A/agents/langgraph/__main__.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 |
4 | import click
5 |
6 | from agents.langgraph.agent import CurrencyAgent
7 | from agents.langgraph.task_manager import AgentTaskManager
8 | from common.server import A2AServer
9 | from common.types import (
10 | AgentCapabilities,
11 | AgentCard,
12 | AgentSkill,
13 | MissingAPIKeyError,
14 | )
15 | from common.utils.push_notification_auth import PushNotificationSenderAuth
16 | from dotenv import load_dotenv
17 |
18 |
19 | load_dotenv()
20 |
21 | logging.basicConfig(level=logging.INFO)
22 | logger = logging.getLogger(__name__)
23 |
24 |
25 | @click.command()
26 | @click.option('--host', 'host', default='localhost')
27 | @click.option('--port', 'port', default=10000)
28 | def main(host, port):
29 | """Starts the Currency Agent server."""
30 | try:
31 | if not os.getenv('GOOGLE_API_KEY'):
32 | raise MissingAPIKeyError(
33 | 'GOOGLE_API_KEY environment variable not set.'
34 | )
35 |
36 | capabilities = AgentCapabilities(streaming=True, pushNotifications=True)
37 | skill = AgentSkill(
38 | id='convert_currency',
39 | name='Currency Exchange Rates Tool',
40 | description='Helps with exchange values between various currencies',
41 | tags=['currency conversion', 'currency exchange'],
42 | examples=['What is exchange rate between USD and GBP?'],
43 | )
44 | agent_card = AgentCard(
45 | name='Currency Agent',
46 | description='Helps with exchange rates for currencies',
47 | url=f'http://{host}:{port}/',
48 | version='1.0.0',
49 | defaultInputModes=CurrencyAgent.SUPPORTED_CONTENT_TYPES,
50 | defaultOutputModes=CurrencyAgent.SUPPORTED_CONTENT_TYPES,
51 | capabilities=capabilities,
52 | skills=[skill],
53 | )
54 |
55 | notification_sender_auth = PushNotificationSenderAuth()
56 | notification_sender_auth.generate_jwk()
57 | server = A2AServer(
58 | agent_card=agent_card,
59 | task_manager=AgentTaskManager(
60 | agent=CurrencyAgent(),
61 | notification_sender_auth=notification_sender_auth,
62 | ),
63 | host=host,
64 | port=port,
65 | )
66 |
67 | server.app.add_route(
68 | '/.well-known/jwks.json',
69 | notification_sender_auth.handle_jwks_endpoint,
70 | methods=['GET'],
71 | )
72 |
73 | logger.info(f'Starting server on {host}:{port}')
74 | server.start()
75 | except MissingAPIKeyError as e:
76 | logger.error(f'Error: {e}')
77 | exit(1)
78 | except Exception as e:
79 | logger.error(f'An error occurred during server startup: {e}')
80 | exit(1)
81 |
82 |
83 | if __name__ == '__main__':
84 | main()
85 |
--------------------------------------------------------------------------------
/langgraph/helloworld.py:
--------------------------------------------------------------------------------
1 | """
2 | A minimal example of using langgraph to create a simple conversational agent.
3 | This example demonstrates the basic concepts of langgraph including:
4 | - Creating nodes (functions)
5 | - Building a graph
6 | - Handling state
7 | - Running the agent
8 | """
9 |
10 | from typing import TypedDict
11 | from langgraph.graph import StateGraph
12 | from litellm import completion
13 | from config import *
14 |
15 | # Define our state schema
16 | class AgentState(TypedDict):
17 | messages: list[str]
18 | current_response: str
19 |
20 | # Node functions
21 | def get_llm_response(state: AgentState) -> AgentState:
22 | """Get response from LLM based on conversation history."""
23 | try:
24 | # Combine messages into a prompt
25 | messages = [
26 | {
27 | "role": "system",
28 | "content": "You are a helpful assistant. Keep responses brief and friendly."
29 | }
30 | ]
31 |
32 | for msg in state["messages"]:
33 | messages.append({"role": "user", "content": msg})
34 |
35 | # Get response from LLM using config
36 | model_config = get_model_config()
37 | response = completion(
38 | messages=messages,
39 | api_base=get_api_base(),
40 | api_key=get_api_key(),
41 | **model_config
42 | )
43 |
44 | state["current_response"] = response.choices[0].message.content
45 | return state
46 | except Exception as e:
47 | state["current_response"] = f"Error: {str(e)}"
48 | return state
49 |
50 | def format_response(state: AgentState) -> AgentState:
51 | """Format the response and add it to message history."""
52 | response = state["current_response"]
53 | state["messages"].append(response)
54 | return state
55 |
56 | # Build the graph
57 | def build_graph() -> StateGraph:
58 | """Build the workflow graph."""
59 | # Create new workflow with state
60 | workflow = StateGraph(AgentState)
61 |
62 | # Add nodes
63 | workflow.add_node("get_response", get_llm_response)
64 | workflow.add_node("format", format_response)
65 |
66 | # Add edges
67 | workflow.add_edge("get_response", "format")
68 |
69 | # Set entry and finish points
70 | workflow.set_entry_point("get_response")
71 | workflow.set_finish_point("format")
72 |
73 | return workflow
74 |
75 | def main():
76 | # Initialize the graph
77 | graph = build_graph().compile()
78 |
79 | # Initialize state
80 | state = {
81 | "messages": ["Tell me a short joke"],
82 | "current_response": ""
83 | }
84 |
85 | # Run the graph
86 | result = graph.invoke(state)
87 | print("User:", state["messages"][0])
88 | print("Assistant:", result["messages"][-1])
89 |
90 | if __name__ == "__main__":
91 | try:
92 | main()
93 | except ValueError as e:
94 | print(str(e))
95 |
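For reference, the same compiled graph can also stream per-node updates instead of returning only the final state. A minimal sketch using LangGraph's `stream` API:

```python
graph = build_graph().compile()

# Each yielded item maps a node name to the state fields it updated.
for update in graph.stream(
    {"messages": ["Tell me a short joke"], "current_response": ""},
    stream_mode="updates",
):
    print(update)
```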
--------------------------------------------------------------------------------
/A2A/agents/crewai/__main__.py:
--------------------------------------------------------------------------------
1 | """This file serves as the main entry point for the application.
2 |
3 | It initializes the A2A server, defines the agent's capabilities,
4 | and starts the server to handle incoming requests.
5 | """
6 |
7 | import logging
8 | import os
9 |
10 | import click
11 |
12 | from agent import ImageGenerationAgent
13 | from common.server import A2AServer
14 | from common.types import (
15 | AgentCapabilities,
16 | AgentCard,
17 | AgentSkill,
18 | MissingAPIKeyError,
19 | )
20 | from dotenv import load_dotenv
21 | from task_manager import AgentTaskManager
22 |
23 |
24 | load_dotenv()
25 |
26 | logging.basicConfig(level=logging.INFO)
27 | logger = logging.getLogger(__name__)
28 |
29 |
30 | @click.command()
31 | @click.option('--host', 'host', default='localhost')
32 | @click.option('--port', 'port', default=10001)
33 | def main(host, port):
34 | """Entry point for the A2A + CrewAI Image generation sample."""
35 | try:
36 | if not os.getenv('GOOGLE_API_KEY') and not os.getenv(
37 | 'GOOGLE_GENAI_USE_VERTEXAI'
38 | ):
39 | raise MissingAPIKeyError(
40 | 'GOOGLE_API_KEY or Vertex AI environment variables not set.'
41 | )
42 |
43 | capabilities = AgentCapabilities(streaming=False)
44 | skill = AgentSkill(
45 | id='image_generator',
46 | name='Image Generator',
47 | description=(
48 | 'Generate stunning, high-quality images on demand and leverage'
49 | ' powerful editing capabilities to modify, enhance, or completely'
50 | ' transform visuals.'
51 | ),
52 | tags=['generate image', 'edit image'],
53 | examples=['Generate a photorealistic image of raspberry lemonade'],
54 | )
55 |
56 | agent_card = AgentCard(
57 | name='Image Generator Agent',
58 | description=(
59 | 'Generate stunning, high-quality images on demand and leverage'
60 | ' powerful editing capabilities to modify, enhance, or completely'
61 | ' transform visuals.'
62 | ),
63 | url=f'http://{host}:{port}/',
64 | version='1.0.0',
65 | defaultInputModes=ImageGenerationAgent.SUPPORTED_CONTENT_TYPES,
66 | defaultOutputModes=ImageGenerationAgent.SUPPORTED_CONTENT_TYPES,
67 | capabilities=capabilities,
68 | skills=[skill],
69 | )
70 |
71 | server = A2AServer(
72 | agent_card=agent_card,
73 | task_manager=AgentTaskManager(agent=ImageGenerationAgent()),
74 | host=host,
75 | port=port,
76 | )
77 | logger.info(f'Starting server on {host}:{port}')
78 | server.start()
79 | except MissingAPIKeyError as e:
80 | logger.error(f'Error: {e}')
81 | exit(1)
82 | except Exception as e:
83 | logger.error(f'An error occurred during server startup: {e}')
84 | exit(1)
85 |
86 |
87 | if __name__ == '__main__':
88 | main()
89 |
--------------------------------------------------------------------------------
/A2A/agents/marvin/README.md:
--------------------------------------------------------------------------------
1 | # Marvin Contact Extractor Agent (A2A Sample)
2 |
3 | This sample demonstrates an agent using the [Marvin](https://github.com/prefecthq/marvin) framework to extract structured contact information from text, integrated with the Agent2Agent (A2A) protocol.
4 |
5 | ## Overview
6 |
7 | The agent receives text and attempts to extract contact details (name, email, phone, etc.) into a structured format using Marvin. It manages conversational state across multiple turns to gather the required information (name, email) before confirming the extracted data. The agent's response includes both a textual summary/question and the structured data via A2A.
8 |
9 |
10 | ## Key Components
11 |
12 | - **Marvin `ExtractorAgent` (`agent.py`)**: Core logic using `marvin` for extraction and managing multi-turn state via a dictionary.
13 | - **A2A `AgentTaskManager` (`task_manager.py`)**: Integrates the agent with the A2A protocol, managing task state (including streaming via SSE) and response formatting.
14 | - **A2A Server (`__main__.py`)**: Hosts the agent and task manager.
15 |
16 | ## Prerequisites
17 |
18 | - Python 3.12+
19 | - [uv](https://docs.astral.sh/uv/getting-started/installation/)
20 | - `OPENAI_API_KEY` (or other LLM provider creds supported by pydantic-ai)
21 |
22 | ## Setup & Running
23 |
24 | 1. Navigate to the Python samples directory:
25 | ```bash
26 | cd samples/python/agents/marvin
27 | ```
28 |
29 | 2. Set an LLM provider API key:
30 | ```bash
31 | export OPENAI_API_KEY=your_api_key_here
32 | ```
33 |
34 | 3. Set up the Python environment:
35 | ```bash
36 | uv venv
37 | source .venv/bin/activate
38 | uv sync
39 | ```
40 |
41 | 4. Run the Marvin agent server:
42 | ```bash
43 | # Default host/port (localhost:10030)
44 | MARVIN_DATABASE_URL=sqlite+aiosqlite:///test.db MARVIN_LOG_LEVEL=DEBUG uv run .
45 |
46 | # Custom host/port
47 | # uv run . --host 0.0.0.0 --port 8080
48 | ```
49 |
50 | Without `MARVIN_DATABASE_URL` set, conversation history will not be persisted by session id.
51 |
52 | 5. In a separate terminal, run an A2A client (e.g., the sample CLI):
53 | ```bash
54 | # Ensure the environment is active (source .venv/bin/activate)
55 | cd samples/python/hosts/cli
56 | uv run . --agent http://localhost:10030 # Use the correct agent URL/port
57 | ```
58 |
59 |
60 | ## Extracted Data Structure
61 |
62 | The structured data returned in the `DataPart` is defined as:
63 |
64 | ```python
65 | class ContactInfo(BaseModel):
66 | name: str = Field(description="Person's first and last name")
67 | email: EmailStr
68 | phone: str = Field(description="standardized phone number")
69 | organization: str | None = Field(None, description="org if mentioned")
70 | role: str | None = Field(None, description="title or role if mentioned")
71 | ```
72 |
73 | along with an optional validator to normalize values (for example, standardizing phone formats) before serialization.
74 |
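For illustration, such a validator might look like this (a hypothetical sketch, not part of the sample):

```python
from pydantic import BaseModel, field_validator

class ContactInfo(BaseModel):
    # ... other fields as above ...
    phone: str

    @field_validator("phone")
    @classmethod
    def normalize_phone(cls, v: str) -> str:
        """Keep digits (and a leading '+') so numbers serialize consistently."""
        digits = "".join(ch for ch in v if ch.isdigit())
        return f"+{digits}" if v.strip().startswith("+") else digits
```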
75 | ## Learn More
76 |
77 | - [Marvin Documentation](https://www.askmarvin.ai/)
78 | - [Marvin GitHub Repository](https://github.com/prefecthq/marvin)
79 | - [A2A Protocol Documentation](https://google.github.io/A2A/#/documentation)
80 |
--------------------------------------------------------------------------------
/A2A/agents/llama_index_file_chat/__main__.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 |
4 | import click
5 |
6 | from agents.llama_index_file_chat.agent import ParseAndChat
7 | from agents.llama_index_file_chat.task_manager import LlamaIndexTaskManager
8 | from common.server import A2AServer
9 | from common.types import (
10 | AgentCapabilities,
11 | AgentCard,
12 | AgentSkill,
13 | MissingAPIKeyError,
14 | )
15 | from common.utils.push_notification_auth import PushNotificationSenderAuth
16 | from dotenv import load_dotenv
17 |
18 |
19 | load_dotenv()
20 |
21 | logging.basicConfig(level=logging.INFO)
22 | logger = logging.getLogger(__name__)
23 |
24 |
25 | @click.command()
26 | @click.option('--host', 'host', default='localhost')
27 | @click.option('--port', 'port', default=10010)
28 | def main(host, port):
29 | """Starts the Currency Agent server."""
30 | try:
31 | if not os.getenv('GOOGLE_API_KEY'):
32 | raise MissingAPIKeyError(
33 | 'GOOGLE_API_KEY environment variable not set.'
34 | )
35 | if not os.getenv('LLAMA_CLOUD_API_KEY'):
36 | raise MissingAPIKeyError(
37 | 'LLAMA_CLOUD_API_KEY environment variable not set.'
38 | )
39 |
40 | capabilities = AgentCapabilities(streaming=True, pushNotifications=True)
41 |
42 | skill = AgentSkill(
43 | id='parse_and_chat',
44 | name='Parse and Chat',
45 | description='Parses a file and then chats with a user using the parsed content as context.',
46 | tags=['parse', 'chat', 'file', 'llama_parse'],
47 | examples=['What does this file talk about?'],
48 | )
49 |
50 | agent_card = AgentCard(
51 | name='Parse and Chat',
52 | description='Parses a file and then chats with a user using the parsed content as context.',
53 | url=f'http://{host}:{port}/',
54 | version='1.0.0',
55 | defaultInputModes=LlamaIndexTaskManager.SUPPORTED_INPUT_TYPES,
56 | defaultOutputModes=LlamaIndexTaskManager.SUPPORTED_OUTPUT_TYPES,
57 | capabilities=capabilities,
58 | skills=[skill],
59 | )
60 |
61 | notification_sender_auth = PushNotificationSenderAuth()
62 | notification_sender_auth.generate_jwk()
63 | server = A2AServer(
64 | agent_card=agent_card,
65 | task_manager=LlamaIndexTaskManager(
66 | agent=ParseAndChat(),
67 | notification_sender_auth=notification_sender_auth,
68 | ),
69 | host=host,
70 | port=port,
71 | )
72 |
73 | server.app.add_route(
74 | '/.well-known/jwks.json',
75 | notification_sender_auth.handle_jwks_endpoint,
76 | methods=['GET'],
77 | )
78 |
79 | logger.info(f'Starting server on {host}:{port}')
80 | server.start()
81 | except MissingAPIKeyError as e:
82 | logger.error(f'Error: {e}')
83 | exit(1)
84 | except Exception as e:
85 | logger.error(f'An error occurred during server startup: {e}')
86 | exit(1)
87 |
88 |
89 | if __name__ == '__main__':
90 | main()
91 |
--------------------------------------------------------------------------------
/langgraphjs/helloworld_ollama.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * A minimal example of using LangGraph.js to create a simple ReAct agent.
3 | * This example demonstrates the basic concepts of LangGraph including:
4 | * - Creating a stateful agent
5 | * - Using tools
6 | * - Graph-based workflow
7 | * - State management
8 | */
9 | import { ChatOllama } from "@langchain/ollama";
10 | import { createReactAgent } from "@langchain/langgraph/prebuilt";
11 | import { MemorySaver } from "@langchain/langgraph";
12 | import { tool } from "@langchain/core/tools";
13 | import { z } from "zod";
14 | import dotenv from "dotenv";
15 |
16 | // Load environment variables from .env file
17 | dotenv.config();
18 |
19 | // Get configuration from environment variables
20 | const MODEL_NAME = process.env.MODEL_NAME || "qwen2.5"; // model must support tools
21 | const TEMPERATURE = parseFloat(process.env.TEMPERATURE || "0.7");
22 | const OLLAMA_BASE_URL = process.env.OLLAMA_BASE_URL || "http://localhost:11434";
23 |
24 | // Define a simple search tool
25 | const search = tool(
26 | async ({ query }) => {
27 | // This is a mock implementation
28 | if (query.toLowerCase().includes("weather")) {
29 | return "It's currently sunny and 75°F.";
30 | }
31 | return "No relevant information found.";
32 | },
33 | {
34 | name: "search",
35 | description: "Search for real-time information.",
36 | schema: z.object({
37 | query: z.string().describe("The search query to use."),
38 | }),
39 | }
40 | );
41 |
42 | async function main() {
43 | try {
44 | // Initialize the language model
45 | const model = new ChatOllama({
46 | model: MODEL_NAME,
47 | temperature: TEMPERATURE,
48 | baseUrl: OLLAMA_BASE_URL,
49 | });
50 |
51 | // Initialize tools
52 | const tools = [search];
53 |
54 | // Initialize memory for state persistence
55 | const checkpointer = new MemorySaver();
56 |
57 | // Create the ReAct agent
58 | const app = createReactAgent({
59 | llm: model,
60 | tools,
61 | checkpointSaver: checkpointer,
62 | });
63 |
64 | console.log("Agent initialized! Let's have a conversation.\n");
65 |
66 | // First interaction
67 | const result = await app.invoke(
68 | {
69 | messages: [
70 | {
71 | role: "user",
72 | content: "What's the weather like today?",
73 | },
74 | ],
75 | },
76 | { configurable: { thread_id: "demo-123" } }
77 | );
78 |
79 | console.log("User: What's the weather like today?");
80 | console.log(`Assistant: ${result.messages.at(-1)?.content}\n`);
81 |
82 | // Follow-up question with memory
83 | const followup = await app.invoke(
84 | {
85 | messages: [
86 | {
87 | role: "user",
88 | content: "Is that a good temperature?",
89 | },
90 | ],
91 | },
92 | { configurable: { thread_id: "demo-123" } }
93 | );
94 |
95 | console.log("User: Is that a good temperature?");
96 | console.log(`Assistant: ${followup.messages.at(-1)?.content}\n`);
97 |
98 | } catch (error) {
99 | console.error("Error:", error);
100 | }
101 | }
102 |
103 | main();
--------------------------------------------------------------------------------
/A2A/demo/ui/service/client/client.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | from typing import Any
4 |
5 | import httpx
6 |
7 | from service.types import (
8 | AgentClientHTTPError,
9 | AgentClientJSONError,
10 | CreateConversationRequest,
11 | CreateConversationResponse,
12 | GetEventRequest,
13 | GetEventResponse,
14 | JSONRPCRequest,
15 | ListAgentRequest,
16 | ListAgentResponse,
17 | ListConversationRequest,
18 | ListConversationResponse,
19 | ListMessageRequest,
20 | ListMessageResponse,
21 | ListTaskRequest,
22 | ListTaskResponse,
23 | PendingMessageRequest,
24 | PendingMessageResponse,
25 | RegisterAgentRequest,
26 | RegisterAgentResponse,
27 | SendMessageRequest,
28 | SendMessageResponse,
29 | )
30 |
31 |
32 | class ConversationClient:
33 | def __init__(self, base_url):
34 | self.base_url = base_url.rstrip('/')
35 |
36 | async def send_message(
37 | self, payload: SendMessageRequest
38 | ) -> SendMessageResponse:
39 | return SendMessageResponse(**await self._send_request(payload))
40 |
41 | async def _send_request(self, request: JSONRPCRequest) -> dict[str, Any]:
42 | async with httpx.AsyncClient() as client:
43 | try:
44 | response = await client.post(
45 | self.base_url + '/' + request.method,
46 | json=request.model_dump(),
47 | )
48 | response.raise_for_status()
49 | return response.json()
50 | except httpx.HTTPStatusError as e:
51 | raise AgentClientHTTPError(
52 | e.response.status_code, str(e)
53 | ) from e
54 | except json.JSONDecodeError as e:
55 | raise AgentClientJSONError(str(e)) from e
56 |
57 | async def create_conversation(
58 | self, payload: CreateConversationRequest
59 | ) -> CreateConversationResponse:
60 | return CreateConversationResponse(**await self._send_request(payload))
61 |
62 | async def list_conversation(
63 | self, payload: ListConversationRequest
64 | ) -> ListConversationResponse:
65 | return ListConversationResponse(**await self._send_request(payload))
66 |
67 | async def get_events(self, payload: GetEventRequest) -> GetEventResponse:
68 | return GetEventResponse(**await self._send_request(payload))
69 |
70 | async def list_messages(
71 | self, payload: ListMessageRequest
72 | ) -> ListMessageResponse:
73 | return ListMessageResponse(**await self._send_request(payload))
74 |
75 | async def get_pending_messages(
76 | self, payload: PendingMessageRequest
77 | ) -> PendingMessageResponse:
78 | return PendingMessageResponse(**await self._send_request(payload))
79 |
80 | async def list_tasks(self, payload: ListTaskRequest) -> ListTaskResponse:
81 | return ListTaskResponse(**await self._send_request(payload))
82 |
83 | async def register_agent(
84 | self, payload: RegisterAgentRequest
85 | ) -> RegisterAgentResponse:
86 | return RegisterAgentResponse(**await self._send_request(payload))
87 |
88 | async def list_agents(self, payload: ListAgentRequest) -> ListAgentResponse:
89 | return ListAgentResponse(**await self._send_request(payload))
90 |
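A minimal usage sketch (the base URL is hypothetical, and the request types come from `service.types`):

```python
import asyncio

from service.types import CreateConversationRequest

async def demo():
    client = ConversationClient('http://localhost:12000')
    # conversation/create takes no params; the response carries the new Conversation.
    resp = await client.create_conversation(CreateConversationRequest(id='1'))
    if resp.result:
        print('created conversation', resp.result.conversation_id)

asyncio.run(demo())
```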
--------------------------------------------------------------------------------
/langgraph-platform/agents/react_agent/tools.py:
--------------------------------------------------------------------------------
1 | """This module provides example tools for web scraping, search functionality, and content creation.
2 |
3 | It includes tools for general search, finance research, blog research, social media research.
4 |
5 | These tools are intended as free examples to get started. For production use,
6 | consider implementing more robust and specialized tools tailored to your needs.
7 | """
8 |
9 | from typing import Callable, Optional, cast, Any
10 |
11 | from langchain_community.tools.tavily_search import TavilySearchResults
12 | from langchain_community.tools.yahoo_finance_news import YahooFinanceNewsTool
13 | from langchain_core.tools import tool
14 | from datetime import datetime
15 |
16 | @tool
17 | async def finance_research(ticker_symbol: str) -> Optional[list[dict[str, Any]]]:
18 | """Search for finance research, must be a ticker symbol."""
19 | wrapped = YahooFinanceNewsTool()
20 | result = await wrapped.ainvoke({"query": ticker_symbol})
21 | return cast(list[dict[str, Any]], result)
22 |
23 | @tool
24 | async def advanced_research_tool(query: str) -> Optional[list[dict[str, Any]]]:
25 | """Perform in-depth research for blog content.
26 |
27 | This tool conducts comprehensive web searches with higher result limits and
28 | deeper analysis, ideal for creating well-researched blog posts backed by
29 | authoritative sources.
30 | """
31 | # Using Tavily with higher result count for more comprehensive research
32 | wrapped = TavilySearchResults(
33 |         max_results=10,  # Higher result count for deeper research
34 | search_depth="advanced" # More thorough search
35 | )
36 | result = await wrapped.ainvoke({"query": query})
37 | return cast(list[dict[str, Any]], result)
38 |
39 | @tool
40 | async def basic_research_tool(query: str) -> Optional[list[dict[str, Any]]]:
41 | """Research trending topics for social media content.
42 |
43 | This tool performs quick searches optimized for trending and viral content,
44 | returning concise results ideal for social media post creation.
45 | """
46 | # Using Tavily with lower result count and quicker search for social content
47 | wrapped = TavilySearchResults(
48 |         max_results=5,  # Fewer results for quick social lookups
49 | search_depth="basic", # Faster, less comprehensive search
50 | include_raw_content=False, # Just the highlights
51 | include_images=True # Social posts often benefit from images
52 | )
53 | result = await wrapped.ainvoke({"query": f"trending {query}"})
54 | return cast(list[dict[str, Any]], result)
55 |
56 | @tool
57 | async def get_todays_date() -> str:
58 | """Get the current date."""
59 | return datetime.now().strftime("%Y-%m-%d")
60 |
61 |
62 | def get_tools(selected_tools: list[str]) -> list[Callable[..., Any]]:
63 | """Convert a list of tool names to actual tool functions."""
64 | tools = []
65 |     for tool_name in selected_tools:  # tool_name avoids shadowing the @tool decorator
66 |         if tool_name == "finance_research":
67 |             tools.append(finance_research)
68 |         elif tool_name == "advanced_research_tool":
69 |             tools.append(advanced_research_tool)
70 |         elif tool_name == "basic_research_tool":
71 |             tools.append(basic_research_tool)
72 |         elif tool_name == "get_todays_date":
73 |             tools.append(get_todays_date)
74 |
75 | return tools
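For reference, an equivalent registry-based sketch of the same dispatch, avoiding the `if`/`elif` chain (it reuses the tool functions and the `Callable`/`Any` imports defined in this module):

```python
TOOL_REGISTRY: dict[str, Callable[..., Any]] = {
    "finance_research": finance_research,
    "advanced_research_tool": advanced_research_tool,
    "basic_research_tool": basic_research_tool,
    "get_todays_date": get_todays_date,
}

def get_tools_from_registry(selected_tools: list[str]) -> list[Callable[..., Any]]:
    """Look up each requested tool name, skipping unknown names."""
    return [TOOL_REGISTRY[name] for name in selected_tools if name in TOOL_REGISTRY]
```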
--------------------------------------------------------------------------------
/A2A/demo/ui/state/state.py:
--------------------------------------------------------------------------------
1 | import dataclasses
2 |
3 | from typing import Any, Literal
4 |
5 | import mesop as me
6 |
7 | from pydantic.dataclasses import dataclass
8 |
9 |
10 | ContentPart = str | dict[str, Any]
11 |
12 |
13 | @dataclass
14 | class StateConversation:
15 | """StateConversation provides mesop state compliant view of a conversation"""
16 |
17 | conversation_id: str = ''
18 | conversation_name: str = ''
19 | is_active: bool = True
20 | message_ids: list[str] = dataclasses.field(default_factory=list)
21 |
22 |
23 | @dataclass
24 | class StateMessage:
25 | """StateMessage provides mesop state compliant view of a message"""
26 |
27 | message_id: str = ''
28 | role: str = ''
29 | # Each content entry is a content, media type pair.
30 | content: list[tuple[ContentPart, str]] = dataclasses.field(
31 | default_factory=list
32 | )
33 |
34 |
35 | @dataclass
36 | class StateTask:
37 | """StateTask provides mesop state compliant view of task"""
38 |
39 | task_id: str = ''
40 | session_id: str | None = None
41 | state: str | None = None
42 | message: StateMessage = dataclasses.field(default_factory=StateMessage)
43 | artifacts: list[list[tuple[ContentPart, str]]] = dataclasses.field(
44 | default_factory=list
45 | )
46 |
47 |
48 | @dataclass
49 | class SessionTask:
50 | """SessionTask organizes tasks based on conversation"""
51 |
52 | session_id: str = ''
53 | task: StateTask = dataclasses.field(default_factory=StateTask)
54 |
55 |
56 | @dataclass
57 | class StateEvent:
58 | """StateEvent provides mesop state compliant view of event"""
59 |
60 | conversation_id: str = ''
61 | actor: str = ''
62 | role: str = ''
63 | id: str = ''
64 | # Each entry is a pair of (content, media type)
65 | content: list[tuple[ContentPart, str]] = dataclasses.field(
66 | default_factory=list
67 | )
68 |
69 |
70 | @me.stateclass
71 | class AppState:
72 | """Mesop Application State"""
73 |
74 | sidenav_open: bool = False
75 | theme_mode: Literal['system', 'light', 'dark'] = 'system'
76 |
77 | current_conversation_id: str = ''
78 | conversations: list[StateConversation]
79 | messages: list[StateMessage]
80 | task_list: list[SessionTask] = dataclasses.field(default_factory=list)
81 | background_tasks: dict[str, str] = dataclasses.field(default_factory=dict)
82 | message_aliases: dict[str, str] = dataclasses.field(default_factory=dict)
83 | # This is used to track the data entered in a form
84 | completed_forms: dict[str, dict[str, Any] | None] = dataclasses.field(
85 | default_factory=dict
86 | )
87 | # This is used to track the message sent to agent with form data
88 | form_responses: dict[str, str] = dataclasses.field(default_factory=dict)
89 | polling_interval: int = 1
90 |
91 | # Added for API key management
92 | api_key: str = ''
93 | uses_vertex_ai: bool = False
94 | api_key_dialog_open: bool = False
95 |
96 |
97 | @me.stateclass
98 | class SettingsState:
99 | """Settings State"""
100 |
101 | output_mime_types: list[str] = dataclasses.field(
102 | default_factory=lambda: [
103 | 'image/*',
104 | 'text/plain',
105 | ]
106 | )
107 |
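A minimal sketch of how a Mesop event handler might read and mutate this state (`me.state` returns the per-session instance):

```python
def toggle_sidenav(e: me.ClickEvent):
    """Flip the side navigation open/closed for the current session."""
    state = me.state(AppState)
    state.sidenav_open = not state.sidenav_open
```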
--------------------------------------------------------------------------------
/langchain-mcp-adapters/clients/langgraph_client_wconfig.py:
--------------------------------------------------------------------------------
1 | import os
2 | import asyncio
3 | import json
4 | from pathlib import Path
5 | from contextlib import asynccontextmanager
6 | from typing import Annotated, Sequence, TypedDict
7 |
8 | from langchain_core.messages import BaseMessage
9 | from langchain_mcp_adapters.client import MultiServerMCPClient
10 | from langgraph.checkpoint.memory import MemorySaver
11 | from langgraph.graph import END, START, StateGraph
12 | from langgraph.graph.message import add_messages
13 | from langgraph.prebuilt import ToolNode, tools_condition
14 | from util import get_servers_dir
15 | from llm import deepseek_model as model
16 |
17 | class State(TypedDict):
18 | """State definition for the graph"""
19 | messages: Annotated[Sequence[BaseMessage], add_messages]
20 |
21 | # Make the graph with MCP context
22 | async def make_graph():
23 | """Create and configure the LangGraph with MCP tools from configuration file"""
24 | # Read mcpServers.json configuration
25 | config_path = Path(__file__).parent / "mcpServers.json"
26 | with open(config_path, "r") as f:
27 | server_config = json.load(f)
28 |
29 | # Replace environment variables for path and url
30 | server_config["mcpServers"]["math"]["args"][0] = server_config["mcpServers"]["math"]["args"][0].replace("$SERVERS_DIR", get_servers_dir())
31 | server_config["mcpServers"]["weather"]["url"] = server_config["mcpServers"]["weather"]["url"].replace("$MCP_SERVER_PORT", os.getenv('MCP_SERVER_PORT', '8000'))
32 |
33 | print(f"Server configuration: {server_config['mcpServers']}")
34 |
35 | mcp_client = MultiServerMCPClient(server_config["mcpServers"])
36 | mcp_tools = await mcp_client.get_tools()
37 | print(f"Available tools: {[tool.name for tool in mcp_tools]}")
38 |
39 | llm_with_tool = model.bind_tools(mcp_tools)
40 |
41 | def call_model(state: State):
42 | """Call the language model with the current state"""
43 | messages = state["messages"]
44 | response = llm_with_tool.invoke(messages)
45 | return {"messages": [response]}
46 |
47 | # Compile application and test
48 | graph_builder = StateGraph(State)
49 | graph_builder.add_node("call_model", call_model)
50 | graph_builder.add_node("tool", ToolNode(mcp_tools))
51 |
52 | graph_builder.add_edge(START, "call_model")
53 |
54 | # Decide whether to retrieve
55 | graph_builder.add_conditional_edges(
56 | "call_model",
57 | # Assess agent decision
58 | tools_condition,
59 | {
60 | # Translate the condition outputs to nodes in our graph
61 | "tools": "tool",
62 | END: END,
63 | },
64 | )
65 | graph_builder.add_edge("tool", "call_model")
66 |
67 | graph = graph_builder.compile()
68 | graph.name = "Tool Agent"
69 |
70 | return graph
71 |
72 | # Run the graph with question
73 | async def main():
74 | """Main function to run the agent with test questions"""
75 | graph = await make_graph()
76 |
77 | # Test weather query
78 | result = await graph.ainvoke({"messages": "what is the weather in nyc?"})
79 | print(result["messages"][-1].content)
80 |
81 | # Test math query
82 | result = await graph.ainvoke({"messages": "what's (3 + 5) x 12?"})
83 | print(result["messages"][-1].content)
84 |
85 | if __name__ == "__main__":
86 | asyncio.run(main())
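For reference, a hypothetical `mcpServers.json` shape matching what this script expects, expressed as the Python dict produced by `json.load` (the `transport` keys follow `MultiServerMCPClient`'s connection format; the `/mcp` URL path is an assumption):

```python
# Shape of the loaded configuration before the $SERVERS_DIR /
# $MCP_SERVER_PORT substitutions above are applied.
server_config = {
    "mcpServers": {
        "math": {
            "command": "python",
            "args": ["$SERVERS_DIR/math_server.py"],
            "transport": "stdio",
        },
        "weather": {
            "url": "http://localhost:$MCP_SERVER_PORT/mcp",
            "transport": "streamable_http",
        },
    }
}
```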
--------------------------------------------------------------------------------
/langchain-sandbox/README.md:
--------------------------------------------------------------------------------
1 | # 🛡️ LangChain Sandbox
2 |
3 | A secure environment for running Python code using Pyodide (WebAssembly) and Deno.
4 |
5 | ## Features
6 | The sandbox consists of two main components:
7 |
8 | - `pyodide-sandbox-js`: JavaScript/TypeScript module using Deno to provide the core sandboxing functionality.
9 | - `sandbox-py`: Contains `PyodideSandbox` which just wraps the JavaScript/TypeScript module and executes it as a subprocess.
10 |
11 |
12 | ## 🚀 Quick Start
13 |
14 | 1. Install Node.js and Deno (required): https://docs.deno.com/runtime/getting_started/installation/
15 |
16 | ```bash
17 | npm install -g deno
18 | ```
19 |
20 | 2. Install dependencies:
21 |
22 | ```bash
23 | pip install -r requirements.txt
24 | ```
25 |
26 | 3. Run code in the sandbox independently:
27 | ```shell
28 | python helloworld.py
29 | # or helloworld_stateful.py
30 | ```
31 |
32 | Example output:
33 |
34 | ```shell
35 | CodeExecutionResult(result=None, stdout='Loading numpyLoaded numpy[1 2 3]', stderr=None, status='success', execution_time=1.5542199611663818, session_metadata={'created': '2025-06-07T06:21:49.539Z', 'lastModified': '2025-06-07T06:21:50.015Z', 'packages': ['numpy']}, session_bytes=None)
36 | Loading numpyLoaded numpy[1 2 3]
37 | ```
38 |
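For reference, `helloworld.py` boils down to something like the following hypothetical sketch; the `CodeExecutionResult` fields match the output above, but see the file itself for the exact `PyodideSandbox` constructor arguments (`allow_net` here is an assumption):

```python
import asyncio

from langchain_sandbox import PyodideSandbox

async def main():
    # allow_net lets Pyodide fetch packages like numpy at run time.
    sandbox = PyodideSandbox(allow_net=True)
    result = await sandbox.execute("import numpy as np\nprint(np.array([1, 2, 3]))")
    print(result.status, result.stdout)

asyncio.run(main())
```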
39 | 4. Run the sandbox as a tool for an agent:
40 | ```shell
41 | python react.py
42 | # or stateful_react.py
43 | ```
44 |
45 | Example output:
46 |
47 | ```shell
48 | {'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_BdlVK2dByXS0rCsfPyTfaqP7', 'function': {'arguments': '{"code":"import math\\nradius = 5\\narea = math.pi * (radius ** 2)\\nprint(area)"}', 'name': 'python_code_sandbox'}, 'type': 'function'}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 38, 'prompt_tokens': 121, 'total_tokens': 159, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': None, 'id': 'chatcmpl-BgN62muHRjLcBvqN54mj4E3f2rTju', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run--63fa3547-d17f-49b7-8a1e-5af3f2a92e94-0', tool_calls=[{'name': 'python_code_sandbox', 'args': {'code': 'import math\nradius = 5\narea = math.pi * (radius ** 2)\nprint(area)'}, 'id': 'call_BdlVK2dByXS0rCsfPyTfaqP7', 'type': 'tool_call'}], usage_metadata={'input_tokens': 121, 'output_tokens': 38, 'total_tokens': 159, 'input_token_details': {}, 'output_token_details': {}})]}}
49 |
50 |
51 | {'tools': {'messages': [ToolMessage(content='78.53981633974483', name='python_code_sandbox', id='b2965f55-bdf1-47f7-bad4-d47069023d37', tool_call_id='call_BdlVK2dByXS0rCsfPyTfaqP7')]}}
52 |
53 |
54 | {'agent': {'messages': [AIMessage(content='The area of a circle with a radius of 5 is approximately 78.54.', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 176, 'total_tokens': 195, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': None, 'id': 'chatcmpl-BgN655VPIN4K9HDoJyXX0bpkqsX4F', 'finish_reason': 'stop', 'logprobs': None}, id='run--e5dac62d-6c8b-4185-9b19-48579f534751-0', usage_metadata={'input_tokens': 176, 'output_tokens': 19, 'total_tokens': 195, 'input_token_details': {}, 'output_token_details': {}})]}}
55 | ```
56 |
57 | # References
58 |
59 | - [GitHub Repository](https://github.com/langchain-ai/langchain-sandbox)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # awesome-agent-quickstart
2 |
3 | Your fast lane to AI Agent development!
4 |
5 | This repository helps you bypass setup complexities and dive straight into latest AI Agent frameworks. Go from zero to running your first agent in minutes, whether you're interested in LangGraph, AutoGen, Smolagents, or other popular frameworks.
6 |
7 | ## 🎯 Features
8 |
9 | - ⚡ Zero configuration needed - get started in minutes
10 | - 🎓 Learn by example - all examples are runnable
11 | - 🔄 Model-agnostic - support any LLM
12 | - 🛠️ Centralized configuration - shared settings across frameworks
13 |
14 | ## 🚀 Supported Frameworks
15 |
16 | Ready-to-run templates for:
17 |
18 | ### Flow control
19 | - LangChain
20 | - LangGraph (Python&JS)
21 | - Smolagents
22 |
23 | ### Role-playing
24 | - LangGraph-Swarm
25 | - OpenAI Swarm
26 | - AutoGen
27 | - CrewAI
28 | - OpenAI Agents SDK
29 |
30 | ### Tools
31 | - LangChain MCP Adapters
32 | - Browser-use
33 |
34 |
35 | ## 📁 Project Structure
36 |
37 | Each directory is a self-contained example of an agent framework. For example:
38 |
39 | ```
40 | awesome-agent-quickstart/
41 | ├── langgraph/ # Framework name
42 | │ ├── config.py # Common configurations (model params, API settings&checking)
43 | │ ├── helloworld.py # Basic example: Simple conversational agent
44 | │ ├── requirements.txt # Dependency management
45 | │   ├── .env.example     # Environment variables template
46 | │   ├── READIT.md        # The framework's original README
47 | │   └── README.md        # A step-by-step guide to using the framework
48 | ```
49 |
50 | ## 🐍 Setup
51 |
52 | Since some frameworks rely on the latest Python features, we recommend creating a virtual environment:
53 |
54 | ```
55 | conda create -n agents-quickstart python=3.13
56 | conda activate agents-quickstart
57 | ```
58 |
59 | ## 🤝 Contributing
60 |
61 | Contributions for more agent framework examples are welcome! Please ensure:
62 | 1. Create examples under the respective framework directory
63 | 2. Use common configurations from `config.py`
64 | 3. Provide clear documentation and comments
65 | 4. Include `requirements.txt` and `.env.example`
66 | 5. Follow project code style and best practices
67 |
68 | ## 📝 Development Guidelines
69 |
70 | 1. Code Structure
71 | - Follow modular design
72 | - Separate configuration from logic
73 | - Include proper error handling
74 | - Keep dependency files up-to-date
75 |
76 | 2. Documentation
77 | - Add docstrings for main functions and classes
78 | - Include usage examples
79 | - Explain key concepts and design decisions
80 |
81 | 3. Security
82 | - Use environment variables for sensitive data
83 | - Include rate limiting considerations
84 | - Add proper input validation
85 |
86 | See [Contributing Guidelines](CONTRIBUTING.md) for more details.
87 |
88 | ## 📃 License
89 |
90 | MIT License - see [LICENSE](LICENSE)
91 |
92 | ## 🌟 Community
93 |
94 | - ⭐ Star us on GitHub
95 | - 🐛 [Report issues](https://github.com/ababaidotai/awesome-agent-quickstart/issues)
96 | - 📧 Contact: ababai.ai@outlook.com
97 |
98 | ## 🙏 Acknowledgments
99 |
100 | Made with ❤️ by the AI community, for the AI community.
101 |
102 | - Thanks to all contributors!
103 | - Thanks to the framework development teams!
104 | - Thanks to the LLM community!
--------------------------------------------------------------------------------
/langgraphjs/helloworld_high_level.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * A minimal example of using LangGraph.js high level API to create a simple ReAct agent.
3 | * This example demonstrates the basic concepts of LangGraph including:
4 | * - Creating a stateful agent
5 | * - Using tools
6 | * - Graph-based workflow
7 | * - State management
8 | */
9 |
10 | import { ChatAnthropic } from "@langchain/anthropic";
11 | import { createReactAgent } from "@langchain/langgraph/prebuilt";
12 | import { MemorySaver } from "@langchain/langgraph";
13 | import { tool } from "@langchain/core/tools";
14 | import { z } from "zod";
15 | import dotenv from "dotenv";
16 |
17 | // Load environment variables from .env file
18 | dotenv.config();
19 |
20 | // Get configuration from environment variables
21 | const MODEL_NAME = process.env.MODEL_NAME || "claude-3-5-sonnet-20241022";
22 | const TEMPERATURE = parseFloat(process.env.TEMPERATURE || "0.7");
23 | const ANTHROPIC_API_KEY = process.env.ANTHROPIC_API_KEY;
24 | const ANTHROPIC_API_BASE = process.env.ANTHROPIC_API_BASE;
25 |
26 | // Define a simple search tool
27 | const search = tool(
28 | async ({ query }) => {
29 | // This is a mock implementation
30 | if (query.toLowerCase().includes("weather")) {
31 | return "It's currently sunny and 75°F.";
32 | }
33 | return "No relevant information found.";
34 | },
35 | {
36 | name: "search",
37 | description: "Search for real-time information.",
38 | schema: z.object({
39 | query: z.string().describe("The search query to use."),
40 | }),
41 | }
42 | );
43 |
44 | async function main() {
45 | try {
46 | // Initialize the language model with environment variables
47 | const model = new ChatAnthropic({
48 | modelName: MODEL_NAME,
49 | temperature: TEMPERATURE,
50 | apiKey: ANTHROPIC_API_KEY,
51 | anthropicApiUrl: ANTHROPIC_API_BASE,
52 | });
53 |
54 | // Initialize tools
55 | const tools = [search];
56 |
57 | // Initialize memory for state persistence
58 | const checkpointer = new MemorySaver();
59 |
60 | // Create the ReAct agent
61 | const app = createReactAgent({
62 | llm: model,
63 | tools,
64 | checkpointSaver: checkpointer,
65 | });
66 |
67 | console.log("Agent initialized! Let's have a conversation.\n");
68 |
69 | // First interaction
70 | const result = await app.invoke(
71 | {
72 | messages: [
73 | {
74 | role: "user",
75 | content: "What's the weather like today?",
76 | },
77 | ],
78 | },
79 | { configurable: { thread_id: "demo-123" } }
80 | );
81 |
82 | console.log("User: What's the weather like today?");
83 | console.log(`Assistant: ${result.messages.at(-1)?.content}\n`);
84 |
85 | // Follow-up question with memory
86 | const followup = await app.invoke(
87 | {
88 | messages: [
89 | {
90 | role: "user",
91 | content: "Is that a good temperature?",
92 | },
93 | ],
94 | },
95 | { configurable: { thread_id: "demo-123" } }
96 | );
97 |
98 | console.log("User: Is that a good temperature?");
99 | console.log(`Assistant: ${followup.messages.at(-1)?.content}\n`);
100 |
101 | } catch (error) {
102 | console.error("Error:", error);
103 | }
104 | }
105 |
106 | main();
--------------------------------------------------------------------------------
/A2A/agents/crewai/README.md:
--------------------------------------------------------------------------------
1 | # CrewAI Agent with A2A Protocol
2 |
3 | This sample demonstrates a simple image generation agent built with [CrewAI](https://www.crewai.com/open-source) and exposed through the A2A protocol.
4 |
5 | ## How It Works
6 |
7 | This agent utilizes CrewAI and the Google Gemini API to generate images based on text prompts. The A2A protocol enables standardized interaction with the agent, allowing clients to send requests and receive images as artifacts.
8 |
9 | ```mermaid
10 | sequenceDiagram
11 | participant Client as A2A Client
12 | participant Server as A2A Server
13 | participant Agent as CrewAI Agent
14 | participant API as Gemini API
15 |
16 | Client->>Server: Send task with text prompt
17 | Server->>Agent: Forward prompt to image agent
18 | Note over Server,Agent: Optional: Simulated streaming updates
19 | Agent->>API: Generate image using Gemini
20 | API->>Agent: Return generated image
21 | Agent->>Server: Store image and return ID
22 | Server->>Client: Respond with image artifact
23 | ```
24 |
25 | ## Key Components
26 |
27 | - **CrewAI Agent**: Image generation agent with specialized tools
28 | - **A2A Server**: Provides standardized protocol for interacting with the agent
29 | - **Image Generation**: Uses Gemini API to create images from text descriptions
30 | - **Cache System**: Stores generated images for retrieval (in-memory or file-based)
31 |
32 | ## Prerequisites
33 |
34 | - Python 3.12 or higher
35 | - [UV](https://docs.astral.sh/uv/) package manager (recommended)
36 | - Google API Key (for Gemini access)
37 |
38 | ## Setup & Running
39 |
40 | 1. Navigate to the samples directory:
41 |
42 | ```bash
43 | cd samples/python/agents/crewai
44 | ```
45 |
46 | 2. Create an environment file with your API key (or Vertex AI credentials):
47 |
48 | ```bash
49 | echo "GOOGLE_API_KEY=your_api_key_here" > .env
50 | ```
51 |
52 | 3. Set up the Python environment:
53 |
54 | ```bash
55 | uv python pin 3.12
56 | uv venv
57 | source .venv/bin/activate
58 | ```
59 |
60 | 4. Run the agent with desired options:
61 |
62 | ```bash
63 | # Basic run
64 | uv run .
65 |
66 | # On custom host/port
67 | uv run . --host 0.0.0.0 --port 8080
68 | ```
69 |
70 | 5. Run the A2A client:
71 |
72 | In a separate terminal:
73 | ```bash
74 | # Connect to the agent (specify the agent URL with correct port)
75 | cd samples/python/hosts/cli
76 | uv run . --agent http://localhost:10001
77 |
78 | # If you changed the port when starting the agent, use that port instead
79 | # uv run . --agent http://localhost:YOUR_PORT
80 | ```
81 |
82 | Or run the [demo app](/A2A/demo/README.md)
83 |
84 | ## Features & Improvements
85 |
86 | **Features:**
87 |
88 | - Text-to-image generation using Google Gemini
89 | - Support for modifying existing images using references
90 | - Robust error handling with automatic retries
91 | - Optional file-based cache persistence
92 | - Improved artifact ID extraction from queries
93 |
94 | **Limitations:**
95 |
96 | - No true streaming (CrewAI doesn't natively support it)
97 | - Limited agent interactions (no multi-turn conversations)
98 |
99 | ## Learn More
100 |
101 | - [A2A Protocol Documentation](https://google.github.io/A2A/#/documentation)
102 | - [CrewAI Documentation](https://docs.crewai.com/introduction)
103 | - [Google Gemini API](https://ai.google.dev/gemini-api)
104 |
--------------------------------------------------------------------------------
/model_context_protocol/weather/weather.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 | import httpx
3 | from mcp.server.fastmcp import FastMCP
4 |
5 | # Initialize FastMCP server
6 | mcp = FastMCP("weather")
7 |
8 | # Constants
9 | NWS_API_BASE = "https://api.weather.gov"
10 | USER_AGENT = "weather-app/1.0"
11 |
12 | # Helper functions for querying and formatting the data from the National Weather Service API
13 | async def make_nws_request(url: str) -> dict[str, Any] | None:
14 | """Make a request to the NWS API with proper error handling."""
15 | headers = {
16 | "User-Agent": USER_AGENT,
17 | "Accept": "application/geo+json"
18 | }
19 | async with httpx.AsyncClient() as client:
20 | try:
21 | response = await client.get(url, headers=headers, timeout=30.0)
22 | response.raise_for_status()
23 | return response.json()
24 | except Exception:
25 | return None
26 |
27 | def format_alert(feature: dict) -> str:
28 | """Format an alert feature into a readable string."""
29 | props = feature["properties"]
30 | return f"""
31 | Event: {props.get('event', 'Unknown')}
32 | Area: {props.get('areaDesc', 'Unknown')}
33 | Severity: {props.get('severity', 'Unknown')}
34 | Description: {props.get('description', 'No description available')}
35 | Instructions: {props.get('instruction', 'No specific instructions provided')}
36 | """
37 |
38 | # Tools for the weather agent
39 | @mcp.tool()
40 | async def get_alerts(state: str) -> str:
41 | """Get weather alerts for a US state.
42 |
43 | Args:
44 | state: Two-letter US state code (e.g. CA, NY)
45 | """
46 | url = f"{NWS_API_BASE}/alerts/active/area/{state}"
47 | data = await make_nws_request(url)
48 |
49 | if not data or "features" not in data:
50 | return "Unable to fetch alerts or no alerts found."
51 |
52 | if not data["features"]:
53 | return "No active alerts for this state."
54 |
55 | alerts = [format_alert(feature) for feature in data["features"]]
56 | return "\n---\n".join(alerts)
57 |
58 | @mcp.tool()
59 | async def get_forecast(latitude: float, longitude: float) -> str:
60 | """Get weather forecast for a location.
61 |
62 | Args:
63 | latitude: Latitude of the location
64 | longitude: Longitude of the location
65 | """
66 | # First get the forecast grid endpoint
67 | points_url = f"{NWS_API_BASE}/points/{latitude},{longitude}"
68 | points_data = await make_nws_request(points_url)
69 |
70 | if not points_data:
71 | return "Unable to fetch forecast data for this location."
72 |
73 | # Get the forecast URL from the points response
74 | forecast_url = points_data["properties"]["forecast"]
75 | forecast_data = await make_nws_request(forecast_url)
76 |
77 | if not forecast_data:
78 | return "Unable to fetch detailed forecast."
79 |
80 | # Format the periods into a readable forecast
81 | periods = forecast_data["properties"]["periods"]
82 | forecasts = []
83 | for period in periods[:5]: # Only show next 5 periods
84 | forecast = f"""
85 | {period['name']}:
86 | Temperature: {period['temperature']}°{period['temperatureUnit']}
87 | Wind: {period['windSpeed']} {period['windDirection']}
88 | Forecast: {period['detailedForecast']}
89 | """
90 | forecasts.append(forecast)
91 |
92 | return "\n---\n".join(forecasts)
93 |
94 | # Initialize and run the server
95 | if __name__ == "__main__":
96 |     # Serve over stdio so MCP clients can spawn this script as a subprocess
97 |     mcp.run(transport='stdio')
98 |
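Because the server speaks MCP over stdio, any MCP client can spawn it as a subprocess and call its tools. A minimal sketch using the `mcp` Python SDK (assuming the script is saved as `weather.py` in the working directory):

```python
import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client

async def main():
    # Spawn the weather server as a subprocess and talk to it over stdio.
    params = StdioServerParameters(command="python", args=["weather.py"])
    async with stdio_client(params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            result = await session.call_tool("get_alerts", {"state": "CA"})
            print(result.content)

asyncio.run(main())
```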
--------------------------------------------------------------------------------
/langgraph-codeact/helloworld.py:
--------------------------------------------------------------------------------
1 | import builtins
2 | import contextlib
3 | import io
4 | import math
5 | from typing import Any
6 |
7 | from langchain.chat_models import init_chat_model
8 | from langgraph.checkpoint.memory import MemorySaver
9 |
10 | from langgraph_codeact import create_codeact
11 |
12 |
13 | def eval(code: str, _locals: dict[str, Any]) -> tuple[str, dict[str, Any]]:
14 | # Store original keys before execution
15 | original_keys = set(_locals.keys())
16 |
17 | try:
18 | with contextlib.redirect_stdout(io.StringIO()) as f:
19 | exec(code, builtins.__dict__, _locals)
20 | result = f.getvalue()
21 |         if not result:
22 |             result = "<code ran, no output printed to stdout>"
23 | except Exception as e:
24 | result = f"Error during execution: {repr(e)}"
25 |
26 | # Determine new variables created during execution
27 | new_keys = set(_locals.keys()) - original_keys
28 | new_vars = {key: _locals[key] for key in new_keys}
29 | return result, new_vars
30 |
31 |
32 | def add(a: float, b: float) -> float:
33 | """Add two numbers together."""
34 | return a + b
35 |
36 |
37 | def multiply(a: float, b: float) -> float:
38 | """Multiply two numbers together."""
39 | return a * b
40 |
41 |
42 | def divide(a: float, b: float) -> float:
43 | """Divide two numbers."""
44 | return a / b
45 |
46 |
47 | def subtract(a: float, b: float) -> float:
48 | """Subtract two numbers."""
49 | return a - b
50 |
51 |
52 | def sin(a: float) -> float:
53 | """Take the sine of a number."""
54 | return math.sin(a)
55 |
56 |
57 | def cos(a: float) -> float:
58 | """Take the cosine of a number."""
59 | return math.cos(a)
60 |
61 |
62 | def radians(a: float) -> float:
63 | """Convert degrees to radians."""
64 | return math.radians(a)
65 |
66 |
67 | def exponentiation(a: float, b: float) -> float:
68 | """Raise one number to the power of another."""
69 | return a**b
70 |
71 |
72 | def sqrt(a: float) -> float:
73 | """Take the square root of a number."""
74 | return math.sqrt(a)
75 |
76 |
77 | def ceil(a: float) -> float:
78 | """Round a number up to the nearest integer."""
79 | return math.ceil(a)
80 |
81 |
82 | tools = [
83 | add,
84 | multiply,
85 | divide,
86 | subtract,
87 | sin,
88 | cos,
89 | radians,
90 | exponentiation,
91 | sqrt,
92 | ceil,
93 | ]
94 |
95 | model = init_chat_model("gpt-4o-mini", model_provider="openai")
96 |
97 | code_act = create_codeact(model, tools, eval)
98 | agent = code_act.compile(checkpointer=MemorySaver())
99 |
100 | if __name__ == "__main__":
101 | messages = [
102 | {
103 | "role": "user",
104 | "content": "A batter hits a baseball at 45.847 m/s at an angle of 23.474° above the horizontal. The outfielder, who starts facing the batter, picks up the baseball as it lands, then throws it back towards the batter at 24.12 m/s at an angle of 39.12 degrees. How far is the baseball from where the batter originally hit it? Assume zero air resistance.",
105 | }
106 | ]
107 | for typ, chunk in agent.stream(
108 | {"messages": messages},
109 | stream_mode=["values", "messages"],
110 | config={"configurable": {"thread_id": 1}},
111 | ):
112 | if typ == "messages":
113 | print(chunk[0].content, end="")
114 | elif typ == "values":
115 | print("\n\n---answer---\n\n", chunk)
--------------------------------------------------------------------------------
/langgraphjs/README.md:
--------------------------------------------------------------------------------
1 | # LangGraph.js Examples
2 |
3 | Welcome to the LangGraph.js examples! This directory contains ready-to-run examples demonstrating how to use LangGraph.js for building stateful, multi-actor applications with LLMs.
4 |
5 | ## ⚡ Quick Start (5-Minute Setup)
6 |
7 | Let's create your first LangGraph.js agent! We'll start with a simple ReAct agent example.
8 |
9 | 1. Install dependencies:
10 | ```bash
11 | npm install
12 | ```
13 |
14 | 2. Configure environment:
15 | ```bash
16 | cp .env.example .env
17 | ```
18 |
19 | Edit `.env` with your settings:
20 | ```ini
21 | # Anthropic API Key
22 | ANTHROPIC_API_KEY=your-api-key-here
23 |
24 | # Optional: LangSmith for observability
25 | LANGSMITH_API_KEY=your-langsmith-key-here
26 | LANGSMITH_TRACING=true
27 | ```
28 |
29 | 3. Run your first agent:
30 | ```bash
31 | # Run the high-level agent
32 | npm start
33 |
34 | # Run the low-level agent
35 | npm run start-low
36 |
37 | # Run the Ollama agent
38 | npm run start-ollama
39 | ```
40 |
41 | 4. Expected output:
42 | ```shell
43 | > langgraph-quickstart@1.0.0 start-ollama
44 | > tsx helloworld_ollama.ts
45 |
46 | Agent initialized! Let's have a conversation.
47 |
48 | User: What's the weather like today?
49 | Assistant: Today it's sunny with a temperature of 75°F.
50 |
51 | User: Is that a good temperature?
52 | Assistant: A temperature of 75°F is generally considered quite pleasant, neither too hot nor too cold. It’s perfect for outdoor activities!
53 | ```
54 |
55 | ## 🚀 Available Examples
56 |
57 | 1. `helloworld.ts`, `helloworld_high_level.ts`, `helloworld_ollama.ts` - Basic ReAct agents (low-level, high-level, and local Ollama variants)
58 |    - Learn the fundamentals of LangGraph.js
59 |    - Understand state management
60 |    - See tool usage in action
61 |    - Experience conversation memory
62 |
63 | ## 💡 Key Features
64 |
65 | - **Stateful Agents**: Create agents that maintain state between interactions
66 | - **Tool Integration**: Easy to add and use custom tools
67 | - **Memory Management**: Built-in support for conversation memory
68 | - **Type Safety**: Full TypeScript support
69 | - **Observability**: Optional LangSmith integration for debugging
70 |
71 | ## 🔧 Customization
72 |
73 | 1. Add custom tools:
74 | ```typescript
75 | const customTool = tool(
76 | async ({ query }) => {
77 | // Implement your tool logic
78 | return "Tool response";
79 | },
80 | {
81 | name: "custom_tool",
82 | description: "Tool description",
83 | schema: z.object({
84 | query: z.string().describe("Input description"),
85 | }),
86 | }
87 | );
88 | ```
89 |
90 | 2. Configure the agent:
91 | ```typescript
92 | const app = createReactAgent({
93 | llm: model,
94 | tools: [customTool],
95 | checkpointSaver: new MemorySaver(),
96 | });
97 | ```
98 |
99 | 3. Add conversation memory:
100 | ```typescript
101 | const result = await app.invoke(
102 | {
103 | messages: [{ role: "user", content: "Your question" }]
104 | },
105 | { configurable: { thread_id: "unique-id" } }
106 | );
107 | ```
108 |
109 | ## ⚠️ Important Notes
110 |
111 | 1. Ensure `.env` file is properly configured
112 | 2. API keys should never be committed to version control
113 | 3. Use TypeScript for better development experience
114 | 4. Consider enabling LangSmith for debugging
115 |
116 | ## 🤝 Next Steps
117 |
118 | - Explore the example code in detail
119 | - Add your own custom tools
120 | - Implement more complex conversation flows
121 | - Enable LangSmith for debugging
122 |
123 | ## 📚 Additional Resources
124 |
125 | - [LangGraph.js Documentation](https://langchain-ai.github.io/langgraphjs/)
126 | - [Anthropic Claude Documentation](https://docs.anthropic.com/claude/)
--------------------------------------------------------------------------------
/A2A/demo/ui/service/types.py:
--------------------------------------------------------------------------------
1 | from typing import Annotated, Literal
2 |
3 | from common.types import (
4 | AgentCard,
5 | JSONRPCRequest,
6 | JSONRPCResponse,
7 | Message,
8 | Task,
9 | )
10 | from pydantic import BaseModel, Field, TypeAdapter
11 |
12 |
13 | class Conversation(BaseModel):
14 | conversation_id: str
15 | is_active: bool
16 | name: str = ''
17 | task_ids: list[str] = Field(default_factory=list)
18 | messages: list[Message] = Field(default_factory=list)
19 |
20 |
21 | class Event(BaseModel):
22 | id: str
23 | actor: str = ''
24 | # TODO: Extend to support internal concepts for models, like function calls.
25 | content: Message
26 | timestamp: float
27 |
28 |
29 | class SendMessageRequest(JSONRPCRequest):
30 | method: Literal['message/send'] = 'message/send'
31 | params: Message
32 |
33 |
34 | class ListMessageRequest(JSONRPCRequest):
35 | method: Literal['message/list'] = 'message/list'
36 | # This is the conversation id
37 | params: str
38 |
39 |
40 | class ListMessageResponse(JSONRPCResponse):
41 | result: list[Message] | None = None
42 |
43 |
44 | class MessageInfo(BaseModel):
45 | message_id: str
46 | conversation_id: str
47 |
48 |
49 | class SendMessageResponse(JSONRPCResponse):
50 | result: Message | MessageInfo | None = None
51 |
52 |
53 | class GetEventRequest(JSONRPCRequest):
54 | method: Literal['events/get'] = 'events/get'
55 |
56 |
57 | class GetEventResponse(JSONRPCResponse):
58 | result: list[Event] | None = None
59 |
60 |
61 | class ListConversationRequest(JSONRPCRequest):
62 | method: Literal['conversation/list'] = 'conversation/list'
63 |
64 |
65 | class ListConversationResponse(JSONRPCResponse):
66 | result: list[Conversation] | None = None
67 |
68 |
69 | class PendingMessageRequest(JSONRPCRequest):
70 | method: Literal['message/pending'] = 'message/pending'
71 |
72 |
73 | class PendingMessageResponse(JSONRPCResponse):
74 | result: list[tuple[str, str]] | None = None
75 |
76 |
77 | class CreateConversationRequest(JSONRPCRequest):
78 | method: Literal['conversation/create'] = 'conversation/create'
79 |
80 |
81 | class CreateConversationResponse(JSONRPCResponse):
82 | result: Conversation | None = None
83 |
84 |
85 | class ListTaskRequest(JSONRPCRequest):
86 | method: Literal['task/list'] = 'task/list'
87 |
88 |
89 | class ListTaskResponse(JSONRPCResponse):
90 | result: list[Task] | None = None
91 |
92 |
93 | class RegisterAgentRequest(JSONRPCRequest):
94 | method: Literal['agent/register'] = 'agent/register'
95 | # This is the base url of the agent card
96 | params: str | None = None
97 |
98 |
99 | class RegisterAgentResponse(JSONRPCResponse):
100 | result: str | None = None
101 |
102 |
103 | class ListAgentRequest(JSONRPCRequest):
104 | method: Literal['agent/list'] = 'agent/list'
105 |
106 |
107 | class ListAgentResponse(JSONRPCResponse):
108 | result: list[AgentCard] | None = None
109 |
110 |
111 | AgentRequest = TypeAdapter(
112 | Annotated[
113 | SendMessageRequest | ListConversationRequest,
114 | Field(discriminator='method'),
115 | ]
116 | )
117 |
118 |
119 | class AgentClientError(Exception):
120 | pass
121 |
122 |
123 | class AgentClientHTTPError(AgentClientError):
124 | def __init__(self, status_code: int, message: str):
125 | self.status_code = status_code
126 | self.message = message
127 | super().__init__(f'HTTP Error {status_code}: {message}')
128 |
129 |
130 | class AgentClientJSONError(AgentClientError):
131 | def __init__(self, message: str):
132 | self.message = message
133 | super().__init__(f'JSON Error: {message}')
134 |
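A minimal sketch of dispatching an incoming JSON-RPC payload with the `AgentRequest` adapter above (the payload dict is illustrative; `validate_python` selects the concrete request class via the `method` discriminator):

```python
payload = {'jsonrpc': '2.0', 'id': '1', 'method': 'conversation/list'}
request = AgentRequest.validate_python(payload)
print(type(request).__name__)  # ListConversationRequest
```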
--------------------------------------------------------------------------------
/swarm/README.md:
--------------------------------------------------------------------------------
1 | # Swarm Examples
2 |
3 | Welcome to the Swarm examples! This directory contains ready-to-run examples demonstrating how to use OpenAI Swarm, a lightweight framework for multi-agent orchestration built on agent handoffs and function calling.
4 |
5 | ## ⚡ Quick Start (5-Minute Setup)
6 |
7 | Let's create your first Swarm agent system! We'll start with a simple two-agent handoff example.
8 |
9 | 1. Install dependencies:
10 | ```bash
11 | cd swarm
12 | pip install -r requirements.txt
13 | ```
14 |
15 | 2. Set up environment:
16 | ```bash
17 | cp env.py.example env.py
18 | ```
19 |
20 | Edit `env.py` with your settings:
21 | ```python
22 | # OpenAI API configurations
23 | OPENAI_API_KEY = "your-api-key-here"
24 | ```
25 |
26 | 3. Run your first swarm:
27 | ```bash
28 | python helloworld.py
29 | ```
30 |
31 | 4. Expected output:
32 | ```shell
33 | Hope glimmers brightly,
34 | New paths converge gracefully,
35 | What can I assist?
36 | ```
37 |
38 | ## 🚀 Available Examples
39 |
40 | 1. `helloworld.py` - Basic agent handoff
41 |    - Learn the fundamentals of Swarm
42 |    - Understand agent-to-agent handoffs
43 |    - See the haiku output shown above
44 |
45 | 2. Upstream examples - More complete scenarios in [openai/swarm](https://github.com/openai/swarm/tree/main/examples)
46 |    - `triage_agent` - Route a request to the right specialized agent
47 |    - `weather_agent` - Function calling with a simple weather tool
48 |    - `airline` - Multi-agent customer service flows
49 |
50 | 3. Build your own
51 |    - Define agents with focused instructions
52 |    - Add plain Python functions as tools
53 |    - Hand off by returning another `Agent` from a function
54 |
55 | ## 💡 Key Features
56 |
57 | - **Lightweight Orchestration**: Coordinate multiple agents with minimal abstractions
58 | - **Agent Handoffs**: Transfer a conversation by returning another `Agent` from a tool function
59 | - **Function Calling**: Expose plain Python functions as agent tools
60 | - **Context Variables**: Share state across agents within a run
61 | - **Stateless Client**: A thin wrapper over the Chat Completions API, easy to test
62 |
63 | ## 🔧 Customization
64 |
65 | 1. Define an agent:
66 | ```python
67 | from swarm import Swarm, Agent
68 |
69 | agent = Agent(
70 |     name="Assistant",
71 |     instructions="You are a helpful assistant.",
72 | )
73 | ```
74 |
75 | 2. Add tools as plain Python functions:
76 | ```python
77 | def get_weather(location: str) -> str:
78 |     return f"It's sunny in {location}."
79 |
80 | agent.functions.append(get_weather)
81 | ```
82 |
83 | 3. Hand off to another agent and run:
84 | ```python
85 | haiku_agent = Agent(name="Haiku Agent", instructions="Only respond in haikus.")
86 |
87 | def transfer_to_haiku_agent():
88 |     return haiku_agent
89 |
90 | agent.functions.append(transfer_to_haiku_agent)
91 |
92 | client = Swarm()
93 | response = client.run(agent=agent, messages=[{"role": "user", "content": "Hi!"}])
94 | print(response.messages[-1]["content"])
95 | ```
96 |
97 | ## ⚠️ Important Notes
98 |
99 | 1. Ensure `env.py` is properly configured before running examples
100 | 2. API keys and sensitive information are added to `.gitignore`
101 | 3. Some examples may require specific model capabilities
102 | 4. Monitor resource usage when running large swarms
103 |
104 | ## 🤝 Next Steps
105 |
106 | - Start with `helloworld.py` to understand basic concepts
107 | - Experiment with agent instructions and handoff functions
108 | - Try exposing your own Python functions as tools
109 | - Explore context variables for sharing state between agents
110 |
111 | ## 📚 Additional Resources
112 |
113 | - [Swarm GitHub Repository](https://github.com/openai/swarm)
114 | - [Community Examples](https://github.com/openai/swarm/tree/main/examples)
115 | - [OpenAI API Documentation](https://platform.openai.com/docs/quickstart?language=python)
116 |
117 | ## 🛠️ Troubleshooting
118 |
119 | 1. Installation Issues:
120 | - Check Python version (3.10+ required)
121 | - Verify all dependencies are installed
122 | - Try creating a fresh virtual environment (conda recommended)
123 |
124 | 2. API Errors:
125 | - Verify API key in `env.py`
126 | - Ensure sufficient API credits
127 |
128 | 3. Performance Issues:
129 | - Reduce the number of agents and turns for testing
130 | - Monitor memory usage
131 | - Check network connectivity
132 |
--------------------------------------------------------------------------------