├── assets ├── 1_CrewAI_Input.jpg ├── 3_Agent_Response.jpg ├── AIAgentwithObservability.png ├── 2_CrewAI_LLM_Call_Response.jpg ├── langfuse_observability_trace.jpg └── agent_output_example.txt ├── aiagent ├── config │ ├── __init__.py │ └── settings.py ├── core │ ├── __init__.py │ ├── agent.py │ └── chatbot.py └── __init__.py ├── scripts ├── run_agent.py └── run_chatbot.py ├── pyproject.toml ├── main.py ├── .gitignore ├── langgraph_chatbot.py └── README.md /assets/1_CrewAI_Input.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ashishpatel26/aiagent-with-observability/main/assets/1_CrewAI_Input.jpg -------------------------------------------------------------------------------- /assets/3_Agent_Response.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ashishpatel26/aiagent-with-observability/main/assets/3_Agent_Response.jpg -------------------------------------------------------------------------------- /aiagent/config/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Configuration module for AI Agent. 
3 | """ 4 | 5 | from .settings import settings 6 | 7 | __all__ = ["settings"] -------------------------------------------------------------------------------- /assets/AIAgentwithObservability.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ashishpatel26/aiagent-with-observability/main/assets/AIAgentwithObservability.png -------------------------------------------------------------------------------- /assets/2_CrewAI_LLM_Call_Response.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ashishpatel26/aiagent-with-observability/main/assets/2_CrewAI_LLM_Call_Response.jpg -------------------------------------------------------------------------------- /assets/langfuse_observability_trace.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ashishpatel26/aiagent-with-observability/main/assets/langfuse_observability_trace.jpg -------------------------------------------------------------------------------- /aiagent/core/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Core AI agent implementations. 3 | """ 4 | 5 | from .chatbot import create_chatbot_graph 6 | from .agent import create_crew_agent 7 | 8 | __all__ = ["create_chatbot_graph", "create_crew_agent"] -------------------------------------------------------------------------------- /aiagent/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | AI Agent with Observability Package 3 | 4 | This package provides AI agent implementations with comprehensive observability 5 | using LangGraph, CrewAI, OpenRouter, and Langfuse. 
6 | """ 7 | 8 | __version__ = "0.1.0" 9 | __author__ = "AI Agent Team" 10 | __description__ = "AI agent implementations with observability" -------------------------------------------------------------------------------- /scripts/run_agent.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Script to run the CrewAI agent. 4 | """ 5 | 6 | import logging 7 | from dotenv import load_dotenv 8 | 9 | from aiagent.core.agent import run_crew_agent 10 | 11 | # Configure logging 12 | logging.basicConfig( 13 | level=logging.INFO, 14 | format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' 15 | ) 16 | 17 | load_dotenv() 18 | 19 | if __name__ == "__main__": 20 | run_crew_agent() -------------------------------------------------------------------------------- /scripts/run_chatbot.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Script to run the LangGraph chatbot. 
4 | """ 5 | 6 | import logging 7 | from dotenv import load_dotenv 8 | 9 | from aiagent.core.chatbot import run_chatbot 10 | 11 | # Configure logging 12 | logging.basicConfig( 13 | level=logging.INFO, 14 | format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' 15 | ) 16 | 17 | load_dotenv() 18 | 19 | if __name__ == "__main__": 20 | run_chatbot() -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "aiagent-with-observability" 3 | version = "0.1.0" 4 | description = "AI Agent with comprehensive observability using LangGraph, CrewAI, and Langfuse" 5 | readme = "README.md" 6 | requires-python = ">=3.12" 7 | dependencies = [ 8 | "crewai>=1.6.1", 9 | "langchain>=1.1.0", 10 | "langchain-community>=0.4.1", 11 | "langchain-openai>=1.1.0", 12 | "langfuse>=3.10.3", 13 | "langgraph>=1.0.4", 14 | "litellm>=1.80.7", 15 | "openinference-instrumentation-crewai>=0.1.16", 16 | "python-dotenv>=1.2.1", 17 | "pydantic>=2.0.0", 18 | "pydantic-settings>=2.0.0", 19 | ] 20 | 21 | [build-system] 22 | requires = ["setuptools>=61.0", "wheel"] 23 | build-backend = "setuptools.build_meta" 24 | 25 | [tool.setuptools.packages.find] 26 | where = ["."] 27 | include = ["aiagent*"] 28 | -------------------------------------------------------------------------------- /aiagent/config/settings.py: -------------------------------------------------------------------------------- 1 | """ 2 | Application settings and configuration. 
3 | """ 4 | 5 | from os import getenv 6 | from pydantic import BaseSettings, Field 7 | 8 | 9 | class Settings(BaseSettings): 10 | """Application settings loaded from environment variables.""" 11 | 12 | # Langfuse settings 13 | langfuse_secret_key: str = Field(..., env="LANGFUSE_SECRET_KEY") 14 | langfuse_public_key: str = Field(..., env="LANGFUSE_PUBLIC_KEY") 15 | langfuse_base_url: str = Field("http://localhost:3000", env="LANGFUSE_BASE_URL") 16 | 17 | # OpenRouter settings 18 | openrouter_api_key: str = Field(..., env="OPENROUTER_API_KEY") 19 | openrouter_base_url: str = "https://openrouter.ai/api/v1" 20 | openrouter_model: str = "z-ai/glm-4.5-air:free" 21 | 22 | # Optional site settings for OpenRouter 23 | site_url: str | None = Field(None, env="YOUR_SITE_URL") 24 | site_name: str | None = Field(None, env="YOUR_SITE_NAME") 25 | 26 | # LLM settings 27 | temperature: float = 0.2 28 | 29 | class Config: 30 | env_file = ".env" 31 | case_sensitive = False 32 | 33 | 34 | settings = Settings() -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | from dotenv import load_dotenv 2 | from crewai import Agent, Task, Crew 3 | from langchain_openai import ChatOpenAI 4 | from openinference.instrumentation.crewai import CrewAIInstrumentor 5 | 6 | load_dotenv() 7 | 8 | # Instrument CrewAI for observability 9 | CrewAIInstrumentor().instrument() 10 | 11 | # Initialize LLM 12 | llm = ChatOpenAI( 13 | model="openrouter/z-ai/glm-4.5-air:free", 14 | temperature=0.2) 15 | 16 | def main(): 17 | print("AI Agent Assistant! Describe a task for the agent to perform. 
Type 'exit' or 'quit' to end.") 18 | while True: 19 | user_input = input("Task: ") 20 | if user_input.lower() in ['exit', 'quit']: 21 | print("Goodbye!") 22 | break 23 | 24 | # Create agent 25 | agent = Agent( 26 | role="AI Assistant", 27 | goal="Complete the user's task effectively", 28 | backstory="You are a helpful AI assistant capable of performing various tasks.", 29 | llm=llm 30 | ) 31 | 32 | # Create task based on user input 33 | task = Task( 34 | description=user_input, 35 | expected_output="A clear and helpful response to the task.", 36 | agent=agent 37 | ) 38 | 39 | # Create and run crew 40 | crew = Crew(agents=[agent], tasks=[task]) 41 | result = crew.kickoff() 42 | print(f"Agent Result: {result}") 43 | 44 | if __name__ == "__main__": 45 | main() 46 | -------------------------------------------------------------------------------- /assets/agent_output_example.txt: -------------------------------------------------------------------------------- 1 | ╰────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯ 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | LiteLLM completion() model= z-ai/glm-4.5-air:free; provider = openrouter 10 | 14:46:20 - LiteLLM:INFO: utils.py:1308 - Wrapper: Completed Call, calling success_handler 11 | 2025-12-04 14:46:20,021 - LiteLLM - INFO - Wrapper: Completed Call, calling success_handler 12 | Agent Result: Langfuse is like a special dashboard or notebook that helps developers keep track of their AI applications. When developers build applications that use AI (like chatbots, content generators, or recommendation systems), Langfuse helps them see what's happening "under the hood." 
13 | 14 | Think of it like a flight recorder for airplanes - it records everything that happens when the AI is working: 15 | - What questions users are asking 16 | - What information the AI is looking at 17 | - What decisions the AI is making 18 | - What answers the AI is giving back 19 | 20 | This helps developers when something goes wrong. If an AI gives a strange or incorrect answer, developers can look at Langfuse to see exactly what happened and fix the problem. It's also useful for improving the AI over time by seeing what kinds of questions it handles well and where it struggles. 21 | 22 | Langfuse is particularly helpful for teams building complex AI systems because it provides a centralized place to monitor and understand how their AI is performing across many different situations. 23 | 2025-12-04 14:46:20,038 - aiagent.core.agent - INFO - Task completed successfully 24 | Task: ╭───────────────────────────────────────────────────────────── Trace Batch Finalization ─────────────────────────────────────────────────────────────╮ 25 | │ ✅ Trace batch finalized with session ID: a2ce2f20-70dd-4300-a6c1-c3fb97bf6413 │ 26 | │ │ 27 | │ 🔗 View here: https://app.crewai.com/crewai_plus/ephemeral_trace_batches/a2ce2f20-70dd-4300-a6c1-c3fb97bf6413?access_code=TRACE-9e144713e1 │ 28 | │ 🔑 Access Code: TRACE-9e144713e1 │ -------------------------------------------------------------------------------- /aiagent/core/agent.py: -------------------------------------------------------------------------------- 1 | """ 2 | CrewAI-based agent implementation with observability. 
3 | """ 4 | 5 | import logging 6 | from crewai import Agent, Task, Crew 7 | from langchain_openai import ChatOpenAI 8 | from openinference.instrumentation.crewai import CrewAIInstrumentor 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | 13 | def create_crew_llm(): 14 | """Create and configure the ChatOpenAI LLM for CrewAI.""" 15 | return ChatOpenAI( 16 | model="openrouter/z-ai/glm-4.5-air:free", 17 | temperature=0.2 18 | ) 19 | 20 | 21 | def create_crew_agent(task_description: str): 22 | """ 23 | Create and run a CrewAI agent for a specific task. 24 | 25 | Args: 26 | task_description: Description of the task to perform. 27 | 28 | Returns: 29 | Result of the agent execution. 30 | """ 31 | llm = create_crew_llm() 32 | 33 | agent = Agent( 34 | role="AI Assistant", 35 | goal="Complete the user's task effectively", 36 | backstory="You are a helpful AI assistant capable of performing various tasks.", 37 | llm=llm 38 | ) 39 | 40 | task = Task( 41 | description=task_description, 42 | expected_output="A clear and helpful response to the task.", 43 | agent=agent 44 | ) 45 | 46 | crew = Crew(agents=[agent], tasks=[task]) 47 | return crew.kickoff() 48 | 49 | 50 | def run_crew_agent(): 51 | """Run the interactive CrewAI agent.""" 52 | # Instrument CrewAI for observability 53 | CrewAIInstrumentor().instrument() 54 | 55 | logger.info("Starting CrewAI agent") 56 | print("AI Agent Assistant! Describe a task for the agent to perform. 
Type 'exit' or 'quit' to end.") 57 | 58 | while True: 59 | try: 60 | user_input = input("Task: ").strip() 61 | if not user_input: 62 | continue 63 | 64 | if user_input.lower() in ['exit', 'quit']: 65 | logger.info("User ended the agent session") 66 | print("Goodbye!") 67 | break 68 | 69 | logger.info(f"Processing task: {user_input[:50]}...") 70 | result = create_crew_agent(user_input) 71 | print(f"Agent Result: {result}") 72 | logger.info("Task completed successfully") 73 | 74 | except KeyboardInterrupt: 75 | logger.info("Agent interrupted by user") 76 | print("\nGoodbye!") 77 | break 78 | except Exception as e: 79 | logger.error(f"Error during agent execution: {e}") 80 | print(f"An error occurred: {e}") 81 | print("Please try again.") -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy/ 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | # IDE 132 | .vscode/ 133 | .idea/ 134 | 135 | # OS 136 | .DS_Store 137 | Thumbs.db 138 | 139 | # Langfuse 140 | langfuse/ 141 | -------------------------------------------------------------------------------- /aiagent/core/chatbot.py: -------------------------------------------------------------------------------- 1 | """ 2 | LangGraph-based chatbot implementation with observability. 3 | """ 4 | 5 | import logging 6 | from typing import Annotated 7 | from os import getenv 8 | 9 | from langchain_core.messages import HumanMessage, AIMessage 10 | from langchain_openai import ChatOpenAI 11 | from langfuse.langchain import CallbackHandler 12 | from langgraph.graph import StateGraph 13 | from langgraph.graph.message import add_messages 14 | from typing_extensions import TypedDict 15 | 16 | # Configure logging 17 | logger = logging.getLogger(__name__) 18 | 19 | 20 | class State(TypedDict): 21 | """ 22 | State definition for the LangGraph chatbot. 23 | 24 | The 'messages' key holds the conversation history, with the add_messages 25 | function ensuring new messages are appended rather than overwritten. 
26 | """ 27 | messages: Annotated[list, add_messages] 28 | 29 | 30 | def create_chatbot_llm(): 31 | """Create and configure the ChatOpenAI LLM for the chatbot.""" 32 | headers = {} 33 | if getenv("YOUR_SITE_URL"): 34 | headers["HTTP-Referer"] = getenv("YOUR_SITE_URL") 35 | if getenv("YOUR_SITE_NAME"): 36 | headers["X-Title"] = getenv("YOUR_SITE_NAME") 37 | 38 | return ChatOpenAI( 39 | api_key=getenv("OPENROUTER_API_KEY"), 40 | base_url="https://openrouter.ai/api/v1", 41 | model="z-ai/glm-4.5-air:free", 42 | temperature=0.2, 43 | default_headers=headers if headers else None 44 | ) 45 | 46 | 47 | def create_chatbot_graph(): 48 | """ 49 | Create and configure the LangGraph chatbot. 50 | 51 | Returns: 52 | Compiled StateGraph ready for execution. 53 | """ 54 | llm = create_chatbot_llm() 55 | 56 | def chatbot(state: State): 57 | """ 58 | Chatbot node that processes the current state and generates a response. 59 | 60 | Args: 61 | state: Current conversation state containing messages. 62 | 63 | Returns: 64 | Updated state with the AI's response added to messages. 65 | """ 66 | return {"messages": [llm.invoke(state["messages"])]} 67 | 68 | graph_builder = StateGraph(State) 69 | graph_builder.add_node("chatbot", chatbot) 70 | graph_builder.set_entry_point("chatbot") 71 | graph_builder.set_finish_point("chatbot") 72 | 73 | return graph_builder.compile() 74 | 75 | 76 | def run_chatbot(): 77 | """Run the interactive chatbot.""" 78 | graph = create_chatbot_graph() 79 | langfuse_handler = CallbackHandler() 80 | 81 | messages = [] 82 | logger.info("Starting AI agent chatbot") 83 | print("Chat with the AI agent! 
Type 'exit' or 'quit' to end.") 84 | 85 | while True: 86 | try: 87 | user_input = input("You: ").strip() 88 | if not user_input: 89 | continue 90 | 91 | if user_input.lower() in ['exit', 'quit']: 92 | logger.info("User ended the chat session") 93 | print("Goodbye!") 94 | break 95 | 96 | messages.append(HumanMessage(content=user_input)) 97 | logger.info(f"Processing user message: {user_input[:50]}...") 98 | 99 | # Stream the response 100 | response_received = False 101 | for chunk in graph.stream({"messages": messages}, config={"callbacks": [langfuse_handler]}): 102 | if 'chatbot' in chunk: 103 | ai_message = chunk['chatbot']['messages'][-1] 104 | print(f"AI: {ai_message.content}") 105 | messages.append(ai_message) 106 | response_received = True 107 | logger.info("AI response generated successfully") 108 | break 109 | 110 | if not response_received: 111 | logger.warning("No response received from AI") 112 | print("AI: Sorry, I couldn't generate a response.") 113 | 114 | except KeyboardInterrupt: 115 | logger.info("Chat interrupted by user") 116 | print("\nGoodbye!") 117 | break 118 | except Exception as e: 119 | logger.error(f"Error during chat: {e}") 120 | print(f"An error occurred: {e}") 121 | print("Please try again.") -------------------------------------------------------------------------------- /langgraph_chatbot.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from dotenv import load_dotenv 3 | from typing import Annotated 4 | from os import getenv 5 | 6 | from langchain_openai import ChatOpenAI 7 | from langchain_core.messages import HumanMessage 8 | from typing_extensions import TypedDict 9 | 10 | from langgraph.graph import StateGraph 11 | from langgraph.graph.message import add_messages 12 | from langfuse.langchain import CallbackHandler 13 | 14 | # Configure logging 15 | logging.basicConfig( 16 | level=logging.INFO, 17 | format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' 18 | ) 19 | 
logger = logging.getLogger(__name__) 20 | 21 | load_dotenv() 22 | 23 | # Initialize Langfuse CallbackHandler for Langchain (tracing) 24 | langfuse_handler = CallbackHandler() 25 | 26 | class State(TypedDict): 27 | # Messages have the type "list". The `add_messages` function in the annotation defines how this state key should be updated 28 | # (in this case, it appends messages to the list, rather than overwriting them) 29 | messages: Annotated[list, add_messages] 30 | 31 | graph_builder = StateGraph(State) 32 | 33 | headers = {} 34 | if getenv("YOUR_SITE_URL"): 35 | headers["HTTP-Referer"] = getenv("YOUR_SITE_URL") 36 | if getenv("YOUR_SITE_NAME"): 37 | headers["X-Title"] = getenv("YOUR_SITE_NAME") 38 | 39 | llm = ChatOpenAI( 40 | api_key=getenv("OPENROUTER_API_KEY"), 41 | base_url="https://openrouter.ai/api/v1", 42 | model="z-ai/glm-4.5-air:free", 43 | temperature=0.2, 44 | default_headers=headers if headers else None 45 | ) 46 | 47 | # The chatbot node function takes the current State as input and returns an updated messages list. This is the basic pattern for all LangGraph node functions. 48 | def chatbot(state: State): 49 | return {"messages": [llm.invoke(state["messages"])]} 50 | 51 | # Add a "chatbot" node. Nodes represent units of work. They are typically regular python functions. 52 | graph_builder.add_node("chatbot", chatbot) 53 | 54 | # Add an entry point. This tells our graph where to start its work each time we run it. 55 | graph_builder.set_entry_point("chatbot") 56 | 57 | # Set a finish point. This instructs the graph "any time this node is run, you can exit." 58 | graph_builder.set_finish_point("chatbot") 59 | 60 | # To be able to run our graph, call "compile()" on the graph builder. This creates a "CompiledGraph" we can use invoke on our state. 
61 | graph = graph_builder.compile() 62 | 63 | def main(): 64 | """Main function for the interactive chatbot.""" 65 | messages = [] 66 | logger.info("Starting AI agent chatbot") 67 | print("Chat with the AI agent! Type 'exit' or 'quit' to end.") 68 | 69 | while True: 70 | try: 71 | user_input = input("You: ").strip() 72 | if not user_input: 73 | continue 74 | 75 | if user_input.lower() in ['exit', 'quit']: 76 | logger.info("User ended the chat session") 77 | print("Goodbye!") 78 | break 79 | 80 | messages.append(HumanMessage(content=user_input)) 81 | logger.info(f"Processing user message: {user_input[:50]}...") 82 | 83 | # Stream the response 84 | response_received = False 85 | for chunk in graph.stream({"messages": messages}, config={"callbacks": [langfuse_handler]}): 86 | if 'chatbot' in chunk: 87 | ai_message = chunk['chatbot']['messages'][-1] 88 | print(f"AI: {ai_message.content}") 89 | messages.append(ai_message) 90 | response_received = True 91 | logger.info("AI response generated successfully") 92 | break # Assuming single response per turn 93 | 94 | if not response_received: 95 | logger.warning("No response received from AI") 96 | print("AI: Sorry, I couldn't generate a response.") 97 | 98 | except KeyboardInterrupt: 99 | logger.info("Chat interrupted by user") 100 | print("\nGoodbye!") 101 | break 102 | except Exception as e: 103 | logger.error(f"Error during chat: {e}") 104 | print(f"An error occurred: {e}") 105 | print("Please try again.") 106 | 107 | if __name__ == "__main__": 108 | main() -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # AI Agent with Observability 2 | 3 | [](https://python.org) [](https://opensource.org/licenses/MIT) [](https://langchain-ai.github.io/langgraph/) [](https://python.langchain.com/) [](https://www.crewai.com/) 4 | 5 | --- 6 | 7 |  8 | 9 | A comprehensive AI agent implementation using 
**LangGraph, CrewAI, OpenRouter, and Langfuse** for intelligent conversations and observability. 10 | 11 | [Quick Start](#setup) • [Features](#features) • [Installation](#installation) • [Usage](#usage) • [Documentation](#architecture) 12 | 13 | 14 | 15 | ## Features 16 | 17 | - **LangGraph Chatbot**: Interactive conversational AI using LangGraph state management 18 | - **CrewAI Agent**: Task-oriented AI agent for complex operations 19 | - **OpenRouter Integration**: Access to multiple LLM providers through OpenRouter API 20 | - **Langfuse Observability**: Full tracing and monitoring of AI interactions 21 | - **Local Langfuse**: Self-hosted observability platform 22 | 23 | ### Key Capabilities 24 | 25 | - Intelligent conversational AI with context awareness 26 | - Persistent conversation states using LangGraph 27 | - Multi-agent collaboration systems 28 | - End-to-end observability and monitoring 29 | - Easy integration with Docker and modern Python tooling 30 | - High-performance AI processing 31 | 32 | ## Setup 33 | 34 | ### Prerequisites 35 | 36 | - Python 3.12 or higher 37 | - uv package manager 38 | - Docker (for Langfuse) 39 | 40 | ### Installation 41 | 42 | 1. Clone the repository: 43 | 44 | ```bash 45 | git clone https://github.com/ashishpatel26/aiagent-with-observability.git 46 | cd aiagent-with-observability 47 | ``` 48 | 2. Install dependencies: 49 | 50 | ```bash 51 | uv sync 52 | ``` 53 | 3. Set up environment variables: 54 | 55 | ```bash 56 | cp .env.example .env 57 | # Edit .env with your API keys 58 | ``` 59 | 4. Install and start Langfuse locally: 60 | 61 | **Option 1: Using the included Langfuse setup** 62 | 63 | ```bash 64 | cd langfuse 65 | docker-compose up -d 66 | ``` 67 | 68 | **Option 2: Full installation from scratch** 69 | 70 | a. Install Docker and Docker Compose on your system. 71 | 72 | b. Clone the Langfuse repository: 73 | 74 | ```bash 75 | git clone https://github.com/langfuse/langfuse.git 76 | cd langfuse 77 | ``` 78 | 79 | c. 
Create environment file: 80 | 81 | ```bash 82 | cp .env.example .env 83 | # Edit .env with your configuration (database URLs, etc.) 84 | ``` 85 | 86 | d. Start the services: 87 | 88 | ```bash 89 | docker-compose up -d 90 | ``` 91 | 92 | e. Wait for services to be ready (check logs with `docker-compose logs`). 93 | 94 | f. Access Langfuse at http://localhost:3000 95 | 96 | g. Create an account and note down the API keys from the dashboard. 97 | 98 | ### Installation Sequence 99 | 100 | ```mermaid 101 | sequenceDiagram 102 | participant User 103 | participant Git 104 | participant uv 105 | participant Docker 106 | participant Langfuse 107 | 108 | User->>Git: git clone https://github.com/ashishpatel26/aiagent-with-observability.git 109 | User->>uv: uv sync 110 | User->>User: cp .env.example .env 111 | User->>User: Edit .env with API keys 112 | User->>Docker: docker-compose up -d (in langfuse/) 113 | Docker->>Langfuse: Start Langfuse services 114 | Langfuse->>User: Langfuse ready at localhost:3000 115 | User->>Langfuse: Create account & get API keys 116 | User->>User: Update .env with Langfuse keys 117 | ``` 118 | 119 | **Expected Output:** 120 | 121 | - `uv sync` should complete without errors, installing all dependencies 122 | - Docker containers should start successfully 123 | - Langfuse dashboard accessible at http://localhost:3000 124 | 125 | ### 🔑 Langfuse API Key Setup 126 | 127 | After starting Langfuse, you need to generate API keys for observability integration: 128 | 129 | 1. **Access Langfuse Dashboard** 130 | ```bash 131 | # Open your browser and go to: 132 | http://localhost:3000 133 | ``` 134 | 135 | 2. **Create an Account** 136 | - Click "Sign Up" or "Create Account" 137 | - Fill in your details (email, password) 138 | - Verify your email if required 139 | 140 | 3. 
**Generate API Keys** 141 | - Go to **Settings** → **API Keys** (or **Project Settings**) 142 | - Click **"Create new API key"** or **"Generate Keys"** 143 | - You'll see two keys: 144 | - **Public Key** (starts with `pk-`) 145 | - **Secret Key** (starts with `sk-`) 146 | 147 | 4. **Configure Environment Variables** 148 | ```bash 149 | # Edit your .env file with the generated keys: 150 | LANGFUSE_PUBLIC_KEY=pk-your-public-key-here 151 | LANGFUSE_SECRET_KEY=sk-your-secret-key-here 152 | LANGFUSE_BASE_URL=http://localhost:3000 153 | ``` 154 | 155 | 5. **Verify Configuration** 156 | ```bash 157 | # Test that keys work by running the agent: 158 | uv run scripts/run_agent.py 159 | ``` 160 | 161 | **🔒 Security Notes:** 162 | - Keep your `SECRET_KEY` secure and never commit it to version control 163 | - The `PUBLIC_KEY` is safe to share in client-side code 164 | - Use different keys for development/production environments 165 | 166 | ## Usage 167 | 168 | ### Interactive Chatbot 169 | 170 | Run the LangGraph-based chatbot: 171 | 172 | ```bash 173 | uv run scripts/run_chatbot.py 174 | ``` 175 | 176 | **Expected Output:** 177 | 178 | ```bash 179 | 2024-01-01 12:00:00,000 - aiagent.core.chatbot - INFO - Starting AI agent chatbot 180 | Chat with the AI agent! Type 'exit' or 'quit' to end. 181 | You: Hello, how are you? 182 | 2024-01-01 12:00:01,000 - aiagent.core.chatbot - INFO - Processing user message: Hello, how are you?... 183 | AI: Hello! I'm doing well, thank you for asking. How can I help you today? 184 | You: exit 185 | 2024-01-01 12:00:02,000 - aiagent.core.chatbot - INFO - User ended the chat session 186 | Goodbye! 187 | ``` 188 | 189 | This starts an interactive chat session where you can converse with the AI agent. All interactions are traced in Langfuse. 
190 | 191 | ### Task-Oriented Agent 192 | 193 | Run the CrewAI-based agent: 194 | 195 | ```bash 196 | uv run scripts/run_agent.py 197 | ``` 198 | 199 | **Expected Output:** 200 | 201 | ```bash 202 | LiteLLM completion() model= z-ai/glm-4.5-air:free; provider = openrouter 203 | 14:46:20 - LiteLLM:INFO: utils.py:1308 - Wrapper: Completed Call, calling success_handler 204 | 2025-12-04 14:46:20,021 - LiteLLM - INFO - Wrapper: Completed Call, calling success_handler 205 | Agent Result: Langfuse is like a special dashboard or notebook that helps developers keep track of their AI applications. When developers build applications that use AI (like chatbots, content generators, or recommendation systems), Langfuse helps them see what's happening "under the hood." 206 | 207 | Think of it like a flight recorder for airplanes - it records everything that happens when the AI is working: 208 | - What questions users are asking 209 | - What information the AI is looking at 210 | - What decisions the AI is making 211 | - What answers the AI is giving back 212 | 213 | This helps developers when something goes wrong. If an AI gives a strange or incorrect answer, developers can look at Langfuse to see exactly what happened and fix the problem. It's also useful for improving the AI over time by seeing what kinds of questions it handles well and where it struggles. 214 | 215 | Langfuse is particularly helpful for teams building complex AI systems because it provides a centralized place to monitor and understand how their AI is performing across many different situations. 
216 | 2025-12-04 14:46:20,038 - aiagent.core.agent - INFO - Task completed successfully 217 | Task: ╭───────────────────────────────────────────────────────────── Trace Batch Finalization ─────────────────────────────────────────────────────────────╮ 218 | │ ✅ Trace batch finalized with session ID: a2ce2f20-70dd-4300-a6c1-c3fb97bf6413 │ 219 | │ │ 220 | │ 🔗 View here: https://app.crewai.com/crewai_plus/ephemeral_trace_batches/a2ce2f20-70dd-4300-a6c1-c3fb97bf6413?access_code=TRACE-9e144713e1 │ 221 | │ 🔑 Access Code: TRACE-9e144713e1 │ 222 | ``` 223 | 224 | *See [assets/agent_output_example.txt](assets/agent_output_example.txt) for the complete output.* 225 | 226 | This executes a predefined task using the CrewAI framework with full observability enabled. The agent automatically synchronizes with CrewAI Plus for trace batch management, providing detailed execution insights and performance monitoring. 227 | 228 | ## 📸 Screenshots & Observability Demos 229 | 230 | ### 🔍 Langfuse Observability Dashboard 231 | 232 |
234 | Langfuse dashboard showing comprehensive AI agent observability traces
235 |
243 | CrewAI agent receiving and processing input tasks
244 | 245 | #### 2. LLM Call & Response 246 |
247 | CrewAI orchestrating LLM calls and processing responses
248 | 249 | #### 3. Final Agent Response 250 |
251 | Complete agent execution with final results and trace finalization
252 | 253 |