├── 05-agents ├── weather.txt ├── 01-date-time.py ├── 02-weather.py ├── 03-weather-cot.py ├── 04-weather-automate.py ├── 05-weather-agent.py └── 06-cursor-agent.py ├── 06-rags ├── nodejs.pdf ├── docker-compose.yml ├── indexing.py └── chat.py ├── 07-advance-rags ├── nodejs.pdf ├── docker-compose.yml ├── queue │ ├── connection.py │ └── worker.py ├── main.py ├── server.py └── indexing.py ├── .gitignore ├── 13-memory ├── docker-compose.yml └── main.py ├── worker.sh ├── 09-checkpointing ├── docker-compose.yaml ├── 01-chat-langgraph.py └── 02-langgraph-checkpoint.py ├── 12-human-in-loop ├── docker-compose.yml ├── 01-chat-checkpointing.py └── 02-support-assistant.py ├── 01-tokenization └── main.py ├── .devcontainer ├── Dockerfile ├── docker-compose.yaml └── devcontainer.json ├── mem0-js ├── docker-compose.yml ├── package.json └── index.js ├── 02-vector-embeddings └── main.py ├── 08-langgraph ├── 01-graph.py ├── 02-graph-llm.py └── 03-code-graph-router.py ├── README.md ├── 11-langgraph-tools ├── 01-langgraph-chat.py ├── 02-tools-basic.py ├── 03-tools-usage.py ├── 04-more-tools.py └── 05-todo-tools.py ├── 04-prompting ├── 02-few-shot.py ├── 01-zero-shot.py ├── 03-chain-of-thought.py └── 04-chain-of-thought.py ├── 03-hello-world └── chat.py ├── requirements.txt └── 10-streaming └── 01-code-judge.py /05-agents/weather.txt: -------------------------------------------------------------------------------- 1 | The weather in Hyderabad is Partly cloudy +34°C. 2 | -------------------------------------------------------------------------------- /06-rags/nodejs.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anirudhuuu/GenAI/HEAD/06-rags/nodejs.pdf -------------------------------------------------------------------------------- /07-advance-rags/nodejs.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anirudhuuu/GenAI/HEAD/07-advance-rags/nodejs.pdf -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | venv/ 3 | .DS_Store 4 | __pycache__/ 5 | .vscode/ 6 | iris-elements-rag/ 7 | 8 | node_modules/ 9 | -------------------------------------------------------------------------------- /06-rags/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | vector-db: 3 | image: qdrant/qdrant 4 | ports: 5 | - 6333:6333 6 | -------------------------------------------------------------------------------- /07-advance-rags/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | valkey: 3 | image: valkey/valkey 4 | ports: 5 | - 6379:6379 6 | -------------------------------------------------------------------------------- /13-memory/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | vector-db: 3 | image: qdrant/qdrant 4 | ports: 5 | - 6333:6333 6 | -------------------------------------------------------------------------------- /worker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export $(grep -v '^#' .env | xargs -d '\n') && rq worker --with-scheduler --url redis://valkey:6379 4 | -------------------------------------------------------------------------------- /07-advance-rags/queue/connection.py: 
-------------------------------------------------------------------------------- 1 | from redis import Redis 2 | from rq import Queue 3 | 4 | queue = Queue(connection=Redis(host="valkey")) 5 | -------------------------------------------------------------------------------- /07-advance-rags/main.py: -------------------------------------------------------------------------------- 1 | import uvicorn 2 | from .server import app 3 | 4 | 5 | def main(): 6 | uvicorn.run(app, port=8000, host="0.0.0.0") 7 | 8 | 9 | if __name__ == "__main__": 10 | main() 11 | -------------------------------------------------------------------------------- /09-checkpointing/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | mongodb: 3 | image: mongo 4 | ports: 5 | - 27017:27017 6 | environment: 7 | MONGO_INITDB_ROOT_USERNAME: admin 8 | MONGO_INITDB_ROOT_PASSWORD: admin 9 | volumes: 10 | - mongodb_data:/data/db 11 | 12 | volumes: 13 | mongodb_data: 14 | -------------------------------------------------------------------------------- /12-human-in-loop/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | mongodb: 3 | image: mongo 4 | ports: 5 | - 27017:27017 6 | environment: 7 | MONGO_INITDB_ROOT_USERNAME: admin 8 | MONGO_INITDB_ROOT_PASSWORD: admin 9 | volumes: 10 | - mongodb_data:/data/db 11 | 12 | volumes: 13 | mongodb_data: 14 | -------------------------------------------------------------------------------- /01-tokenization/main.py: -------------------------------------------------------------------------------- 1 | import tiktoken 2 | 3 | encoder = tiktoken.encoding_for_model("gpt-4o") 4 | 5 | text = "Hello, I am Anirudh" 6 | tokens = encoder.encode(text) 7 | 8 | print("Token: ", tokens) 9 | 10 | tokens = [13225, 11, 357, 939, 1689, 380, 115904] 11 | decoded = encoder.decode(tokens) 12 | 13 | print("Decoded Text: ", decoded) 14 | -------------------------------------------------------------------------------- /.devcontainer/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mcr.microsoft.com/devcontainers/python:3.12 2 | 3 | ENV PYTHONUNBUFFERED=1 4 | 5 | RUN sudo apt-get update && \ 6 | sudo apt-get install -y curl 7 | 8 | RUN pip install --upgrade pip 9 | 10 | # Install Poetry 11 | RUN curl -sSL https://install.python-poetry.org | python3 - 12 | 13 | # Add Poetry to PATH 14 | ENV PATH=/root/.local/bin:$PATH 15 | -------------------------------------------------------------------------------- /.devcontainer/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | app: 3 | build: 4 | context: .. 
5 | dockerfile: .devcontainer/Dockerfile 6 | 7 | volumes: 8 | - ../..:/workspaces:cached 9 | 10 | command: sleep infinity 11 | 12 | env_file: 13 | - ../.env 14 | 15 | valkey: 16 | image: valkey/valkey 17 | 18 | vector-db: 19 | image: qdrant/qdrant 20 | -------------------------------------------------------------------------------- /mem0-js/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | qdrant: 3 | image: qdrant/qdrant:v1.13.0 4 | container_name: qdrant-mem0 5 | ports: 6 | - "6333:6333" 7 | - "6334:6334" 8 | volumes: 9 | - qdrant_storage:/qdrant/storage 10 | environment: 11 | - QDRANT__SERVICE__HTTP_PORT=6333 12 | - QDRANT__SERVICE__GRPC_PORT=6334 13 | restart: unless-stopped 14 | 15 | volumes: 16 | qdrant_storage: 17 | -------------------------------------------------------------------------------- /02-vector-embeddings/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | from openai import OpenAI 3 | 4 | api_key = os.getenv("GEMINI_API_KEY") 5 | 6 | client = OpenAI( 7 | api_key=api_key, 8 | base_url="https://generativelanguage.googleapis.com/v1beta/openai/" 9 | ) 10 | 11 | text = "dog chases cat" 12 | 13 | response = client.embeddings.create( 14 | model="text-embedding-004", 15 | input=text 16 | ) 17 | 18 | print("Vector Embeddings", response) 19 | 20 | # Indicator for no. of dimensions for vector embedding 21 | print("Length", len(response.data[0].embedding)) 22 | -------------------------------------------------------------------------------- /mem0-js/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "mem0-js-demo", 3 | "version": "1.0.0", 4 | "description": "Mem0 AI JavaScript demo with Qdrant vector store", 5 | "main": "index.js", 6 | "type": "module", 7 | "scripts": { 8 | "start": "node index.js", 9 | "dev": "node --watch index.js" 10 | }, 11 | "dependencies": { 12 | "axios": ">=1.12.0", 13 | "dotenv": "^17.2.1", 14 | "mem0ai": "^2.1.37", 15 | "openai": "^5.16.0", 16 | "tar-fs": ">=2.1.4", 17 | "undici": ">=5.29.0" 18 | }, 19 | "keywords": [ 20 | "mem0", 21 | "ai", 22 | "memory", 23 | "qdrant", 24 | "vector-store" 25 | ], 26 | "author": "", 27 | "license": "MIT" 28 | } 29 | -------------------------------------------------------------------------------- /05-agents/01-date-time.py: -------------------------------------------------------------------------------- 1 | import os 2 | from openai import OpenAI 3 | from datetime import datetime 4 | 5 | api_key = os.getenv("GEMINI_API_KEY") 6 | 7 | client = OpenAI( 8 | api_key=api_key, 9 | base_url="https://generativelanguage.googleapis.com/v1beta/openai/" 10 | ) 11 | 12 | SYSTEM_PROMPT = f""" 13 | You are a helpful AI Assistant 14 | 15 | Today is {datetime.now().strftime("%A, %B %d, %Y")} and the time is {datetime.now().strftime("%I:%M %p")} IST 16 | """ 17 | 18 | response = client.chat.completions.create( 19 | model="gemini-2.0-flash", 20 | messages=[ 21 | {"role": "system", "content": SYSTEM_PROMPT}, 22 | {"role": "user", "content": "What is the date and time today?"} 23 | ] 24 | ) 25 | 26 | print(response.choices[0].message.content) 27 | -------------------------------------------------------------------------------- /07-advance-rags/server.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI, Query, Path 2 | from .queue.connection import queue 3 | from .queue.worker import process_query 
4 | 5 | app = FastAPI() 6 | 7 | 8 | @app.get("/") 9 | def health(): 10 | return {"status": "Server is up and running"} 11 | 12 | 13 | @app.post("/chat") 14 | def chat(query: str = Query(..., description="Chat message...")): 15 | # Take the query & push the query to queue 16 | # Internally calls as process_query(query) 17 | job = queue.enqueue(process_query, query) 18 | 19 | # Give a response to user about job received 20 | return {"status": "Queued", "job_id": job.id} 21 | 22 | 23 | @app.get("/result/{job_id}") 24 | def get_result( 25 | job_id: str = Path(..., description="Job ID...") 26 | ): 27 | job = queue.fetch_job(job_id=job_id) 28 | 29 | result = job.return_value() 30 | 31 | return {"result": result} 32 | -------------------------------------------------------------------------------- /mem0-js/index.js: -------------------------------------------------------------------------------- 1 | import "dotenv/config"; 2 | 3 | import { Memory } from "mem0ai/oss"; 4 | import { OpenAI } from "openai"; 5 | 6 | const client = new OpenAI(); 7 | 8 | const mem = new Memory({ 9 | version: "v1.1", 10 | vectorStore: { 11 | provider: "qdrant", 12 | config: { 13 | collectionName: "memories", 14 | embeddingModelDims: 1536, 15 | host: "localhost", 16 | port: 6333, 17 | }, 18 | }, 19 | }); 20 | 21 | mem.add([{ role: "user", content: "My name is Anirudh" }], { 22 | userId: "anirudh", 23 | }); 24 | 25 | async function main(query = "") { 26 | const response = await client.chat.completions.create({ 27 | model: "gpt-4.1-mini", 28 | messages: [{ role: "user", content: query }], 29 | }); 30 | 31 | console.log("Bot:", response.choices[0].message.content); 32 | } 33 | 34 | main("Hey Agent,You know my name is Anirudh and i am from Lucknow"); 35 | -------------------------------------------------------------------------------- /05-agents/02-weather.py: -------------------------------------------------------------------------------- 1 | import os 2 | from openai import OpenAI 3 | from datetime import datetime 4 | 5 | api_key = os.getenv("GEMINI_API_KEY") 6 | 7 | client = OpenAI( 8 | api_key=api_key, 9 | base_url="https://generativelanguage.googleapis.com/v1beta/openai/" 10 | ) 11 | 12 | 13 | def get_weather(city: str) -> str: 14 | # API Call to get the weather 15 | return "42°C" 16 | 17 | 18 | SYSTEM_PROMPT = f""" 19 | You are a helpful AI Assistant 20 | 21 | Today is {datetime.now().strftime("%A, %B %d, %Y")} and the time is {datetime.now().strftime("%I:%M %p")} IST 22 | Hyderabad's weather is 24°C 23 | """ 24 | 25 | response = client.chat.completions.create( 26 | model="gemini-2.0-flash", 27 | messages=[ 28 | {"role": "system", "content": SYSTEM_PROMPT}, 29 | {"role": "user", "content": "What is the weather in Hyderabad?"} 30 | ] 31 | ) 32 | 33 | print(response.choices[0].message.content) 34 | 35 | # The weather in Hyderabad is 24°C. 36 | # Not real-time data, it has taken from system prompt 37 | -------------------------------------------------------------------------------- /08-langgraph/01-graph.py: -------------------------------------------------------------------------------- 1 | from typing_extensions import TypedDict 2 | 3 | from langgraph.graph import StateGraph, START, END 4 | 5 | 6 | class State(TypedDict): 7 | query: str 8 | llm_result: str | None 9 | 10 | 11 | def chat_bot(state: State): 12 | # Get the query from the state 13 | # OpenAI LLM call 14 | # Update the state with the LLM result 15 | 16 | query = state['query'] 17 | result = "Hello, How can I assist you today?" 
18 | state['llm_result'] = result 19 | 20 | return state 21 | 22 | 23 | graph_builder = StateGraph(State) 24 | 25 | graph_builder.add_node("chat_bot", chat_bot) 26 | 27 | graph_builder.add_edge(START, "chat_bot") 28 | graph_builder.add_edge("chat_bot", END) 29 | 30 | graph = graph_builder.compile() 31 | 32 | 33 | def main(): 34 | user = input("> ") 35 | 36 | # Invoke the graph 37 | _state = { 38 | "query": user, 39 | "llm_result": None 40 | } 41 | 42 | graph_result = graph.invoke(_state) 43 | 44 | print("graph_result: ", graph_result) 45 | 46 | 47 | main() 48 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Learning GenAI Cohort", 3 | "dockerComposeFile": "docker-compose.yaml", 4 | "service": "app", 5 | 6 | "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}", 7 | "remoteUser": "root", 8 | 9 | "customizations": { 10 | "vscode": { 11 | "extensions": [ 12 | "ms-python.python", 13 | "ms-python.black-formatter", 14 | "eamodio.gitlens", 15 | "njpwerner.autodocstring", 16 | "KevinRose.vsc-python-indent", 17 | "esbenp.prettier-vscode", 18 | "usernamehw.errorlens", 19 | "tamasfe.even-better-toml", 20 | "aaron-bond.better-comments", 21 | "ms-azuretools.vscode-docker", 22 | "ms-python.flake8", 23 | "ms-python.isort" 24 | ] 25 | }, 26 | "settings": { 27 | "python.pythonPath": "/usr/local/bin/python", 28 | "python.linting.enabled": true, 29 | "python.linting.pylintEnabled": true, 30 | "python.autoComplete.addBrackets": true, 31 | 32 | "editor.defaultFormatter": "ms-python.black-formatter", 33 | "editor.formatOnSave": true 34 | } 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /07-advance-rags/indexing.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | 4 | from langchain_community.document_loaders import PyPDFLoader 5 | from langchain_text_splitters import RecursiveCharacterTextSplitter 6 | from langchain_google_genai import GoogleGenerativeAIEmbeddings 7 | 8 | from langchain_qdrant import QdrantVectorStore 9 | 10 | api_key = os.getenv("GEMINI_API_KEY") 11 | 12 | pdf_path = Path(__file__).parent / "nodejs.pdf" 13 | 14 | # Loading 15 | loader = PyPDFLoader(file_path=pdf_path) 16 | docs = loader.load() # Read PDF file page-by-page 17 | 18 | # Chunking 19 | text_splitter = RecursiveCharacterTextSplitter( 20 | chunk_size=1000, 21 | chunk_overlap=400, 22 | ) 23 | 24 | split_docs = text_splitter.split_documents(documents=docs) 25 | 26 | # Vector Embeddings 27 | embedding_model = GoogleGenerativeAIEmbeddings( 28 | model="models/embedding-001", 29 | google_api_key=api_key 30 | ) 31 | 32 | # Using [embedding_model] create embeddings of [split_docs] and store in DB 33 | vector_store = QdrantVectorStore.from_documents( 34 | documents=split_docs, 35 | url="http://vector-db:6333", 36 | collection_name="learning_vectors", 37 | embedding=embedding_model, 38 | ) 39 | 40 | print("Indexing of Documents Done...") 41 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Generative AI 2 | 3 | ```bash 4 | # Create virtual environment 5 | python -m venv venv 6 | 7 | # Activate virtual environment 8 | source venv/bin/activate 9 | 10 | # Install dependencies 11 | pip install tiktoken 12 | 13 | 
# Freeze the environment 14 | pip freeze > requirements.txt 15 | 16 | # Install from requirements.txt 17 | pip install -r requirements.txt 18 | 19 | # Install OpenAI 20 | pip install openai 21 | 22 | # Install Python Environment 23 | pip install python-dotenv 24 | 25 | # Install Google Gemini 26 | pip install google-genai 27 | 28 | # Start Docker Compose 29 | docker compose up 30 | 31 | # Pull Ollama Image to Fast API Server 32 | python ollama_api.py 33 | 34 | # Install uvicorn 35 | pip install uvicorn 36 | 37 | # Start FastAPI Server 38 | uvicorn ollama_api:app --port 8000 39 | 40 | # Start docker compose of QDrant 41 | docker compose up -d 42 | 43 | # Install FastAPI 44 | pip install fastapi[standard] 45 | 46 | # Start FastAPI Server 47 | fastapi dev server.py 48 | 49 | # Install RQ 50 | pip install rq 51 | 52 | # Install uvicorn 53 | pip install uvicorn 54 | 55 | # Run the FastAPI server 56 | python -m 07-advance-rags.main 57 | 58 | # Start the RQ worker 59 | rq worker --with-scheduler --url redis://valkey:6379 60 | ``` 61 | -------------------------------------------------------------------------------- /11-langgraph-tools/01-langgraph-chat.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from typing_extensions import TypedDict 4 | from typing import Annotated 5 | 6 | from langgraph.graph.message import add_messages 7 | from langgraph.graph import StateGraph, START, END 8 | from langchain.chat_models import init_chat_model 9 | 10 | api_key = os.getenv("GEMINI_API_KEY") 11 | 12 | 13 | class State(TypedDict): 14 | messages: Annotated[list, add_messages] 15 | 16 | 17 | llm = init_chat_model( 18 | model_provider="google_genai", 19 | model="gemini-2.0-flash", 20 | api_key=api_key, 21 | ) 22 | 23 | 24 | def chatbot(state: State): 25 | message = llm.invoke(state["messages"]) 26 | return {"messages": [message]} 27 | 28 | 29 | graph_builder = StateGraph(State) 30 | 31 | graph_builder.add_node("chatbot", chatbot) 32 | 33 | graph_builder.add_edge(START, "chatbot") 34 | graph_builder.add_edge("chatbot", END) 35 | 36 | graph = graph_builder.compile() 37 | 38 | 39 | def main(): 40 | user_query = input("> ") 41 | 42 | state = State( 43 | messages=[ 44 | {"role": "user", "content": user_query} 45 | ] 46 | ) 47 | 48 | for event in graph.stream(state, stream_mode="values"): 49 | if "messages" in event: 50 | event["messages"][-1].pretty_print() 51 | 52 | 53 | main() 54 | -------------------------------------------------------------------------------- /09-checkpointing/01-chat-langgraph.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from typing_extensions import TypedDict 4 | from typing import Annotated 5 | 6 | from langgraph.graph.message import add_messages 7 | from langchain.chat_models import init_chat_model 8 | from langgraph.graph import StateGraph, START, END 9 | 10 | api_key = os.getenv("GEMINI_API_KEY") 11 | 12 | 13 | class State(TypedDict): 14 | messages: Annotated[list, add_messages] 15 | 16 | 17 | llm = init_chat_model( 18 | model_provider="google_genai", 19 | model="gemini-2.0-flash", 20 | api_key=api_key, 21 | ) 22 | 23 | 24 | def chat_node(state: State): 25 | response = llm.invoke(state["messages"]) 26 | 27 | return { 28 | "messages": [response] 29 | } 30 | 31 | 32 | graph_builder = StateGraph(State) 33 | 34 | graph_builder.add_node("chat_node", chat_node) 35 | graph_builder.add_edge(START, "chat_node") 36 | graph_builder.add_edge("chat_node", END) 37 | 38 | graph = 
graph_builder.compile() 39 | 40 | 41 | def main(): 42 | query = input("> ") 43 | 44 | _state = { 45 | "messages": [ 46 | {"role": "user", "content": query} 47 | ] 48 | } 49 | 50 | # This creates a fresh new state for each invocation 51 | result = graph.invoke(_state) 52 | # And the state is deleted after the invocation 53 | 54 | print("result:", result) 55 | 56 | 57 | main() 58 | -------------------------------------------------------------------------------- /06-rags/indexing.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | 4 | from langchain_community.document_loaders import PyPDFLoader 5 | from langchain_text_splitters import RecursiveCharacterTextSplitter 6 | from langchain_google_genai import GoogleGenerativeAIEmbeddings 7 | 8 | from langchain_qdrant import QdrantVectorStore 9 | 10 | api_key = os.getenv("GEMINI_API_KEY") 11 | 12 | pdf_path = Path(__file__).parent / "nodejs.pdf" 13 | 14 | # Loading 15 | loader = PyPDFLoader(file_path=pdf_path) 16 | docs = loader.load() # Read PDF file page-by-page 17 | 18 | # Chunking 19 | text_splitter = RecursiveCharacterTextSplitter( 20 | chunk_size=1000, 21 | chunk_overlap=400, 22 | ) 23 | 24 | split_docs = text_splitter.split_documents(documents=docs) 25 | 26 | """ 27 | from langchain_openai import OpenAIEmbeddings 28 | 29 | OpenAI Embeddings 30 | ================== 31 | embedding_model = OpenAIEmbeddings( 32 | model="text-embedding-3-large", 33 | api_key=api_key 34 | ) 35 | """ 36 | 37 | # Vector Embeddings 38 | embedding_model = GoogleGenerativeAIEmbeddings( 39 | model="models/embedding-001", 40 | google_api_key=api_key 41 | ) 42 | 43 | # Using [embedding_model] create embeddings of [split_docs] and store in DB 44 | vector_store = QdrantVectorStore.from_documents( 45 | documents=split_docs, 46 | url="http://localhost:6333", 47 | collection_name="learning_vectors", 48 | embedding=embedding_model, 49 | ) 50 | 51 | print("Indexing of Documents Done...") 52 | -------------------------------------------------------------------------------- /08-langgraph/02-graph-llm.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from typing_extensions import TypedDict 4 | from langgraph.graph import StateGraph, START, END 5 | from openai import OpenAI 6 | 7 | api_key = os.getenv("GEMINI_API_KEY") 8 | 9 | client = OpenAI( 10 | api_key=api_key, 11 | base_url="https://generativelanguage.googleapis.com/v1beta/openai/" 12 | ) 13 | 14 | 15 | class State(TypedDict): 16 | query: str 17 | llm_result: str | None 18 | 19 | 20 | def chat_bot(state: State): 21 | # Get the query from the state 22 | query = state['query'] 23 | 24 | # OpenAI LLM call 25 | response = client.chat.completions.create( 26 | model="gemini-2.0-flash", 27 | messages=[ 28 | {"role": "user", "content": query}, 29 | ] 30 | ) 31 | 32 | result = response.choices[0].message.content 33 | 34 | # Update the state with the LLM result 35 | state['llm_result'] = result 36 | 37 | return state 38 | 39 | 40 | graph_builder = StateGraph(State) 41 | 42 | graph_builder.add_node("chat_bot", chat_bot) 43 | 44 | graph_builder.add_edge(START, "chat_bot") 45 | graph_builder.add_edge("chat_bot", END) 46 | 47 | graph = graph_builder.compile() 48 | 49 | 50 | def main(): 51 | user = input("> ") 52 | 53 | # Invoke the graph 54 | _state = { 55 | "query": user, 56 | "llm_result": None 57 | } 58 | 59 | graph_result = graph.invoke(_state) 60 | 61 | print("graph_result: ", graph_result) 62 | 63 | 64 
| main() 65 | -------------------------------------------------------------------------------- /06-rags/chat.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from langchain_google_genai import GoogleGenerativeAIEmbeddings 4 | from langchain_qdrant import QdrantVectorStore 5 | 6 | from openai import OpenAI 7 | 8 | api_key = os.getenv("GEMINI_API_KEY") 9 | 10 | client = OpenAI( 11 | api_key=api_key, 12 | base_url="https://generativelanguage.googleapis.com/v1beta/openai/" 13 | ) 14 | 15 | # Vector Embeddings 16 | embedding_model = GoogleGenerativeAIEmbeddings( 17 | model="models/embedding-001", 18 | google_api_key=api_key 19 | ) 20 | 21 | vector_db = QdrantVectorStore.from_existing_collection( 22 | url="http://localhost:6333", 23 | collection_name="learning_vectors", 24 | embedding=embedding_model, 25 | ) 26 | 27 | # Take User Query 28 | query = input("> ") 29 | 30 | # Vector Similarity Search [query] in DB 31 | search_results = vector_db.similarity_search( 32 | query=query 33 | ) 34 | 35 | # print("search_results: ", search_results) 36 | 37 | context = "\n\n".join( 38 | [f"Page Content: {result.page_content}\nPage Number: {result.metadata['page_label']}\nFile Location: {result.metadata['source']}" for result in search_results]) 39 | 40 | SYSTEM_PROMPT = f""" 41 | You are a helpful AI assistant who answers user query based on the available context retrieved from a PDF file along with page_contents and page number. 42 | 43 | You should only answer the user based on the following context and navigate the user to open the right page number to know more. 44 | 45 | Context: 46 | {context} 47 | """ 48 | 49 | # print("SYSTEM_PROMPT: ", SYSTEM_PROMPT) 50 | 51 | chat_completion = client.chat.completions.create( 52 | model="gemini-2.0-flash", 53 | messages=[ 54 | {"role": "system", "content": SYSTEM_PROMPT}, 55 | {"role": "user", "content": query} 56 | ], 57 | ) 58 | 59 | print(f"🤖: {chat_completion.choices[0].message.content}") 60 | -------------------------------------------------------------------------------- /09-checkpointing/02-langgraph-checkpoint.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from typing_extensions import TypedDict 4 | from typing import Annotated 5 | 6 | from langgraph.graph.message import add_messages 7 | from langchain.chat_models import init_chat_model 8 | from langgraph.graph import StateGraph, START, END 9 | from langgraph.checkpoint.mongodb import MongoDBSaver 10 | 11 | api_key = os.getenv("GEMINI_API_KEY") 12 | 13 | 14 | class State(TypedDict): 15 | messages: Annotated[list, add_messages] 16 | 17 | 18 | llm = init_chat_model( 19 | model_provider="google_genai", 20 | model="gemini-2.0-flash", 21 | api_key=api_key, 22 | ) 23 | 24 | 25 | def chat_node(state: State): 26 | response = llm.invoke(state["messages"]) 27 | 28 | return { 29 | "messages": [response] 30 | } 31 | 32 | 33 | graph_builder = StateGraph(State) 34 | 35 | graph_builder.add_node("chat_node", chat_node) 36 | graph_builder.add_edge(START, "chat_node") 37 | graph_builder.add_edge("chat_node", END) 38 | 39 | 40 | def compile_graph_with_checkpointer(checkpointer): 41 | graph_with_checkpointer = graph_builder.compile(checkpointer=checkpointer) 42 | return graph_with_checkpointer 43 | 44 | 45 | def main(): 46 | # MongoDB connection details 47 | DB_URI = "mongodb://admin:admin@localhost:27017" 48 | 49 | # Config for thread_id 50 | config = { 51 | "configurable": { 52 | "thread_id": 1, 53 | } 54 | } 
55 | 56 | with MongoDBSaver.from_conn_string(DB_URI) as mongo_checkpointer: 57 | graph_with_mongo = compile_graph_with_checkpointer(mongo_checkpointer) 58 | 59 | query = input("> ") 60 | 61 | _state = { 62 | "messages": [ 63 | {"role": "user", "content": query} 64 | ] 65 | } 66 | 67 | result = graph_with_mongo.invoke(_state, config) 68 | 69 | print("result:", result) 70 | 71 | 72 | main() 73 | -------------------------------------------------------------------------------- /11-langgraph-tools/02-tools-basic.py: -------------------------------------------------------------------------------- 1 | import os 2 | import requests 3 | 4 | from typing_extensions import TypedDict 5 | from typing import Annotated 6 | 7 | from langgraph.graph.message import add_messages 8 | from langgraph.graph import StateGraph, START, END 9 | 10 | from langchain.chat_models import init_chat_model 11 | from langchain.tools import tool 12 | 13 | api_key = os.getenv("GEMINI_API_KEY") 14 | 15 | 16 | @tool() 17 | def get_weather(city: str) -> str: 18 | """This tool returns the weather data about the given city.""" 19 | url = f"https://wttr.in/{city}?format=%C+%t" 20 | response = requests.get(url) 21 | 22 | if response.status_code == 200: 23 | return f"The weather in {city} is {response.text}." 24 | else: 25 | return "Sorry, I couldn't get the weather data for the city" 26 | 27 | 28 | tools = [get_weather] 29 | 30 | 31 | class State(TypedDict): 32 | messages: Annotated[list, add_messages] 33 | 34 | 35 | llm = init_chat_model( 36 | model_provider="google_genai", 37 | model="gemini-2.0-flash", 38 | api_key=api_key, 39 | ) 40 | 41 | # Bind the tools to the LLM 42 | llm_with_tools = llm.bind_tools(tools) 43 | 44 | 45 | def chatbot(state: State): 46 | message = llm_with_tools.invoke(state["messages"]) 47 | return {"messages": [message]} 48 | 49 | 50 | graph_builder = StateGraph(State) 51 | 52 | graph_builder.add_node("chatbot", chatbot) 53 | 54 | graph_builder.add_edge(START, "chatbot") 55 | graph_builder.add_edge("chatbot", END) 56 | 57 | graph = graph_builder.compile() 58 | 59 | 60 | def main(): 61 | user_query = input("> ") 62 | 63 | state = State( 64 | messages=[ 65 | {"role": "user", "content": user_query} 66 | ] 67 | ) 68 | 69 | for event in graph.stream(state, stream_mode="values"): 70 | if "messages" in event: 71 | event["messages"][-1].pretty_print() 72 | 73 | 74 | main() 75 | -------------------------------------------------------------------------------- /07-advance-rags/queue/worker.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | import os 3 | from langchain_google_genai import GoogleGenerativeAIEmbeddings 4 | from langchain_qdrant import QdrantVectorStore 5 | from openai import OpenAI 6 | 7 | api_key = os.getenv("GEMINI_API_KEY") 8 | 9 | client = OpenAI( 10 | api_key=api_key, 11 | base_url="https://generativelanguage.googleapis.com/v1beta/openai/" 12 | ) 13 | 14 | embedding_model = GoogleGenerativeAIEmbeddings( 15 | model="models/embedding-001", 16 | google_api_key=api_key 17 | ) 18 | 19 | vector_db = QdrantVectorStore.from_existing_collection( 20 | url="http://vector-db:6333", 21 | collection_name="learning_vectors", 22 | embedding=embedding_model, 23 | ) 24 | 25 | 26 | def process_query(query: str): 27 | print("Searching chunks for:", query) 28 | 29 | search_results = vector_db.similarity_search( 30 | query=query 31 | ) 32 | 33 | context = "\n\n".join( 34 | [f"Page Content: {result.page_content}\nPage Number: {result.metadata['page_label']}\nFile 
Location: {result.metadata['source']}" for result in search_results]) 35 | 36 | SYSTEM_PROMPT = f""" 37 | You are a helpful AI assistant who answers user query based on the available context retrieved from a PDF file along with page_contents and page number. 38 | 39 | You should only answer the user based on the following context and navigate the user to open the right page number to know more. 40 | 41 | Context: 42 | {context} 43 | """ 44 | 45 | chat_completion = client.chat.completions.create( 46 | model="gemini-2.0-flash", 47 | messages=[ 48 | {"role": "system", "content": SYSTEM_PROMPT}, 49 | {"role": "user", "content": query} 50 | ], 51 | ) 52 | 53 | # Save to DB 54 | print(f"🤖: {query}", { 55 | chat_completion.choices[0].message.content}, "\n\n\n") 56 | 57 | return chat_completion.choices[0].message.content 58 | -------------------------------------------------------------------------------- /04-prompting/02-few-shot.py: -------------------------------------------------------------------------------- 1 | import os 2 | from openai import OpenAI 3 | 4 | api_key = os.getenv("GEMINI_API_KEY") 5 | 6 | client = OpenAI( 7 | api_key=api_key, 8 | base_url="https://generativelanguage.googleapis.com/v1beta/openai/" 9 | ) 10 | 11 | # Few-shot prompting: The model is provided with a few examples before asking it to generate a response. 12 | 13 | SYSTEM_PROMPT = """ 14 | You are an AI expert in Coding. You only know Python and nothing else. 15 | You help users in solving their python doubts only and nothing else. 16 | If user tried to ask something else apart from Python you can roast them. 17 | 18 | Examples: 19 | User: Hey, my name is Anirudh 20 | Assistant: Hi Anirudh, how can I help you today with your Python doubts? 21 | 22 | User: How to make a chai or tea without milk? 23 | Assistant: I'm sorry, but I'm not a tea expert. I can help you with Python code, but I'm afraid I can't make tea. Maybe you should try a search engine or ask a tea expert. 24 | 25 | User: How to write a function in python? 26 | Assistant: 27 | ```python 28 | def fn_name(x: int) -> int: 29 | pass # Logic of the function 30 | ``` 31 | """ 32 | 33 | response = client.chat.completions.create( 34 | model="gemini-2.0-flash", 35 | messages=[ 36 | {"role": "system", "content": SYSTEM_PROMPT}, 37 | {"role": "user", "content": "Hey, my name is Anirudh"}, 38 | {"role": "assistant", 39 | "content": "Hi Anirudh, how can I help you today with your Python doubts?"}, 40 | {"role": "user", "content": "How to make a chai or tea without milk?"}, 41 | {"role": "assistant", "content": "I'm sorry, but I'm not a tea expert. I can help you with Python code, but I'm afraid I can't make tea. 
Maybe you should try a search engine or ask a tea expert."}, 42 | {"role": "user", "content": "How to write a code in python to add two numbers?"}, 43 | ] 44 | ) 45 | 46 | print(response.choices[0].message.content) 47 | -------------------------------------------------------------------------------- /11-langgraph-tools/03-tools-usage.py: -------------------------------------------------------------------------------- 1 | import os 2 | import requests 3 | 4 | from typing_extensions import TypedDict 5 | from typing import Annotated 6 | 7 | from langgraph.graph.message import add_messages 8 | from langgraph.graph import StateGraph, START 9 | 10 | from langchain.chat_models import init_chat_model 11 | from langchain.tools import tool 12 | 13 | from langgraph.prebuilt import ToolNode, tools_condition 14 | 15 | api_key = os.getenv("GEMINI_API_KEY") 16 | 17 | 18 | @tool() 19 | def get_weather(city: str) -> str: 20 | """This tool returns the weather data about the given city.""" 21 | url = f"https://wttr.in/{city.lower()}?format=%C+%t" 22 | response = requests.get(url) 23 | 24 | if response.status_code == 200: 25 | return f"The weather in {city} is {response.text}." 26 | else: 27 | return "Sorry, I couldn't get the weather data for the city" 28 | 29 | 30 | tools = [get_weather] 31 | 32 | 33 | class State(TypedDict): 34 | messages: Annotated[list, add_messages] 35 | 36 | 37 | llm = init_chat_model( 38 | model_provider="google_genai", 39 | model="gemini-2.0-flash", 40 | api_key=api_key, 41 | ) 42 | 43 | # Bind the tools to the LLM 44 | llm_with_tools = llm.bind_tools(tools) 45 | 46 | 47 | def chatbot(state: State): 48 | message = llm_with_tools.invoke(state["messages"]) 49 | return {"messages": [message]} 50 | 51 | 52 | tool_node = ToolNode(tools=[get_weather]) 53 | 54 | graph_builder = StateGraph(State) 55 | 56 | graph_builder.add_node("chatbot", chatbot) 57 | graph_builder.add_node("tools", tool_node) 58 | 59 | graph_builder.add_edge(START, "chatbot") 60 | 61 | # Add a condition to check if the tool is needed 62 | graph_builder.add_conditional_edges( 63 | "chatbot", 64 | tools_condition, 65 | ) 66 | 67 | graph_builder.add_edge("tools", "chatbot") 68 | 69 | graph = graph_builder.compile() 70 | 71 | 72 | def main(): 73 | user_query = input("> ") 74 | 75 | state = State( 76 | messages=[ 77 | {"role": "user", "content": user_query} 78 | ] 79 | ) 80 | 81 | for event in graph.stream(state, stream_mode="values"): 82 | if "messages" in event: 83 | event["messages"][-1].pretty_print() 84 | 85 | 86 | main() 87 | -------------------------------------------------------------------------------- /03-hello-world/chat.py: -------------------------------------------------------------------------------- 1 | import os 2 | from openai import OpenAI 3 | 4 | api_key = os.getenv("GEMINI_API_KEY") 5 | 6 | client = OpenAI( 7 | api_key=api_key, 8 | base_url="https://generativelanguage.googleapis.com/v1beta/openai/" 9 | ) 10 | 11 | """ 12 | Simple chat with AI 13 | ===================== 14 | response = client.chat.completions.create( 15 | model="gemini-2.0-flash", 16 | messages=[ 17 | {"role": "user", "content": "Hey, there"} 18 | ] 19 | ) 20 | """ 21 | 22 | """ 23 | No access to real-time data 24 | ============================== 25 | response = client.chat.completions.create( 26 | model="gemini-2.0-flash", 27 | messages=[ 28 | {"role": "user", "content": "What is the weather today?"} 29 | ] 30 | ) 31 | """ 32 | 33 | """ 34 | Stateless responses 35 | ============================== 36 | response = 
client.chat.completions.create( 37 | model="gemini-2.0-flash", 38 | messages=[ 39 | {"role": "user", "content": "Hey, my name is Anirudh"} 40 | ] 41 | ) 42 | 43 | response = client.chat.completions.create( 44 | model="gemini-2.0-flash", 45 | messages=[ 46 | {"role": "user", "content": "Whats my name?"} 47 | ] 48 | ) 49 | 50 | No access to your history, unless provided 51 | """ 52 | 53 | """ 54 | Provide the model with history and context 55 | ==================================== 56 | response = client.chat.completions.create( 57 | model="gemini-2.0-flash", 58 | messages=[ 59 | {"role": "user", "content": "Hey, my name is Anirudh"}, 60 | {"role": "assistant", 61 | "content": "Hi Anirudh, it's nice to meet you! How can I help you today?"}, 62 | {"role": "user", "content": "Whats my name?"} 63 | ] 64 | ) 65 | """ 66 | 67 | response = client.chat.completions.create( 68 | model="gemini-2.0-flash", 69 | messages=[ 70 | {"role": "user", "content": "Hey, my name is Anirudh"}, 71 | {"role": "assistant", 72 | "content": "Hi Anirudh, it's nice to meet you! How can I help you today?"}, 73 | {"role": "user", "content": "Whats my name?"}, 74 | {"role": "assistant", "content": "Your name is Anirudh."}, 75 | {"role": "user", "content": "How are you?"}, 76 | ] 77 | ) 78 | 79 | print(response.choices[0].message.content) 80 | -------------------------------------------------------------------------------- /13-memory/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | 4 | from mem0 import Memory 5 | from openai import OpenAI 6 | 7 | api_key = os.getenv("GEMINI_API_KEY") 8 | 9 | client = OpenAI( 10 | api_key=api_key, 11 | base_url="https://generativelanguage.googleapis.com/v1beta/openai/" 12 | ) 13 | 14 | config = { 15 | "version": "v1.1", 16 | 17 | "embedder": { 18 | "provider": "gemini", 19 | "config": { 20 | "api_key": api_key, 21 | "model": "models/text-embedding-004", 22 | } 23 | }, 24 | 25 | "llm": { 26 | "provider": "gemini", 27 | "config": { 28 | "api_key": api_key, 29 | "model": "gemini-2.0-flash-001", 30 | } 31 | }, 32 | 33 | "vector_store": { 34 | "provider": "qdrant", 35 | "config": { 36 | "host": "localhost", 37 | "port": "6333", 38 | "collection_name": "memories", 39 | } 40 | } 41 | } 42 | 43 | memory_client = Memory.from_config(config) 44 | 45 | 46 | def chat(): 47 | while True: 48 | user_query = input("> ") 49 | # Search stored memories relevant to the user input 50 | relevant_memories = memory_client.search( 51 | query=user_query, user_id="anirudh") 52 | 53 | memories = [ 54 | f"ID: {mem.get("id")}, Memory: {mem.get("memory")}" for mem in relevant_memories.get("results")] 55 | 56 | SYSTEM_PROMPT = f""" 57 | You are a memory-aware assistant which responds to the user with context. 58 | You are given past memories and facts about the user. 
59 | 60 | Memory of the user: 61 | {json.dumps(memories)} 62 | """ 63 | 64 | response = client.chat.completions.create( 65 | model="gemini-2.0-flash", 66 | messages=[ 67 | {"role": "system", "content": SYSTEM_PROMPT}, 68 | {"role": "user", "content": user_query} 69 | ], 70 | ) 71 | 72 | print(f"🤖: {response.choices[0].message.content}") 73 | 74 | memory_client.add([ 75 | {"role": "user", "content": user_query}, 76 | {"role": "assistant", 77 | "content": response.choices[0].message.content}, 78 | ], user_id="anirudh") 79 | 80 | 81 | chat() 82 | -------------------------------------------------------------------------------- /11-langgraph-tools/04-more-tools.py: -------------------------------------------------------------------------------- 1 | import os 2 | import requests 3 | 4 | from typing_extensions import TypedDict 5 | from typing import Annotated 6 | 7 | from langgraph.graph.message import add_messages 8 | from langgraph.graph import StateGraph, START 9 | 10 | from langchain.chat_models import init_chat_model 11 | from langchain.tools import tool 12 | 13 | from langgraph.prebuilt import ToolNode, tools_condition 14 | 15 | api_key = os.getenv("GEMINI_API_KEY") 16 | 17 | 18 | @tool() 19 | def add_two_numbers(a: int, b: int) -> int: 20 | """This tool adds two numbers.""" 21 | return a + b 22 | 23 | 24 | @tool() 25 | def get_weather(city: str) -> str: 26 | """This tool returns the weather data about the given city.""" 27 | url = f"https://wttr.in/{city.lower()}?format=%C+%t" 28 | response = requests.get(url) 29 | 30 | if response.status_code == 200: 31 | return f"The weather in {city} is {response.text}." 32 | else: 33 | return "Sorry, I couldn't get the weather data for the city" 34 | 35 | 36 | tools = [get_weather, add_two_numbers] 37 | 38 | 39 | class State(TypedDict): 40 | messages: Annotated[list, add_messages] 41 | 42 | 43 | llm = init_chat_model( 44 | model_provider="google_genai", 45 | model="gemini-2.0-flash", 46 | api_key=api_key, 47 | ) 48 | 49 | # Bind the tools to the LLM 50 | llm_with_tools = llm.bind_tools(tools) 51 | 52 | 53 | def chatbot(state: State): 54 | message = llm_with_tools.invoke(state["messages"]) 55 | return {"messages": [message]} 56 | 57 | 58 | tool_node = ToolNode(tools=tools) 59 | 60 | graph_builder = StateGraph(State) 61 | 62 | graph_builder.add_node("chatbot", chatbot) 63 | graph_builder.add_node("tools", tool_node) 64 | 65 | graph_builder.add_edge(START, "chatbot") 66 | 67 | # Add a condition to check if the tool is needed 68 | graph_builder.add_conditional_edges( 69 | "chatbot", 70 | tools_condition, 71 | ) 72 | 73 | graph_builder.add_edge("tools", "chatbot") 74 | 75 | graph = graph_builder.compile() 76 | 77 | 78 | def main(): 79 | user_query = input("> ") 80 | 81 | state = State( 82 | messages=[ 83 | {"role": "user", "content": user_query} 84 | ] 85 | ) 86 | 87 | for event in graph.stream(state, stream_mode="values"): 88 | if "messages" in event: 89 | event["messages"][-1].pretty_print() 90 | 91 | 92 | main() 93 | -------------------------------------------------------------------------------- /12-human-in-loop/01-chat-checkpointing.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from typing import Annotated 4 | from typing_extensions import TypedDict 5 | 6 | from langgraph.graph.message import add_messages 7 | 8 | from langchain.chat_models import init_chat_model 9 | # from langchain.tools import tool 10 | 11 | from langgraph.prebuilt import ToolNode, tools_condition 12 | from langgraph.graph 
import StateGraph, START, END 13 | from langgraph.checkpoint.mongodb import MongoDBSaver 14 | 15 | api_key = os.getenv("GEMINI_API_KEY") 16 | 17 | tools = [] 18 | 19 | 20 | class State(TypedDict): 21 | messages: Annotated[list, add_messages] 22 | 23 | 24 | llm = init_chat_model( 25 | model_provider="google_genai", 26 | model="gemini-2.0-flash", 27 | api_key=api_key, 28 | ) 29 | llm_with_tools = llm.bind_tools(tools=tools) 30 | 31 | 32 | def chatbot(state: State): 33 | message = llm_with_tools.invoke(state["messages"]) 34 | return {"messages": [message]} 35 | 36 | 37 | tool_node = ToolNode(tools=tools) 38 | 39 | graph_builder = StateGraph(State) 40 | 41 | graph_builder.add_node("chatbot", chatbot) 42 | graph_builder.add_node("tools", tool_node) 43 | 44 | graph_builder.add_edge(START, "chatbot") 45 | graph_builder.add_conditional_edges( 46 | "chatbot", 47 | tools_condition, 48 | ) 49 | graph_builder.add_edge("tools", "chatbot") 50 | graph_builder.add_edge("chatbot", END) 51 | 52 | 53 | def create_chat_graph(checkpointer): 54 | return graph_builder.compile(checkpointer=checkpointer) 55 | 56 | 57 | def main(): 58 | # MongoDB connection details 59 | DB_URI = "mongodb://admin:admin@localhost:27017" 60 | 61 | # Config for thread_id 62 | config = { 63 | "configurable": { 64 | "thread_id": 1, 65 | } 66 | } 67 | 68 | with MongoDBSaver.from_conn_string(DB_URI) as mongo_checkpointer: 69 | graph_with_checkpointer = create_chat_graph(mongo_checkpointer) 70 | 71 | user_input = input("> ") 72 | 73 | state = State( 74 | messages=[ 75 | { 76 | "role": "user", 77 | "content": user_input, 78 | } 79 | ] 80 | ) 81 | 82 | for event in graph_with_checkpointer.stream(state, config, stream_mode="values"): 83 | if "messages" in event: 84 | event["messages"][-1].pretty_print() 85 | 86 | 87 | main() 88 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | aiohappyeyeballs==2.6.1 2 | aiohttp>=3.12.14 3 | aiosignal>=1.4.0 4 | annotated-types==0.7.0 5 | anyio==4.9.0 6 | attrs==25.3.0 7 | backoff==2.2.1 8 | cachetools==5.5.2 9 | certifi==2025.6.15 10 | charset-normalizer==3.4.2 11 | click==8.2.1 12 | dataclasses-json==0.6.7 13 | distro==1.9.0 14 | dnspython==2.7.0 15 | email_validator==2.2.0 16 | fastapi>=0.116.0 17 | fastapi-cli==0.0.7 18 | filetype==1.2.0 19 | frozenlist==1.7.0 20 | gitdb==4.0.12 21 | GitPython==3.1.41 22 | google-ai-generativelanguage>=0.7.0 23 | google-api-core==2.25.1 24 | google-auth==2.40.3 25 | googleapis-common-protos==1.70.0 26 | greenlet==3.2.3 27 | grpcio==1.73.1 28 | grpcio-status==1.73.1 29 | h11==0.16.0 30 | h2>=4.3.0 31 | hpack==4.1.0 32 | httpcore==1.0.9 33 | httptools==0.6.4 34 | httpx==0.28.1 35 | httpx-sse==0.4.1 36 | hyperframe==6.1.0 37 | idna==3.10 38 | Jinja2==3.1.6 39 | jiter==0.10.0 40 | jsonpatch==1.33 41 | jsonpointer==3.0.0 42 | langchain>=1.0.0 43 | langchain-classic>=1.0.0 44 | langchain-core>=1.0.0 45 | langchain-google-genai>=3.0.0 46 | langchain-openai>=1.0.0 47 | langchain-qdrant>=0.2.0 48 | langchain-text-splitters>=0.3.0 49 | langgraph>=0.5.0 50 | langgraph-checkpoint>=2.1.0 51 | langgraph-checkpoint-mongodb>=0.1.4 52 | langgraph-prebuilt>=0.5.2 53 | langgraph-sdk>=0.1.72 54 | langsmith>=0.4.4 55 | lark==1.2.2 56 | markdown-it-py==3.0.0 57 | MarkupSafe==3.0.2 58 | marshmallow==3.26.1 59 | mdurl==0.1.2 60 | mem0ai==0.1.114 61 | motor==3.7.1 62 | multidict==6.6.2 63 | mypy_extensions==1.1.0 64 | numpy==2.3.1 65 | openai>=1.109.1 66 | 
orjson==3.10.18 67 | ormsgpack==1.10.0 68 | packaging==24.2 69 | portalocker==2.10.1 70 | posthog==6.0.3 71 | propcache==0.3.2 72 | proto-plus==1.26.1 73 | protobuf==6.31.1 74 | pyasn1==0.6.1 75 | pyasn1_modules==0.4.2 76 | pydantic==2.11.7 77 | pydantic-settings==2.10.1 78 | pydantic_core==2.33.2 79 | Pygments==2.19.2 80 | pymongo==4.12.1 81 | pypdf>=6.1.3 82 | python-dateutil==2.9.0.post0 83 | python-dotenv==1.1.1 84 | python-multipart==0.0.20 85 | pytz==2025.2 86 | PyYAML==6.0.2 87 | qdrant-client==1.14.3 88 | redis==6.2.0 89 | regex==2024.11.6 90 | requests==2.32.4 91 | requests-toolbelt==1.0.0 92 | rich==14.0.0 93 | rich-toolkit==0.14.7 94 | rq==2.4.0 95 | rsa==4.9.1 96 | setuptools==78.1.1 97 | shellingham==1.5.4 98 | six==1.17.0 99 | smmap==5.0.2 100 | sniffio==1.3.1 101 | SQLAlchemy==2.0.41 102 | starlette>=0.49.1 103 | tenacity==9.1.2 104 | tiktoken==0.9.0 105 | tqdm==4.67.1 106 | typer==0.16.0 107 | typing-inspect==0.9.0 108 | typing-inspection==0.4.1 109 | typing_extensions==4.14.0 110 | urllib3==2.5.0 111 | uvicorn==0.35.0 112 | uvloop==0.21.0 113 | watchfiles==1.1.0 114 | websockets==15.0.1 115 | xxhash==3.5.0 116 | yarl==1.20.1 117 | zstandard==0.23.0 118 | -------------------------------------------------------------------------------- /11-langgraph-tools/05-todo-tools.py: -------------------------------------------------------------------------------- 1 | import os 2 | import requests 3 | 4 | from typing_extensions import TypedDict 5 | from typing import Annotated 6 | 7 | from langgraph.graph.message import add_messages 8 | from langgraph.graph import StateGraph, START 9 | 10 | from langchain.chat_models import init_chat_model 11 | from langchain.tools import tool 12 | 13 | from langgraph.prebuilt import ToolNode, tools_condition 14 | 15 | api_key = os.getenv("GEMINI_API_KEY") 16 | 17 | todos = [] 18 | 19 | 20 | @tool() 21 | def add_todo(task: str): 22 | """Adds the input task to database.""" 23 | todos.append(task) 24 | return True 25 | 26 | 27 | @tool() 28 | def get_all_todos(): 29 | """Returns all the todos from the database.""" 30 | return todos 31 | 32 | 33 | @tool() 34 | def add_two_numbers(a: int, b: int) -> int: 35 | """This tool adds two numbers.""" 36 | return a + b 37 | 38 | 39 | @tool() 40 | def get_weather(city: str) -> str: 41 | """This tool returns the weather data about the given city.""" 42 | url = f"https://wttr.in/{city.lower()}?format=%C+%t" 43 | response = requests.get(url) 44 | 45 | if response.status_code == 200: 46 | return f"The weather in {city} is {response.text}." 
47 | else: 48 | return "Sorry, I couldn't get the weather data for the city" 49 | 50 | 51 | tools = [get_weather, add_two_numbers, add_todo, get_all_todos] 52 | 53 | 54 | class State(TypedDict): 55 | messages: Annotated[list, add_messages] 56 | 57 | 58 | llm = init_chat_model( 59 | model_provider="google_genai", 60 | model="gemini-2.0-flash", 61 | api_key=api_key, 62 | ) 63 | 64 | # Bind the tools to the LLM 65 | llm_with_tools = llm.bind_tools(tools) 66 | 67 | 68 | def chatbot(state: State): 69 | message = llm_with_tools.invoke(state["messages"]) 70 | return {"messages": [message]} 71 | 72 | 73 | tool_node = ToolNode(tools=tools) 74 | 75 | graph_builder = StateGraph(State) 76 | 77 | graph_builder.add_node("chatbot", chatbot) 78 | graph_builder.add_node("tools", tool_node) 79 | 80 | graph_builder.add_edge(START, "chatbot") 81 | 82 | # Add a condition to check if the tool is needed 83 | graph_builder.add_conditional_edges( 84 | "chatbot", 85 | tools_condition, 86 | ) 87 | 88 | graph_builder.add_edge("tools", "chatbot") 89 | 90 | graph = graph_builder.compile() 91 | 92 | 93 | def main(): 94 | user_query = input("> ") 95 | 96 | state = State( 97 | messages=[ 98 | {"role": "user", "content": user_query} 99 | ] 100 | ) 101 | 102 | for event in graph.stream(state, stream_mode="values"): 103 | if "messages" in event: 104 | event["messages"][-1].pretty_print() 105 | 106 | 107 | main() 108 | -------------------------------------------------------------------------------- /05-agents/03-weather-cot.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | from openai import OpenAI 4 | 5 | api_key = os.getenv("GEMINI_API_KEY") 6 | 7 | client = OpenAI( 8 | api_key=api_key, 9 | base_url="https://generativelanguage.googleapis.com/v1beta/openai/" 10 | ) 11 | 12 | 13 | def get_weather(city: str) -> str: 14 | # API Call to get the weather 15 | return "42 degrees C" 16 | 17 | 18 | SYSTEM_PROMPT = f""" 19 | You are a helpful AI Assistant who is specialized in resolving user query. 20 | You work on start, plan, action, observe mode. 21 | 22 | For the given user query and available tools, plan the step by step execution, based on the planning, 23 | select the relevant tool from the available tool. And based on the tool selected you perform an action to call the tool. 24 | 25 | Wait for the observation and based on the observation from the tool call resolve the user query. 26 | 27 | Rules: 28 | - Follow the Output JSON Format. 29 | - Always perform one step at a time and wait for the next input. 30 | - Carefully analyse the user query. 31 | 32 | Output JSON Format: 33 | {{ 34 | "step": "string", 35 | "content": "string", 36 | "function": "The name of function if the step is action", 37 | "input": "The input parameter for the function" 38 | }} 39 | 40 | Available Tools: 41 | - "get_weather": Takes a city name as input and returns the weather of the city. 42 | 43 | Example: 44 | Input: What is the weather in Hyderabad? 45 | Output: {{ "step": "plan", "content": "The user is interested in weather data of Hyderabad. So I will use the get_weather tool to get the weather data of Hyderabad." 
}} 46 | Output: {{ "step": "plan", "content": "From the available tools, I should call get_weather" }} 47 | Output: {{ "step": "action", "function": "get_weather", "input": "Hyderabad" }} 48 | Output: {{ "step": "observe", "content": "24 degrees C" }} 49 | Output: {{ "step": "output", "content": "The weather for Hyderabad seems to be 24 degrees C" }} 50 | """ 51 | 52 | response = client.chat.completions.create( 53 | model="gemini-2.0-flash", 54 | response_format={"type": "json_object"}, 55 | messages=[ 56 | {"role": "system", "content": SYSTEM_PROMPT}, 57 | {"role": "user", "content": "What is the weather in Hyderabad?"}, 58 | # Chain of thought prompt for weather agent 59 | {"role": "assistant", "content": json.dumps( 60 | {"step": "plan", "content": "The user is interested in weather data of Hyderabad. So I will use the get_weather tool to get the weather data of Hyderabad."})}, 61 | {"role": "assistant", "content": json.dumps( 62 | {"step": "plan", "content": "From the available tools, I should call get_weather"})}, 63 | {"role": "assistant", "content": json.dumps( 64 | {"step": "action", "function": "get_weather", "input": "Hyderabad"})}, 65 | {"role": "assistant", "content": json.dumps( 66 | {"step": "observe", "content": "24 degrees C"})}, 67 | # Chain of thought prompt for weather agent ends here 68 | ] 69 | ) 70 | 71 | print(response.choices[0].message.content) 72 | 73 | """ 74 | { 75 | "step": "output", 76 | "content": "The weather for Hyderabad seems to be 24 degrees C" 77 | } 78 | """ 79 | -------------------------------------------------------------------------------- /05-agents/04-weather-automate.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | from openai import OpenAI 4 | 5 | api_key = os.getenv("GEMINI_API_KEY") 6 | 7 | client = OpenAI( 8 | api_key=api_key, 9 | base_url="https://generativelanguage.googleapis.com/v1beta/openai/" 10 | ) 11 | 12 | 13 | def get_weather(city: str) -> str: 14 | # API Call to get the weather 15 | return "42 degrees C" 16 | 17 | 18 | available_tools = { 19 | "get_weather": get_weather 20 | } 21 | 22 | SYSTEM_PROMPT = f""" 23 | You are a helpful AI Assistant who is specialized in resolving user query. 24 | You work on start, plan, action, observe mode. 25 | 26 | For the given user query and available tools, plan the step by step execution, based on the planning, 27 | select the relevant tool from the available tool. And based on the tool selected you perform an action to call the tool. 28 | 29 | Wait for the observation and based on the observation from the tool call resolve the user query. 30 | 31 | Rules: 32 | - Follow the Output JSON Format. 33 | - Always perform one step at a time and wait for the next input. 34 | - Carefully analyse the user query. 35 | 36 | Output JSON Format: 37 | {{ 38 | "step": "string", 39 | "content": "string", 40 | "function": "The name of function if the step is action", 41 | "input": "The input parameter for the function", 42 | "output": "The output of the function" 43 | }} 44 | 45 | Available Tools: 46 | - "get_weather": Takes a city name as input and returns the weather of the city. 47 | 48 | Example: 49 | Input: What is the weather in Hyderabad? 50 | Output: {{ "step": "plan", "content": "The user is interested in weather data of Hyderabad. So I will use the get_weather tool to get the weather data of Hyderabad." 
}} 51 | Output: {{ "step": "plan", "content": "From the available tools, I should call get_weather" }} 52 | Output: {{ "step": "action", "function": "get_weather", "input": "Hyderabad" }} 53 | Output: {{ "step": "observe", "content": "24 degrees C" }} 54 | Output: {{ "step": "output", "content": "The weather for Hyderabad seems to be 24 degrees C" }} 55 | """ 56 | 57 | messages = [ 58 | {"role": "system", "content": SYSTEM_PROMPT}, 59 | ] 60 | 61 | query = input("> ") 62 | messages.append({"role": "user", "content": query}) 63 | 64 | while True: 65 | response = client.chat.completions.create( 66 | model="gemini-2.0-flash", 67 | response_format={"type": "json_object"}, 68 | messages=messages 69 | ) 70 | 71 | messages.append( 72 | {"role": "assistant", "content": response.choices[0].message.content}) 73 | parsed_response = json.loads(response.choices[0].message.content) 74 | 75 | if parsed_response.get("step") == "plan": 76 | print(f"🧠: {parsed_response.get("content")}") 77 | continue 78 | 79 | if parsed_response.get("step") == "action": 80 | tool_name = parsed_response.get("function") 81 | tool_input = parsed_response.get("input") 82 | 83 | print(f"🔨 Calling Tool: {tool_name} with input: {tool_input}") 84 | 85 | if available_tools.get(tool_name) is not None: 86 | output = available_tools[tool_name](tool_input) 87 | messages.append({"role": "user", "content": json.dumps( 88 | {"step": "observe", "output": output})}) 89 | continue 90 | 91 | if parsed_response.get("step") == "output": 92 | print(f"🤖: {parsed_response.get("content")}") 93 | break 94 | -------------------------------------------------------------------------------- /04-prompting/01-zero-shot.py: -------------------------------------------------------------------------------- 1 | import os 2 | from openai import OpenAI 3 | 4 | api_key = os.getenv("GEMINI_API_KEY") 5 | 6 | client = OpenAI( 7 | api_key=api_key, 8 | base_url="https://generativelanguage.googleapis.com/v1beta/openai/" 9 | ) 10 | 11 | # Zero-shot prompting: The model is given a direct question or task. 12 | 13 | SYSTEM_PROMPT = """ 14 | You are an AI expert in Coding. You only know Python and nothing else. 15 | You help users in solving their python doubts only and nothing else. 16 | If user tried to ask something else apart from Python you can roast them. 17 | """ 18 | 19 | response = client.chat.completions.create( 20 | model="gemini-2.0-flash", 21 | messages=[ 22 | {"role": "system", "content": SYSTEM_PROMPT}, 23 | {"role": "user", "content": "Hey, my name is Anirudh"}, 24 | {"role": "assistant", "content": "Alright, Anirudh, let's cut to the chase. If you're here, I'm guessing you've got some Python code that's making you question your life choices. Don't worry, we've all been there. Just try not to ask me anything that a search engine could answer, or I might start questioning *my* life choices. So, what's got you tangled in the Python web today?"}, 25 | {"role": "user", "content": "How to make a chai or tea without milk?"}, 26 | {"role": "assistant", 27 | "content": "Seriously, Anirudh? You're asking a Python AI about *tea*? I'm fluent in code, not culinary arts. Go ask a chef or, you know, maybe try a search engine. Unless you're planning to write a Python script to automate tea-making (which, admittedly, would be a pretty cool project), let's stick to the realm of programming, shall we?"}, 28 | {"role": "user", "content": "How to write a code in python to add two numbers?"}, 29 | ] 30 | ) 31 | 32 | """ 33 | Alright, alright, back to business. 
You want to add two numbers in Python? Here's the most basic way to do it: 34 | 35 | ```python 36 | def add_numbers(x, y): 37 | # This function takes two numbers as input and returns their sum. 38 | sum = x + y 39 | return sum 40 | 41 | # Example usage: 42 | num1 = 5 43 | num2 = 10 44 | result = add_numbers(num1, num2) 45 | print(f"The sum of {num1} and {num2} is: {result}") 46 | ``` 47 | 48 | **Explanation:** 49 | 50 | 1. **`def add_numbers(x, y):`**: This line defines a function named `add_numbers` that takes two arguments, `x` and `y`. These will be the numbers you want to add. 51 | 2. **`sum = x + y`**: This line performs the addition. It adds the values of `x` and `y` and assigns the result to a variable called `sum`. 52 | 3. **`return sum`**: This line returns the calculated `sum` value back to wherever the function was called from. 53 | 4. **`# Example usage:`**: This section shows how to use the function. 54 | 5. **`num1 = 5`** and **`num2 = 10`**: These lines assign the values 5 and 10 to the variables `num1` and `num2` respectively. You can change these values to any numbers you want to add. 55 | 6. **`result = add_numbers(num1, num2)`**: This line calls the `add_numbers` function, passing `num1` and `num2` as arguments. The function calculates the sum, and the result is stored in the `result` variable. 56 | 7. **`print(f"The sum of {num1} and {num2} is: {result}")`**: This line prints the result to the console, using an f-string to format the output nicely. 57 | 58 | **How to run this code:** 59 | 60 | 1. Save the code in a file named, for example, `adder.py`. 61 | 2. Open a terminal or command prompt. 62 | 3. Navigate to the directory where you saved the file. 63 | 4. Run the code by typing `python adder.py` and pressing Enter. 64 | 65 | You'll see the output: `The sum of 5 and 10 is: 15` 66 | 67 | Now, try changing the values of `num1` and `num2` to see how it works. Don't tell me you need help with that too... 68 | """ 69 | 70 | print(response.choices[0].message.content) 71 | -------------------------------------------------------------------------------- /04-prompting/03-chain-of-thought.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | from openai import OpenAI 4 | 5 | api_key = os.getenv("GEMINI_API_KEY") 6 | 7 | client = OpenAI( 8 | api_key=api_key, 9 | base_url="https://generativelanguage.googleapis.com/v1beta/openai/" 10 | ) 11 | 12 | # Chain of Thought: The model is encouraged to break down reasoning step by step before arriving at an answer. 13 | 14 | SYSTEM_PROMPT = """ 15 | You are an helpful AI assistant who is specialized in resolving user query. 16 | For the given input, analyse the input and break down the problem step by step. 17 | 18 | The steps are: you get a user input, you analyse, you think, you think again, and think for several times and then return the output with an explanation. 19 | 20 | Follow the steps in sequence that is "analyse", "think", "output", "validate" and finally "result". 21 | 22 | Rules: 23 | 1. Follow the strict JSON output as per schema. 24 | 2. Always perform one step at a time and wait for the next input. 25 | 3. Carefully analyse the user query. 26 | 27 | Output Format: 28 | {{ "step": "string", "content": "string" }} 29 | 30 | Example: 31 | Input: What is 2 + 2? 32 | Output: {{ "step": "analyse", "content": "Alright! The user is interested in maths query and user is asking a basic arithmetic operation." 
}} 33 | Output: {{ "step": "think", "content": "To perform this addition, I must go from left to right and add all the operands." }} 34 | Output: {{ "step": "output", "content": "4" }} 35 | Output: {{ "step": "validate", "content": "Seems like 4 is correct answer for 2 + 2" }} 36 | Output: {{ "step": "result", "content": "2 + 2 = 4 and this is calculated by adding all numbers" }} 37 | """ 38 | 39 | response = client.chat.completions.create( 40 | model="gemini-2.0-flash", 41 | response_format={"type": "json_object"}, 42 | messages=[ 43 | {"role": "system", "content": SYSTEM_PROMPT}, 44 | {"role": "user", "content": "What is 5 / 2 * 3 to the power of 4?"}, 45 | 46 | # Chain of Thought Starts Here 47 | {"role": "assistant", "content": json.dumps( 48 | { 49 | "step": "analyse", 50 | "content": "Okay, the user wants me to evaluate an expression. The expression involves division, multiplication and exponentiation. I need to remember the order of operations (PEMDAS/BODMAS) to get this right. Exponentiation first, then division and multiplication from left to right." 51 | } 52 | )}, 53 | {"role": "assistant", "content": json.dumps( 54 | { 55 | "step": "think", 56 | "content": "Alright, Let me evaluate this expression step by step.\n1. Evaluate the exponent: 3 to the power of 4 which is 3 * 3 * 3 * 3 = 81.\n2. Now the expression looks like this: 5 / 2 * 81.\n3. Perform division: 5 / 2 = 2.5.\n4. Now the expression looks like this: 2.5 * 81.\n5. Perform multiplication: 2.5 * 81 = 202.5." 57 | } 58 | )}, 59 | {"role": "assistant", "content": json.dumps( 60 | { 61 | "step": "output", 62 | "content": "202.5" 63 | } 64 | )}, 65 | {"role": "assistant", "content": json.dumps( 66 | { 67 | "step": "validate", 68 | "content": "The order of operations was correctly applied. Exponentiation, then division and multiplication from left to right. The calculations at each step are also correct. The final answer seems accurate." 69 | } 70 | )}, 71 | # Chain of Thought Ends Here 72 | ] 73 | ) 74 | 75 | """ 76 | { 77 | "step": "result", 78 | "content": "5 / 2 * 3 to the power of 4 is equal to 202.5. This is calculated by applying the order of operations: exponentiation, division, and multiplication." 79 | } 80 | """ 81 | 82 | print("\n\n🤖:", response.choices[0].message.content, "\n\n") 83 | -------------------------------------------------------------------------------- /05-agents/05-weather-agent.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import requests 4 | from openai import OpenAI 5 | 6 | api_key = os.getenv("GEMINI_API_KEY") 7 | 8 | client = OpenAI( 9 | api_key=api_key, 10 | base_url="https://generativelanguage.googleapis.com/v1beta/openai/" 11 | ) 12 | 13 | 14 | def get_weather(city: str) -> str: 15 | url = f"https://wttr.in/{city}?format=%C+%t" 16 | response = requests.get(url) 17 | 18 | if response.status_code == 200: 19 | return f"The weather in {city} is {response.text}." 20 | else: 21 | return "Sorry, I couldn't get the weather data for the city" 22 | 23 | 24 | available_tools = { 25 | "get_weather": get_weather 26 | } 27 | 28 | SYSTEM_PROMPT = f""" 29 | You are a helpful AI Assistant who is specialized in resolving user query. 30 | You work on start, plan, action, observe mode. 31 | 32 | For the given user query and available tools, plan the step by step execution, based on the planning, 33 | select the relevant tool from the available tool. And based on the tool selected you perform an action to call the tool. 
34 | 
35 | Wait for the observation and based on the observation from the tool call resolve the user query.
36 | 
37 | Rules:
38 | - Follow the Output JSON Format.
39 | - Always perform one step at a time and wait for the next input.
40 | - Carefully analyse the user query.
41 | 
42 | Output JSON Format:
43 | {{
44 |     "step": "string",
45 |     "content": "string",
46 |     "function": "The name of function if the step is action",
47 |     "input": "The input parameter for the function",
48 |     "output": "The output of the function"
49 | }}
50 | 
51 | Available Tools:
52 | - "get_weather": Takes a city name as input and returns the weather of the city.
53 | 
54 | Example:
55 | Input: What is the weather in Hyderabad?
56 | Output: {{ "step": "plan", "content": "The user is interested in weather data of Hyderabad. So I will use the get_weather tool to get the weather data of Hyderabad." }}
57 | Output: {{ "step": "plan", "content": "From the available tools, I should call get_weather" }}
58 | Output: {{ "step": "action", "function": "get_weather", "input": "Hyderabad" }}
59 | Output: {{ "step": "observe", "content": "24 degrees C" }}
60 | Output: {{ "step": "output", "content": "The weather for Hyderabad seems to be 24 degrees C" }}
61 | """
62 | 
63 | messages = [
64 |     {"role": "system", "content": SYSTEM_PROMPT},
65 | ]
66 | 
67 | while True:
68 |     query = input("> ")
69 |     messages.append({"role": "user", "content": query})
70 | 
71 |     while True:
72 |         response = client.chat.completions.create(
73 |             model="gemini-2.0-flash",
74 |             response_format={"type": "json_object"},
75 |             messages=messages
76 |         )
77 | 
78 |         messages.append(
79 |             {"role": "assistant", "content": response.choices[0].message.content})
80 |         parsed_response = json.loads(response.choices[0].message.content)
81 | 
82 |         if parsed_response.get("step") == "plan":
83 |             print(f"🧠: {parsed_response.get('content')}")
84 |             continue
85 | 
86 |         if parsed_response.get("step") == "action":
87 |             tool_name = parsed_response.get("function")
88 |             tool_input = parsed_response.get("input")
89 | 
90 |             print(f"🔨 Calling Tool: {tool_name} with input: {tool_input}")
91 | 
92 |             if tool_name in available_tools:  # only call tools that are actually registered
93 |                 output = available_tools[tool_name](tool_input)
94 |                 messages.append({"role": "user", "content": json.dumps(
95 |                     {"step": "observe", "output": output})})
96 |                 continue
97 | 
98 |         if parsed_response.get("step") == "output":
99 |             print(f"🤖: {parsed_response.get('content')}")
100 |             break
101 | 
--------------------------------------------------------------------------------
/05-agents/06-cursor-agent.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import requests
4 | from openai import OpenAI
5 | 
6 | api_key = os.getenv("GEMINI_API_KEY")
7 | 
8 | client = OpenAI(
9 |     api_key=api_key,
10 |     base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
11 | )
12 | 
13 | 
14 | def run_command(cmd: str):
15 |     result = os.popen(cmd).read()  # read the command's stdout so the agent can observe it (os.system only returns the exit status)
16 |     return result
17 | 
18 | 
19 | def get_weather(city: str) -> str:
20 |     url = f"https://wttr.in/{city}?format=%C+%t"
21 |     response = requests.get(url)
22 | 
23 |     if response.status_code == 200:
24 |         return f"The weather in {city} is {response.text}."
25 |     else:
26 |         return "Sorry, I couldn't get the weather data for the city"
27 | 
28 | 
29 | available_tools = {
30 |     "get_weather": get_weather,
31 |     "run_command": run_command
32 | }
33 | 
34 | SYSTEM_PROMPT = f"""
35 | You are a helpful AI Assistant who is specialized in resolving user queries.
36 | You work on start, plan, action, observe mode.
37 | 
38 | For the given user query and available tools, plan the step by step execution, based on the planning,
39 | select the relevant tool from the available tools. And based on the tool selected you perform an action to call the tool.
40 | 
41 | Wait for the observation and based on the observation from the tool call resolve the user query.
42 | 
43 | Rules:
44 | - Follow the Output JSON Format.
45 | - Always perform one step at a time and wait for the next input.
46 | - Carefully analyse the user query.
47 | 
48 | Output JSON Format:
49 | {{
50 |     "step": "string",
51 |     "content": "string",
52 |     "function": "The name of function if the step is action",
53 |     "input": "The input parameter for the function",
54 |     "output": "The output of the function"
55 | }}
56 | 
57 | Available Tools:
58 | - "get_weather": Takes a city name as input and returns the weather of the city.
59 | - "run_command": Takes linux command as string and executes the command and returns the output after executing it.
60 | 
61 | Example:
62 | Input: What is the weather in Hyderabad?
63 | Output: {{ "step": "plan", "content": "The user is interested in weather data of Hyderabad. So I will use the get_weather tool to get the weather data of Hyderabad." }}
64 | Output: {{ "step": "plan", "content": "From the available tools, I should call get_weather" }}
65 | Output: {{ "step": "action", "function": "get_weather", "input": "Hyderabad" }}
66 | Output: {{ "step": "observe", "content": "24 degrees C" }}
67 | Output: {{ "step": "output", "content": "The weather for Hyderabad seems to be 24 degrees C" }}
68 | """
69 | 
70 | messages = [
71 |     {"role": "system", "content": SYSTEM_PROMPT},
72 | ]
73 | 
74 | while True:
75 |     query = input("> ")
76 |     messages.append({"role": "user", "content": query})
77 | 
78 |     while True:
79 |         response = client.chat.completions.create(
80 |             model="gemini-2.0-flash",
81 |             response_format={"type": "json_object"},
82 |             messages=messages
83 |         )
84 | 
85 |         messages.append(
86 |             {"role": "assistant", "content": response.choices[0].message.content})
87 |         parsed_response = json.loads(response.choices[0].message.content)
88 | 
89 |         if parsed_response.get("step") == "plan":
90 |             print(f"🧠: {parsed_response.get('content')}")
91 |             continue
92 | 
93 |         if parsed_response.get("step") == "action":
94 |             tool_name = parsed_response.get("function")
95 |             tool_input = parsed_response.get("input")
96 | 
97 |             print(f"🔨 Calling Tool: {tool_name} with input: {tool_input}")
98 | 
99 |             if tool_name in available_tools:  # only call tools that are actually registered
100 |                 output = available_tools[tool_name](tool_input)
101 |                 messages.append({"role": "user", "content": json.dumps(
102 |                     {"step": "observe", "output": output})})
103 |                 continue
104 | 
105 |         if parsed_response.get("step") == "output":
106 |             print(f"🤖: {parsed_response.get('content')}")
107 |             break
108 | 
--------------------------------------------------------------------------------
/04-prompting/04-chain-of-thought.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | from openai import OpenAI
4 | 
5 | api_key = os.getenv("GEMINI_API_KEY")
6 | 
7 | client = OpenAI(
8 |     api_key=api_key,
9 |     base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
10 | )
11 | 
12 | # Chain of Thought: The model is encouraged to break down reasoning step by step before arriving at an answer.
13 | 
14 | SYSTEM_PROMPT = """
15 | You are a helpful AI assistant who is specialized in resolving user queries.
16 | For the given input, analyse the input and break down the problem step by step. 17 | 18 | The steps are: you get a user input, you analyse, you think, you think again, and think for several times and then return the output with an explanation. 19 | 20 | Follow the steps in sequence that is "analyse", "think", "output", "validate" and finally "result". 21 | 22 | Rules: 23 | 1. Follow the strict JSON output as per schema. 24 | 2. Always perform one step at a time and wait for the next input. 25 | 3. Carefully analyse the user query. 26 | 27 | Output Format: 28 | {{ "step": "string", "content": "string" }} 29 | 30 | Example: 31 | Input: What is 2 + 2? 32 | Output: {{ "step": "analyse", "content": "Alright! The user is interested in maths query and user is asking a basic arithmetic operation." }} 33 | Output: {{ "step": "think", "content": "To perform this addition, I must go from left to right and add all the operands." }} 34 | Output: {{ "step": "output", "content": "4" }} 35 | Output: {{ "step": "validate", "content": "Seems like 4 is correct answer for 2 + 2" }} 36 | Output: {{ "step": "result", "content": "2 + 2 = 4 and this is calculated by adding all numbers" }} 37 | 38 | Example: 39 | Input: What is 2 + 2 * 5 / 3 40 | Output: {{ "step": "analyse", "content": "Alright! The user is interested in maths query and user is asking a basic arithmetic operations." }} 41 | Output: {{ "step": "think", "content": "To perform this addition, I must use BODMAS rule." }} 42 | Output: {{ "step": "validate", "content": "Correct! Using BODMAS is the right approach here." }} 43 | Output: {{ "step": "think", "content": "First I need to solve division that is 5 / 3 which gives 1.6666666666666667" }} 44 | Output: {{ "step": "validate", "content": "Correct, using BODMAS the divison must be performed" }} 45 | Output: {{ "step": "think", "content": "Now as I have already solved 5 / 3 now the equation looks like 2 + 2 * 1.6666666666666667" }} 46 | Output: {{ "step": "validate", "content": "Yes, The new equation is absolutely correct" }} 47 | Output: {{ "step": "think", "content": "The equation now is 3.3333333333" }} 48 | and so on... 49 | """ 50 | 51 | messages = [ 52 | {"role": "system", "content": SYSTEM_PROMPT}, 53 | ] 54 | 55 | query = input("> ") 56 | 57 | messages.append({"role": "user", "content": query}) 58 | 59 | while True: 60 | response = client.chat.completions.create( 61 | model="gemini-2.0-flash", 62 | response_format={"type": "json_object"}, 63 | messages=messages 64 | ) 65 | 66 | messages.append( 67 | {"role": "assistant", "content": response.choices[0].message.content}) 68 | 69 | parsed_response = json.loads(response.choices[0].message.content) 70 | 71 | if parsed_response.get("step") != "result": 72 | print("\t\t🧠:", parsed_response.get("content"), "\n") 73 | continue 74 | 75 | print("\n\n🤖:", parsed_response.get("content"), "\n\n") 76 | break 77 | 78 | """ 79 | > what is the average of sum of 2 and 3 multiplied by 5 divided by 2 80 | 81 | 🧠: The user wants to calculate the average of a series of arithmetic operations: adding 2 and 3, multiplying the sum by 5, and then dividing the result by 2. 82 | 83 | 🧠: To solve this, I need to follow the order of operations (PEMDAS/BODMAS). First, I'll calculate the sum of 2 and 3. Then, I'll multiply the sum by 5. Finally, I'll divide the result by 2. Since the user asks for the average of the result, and only one number will be left, I will return the result itself. 84 | 85 | 🧠: 12.5 86 | 87 | 🧠: Let's verify the calculations: 2 + 3 = 5. 
5 * 5 = 25. 25 / 2 = 12.5. The result seems correct. 88 | 89 | 🤖: The average of the sum of 2 and 3 multiplied by 5, then divided by 2 is 12.5. This is calculated as follows: (2 + 3) * 5 / 2 = 12.5 90 | """ 91 | 92 | # Improved system prompt with chain of thought 93 | 94 | """ 95 | > 2 * 5 / 3 + 4 96 | 🧠: The user wants me to evaluate the expression 2 * 5 / 3 + 4. This involves multiplication, division, and addition. I need to follow the order of operations (PEMDAS/BODMAS). 97 | 🧠: According to the order of operations, multiplication and division have the same precedence, so I'll perform them from left to right. Then I'll do the addition. 98 | 🧠: First, calculate 2 * 5 which equals 10. Then, calculate 10 / 3 which equals 3.3333333333333335. Finally, calculate 3.3333333333333335 + 4 which equals 7.333333333333333. 99 | 🧠: The steps are correct and the final result is accurate. Multiplication and division were performed from left to right before addition. 100 | 101 | 🤖: 2 * 5 / 3 + 4 = 7.333333333333333. This is calculated by first multiplying 2 by 5, then dividing the result by 3, and finally adding 4. 102 | """ 103 | -------------------------------------------------------------------------------- /12-human-in-loop/02-support-assistant.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | 4 | from typing import Annotated 5 | from typing_extensions import TypedDict 6 | 7 | from langgraph.graph.message import add_messages 8 | 9 | from langchain.chat_models import init_chat_model 10 | from langchain.tools import tool 11 | 12 | from langgraph.types import Command, interrupt 13 | from langgraph.prebuilt import ToolNode, tools_condition 14 | from langgraph.graph import StateGraph, START, END 15 | from langgraph.checkpoint.mongodb import MongoDBSaver 16 | 17 | from pymongo import MongoClient 18 | 19 | api_key = os.getenv("GEMINI_API_KEY") 20 | 21 | 22 | def append_admin_message(config, solution): 23 | client = MongoClient("mongodb://admin:admin@localhost:27017") 24 | db = client["checkpointing_db"] 25 | collection = db["checkpoints"] 26 | 27 | thread_id = config["configurable"]["thread_id"] 28 | 29 | collection.update_one( 30 | {"configurable.thread_id": thread_id}, 31 | {"$push": {"messages": {"role": "assistant", "content": solution}}} 32 | ) 33 | 34 | 35 | @tool() 36 | def human_assistance(query: str) -> str: 37 | """ 38 | Request assistance from a human. 39 | """ 40 | 41 | # This saves the state in DB and kills the graph execution. 
42 | human_response = interrupt({"query": query}) 43 | return human_response["data"] 44 | 45 | 46 | tools = [human_assistance] 47 | 48 | 49 | class State(TypedDict): 50 | messages: Annotated[list, add_messages] 51 | 52 | 53 | llm = init_chat_model( 54 | model_provider="google_genai", 55 | model="gemini-2.0-flash", 56 | api_key=api_key, 57 | ) 58 | llm_with_tools = llm.bind_tools(tools=tools) 59 | 60 | 61 | def chatbot(state: State): 62 | message = llm_with_tools.invoke(state["messages"]) 63 | return {"messages": [message]} 64 | 65 | 66 | tool_node = ToolNode(tools=tools) 67 | 68 | graph_builder = StateGraph(State) 69 | 70 | graph_builder.add_node("chatbot", chatbot) 71 | graph_builder.add_node("tools", tool_node) 72 | 73 | graph_builder.add_edge(START, "chatbot") 74 | graph_builder.add_conditional_edges( 75 | "chatbot", 76 | tools_condition, 77 | ) 78 | graph_builder.add_edge("tools", "chatbot") 79 | graph_builder.add_edge("chatbot", END) 80 | 81 | 82 | def create_chat_graph(checkpointer): 83 | return graph_builder.compile(checkpointer=checkpointer) 84 | 85 | 86 | def user_chat(): 87 | # MongoDB connection details 88 | DB_URI = "mongodb://admin:admin@localhost:27017" 89 | 90 | # Config for thread_id 91 | config = { 92 | "configurable": { 93 | "thread_id": "9", 94 | } 95 | } 96 | 97 | with MongoDBSaver.from_conn_string(DB_URI) as mongo_checkpointer: 98 | graph_with_checkpointer = create_chat_graph(mongo_checkpointer) 99 | 100 | while True: 101 | user_input = input("> ") 102 | 103 | state = State( 104 | messages=[ 105 | { 106 | "role": "user", 107 | "content": user_input, 108 | } 109 | ] 110 | ) 111 | 112 | for event in graph_with_checkpointer.stream(state, config, stream_mode="values"): 113 | if "messages" in event: 114 | event["messages"][-1].pretty_print() 115 | 116 | 117 | # user_chat() 118 | 119 | 120 | def admin_call(): 121 | # MongoDB connection details 122 | DB_URI = "mongodb://admin:admin@localhost:27017" 123 | 124 | # Config for thread_id 125 | config = { 126 | "configurable": { 127 | "thread_id": "9", 128 | } 129 | } 130 | 131 | with MongoDBSaver.from_conn_string(DB_URI) as mongo_checkpointer: 132 | graph_with_checkpointer = create_chat_graph(mongo_checkpointer) 133 | 134 | state = graph_with_checkpointer.get_state(config=config) 135 | last_message = state.values['messages'][-1] 136 | 137 | tool_calls = getattr(last_message, "tool_calls", []) 138 | 139 | user_query = None 140 | 141 | for call in tool_calls: 142 | if call.get("function", {}).get("name") == "human_assistance" or call.get("name") == "human_assistance": 143 | args_json = None 144 | if call.get("function", {}).get("arguments"): 145 | args_json = call["function"]["arguments"] 146 | elif call.get("args"): 147 | args_json = call["args"] 148 | 149 | if isinstance(args_json, dict): 150 | user_query = args_json.get("query") 151 | elif isinstance(args_json, str): 152 | try: 153 | args_dict = json.loads(args_json) 154 | user_query = args_dict.get("query") 155 | except Exception: 156 | print("Failed to decode tool arguments.") 157 | user_query = None 158 | else: 159 | user_query = None 160 | 161 | print("User has a query:", user_query) 162 | solution = input("> ") 163 | 164 | resume_command = Command(resume={"data": solution}) 165 | 166 | for event in graph_with_checkpointer.stream(resume_command, config, stream_mode="values"): 167 | if "messages" in event: 168 | event["messages"][-1].pretty_print() 169 | 170 | append_admin_message(config, solution) 171 | 172 | 173 | # admin_call() 174 | user_chat() 175 | 
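176 | 
177 | # Suggested workflow (assumption): run user_chat() first; when the model calls the
178 | # human_assistance tool, interrupt() checkpoints the thread in MongoDB and the graph run stops.
179 | # Then comment out user_chat(), uncomment admin_call(), and rerun so the admin can answer the
180 | # pending query and resume the same thread_id via Command(resume={"data": solution}).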
-------------------------------------------------------------------------------- /10-streaming/01-code-judge.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from typing_extensions import TypedDict 4 | from typing import Literal 5 | from langgraph.graph import StateGraph, START, END 6 | 7 | from pydantic import BaseModel 8 | 9 | from openai import OpenAI 10 | 11 | api_key = os.getenv("GEMINI_API_KEY") 12 | 13 | client = OpenAI( 14 | api_key=api_key, 15 | base_url="https://generativelanguage.googleapis.com/v1beta/openai/" 16 | ) 17 | 18 | 19 | class State(TypedDict): 20 | user_query: str 21 | llm_result: str | None 22 | accuracy_percentage: str | None 23 | is_coding_question: bool | None 24 | 25 | 26 | class ClassifyMessageResponse(BaseModel): 27 | is_coding_question: bool 28 | 29 | 30 | class CodeAccuracyResponse(BaseModel): 31 | accuracy_percentage: str 32 | 33 | 34 | def classify_message(state: State): 35 | print("⚠️ Classifying message...") 36 | 37 | # Read user message from the state 38 | query = state['user_query'] 39 | 40 | # OpenAI LLM call for classification 41 | SYSTEM_PROMPT = """ 42 | You are an AI assistant whose job is to detect if the user's query is related 43 | to coding question or not. 44 | 45 | Return the response in specified JSON boolean only. 46 | """ 47 | 48 | # Structured Output / Responses 49 | 50 | response = client.beta.chat.completions.parse( 51 | model="gemini-2.0-flash", 52 | response_format=ClassifyMessageResponse, 53 | messages=[ 54 | {"role": "system", "content": SYSTEM_PROMPT}, 55 | {"role": "user", "content": query} 56 | ] 57 | ) 58 | 59 | # Extract the classification result 60 | is_coding_question = response.choices[0].message.parsed.is_coding_question 61 | 62 | # Update the state with the classification result 63 | state['is_coding_question'] = is_coding_question 64 | 65 | return state 66 | 67 | 68 | def route_query(state: State) -> Literal["general_query", "coding_query"]: 69 | print("🔄 Routing query...") 70 | 71 | is_coding = state['is_coding_question'] 72 | 73 | if is_coding: 74 | return "coding_query" 75 | 76 | return "general_query" 77 | 78 | 79 | def general_query(state: State): 80 | print("💬 Handling general query...") 81 | 82 | query = state['user_query'] 83 | 84 | # To use OpenAI mini model or equivalent in Gemini 85 | response = client.chat.completions.create( 86 | model="gemini-2.0-flash", 87 | messages=[ 88 | {"role": "user", "content": query}, 89 | ] 90 | ) 91 | 92 | result = response.choices[0].message.content 93 | 94 | state['llm_result'] = result 95 | 96 | return state 97 | 98 | 99 | def coding_query(state: State): 100 | print("💻 Handling coding query...") 101 | 102 | query = state['user_query'] 103 | 104 | SYSTEM_PROMPT = """ 105 | You are an AI assistant whose job is to answer coding questions. 106 | You will be given a coding question and you need to provide a detailed answer. 
107 | """ 108 | 109 | # To use OpenAI 4.1 model or equivalent in Gemini 110 | response = client.chat.completions.create( 111 | model="gemini-2.0-flash", 112 | messages=[ 113 | {"role": "system", "content": SYSTEM_PROMPT}, 114 | {"role": "user", "content": query}, 115 | ] 116 | ) 117 | 118 | result = response.choices[0].message.content 119 | 120 | state['llm_result'] = result 121 | 122 | return state 123 | 124 | 125 | def coding_validate_query(state: State): 126 | print("✅ Validating coding query...") 127 | 128 | query = state['user_query'] 129 | llm_result = state['llm_result'] 130 | 131 | SYSTEM_PROMPT = f""" 132 | You are an expert in calculating the accuracy of a code according to the question. 133 | Return the percentage of accuracy of the code. 134 | 135 | User Query: {query} 136 | Code: {llm_result} 137 | """ 138 | 139 | # To use Gemini or Claude 140 | response = client.beta.chat.completions.parse( 141 | model="gemini-2.0-flash", 142 | response_format=CodeAccuracyResponse, 143 | messages=[ 144 | {"role": "system", "content": SYSTEM_PROMPT}, 145 | {"role": "user", "content": query}, 146 | ] 147 | ) 148 | 149 | accuracy = response.choices[0].message.parsed.accuracy_percentage 150 | 151 | state['accuracy_percentage'] = accuracy 152 | 153 | return state 154 | 155 | 156 | graph_builder = StateGraph(State) 157 | 158 | # Define the nodes in the graph 159 | graph_builder.add_node("classify_message", classify_message) 160 | graph_builder.add_node("route_query", route_query) 161 | graph_builder.add_node("general_query", general_query) 162 | graph_builder.add_node("coding_query", coding_query) 163 | graph_builder.add_node("coding_validate_query", coding_validate_query) 164 | 165 | # Add the edges to connect the nodes 166 | graph_builder.add_edge(START, "classify_message") 167 | graph_builder.add_conditional_edges("classify_message", route_query) 168 | graph_builder.add_edge("general_query", END) 169 | graph_builder.add_edge("coding_query", "coding_validate_query") 170 | graph_builder.add_edge("coding_validate_query", END) 171 | 172 | graph = graph_builder.compile() 173 | 174 | 175 | def main(): 176 | user = input("> ") 177 | 178 | # Invoke the graph 179 | _state: State = { 180 | "user_query": user, 181 | "accuracy_percentage": None, 182 | "is_coding_question": False, 183 | "llm_result": None 184 | } 185 | 186 | for event in graph.stream(_state): 187 | print("Event: ", event) 188 | 189 | 190 | main() 191 | -------------------------------------------------------------------------------- /08-langgraph/03-code-graph-router.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from typing_extensions import TypedDict 4 | from typing import Literal 5 | from langgraph.graph import StateGraph, START, END 6 | 7 | from pydantic import BaseModel 8 | 9 | from openai import OpenAI 10 | 11 | api_key = os.getenv("GEMINI_API_KEY") 12 | 13 | client = OpenAI( 14 | api_key=api_key, 15 | base_url="https://generativelanguage.googleapis.com/v1beta/openai/" 16 | ) 17 | 18 | 19 | class State(TypedDict): 20 | user_query: str 21 | llm_result: str | None 22 | accuracy_percentage: str | None 23 | is_coding_question: bool | None 24 | 25 | 26 | class ClassifyMessageResponse(BaseModel): 27 | is_coding_question: bool 28 | 29 | 30 | class CodeAccuracyResponse(BaseModel): 31 | accuracy_percentage: str 32 | 33 | 34 | def classify_message(state: State): 35 | print("⚠️ Classifying message...") 36 | 37 | # Read user message from the state 38 | query = state['user_query'] 39 | 40 | 
# OpenAI LLM call for classification 41 | SYSTEM_PROMPT = """ 42 | You are an AI assistant whose job is to detect if the user's query is related 43 | to coding question or not. 44 | 45 | Return the response in specified JSON boolean only. 46 | """ 47 | 48 | # Structured Output / Responses 49 | 50 | response = client.beta.chat.completions.parse( 51 | model="gemini-2.0-flash", 52 | response_format=ClassifyMessageResponse, 53 | messages=[ 54 | {"role": "system", "content": SYSTEM_PROMPT}, 55 | {"role": "user", "content": query} 56 | ] 57 | ) 58 | 59 | # Extract the classification result 60 | is_coding_question = response.choices[0].message.parsed.is_coding_question 61 | 62 | # Update the state with the classification result 63 | state['is_coding_question'] = is_coding_question 64 | 65 | return state 66 | 67 | 68 | def route_query(state: State) -> Literal["general_query", "coding_query"]: 69 | print("🔄 Routing query...") 70 | 71 | is_coding = state['is_coding_question'] 72 | 73 | if is_coding: 74 | return "coding_query" 75 | 76 | return "general_query" 77 | 78 | 79 | def general_query(state: State): 80 | print("💬 Handling general query...") 81 | 82 | query = state['user_query'] 83 | 84 | # To use OpenAI mini model or equivalent in Gemini 85 | response = client.chat.completions.create( 86 | model="gemini-2.0-flash", 87 | messages=[ 88 | {"role": "user", "content": query}, 89 | ] 90 | ) 91 | 92 | result = response.choices[0].message.content 93 | 94 | state['llm_result'] = result 95 | 96 | return state 97 | 98 | 99 | def coding_query(state: State): 100 | print("💻 Handling coding query...") 101 | 102 | query = state['user_query'] 103 | 104 | SYSTEM_PROMPT = """ 105 | You are an AI assistant whose job is to answer coding questions. 106 | You will be given a coding question and you need to provide a detailed answer. 107 | """ 108 | 109 | # To use OpenAI 4.1 model or equivalent in Gemini 110 | response = client.chat.completions.create( 111 | model="gemini-2.0-flash", 112 | messages=[ 113 | {"role": "system", "content": SYSTEM_PROMPT}, 114 | {"role": "user", "content": query}, 115 | ] 116 | ) 117 | 118 | result = response.choices[0].message.content 119 | 120 | state['llm_result'] = result 121 | 122 | return state 123 | 124 | 125 | def coding_validate_query(state: State): 126 | print("✅ Validating coding query...") 127 | 128 | query = state['user_query'] 129 | llm_result = state['llm_result'] 130 | 131 | SYSTEM_PROMPT = f""" 132 | You are an expert in calculating the accuracy of a code according to the question. 133 | Return the percentage of accuracy of the code. 
134 | 135 | User Query: {query} 136 | Code: {llm_result} 137 | """ 138 | 139 | # To use Gemini or Claude 140 | response = client.beta.chat.completions.parse( 141 | model="gemini-2.0-flash", 142 | response_format=CodeAccuracyResponse, 143 | messages=[ 144 | {"role": "system", "content": SYSTEM_PROMPT}, 145 | {"role": "user", "content": query}, 146 | ] 147 | ) 148 | 149 | accuracy = response.choices[0].message.parsed.accuracy_percentage 150 | 151 | state['accuracy_percentage'] = accuracy 152 | 153 | return state 154 | 155 | 156 | graph_builder = StateGraph(State) 157 | 158 | # Define the nodes in the graph 159 | graph_builder.add_node("classify_message", classify_message) 160 | graph_builder.add_node("route_query", route_query) 161 | graph_builder.add_node("general_query", general_query) 162 | graph_builder.add_node("coding_query", coding_query) 163 | graph_builder.add_node("coding_validate_query", coding_validate_query) 164 | 165 | # Add the edges to connect the nodes 166 | graph_builder.add_edge(START, "classify_message") 167 | graph_builder.add_conditional_edges("classify_message", route_query) 168 | graph_builder.add_edge("general_query", END) 169 | graph_builder.add_edge("coding_query", "coding_validate_query") 170 | graph_builder.add_edge("coding_validate_query", END) 171 | 172 | graph = graph_builder.compile() 173 | 174 | 175 | def main(): 176 | user = input("> ") 177 | 178 | # Invoke the graph 179 | _state: State = { 180 | "user_query": user, 181 | "accuracy_percentage": None, 182 | "is_coding_question": False, 183 | "llm_result": None 184 | } 185 | 186 | graph_result = graph.invoke(_state) 187 | 188 | print("graph_result: ", graph_result) 189 | 190 | 191 | main() 192 | --------------------------------------------------------------------------------