├── LICENSE
├── Langchain Fundamentals
│   ├── 01 Core Components
│   │   ├── 01 Chains
│   │   │   └── chains.py
│   │   ├── 02 Prompts
│   │   │   └── prompts.py
│   │   ├── 03 Memory
│   │   │   └── memory.py
│   │   ├── 04 Document Loaders
│   │   │   └── document_loaders.py
│   │   └── README.md
│   ├── 02 Retrieval Augmented Generation
│   │   ├── 01 Vector Stores
│   │   │   └── vector_stores.py
│   │   ├── 02 Document Loaders and Splitters
│   │   │   └── document_loaders_splitters.py
│   │   ├── 03 RAG Pipeline
│   │   │   └── rag_pipeline.py
│   │   ├── 04 Embedding Models
│   │   │   └── embedding_models.py
│   │   └── README.md
│   ├── 03 AI Agents
│   │   ├── 01 Tool Integration
│   │   │   └── tool_integration.py
│   │   ├── 02 Agent Types
│   │   │   └── agent_types.py
│   │   ├── 03 Agent Reasoning
│   │   │   └── agent_reasoning.py
│   │   ├── 04 Custom Agents
│   │   │   └── custom_agents.py
│   │   └── README.md
│   ├── 04 Advanced Features
│   │   ├── 01 Custom Chains
│   │   │   └── custom_chains.py
│   │   ├── 02 Evaluation Metrics
│   │   │   └── evaluation_metrics.py
│   │   ├── 03 Agent Optimization
│   │   │   └── agent_optimization.py
│   │   ├── 04 Integration with APIs
│   │   │   └── api_integration.py
│   │   └── README.md
│   └── 05 Retail Applications
│       ├── 01 Chatbots
│       │   └── chatbots.py
│       ├── 02 Recommendation Systems
│       │   └── recommendation_systems.py
│       ├── 03 Review Analysis
│       │   └── review_analysis.py
│       ├── 04 Query Answering
│       │   └── query_answering.py
│       └── README.md
├── README.md
└── Langchain Interview Questions
    └── README.md

/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2025 rohanmistry231

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/Langchain Fundamentals/02 Retrieval Augmented Generation/02 Document Loaders and Splitters/document_loaders_splitters.py:
--------------------------------------------------------------------------------
# %% [1. Introduction to Document Loaders and Splitters]
# Learn to process and split retail documents with LangChain.

# Setup: pip install langchain langchain-openai numpy matplotlib pandas nltk
import matplotlib.pyplot as plt
from langchain.docstore.document import Document
from langchain.text_splitter import CharacterTextSplitter
import numpy as np
import nltk

def run_document_loaders_splitters_demo():
    # %% [2. Synthetic Retail Document Data]
    document = Document(
        page_content="TechCorp Laptop Manual: The laptop features 16GB RAM, an Intel i7 processor, and a 512GB SSD. It has a vibrant 15-inch display and a 10-hour battery life. Ideal for professionals and gamers.",
        metadata={"product": "TechCorp Laptop"}
    )
    print("Synthetic Data: Retail document created")
    print(f"Document: {document.metadata['product']} - {document.page_content[:50]}...")

    # %% [3. Document Splitting]
    text_splitter = CharacterTextSplitter(chunk_size=50, chunk_overlap=10, separator=".")
    chunks = text_splitter.split_documents([document])
    print("Document Splitter: Document chunks created")
    for i, chunk in enumerate(chunks):
        print(f"Chunk {i+1}: {chunk.page_content}")

    # %% [4. Visualization]
    chunk_lengths = [len(nltk.word_tokenize(chunk.page_content)) for chunk in chunks]
    plt.figure(figsize=(8, 4))
    plt.bar(range(1, len(chunks) + 1), chunk_lengths, color='green')
    plt.title("Document Chunk Lengths")
    plt.xlabel("Chunk")
    plt.ylabel("Word Count")
    plt.savefig("document_loaders_splitters_output.png")
    print("Visualization: Chunk lengths saved as document_loaders_splitters_output.png")

    # %% [5. Interview Scenario: Document Loaders and Splitters]
    """
    Interview Scenario: Document Loaders and Splitters
    Q: How do document splitters optimize RAG?
    A: Splitters break large documents into smaller chunks for efficient retrieval and processing by LLMs.
    Key: Balances chunk size with context retention.
    Example: CharacterTextSplitter(chunk_size=50, chunk_overlap=10)
    """

# Execute the demo
if __name__ == "__main__":
    nltk.download('punkt', quiet=True)
    run_document_loaders_splitters_demo()
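
# %% [6. Optional: Recursive Splitter Sketch]
# Not part of the original demo: a minimal sketch of RecursiveCharacterTextSplitter,
# which falls back through several separators (paragraphs, sentences, words)
# instead of the single "." used above; the chunk sizes are illustrative assumptions.
def run_recursive_splitter_sketch():
    from langchain.text_splitter import RecursiveCharacterTextSplitter

    splitter = RecursiveCharacterTextSplitter(chunk_size=50, chunk_overlap=10)
    doc = Document(page_content="TechCorp Tablet Guide: Lightweight design. 10-hour battery. Ideal for students.")
    # split_documents preserves metadata while chunking page_content
    for i, chunk in enumerate(splitter.split_documents([doc])):
        print(f"Recursive chunk {i+1}: {chunk.page_content}")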
--------------------------------------------------------------------------------
/Langchain Fundamentals/01 Core Components/02 Prompts/prompts.py:
--------------------------------------------------------------------------------
# %% [1. Introduction to Prompts]
# Learn dynamic prompt engineering for retail queries with LangChain.

# Setup: pip install langchain langchain-openai numpy matplotlib nltk
import matplotlib.pyplot as plt
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.llms import OpenAI
import numpy as np
import nltk

def run_prompts_demo():
    # %% [2. Synthetic Retail Query Data]
    queries = [
        {"product": "TechCorp laptop", "question": "What are its features?"},
        {"product": "TechCorp smartphone", "question": "How long is the battery life?"},
        {"product": "TechCorp tablet", "question": "Is it good for students?"}
    ]
    print("Synthetic Data: Retail customer queries created")
    print(f"Queries: {queries}")

    # %% [3. Dynamic Prompt Engineering]
    llm = OpenAI(api_key="your-openai-api-key")  # Replace with your OpenAI API key
    prompt = PromptTemplate(
        input_variables=["product", "question"],
        template="You are a retail assistant. For the product {product}, answer: {question}"
    )
    chain = LLMChain(llm=llm, prompt=prompt)

    responses = [chain.run(product=query["product"], question=query["question"]) for query in queries]
    print("Dynamic Prompts: Responses generated")
    for i, (query, response) in enumerate(zip(queries, responses)):
        print(f"Query {i+1}: {query['product']} - {query['question']}")
        print(f"Response: {response.strip()}")

    # %% [4. Visualization]
    response_lengths = [len(nltk.word_tokenize(resp)) for resp in responses]
    plt.figure(figsize=(8, 4))
    plt.bar(range(1, len(queries) + 1), response_lengths, color='blue')
    plt.title("Prompt Response Lengths")
    plt.xlabel("Query")
    plt.ylabel("Word Count")
    plt.savefig("prompts_output.png")
    print("Visualization: Response lengths saved as prompts_output.png")

    # %% [5. Interview Scenario: Prompts]
    """
    Interview Scenario: Prompts
    Q: What’s the role of prompt engineering in LangChain?
    A: Prompt engineering designs structured inputs to guide LLM responses, using templates for dynamic, context-specific queries.
    Key: Improves response relevance and consistency.
    Example: PromptTemplate(input_variables=["product", "question"], template="...")
    """

# Execute the demo
if __name__ == "__main__":
    nltk.download('punkt', quiet=True)
    run_prompts_demo()
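
# %% [6. Optional: LCEL Sketch]
# Not part of the original demo: the same prompt flow written with the newer
# LangChain Expression Language (LCEL), where `prompt | llm` composes a runnable.
# Assumes a recent LangChain where prompts and LLMs are runnables.
def run_prompts_lcel_sketch():
    lcel_chain = PromptTemplate.from_template(
        "You are a retail assistant. For the product {product}, answer: {question}"
    ) | OpenAI(api_key="your-openai-api-key")
    # .invoke replaces .run and takes a single dict of inputs
    return lcel_chain.invoke({"product": "TechCorp laptop", "question": "What are its features?"})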
--------------------------------------------------------------------------------
/Langchain Fundamentals/02 Retrieval Augmented Generation/01 Vector Stores/vector_stores.py:
--------------------------------------------------------------------------------
# %% [1. Introduction to Vector Stores]
# Learn Faiss-based document retrieval for retail applications with LangChain.

# Setup: pip install langchain langchain-openai faiss-cpu numpy matplotlib scikit-learn
import matplotlib.pyplot as plt
from langchain.docstore.document import Document
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np

def run_vector_stores_demo():
    # %% [2. Synthetic Retail Document Data]
    documents = [
        Document(page_content="TechCorp Laptop: 16GB RAM, Intel i7, 512GB SSD.", metadata={"product": "Laptop"}),
        Document(page_content="TechCorp Smartphone: Long battery, vibrant display.", metadata={"product": "Smartphone"}),
        Document(page_content="TechCorp Tablet: Lightweight, 10-hour battery.", metadata={"product": "Tablet"})
    ]
    query = "Find a laptop with good performance."
    print("Synthetic Data: Retail documents and query created")
    print(f"Documents: {[doc.metadata['product'] for doc in documents]}")
    print(f"Query: {query}")

    # %% [3. Faiss Vector Store]
    embeddings = OpenAIEmbeddings(api_key="your-openai-api-key")  # Replace with your OpenAI API key
    vector_store = FAISS.from_documents(documents, embeddings)
    retrieved_docs = vector_store.similarity_search(query, k=2)
    print("Vector Store: Documents retrieved")
    for i, doc in enumerate(retrieved_docs):
        print(f"Retrieved {i+1}: {doc.metadata['product']} - {doc.page_content}")

    # %% [4. Visualization]
    query_embedding = embeddings.embed_query(query)
    doc_embeddings = [embeddings.embed_query(doc.page_content) for doc in documents]
    similarities = [cosine_similarity([query_embedding], [emb])[0][0] for emb in doc_embeddings]

    plt.figure(figsize=(8, 4))
    plt.bar([doc.metadata['product'] for doc in documents], similarities, color='blue')
    plt.title("Document Similarity to Query")
    plt.xlabel("Product")
    plt.ylabel("Cosine Similarity")
    plt.savefig("vector_stores_output.png")
    print("Visualization: Document similarities saved as vector_stores_output.png")

    # %% [5. Interview Scenario: Vector Stores]
    """
    Interview Scenario: Vector Stores
    Q: What’s the role of vector stores in RAG?
    A: Vector stores index document embeddings for efficient similarity-based retrieval, enhancing LLM responses.
    Key: Faiss enables fast nearest-neighbor search.
    Example: FAISS.from_documents(documents, embeddings)
    """

# Execute the demo
if __name__ == "__main__":
    run_vector_stores_demo()
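
# %% [6. Optional: Scores Without Manual Cosine]
# Not part of the original demo: FAISS can return scores directly via
# similarity_search_with_score, avoiding the manual cosine computation above.
# Note the scores are L2 distances by default (lower = more similar), not cosine.
def run_scored_search_sketch(vector_store, query):
    for doc, score in vector_store.similarity_search_with_score(query, k=2):
        print(f"{doc.metadata['product']}: distance={score:.3f}")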
--------------------------------------------------------------------------------
/Langchain Fundamentals/03 AI Agents/02 Agent Types/agent_types.py:
--------------------------------------------------------------------------------
# %% [1. Introduction to Agent Types]
# Learn reactive, planning, and ReAct agents for retail tasks with LangChain.

# Setup: pip install langchain langchain-openai numpy matplotlib
import matplotlib.pyplot as plt
from langchain.agents import initialize_agent, Tool
from langchain.llms import OpenAI
from collections import Counter
import numpy as np

def run_agent_types_demo():
    # %% [2. Synthetic Retail Query Data]
    query = "Handle a customer request for TechCorp laptop stock and discount."
    print("Synthetic Data: Retail query created")
    print(f"Query: {query}")

    # %% [3. Agent Types Comparison]
    llm = OpenAI(api_key="your-openai-api-key")  # Replace with your OpenAI API key

    def mock_stock_check(product):
        return f"Stock for {product}: 10 units."

    def mock_discount_calculator(product):
        return f"Discount for {product}: 15% off."

    tools = [
        Tool(name="StockCheck", func=mock_stock_check, description="Check product stock"),
        Tool(name="DiscountCalculator", func=mock_discount_calculator, description="Calculate product discount")
    ]

    # Reactive Agent (Zero-Shot ReAct)
    reactive_agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=False)
    reactive_response = reactive_agent.run(query)

    # Planning Agent (simulated with an explicit plan-and-execute instruction)
    planning_agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=False)
    planning_response = planning_agent.run(f"Plan and execute: {query}")

    print("Agent Types: Responses generated")
    print(f"Reactive Agent Response: {reactive_response}")
    print(f"Planning Agent Response: {planning_response}")

    # %% [4. Visualization]
    response_lengths = [
        len(reactive_response.split()),
        len(planning_response.split())
    ]
    plt.figure(figsize=(8, 4))
    plt.bar(['Reactive', 'Planning'], response_lengths, color=['blue', 'green'])
    plt.title("Agent Response Lengths by Type")
    plt.xlabel("Agent Type")
    plt.ylabel("Word Count")
    plt.savefig("agent_types_output.png")
    print("Visualization: Response lengths saved as agent_types_output.png")

    # %% [5. Interview Scenario: Agent Types]
    """
    Interview Scenario: Agent Types
    Q: What’s the difference between reactive and planning agents?
    A: Reactive agents respond directly to queries, while planning agents break tasks into steps for complex scenarios.
    Key: ReAct combines reasoning and action.
    Example: initialize_agent(tools, llm, agent="zero-shot-react-description")
    """

# Execute the demo
if __name__ == "__main__":
    run_agent_types_demo()
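
# %% [6. Optional: AgentType Constants Sketch]
# Not part of the original demo: agent types can be selected with the AgentType
# enum instead of raw strings, which guards against typos. The conversational
# variant shown here needs a memory object keyed as "chat_history" (a documented
# LangChain pattern, but the wiring below is a sketch, not verified against
# every version).
def build_conversational_agent_sketch(tools, llm):
    from langchain.agents import AgentType
    from langchain.memory import ConversationBufferMemory

    memory = ConversationBufferMemory(memory_key="chat_history")
    return initialize_agent(
        tools,
        llm,
        agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,  # enum instead of "zero-shot-react-description"
        memory=memory,
        verbose=False
    )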
--------------------------------------------------------------------------------
/Langchain Fundamentals/03 AI Agents/03 Agent Reasoning/agent_reasoning.py:
--------------------------------------------------------------------------------
# %% [1. Introduction to Agent Reasoning]
# Learn autonomous decision-making for customer support with LangChain agents.

# Setup: pip install langchain langchain-openai numpy matplotlib
import matplotlib.pyplot as plt
from langchain.agents import initialize_agent, Tool
from langchain.llms import OpenAI
from collections import Counter
import numpy as np

def run_agent_reasoning_demo():
    # %% [2. Synthetic Retail Query Data]
    queries = [
        "Customer asks if TechCorp laptop is in stock.",
        "Customer wants a discount on TechCorp smartphone.",
        "Customer needs help with TechCorp tablet warranty."
    ]
    print("Synthetic Data: Retail customer queries created")
    print(f"Queries: {queries}")

    # %% [3. Agent Reasoning]
    llm = OpenAI(api_key="your-openai-api-key")  # Replace with your OpenAI API key

    def mock_stock_check(query):
        return "Stock: 10 units available."

    def mock_discount_offer(query):
        return "Offer: 15% discount applied."

    def mock_warranty_info(query):
        return "Warranty: 1-year coverage."

    tools = [
        Tool(name="StockCheck", func=mock_stock_check, description="Check product stock"),
        Tool(name="DiscountOffer", func=mock_discount_offer, description="Offer a discount"),
        Tool(name="WarrantyInfo", func=mock_warranty_info, description="Provide warranty details")
    ]

    agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=False)
    responses = [agent.run(query) for query in queries]
    print("Agent Reasoning: Responses generated")
    for i, (query, response) in enumerate(zip(queries, responses)):
        print(f"Query {i+1}: {query}")
        print(f"Response: {response}")

    # %% [4. Visualization]
    tool_calls = [response.split(":")[0].split()[-1] for response in responses]
    tool_counts = Counter(tool_calls)

    plt.figure(figsize=(8, 4))
    plt.bar(tool_counts.keys(), tool_counts.values(), color='purple')
    plt.title("Agent Decision Tool Usage")
    plt.xlabel("Tool")
    plt.ylabel("Count")
    plt.savefig("agent_reasoning_output.png")
    print("Visualization: Tool usage saved as agent_reasoning_output.png")

    # %% [5. Interview Scenario: Agent Reasoning]
    """
    Interview Scenario: Agent Reasoning
    Q: How does agent reasoning work for retail tasks?
    A: Agents reason by selecting tools based on query context, using LLMs to plan actions autonomously.
    Key: ReAct framework enhances decision-making.
    Example: initialize_agent(tools, llm, agent="zero-shot-react-description")
    """

# Execute the demo
if __name__ == "__main__":
    run_agent_reasoning_demo()
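
# %% [6. Optional: Inspecting the Reasoning Trace]
# Not part of the original demo: a sketch of surfacing the agent's intermediate
# tool calls, assuming the installed LangChain version supports
# return_intermediate_steps on the agent executor.
def run_reasoning_trace_sketch(tools, llm, query):
    agent = initialize_agent(
        tools, llm, agent="zero-shot-react-description",
        verbose=False, return_intermediate_steps=True
    )
    result = agent({"input": query})  # dict-style call returns the output plus the trace
    for action, observation in result["intermediate_steps"]:
        print(f"Tool: {action.tool}, Input: {action.tool_input}, Observation: {observation}")
    return result["output"]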
--------------------------------------------------------------------------------
/Langchain Fundamentals/03 AI Agents/04 Custom Agents/custom_agents.py:
--------------------------------------------------------------------------------
# %% [1. Introduction to Custom Agents]
# Learn to build retail-specific agents with LangChain.

# Setup: pip install langchain langchain-openai numpy matplotlib
import matplotlib.pyplot as plt
from langchain.agents import initialize_agent, Tool
from langchain.llms import OpenAI
from collections import Counter
import numpy as np

def run_custom_agents_demo():
    # %% [2. Synthetic Retail Query Data]
    queries = [
        "Check inventory for TechCorp laptop.",
        "Suggest a product for a student.",
        "Process a return for TechCorp smartphone."
    ]
    print("Synthetic Data: Retail queries created")
    print(f"Queries: {queries}")

    # %% [3. Custom Agent for Retail]
    llm = OpenAI(api_key="your-openai-api-key")  # Replace with your OpenAI API key

    def mock_inventory_check(query):
        return "Inventory: 10 laptops available."

    def mock_product_suggestion(query):
        return "Suggestion: TechCorp Tablet, ideal for students."

    def mock_return_process(query):
        return "Return: Processed for TechCorp smartphone."

    tools = [
        Tool(name="InventoryCheck", func=mock_inventory_check, description="Check product inventory"),
        Tool(name="ProductSuggestion", func=mock_product_suggestion, description="Suggest a product"),
        Tool(name="ReturnProcess", func=mock_return_process, description="Process a product return")
    ]

    custom_agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=False)
    responses = [custom_agent.run(query) for query in queries]
    print("Custom Agent: Responses generated")
    for i, (query, response) in enumerate(zip(queries, responses)):
        print(f"Query {i+1}: {query}")
        print(f"Response: {response}")

    # %% [4. Visualization]
    tool_calls = [response.split(":")[0].split()[-1] for response in responses]
    tool_counts = Counter(tool_calls)

    plt.figure(figsize=(8, 4))
    plt.bar(tool_counts.keys(), tool_counts.values(), color='orange')
    plt.title("Custom Agent Tool Usage")
    plt.xlabel("Tool")
    plt.ylabel("Count")
    plt.savefig("custom_agents_output.png")
    print("Visualization: Tool usage saved as custom_agents_output.png")

    # %% [5. Interview Scenario: Custom Agents]
    """
    Interview Scenario: Custom Agents
    Q: How do you build a custom agent for retail?
    A: Define task-specific tools and initialize an agent with a reasoning framework like ReAct for retail scenarios.
    Key: Tailor tools to domain needs.
    Example: initialize_agent(tools=[Tool(name="InventoryCheck", ...)], llm, ...)
    """

# Execute the demo
if __name__ == "__main__":
    run_custom_agents_demo()
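
# %% [6. Optional: Tool Decorator Sketch]
# Not part of the original demo: the @tool decorator builds a tool from a typed
# function, using its docstring as the description; the inventory figure is a
# mock assumption like the ones above.
def make_decorated_tool_sketch():
    from langchain.tools import tool

    @tool
    def inventory_check(product: str) -> str:
        """Check inventory for a product."""
        return f"Inventory: 10 units of {product} available."  # mock response

    return inventory_check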
--------------------------------------------------------------------------------
/Langchain Fundamentals/04 Advanced Features/04 Integration with APIs/api_integration.py:
--------------------------------------------------------------------------------
# %% [1. Introduction to Integration with APIs]
# Learn to connect LangChain with retail APIs for task automation.

# Setup: pip install langchain langchain-openai numpy matplotlib
import matplotlib.pyplot as plt
from langchain.agents import initialize_agent, Tool
from langchain.llms import OpenAI
from collections import Counter
import numpy as np

def run_api_integration_demo():
    # %% [2. Synthetic Retail Query Data]
    queries = [
        "Check TechCorp laptop price via API.",
        "Fetch TechCorp smartphone specs via API.",
        "Get TechCorp tablet availability via API."
    ]
    print("Synthetic Data: Retail queries created")
    print(f"Queries: {queries}")

    # %% [3. API Integration]
    llm = OpenAI(api_key="your-openai-api-key")  # Replace with your OpenAI API key

    def mock_price_api(query):
        return "Price: $999 for TechCorp laptop."

    def mock_specs_api(query):
        return "Specs: 8GB RAM, 128GB storage for TechCorp smartphone."

    def mock_availability_api(query):
        return "Availability: In stock for TechCorp tablet."

    tools = [
        Tool(name="PriceAPI", func=mock_price_api, description="Fetch product price via API"),
        Tool(name="SpecsAPI", func=mock_specs_api, description="Fetch product specs via API"),
        Tool(name="AvailabilityAPI", func=mock_availability_api, description="Check product availability via API")
    ]

    agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=False)
    responses = [agent.run(query) for query in queries]
    print("API Integration: Responses generated")
    for i, (query, response) in enumerate(zip(queries, responses)):
        print(f"Query {i+1}: {query}")
        print(f"Response: {response}")

    # %% [4. Visualization]
    api_calls = [response.split(":")[0].split()[-1] for response in responses]
    api_counts = Counter(api_calls)

    plt.figure(figsize=(8, 4))
    plt.bar(api_counts.keys(), api_counts.values(), color='blue')
    plt.title("API Call Frequencies")
    plt.xlabel("API")
    plt.ylabel("Count")
    plt.savefig("api_integration_output.png")
    print("Visualization: API call frequencies saved as api_integration_output.png")

    # %% [5. Interview Scenario: Integration with APIs]
    """
    Interview Scenario: Integration with APIs
    Q: How does LangChain integrate with external APIs?
    A: LangChain agents use tools to call APIs, enabling dynamic data retrieval for retail tasks.
    Key: Tools map queries to API functions.
    Example: Tool(name="PriceAPI", func=mock_price_api, description="...")
    """

# Execute the demo
if __name__ == "__main__":
    run_api_integration_demo()
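
# %% [6. Optional: Live HTTP Tool Sketch]
# Not part of the original demo: a sketch of swapping a mock for a real HTTP
# call with requests; the endpoint URL and JSON shape are hypothetical assumptions.
def make_live_price_tool_sketch():
    import requests

    def price_api(product: str) -> str:
        # Hypothetical endpoint -- replace with the real pricing service
        resp = requests.get("https://api.example.com/prices", params={"product": product}, timeout=10)
        resp.raise_for_status()
        return f"Price: ${resp.json()['price']} for {product}"

    return Tool(name="PriceAPI", func=price_api, description="Fetch live product price via API")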
--------------------------------------------------------------------------------
/Langchain Fundamentals/03 AI Agents/01 Tool Integration/tool_integration.py:
--------------------------------------------------------------------------------
# %% [1. Introduction to Tool Integration]
# Learn to integrate tools for retail tasks with LangChain agents.

# Setup: pip install langchain langchain-openai numpy matplotlib
import matplotlib.pyplot as plt
from langchain.agents import initialize_agent, Tool
from langchain.llms import OpenAI
from collections import Counter
import numpy as np

def run_tool_integration_demo():
    # %% [2. Synthetic Retail Query Data]
    queries = [
        "Check stock for TechCorp laptop.",
        "Calculate discount for TechCorp smartphone.",
        "Search for TechCorp tablet reviews."
    ]
    print("Synthetic Data: Retail queries created")
    print(f"Queries: {queries}")

    # %% [3. Tool Integration]
    llm = OpenAI(api_key="your-openai-api-key")  # Replace with your OpenAI API key

    def mock_stock_check(product):
        return f"Stock for {product}: 10 units available."

    def mock_discount_calculator(product):
        return f"Discount for {product}: 15% off."

    def mock_search_reviews(product):
        return f"Reviews for {product}: Mostly positive, 4.5/5 rating."

    tools = [
        Tool(name="StockCheck", func=mock_stock_check, description="Check product stock"),
        Tool(name="DiscountCalculator", func=mock_discount_calculator, description="Calculate product discount"),
        Tool(name="SearchReviews", func=mock_search_reviews, description="Search product reviews")
    ]

    agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=False)
    responses = [agent.run(query) for query in queries]
    print("Tool Integration: Agent responses generated")
    for i, (query, response) in enumerate(zip(queries, responses)):
        print(f"Query {i+1}: {query}")
        print(f"Response: {response}")

    # %% [4. Visualization]
    tool_calls = [response.split(":")[0].split()[-1] for response in responses]  # Extract tool name
    tool_counts = Counter(tool_calls)

    plt.figure(figsize=(8, 4))
    plt.bar(tool_counts.keys(), tool_counts.values(), color='blue')
    plt.title("Tool Call Frequencies")
    plt.xlabel("Tool")
    plt.ylabel("Count")
    plt.savefig("tool_integration_output.png")
    print("Visualization: Tool call frequencies saved as tool_integration_output.png")

    # %% [5. Interview Scenario: Tool Integration]
    """
    Interview Scenario: Tool Integration
    Q: How do agents use tools in LangChain?
    A: Agents use tools to perform specific tasks, selected based on query context via reasoning.
    Key: Tools are defined with functions and descriptions.
    Example: Tool(name="StockCheck", func=mock_stock_check, description="...")
    """

# Execute the demo
if __name__ == "__main__":
    run_tool_integration_demo()
--------------------------------------------------------------------------------
/Langchain Fundamentals/05 Retail Applications/02 Recommendation Systems/recommendation_systems.py:
--------------------------------------------------------------------------------
# %% [1. Introduction to Recommendation Systems]
# Learn to build product recommendation systems using embeddings with LangChain.

# Setup: pip install langchain langchain-openai faiss-cpu numpy matplotlib scikit-learn
import matplotlib.pyplot as plt
from langchain.docstore.document import Document
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np

def run_recommendation_systems_demo():
    # %% [2. Synthetic Retail Product Data]
    products = [
        Document(page_content="TechCorp Laptop: 16GB RAM, Intel i7, 512GB SSD, ideal for gaming.", metadata={"product": "Laptop"}),
        Document(page_content="TechCorp Smartphone: Long battery, vibrant display, great for media.", metadata={"product": "Smartphone"}),
        Document(page_content="TechCorp Tablet: Lightweight, 10-hour battery, perfect for students.", metadata={"product": "Tablet"})
    ]
    user_query = "Recommend a product for a student."
    print("Synthetic Data: Retail products and user query created")
    print(f"Products: {[doc.metadata['product'] for doc in products]}")
    print(f"User Query: {user_query}")

    # %% [3. Recommendation System]
    embeddings = OpenAIEmbeddings(api_key="your-openai-api-key")  # Replace with your OpenAI API key
    vector_store = FAISS.from_documents(products, embeddings)
    recommended_docs = vector_store.similarity_search(user_query, k=2)

    print("Recommendation System: Products recommended")
    for i, doc in enumerate(recommended_docs):
        print(f"Recommendation {i+1}: {doc.metadata['product']} - {doc.page_content}")

    # %% [4. Visualization]
    query_embedding = embeddings.embed_query(user_query)
    product_embeddings = [embeddings.embed_query(doc.page_content) for doc in products]
    similarities = [cosine_similarity([query_embedding], [emb])[0][0] for emb in product_embeddings]

    plt.figure(figsize=(8, 4))
    plt.bar([doc.metadata['product'] for doc in products], similarities, color='green')
    plt.title("Product Recommendation Similarities")
    plt.xlabel("Product")
    plt.ylabel("Cosine Similarity")
    plt.savefig("recommendation_systems_output.png")
    print("Visualization: Recommendation similarities saved as recommendation_systems_output.png")

    # %% [5. Interview Scenario: Recommendation Systems]
    """
    Interview Scenario: Recommendation Systems
    Q: How do embeddings power recommendation systems?
    A: Embeddings convert product descriptions into vectors, enabling similarity-based matching for personalized recommendations.
    Key: Cosine similarity identifies relevant products.
    Example: FAISS.from_documents(products, embeddings)
    """

# Execute the demo
if __name__ == "__main__":
    run_recommendation_systems_demo()
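
# %% [6. Optional: MMR Retrieval Sketch]
# Not part of the original demo: max marginal relevance (MMR) search trades a
# little relevance for diversity, which can make recommendation lists less
# repetitive; fetch_k is the candidate pool size (the values are assumptions).
def run_mmr_recommendation_sketch(vector_store, user_query):
    retriever = vector_store.as_retriever(
        search_type="mmr", search_kwargs={"k": 2, "fetch_k": 3}
    )
    return retriever.get_relevant_documents(user_query)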
--------------------------------------------------------------------------------
/Langchain Fundamentals/05 Retail Applications/03 Review Analysis/review_analysis.py:
--------------------------------------------------------------------------------
# %% [1. Introduction to Review Analysis]
# Learn sentiment and topic extraction from retail reviews with LangChain.

# Setup: pip install langchain langchain-openai numpy matplotlib pandas nltk scikit-learn
import matplotlib.pyplot as plt
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from collections import Counter
import numpy as np
import nltk

def run_review_analysis_demo():
    # %% [2. Synthetic Retail Review Data]
    reviews = [
        "The TechCorp laptop is fast and reliable, great for gaming!",
        "TechCorp smartphone has poor battery life, but nice display.",
        "TechCorp tablet is lightweight, but the app selection is limited."
    ]
    print("Synthetic Data: Retail reviews created")
    print(f"Reviews: {reviews}")

    # %% [3. Sentiment and Topic Extraction]
    llm = OpenAI(api_key="your-openai-api-key")  # Replace with your OpenAI API key

    sentiment_prompt = PromptTemplate(
        input_variables=["review"],
        template="You are a retail analyst. Determine the sentiment (Positive, Negative, Neutral) of this review: {review}"
    )
    sentiment_chain = LLMChain(llm=llm, prompt=sentiment_prompt)

    topic_prompt = PromptTemplate(
        input_variables=["review"],
        template="Extract the main topic (e.g., performance, battery, design) of this review: {review}"
    )
    topic_chain = LLMChain(llm=llm, prompt=topic_prompt)

    sentiments = [sentiment_chain.run(review=review).strip() for review in reviews]
    topics = [topic_chain.run(review=review).strip() for review in reviews]

    print("Review Analysis: Sentiment and topics extracted")
    for i, (review, sentiment, topic) in enumerate(zip(reviews, sentiments, topics)):
        print(f"Review {i+1}: {review}")
        print(f"Sentiment: {sentiment}, Topic: {topic}")

    # %% [4. Visualization]
    sentiment_counts = Counter(sentiments)
    plt.figure(figsize=(8, 4))
    plt.bar(sentiment_counts.keys(), sentiment_counts.values(), color='blue')
    plt.title("Sentiment Distribution")
    plt.xlabel("Sentiment")
    plt.ylabel("Count")
    plt.savefig("review_analysis_output.png")
    print("Visualization: Sentiment distribution saved as review_analysis_output.png")

    # %% [5. Interview Scenario: Review Analysis]
    """
    Interview Scenario: Review Analysis
    Q: How is sentiment extracted from reviews?
    A: Sentiment is extracted using LLMs to classify reviews as Positive, Negative, or Neutral based on text content.
    Key: Prompt engineering ensures accurate classification.
    Example: PromptTemplate(...template="Determine the sentiment...")
    """

# Execute the demo
if __name__ == "__main__":
    nltk.download('punkt', quiet=True)
    run_review_analysis_demo()
--------------------------------------------------------------------------------
/Langchain Fundamentals/02 Retrieval Augmented Generation/03 RAG Pipeline/rag_pipeline.py:
--------------------------------------------------------------------------------
# %% [1. Introduction to RAG Pipeline]
# Learn to enhance LLM responses with RAG for retail queries using LangChain.

# Setup: pip install langchain langchain-openai faiss-cpu numpy matplotlib nltk
import matplotlib.pyplot as plt
from langchain.docstore.document import Document
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
import numpy as np
import nltk

def run_rag_pipeline_demo():
    # %% [2. Synthetic Retail Document Data and Query]
    documents = [
        Document(page_content="TechCorp Laptop: 16GB RAM, Intel i7, 512GB SSD, ideal for gaming.", metadata={"product": "Laptop"}),
        Document(page_content="TechCorp Smartphone: Long battery, vibrant display.", metadata={"product": "Smartphone"}),
        Document(page_content="TechCorp Tablet: Lightweight, 10-hour battery.", metadata={"product": "Tablet"})
    ]
    query = "What’s the best product for gaming?"
    print("Synthetic Data: Retail documents and query created")
    print(f"Documents: {[doc.metadata['product'] for doc in documents]}")
    print(f"Query: {query}")

    # %% [3. RAG Pipeline]
    embeddings = OpenAIEmbeddings(api_key="your-openai-api-key")  # Replace with your OpenAI API key
    vector_store = FAISS.from_documents(documents, embeddings)
    llm = OpenAI(api_key="your-openai-api-key")
    rag_chain = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=vector_store.as_retriever(search_kwargs={"k": 2})
    )

    rag_response = rag_chain.run(query)
    non_rag_response = llm(query)  # Direct LLM response without RAG
    print("RAG Pipeline: Responses generated")
    print(f"RAG Response: {rag_response.strip()}")
    print(f"Non-RAG Response: {non_rag_response.strip()}")

    # %% [4. Visualization]
    response_lengths = [
        len(nltk.word_tokenize(rag_response)),
        len(nltk.word_tokenize(non_rag_response))
    ]
    plt.figure(figsize=(8, 4))
    plt.bar(['RAG', 'Non-RAG'], response_lengths, color=['blue', 'red'])
    plt.title("RAG vs Non-RAG Response Lengths")
    plt.xlabel("Response Type")
    plt.ylabel("Word Count")
    plt.savefig("rag_pipeline_output.png")
    print("Visualization: Response lengths saved as rag_pipeline_output.png")

    # %% [5. Interview Scenario: RAG Pipeline]
    """
    Interview Scenario: RAG Pipeline
    Q: How does RAG improve LLM responses?
    A: RAG retrieves relevant documents to provide context, improving accuracy and relevance over standalone LLMs.
    Key: Combines retrieval with generation.
    Example: RetrievalQA.from_chain_type(llm=llm, retriever=vector_store.as_retriever())
    """

# Execute the demo
if __name__ == "__main__":
    nltk.download('punkt', quiet=True)
    run_rag_pipeline_demo()
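
# %% [6. Optional: Returning Sources Sketch]
# Not part of the original demo: RetrievalQA can also return the retrieved
# documents, which helps verify what grounded the answer; assumes the same
# llm and vector_store objects as above.
def run_rag_with_sources_sketch(llm, vector_store, query):
    chain = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=vector_store.as_retriever(search_kwargs={"k": 2}),
        return_source_documents=True
    )
    result = chain({"query": query})  # dict-style call exposes both output keys
    print(f"Answer: {result['result'].strip()}")
    for doc in result["source_documents"]:
        print(f"Source: {doc.metadata['product']}")
    return result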
--------------------------------------------------------------------------------
/Langchain Fundamentals/05 Retail Applications/01 Chatbots/chatbots.py:
--------------------------------------------------------------------------------
# %% [1. Introduction to Chatbots]
# Learn to build conversational agents for retail customer support with LangChain.

# Setup: pip install langchain langchain-openai numpy matplotlib nltk
import matplotlib.pyplot as plt
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferMemory
from langchain.llms import OpenAI
import numpy as np
import nltk

def run_chatbots_demo():
    # %% [2. Synthetic Retail Conversation Data]
    conversation = [
        "What are the features of the TechCorp laptop?",
        "Is it good for gaming?",
        "What’s the price?"
    ]
    print("Synthetic Data: Retail customer conversation created")
    print(f"Conversation: {conversation}")

    # %% [3. Chatbot with Memory]
    llm = OpenAI(api_key="your-openai-api-key")  # Replace with your OpenAI API key
    prompt = PromptTemplate(
        input_variables=["history", "query"],
        template="You are a retail assistant. Given the conversation history:\n{history}\nAnswer: {query}"
    )
    memory = ConversationBufferMemory(memory_key="history")  # string buffer slots cleanly into the prompt
    chatbot = LLMChain(llm=llm, prompt=prompt, memory=memory)

    responses = []
    history_lengths = []
    for query in conversation:
        response = chatbot.run(query=query)  # LLMChain saves each turn to memory automatically
        responses.append(response)
        history_lengths.append(len(nltk.word_tokenize(memory.buffer_as_str)))  # snapshot after each turn

    print("Chatbot: Responses generated")
    for i, (query, response) in enumerate(zip(conversation, responses)):
        print(f"Query {i+1}: {query}")
        print(f"Response: {response.strip()}")

    # %% [4. Visualization]
    response_lengths = [len(nltk.word_tokenize(resp)) for resp in responses]

    plt.figure(figsize=(8, 4))
    x = np.arange(len(conversation))
    plt.plot(x, response_lengths, marker='o', label='Response Length', color='blue')
    plt.plot(x, history_lengths, marker='x', label='History Length', color='green')
    plt.xticks(x, [f"Query {i+1}" for i in range(len(conversation))])
    plt.title("Chatbot Response and History Lengths")
    plt.xlabel("Query")
    plt.ylabel("Word Count")
    plt.legend()
    plt.savefig("chatbots_output.png")
    print("Visualization: Response and history lengths saved as chatbots_output.png")

    # %% [5. Interview Scenario: Chatbots]
    """
    Interview Scenario: Chatbots
    Q: How do chatbots use memory in LangChain?
    A: Chatbots use memory to retain conversation history, ensuring context-aware responses for coherent interactions.
    Key: ConversationBufferMemory stores query-response pairs.
    Example: ConversationBufferMemory(memory_key="history")
    """

# Execute the demo
if __name__ == "__main__":
    nltk.download('punkt', quiet=True)
    run_chatbots_demo()
--------------------------------------------------------------------------------
/Langchain Fundamentals/01 Core Components/03 Memory/memory.py:
--------------------------------------------------------------------------------
# %% [1. Introduction to Memory]
# Learn contextual conversation history with LangChain memory for retail interactions.

# Setup: pip install langchain langchain-openai numpy matplotlib nltk
import matplotlib.pyplot as plt
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferMemory
from langchain.llms import OpenAI
import numpy as np
import nltk

def run_memory_demo():
    # %% [2. Synthetic Retail Conversation Data]
    conversation = [
        "Tell me about TechCorp laptops.",
        "Which one is best for gaming?",
        "What’s the price of that model?"
    ]
    print("Synthetic Data: Retail customer conversation created")
    print(f"Conversation: {conversation}")

    # %% [3. Conversational Memory]
    llm = OpenAI(api_key="your-openai-api-key")  # Replace with your OpenAI API key
    prompt = PromptTemplate(
        input_variables=["history", "query"],
        template="You are a retail assistant. Given the conversation history:\n{history}\nAnswer the query: {query}"
    )
    memory = ConversationBufferMemory(memory_key="history")  # string buffer slots cleanly into the prompt
    chain = LLMChain(llm=llm, prompt=prompt, memory=memory)

    responses = []
    history_lengths = []
    for query in conversation:
        response = chain.run(query=query)  # LLMChain saves each turn to memory automatically
        responses.append(response)
        history_lengths.append(len(nltk.word_tokenize(memory.buffer_as_str)))  # history grows per turn

    print("Memory: Conversational responses generated")
    for i, (query, response) in enumerate(zip(conversation, responses)):
        print(f"Query {i+1}: {query}")
        print(f"Response: {response.strip()}")

    # %% [4. Visualization]
    response_lengths = [len(nltk.word_tokenize(resp)) for resp in responses]

    plt.figure(figsize=(8, 4))
    x = np.arange(len(conversation))
    plt.plot(x, history_lengths, marker='o', label='History Length', color='blue')
    plt.plot(x, response_lengths, marker='x', label='Response Length', color='green')
    plt.xticks(x, [f"Query {i+1}" for i in range(len(conversation))])
    plt.title("Conversation History and Response Lengths")
    plt.xlabel("Query")
    plt.ylabel("Word Count")
    plt.legend()
    plt.savefig("memory_output.png")
    print("Visualization: History and response lengths saved as memory_output.png")

    # %% [5. Interview Scenario: Memory]
    """
    Interview Scenario: Memory
    Q: How does memory enhance conversational agents in LangChain?
    A: Memory stores conversation history, enabling context-aware responses for coherent interactions.
    Key: Types like ConversationBufferMemory retain query-response pairs.
    Example: ConversationBufferMemory(memory_key="history")
    """

# Execute the demo
if __name__ == "__main__":
    nltk.download('punkt', quiet=True)
    run_memory_demo()
--------------------------------------------------------------------------------
/Langchain Fundamentals/04 Advanced Features/02 Evaluation Metrics/evaluation_metrics.py:
--------------------------------------------------------------------------------
# %% [1. Introduction to Evaluation Metrics]
# Learn BLEU, ROUGE, and custom metrics for retail response quality with LangChain.

# Setup: pip install langchain langchain-openai numpy matplotlib nltk rouge-score
import matplotlib.pyplot as plt
from langchain.llms import OpenAI
from nltk.translate.bleu_score import sentence_bleu
from rouge_score import rouge_scorer
import numpy as np
import nltk  # needed for the punkt download in __main__

def run_evaluation_metrics_demo():
    # %% [2. Synthetic Retail Query and Reference Data]
    queries = [
        "Describe the TechCorp laptop features.",
        "Explain the TechCorp smartphone battery.",
        "Detail the TechCorp tablet use case."
    ]
    references = [
        "The TechCorp laptop has 16GB RAM, Intel i7, and 512GB SSD.",
        "The TechCorp smartphone offers a long-lasting battery with vibrant display.",
        "The TechCorp tablet is lightweight, ideal for students and professionals."
    ]
    print("Synthetic Data: Retail queries and references created")
    print(f"Queries: {queries}")

    # %% [3. Response Generation and Evaluation]
    llm = OpenAI(api_key="your-openai-api-key")  # Replace with your OpenAI API key
    responses = [llm(query) for query in queries]

    bleu_scores = []
    rouge_scores = []
    scorer = rouge_scorer.RougeScorer(['rouge1', 'rougeL'], use_stemmer=True)

    for ref, resp in zip(references, responses):
        # BLEU Score
        ref_tokens = [ref.split()]
        resp_tokens = resp.split()
        bleu = sentence_bleu(ref_tokens, resp_tokens)
        bleu_scores.append(bleu)

        # ROUGE Score
        rouge = scorer.score(ref, resp)
        rouge_scores.append(rouge['rouge1'].fmeasure)

    print("Evaluation Metrics: Scores calculated")
    for i, (query, resp, bleu, rouge) in enumerate(zip(queries, responses, bleu_scores, rouge_scores)):
        print(f"Query {i+1}: {query}")
        print(f"Response: {resp.strip()}")
        print(f"BLEU: {bleu:.2f}, ROUGE-1: {rouge:.2f}")

    # %% [4. Visualization]
    plt.figure(figsize=(8, 4))
    x = np.arange(len(queries))
    plt.bar(x - 0.2, bleu_scores, 0.4, label='BLEU', color='blue')
    plt.bar(x + 0.2, rouge_scores, 0.4, label='ROUGE-1', color='red')
    plt.xticks(x, [f"Query {i+1}" for i in range(len(queries))])
    plt.title("BLEU and ROUGE Scores")
    plt.xlabel("Query")
    plt.ylabel("Score")
    plt.legend()
    plt.savefig("evaluation_metrics_output.png")
    print("Visualization: Scores saved as evaluation_metrics_output.png")

    # %% [5. Interview Scenario: Evaluation Metrics]
    """
    Interview Scenario: Evaluation Metrics
    Q: What are BLEU and ROUGE used for?
    A: BLEU measures n-gram overlap, while ROUGE evaluates recall and precision of text similarity; both are used to assess response quality.
    Key: Metrics quantify LLM performance.
    Example: sentence_bleu([ref.split()], resp.split())
    """

# Execute the demo
if __name__ == "__main__":
    nltk.download('punkt', quiet=True)
    run_evaluation_metrics_demo()
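
# %% [6. Optional: Custom Metric Sketch]
# Not part of the original demo: the intro mentions custom metrics, so here is a
# minimal keyword-recall metric -- the fraction of reference words that appear in
# the response. A simple illustration, not a standard metric.
def keyword_recall(reference: str, response: str) -> float:
    ref_words = set(reference.lower().split())
    resp_words = set(response.lower().split())
    return len(ref_words & resp_words) / len(ref_words) if ref_words else 0.0

# Example: keyword_recall("16GB RAM and 512GB SSD", "It ships with 16GB RAM") -> 0.4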
--------------------------------------------------------------------------------
/Langchain Fundamentals/01 Core Components/04 Document Loaders/document_loaders.py:
--------------------------------------------------------------------------------
# %% [1. Introduction to Document Loaders]
# Learn to process retail data with LangChain document loaders.

# Setup: pip install langchain langchain-openai numpy matplotlib pandas nltk
import matplotlib.pyplot as plt
from langchain.docstore.document import Document
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
import numpy as np
import nltk

def run_document_loaders_demo():
    # %% [2. Synthetic Retail Document Data]
    documents = [
        Document(
            page_content="TechCorp Laptop Manual: Features include 16GB RAM, an Intel i7 processor, and a 512GB SSD.",
            metadata={"product": "TechCorp Laptop"}
        ),
        Document(
            page_content="TechCorp Smartphone Review: Excellent battery life, vibrant display, but average camera.",
            metadata={"product": "TechCorp Smartphone"}
        ),
        Document(
            page_content="TechCorp Tablet Guide: Lightweight design, 10-hour battery, ideal for students.",
            metadata={"product": "TechCorp Tablet"}
        )
    ]
    print("Synthetic Data: Retail documents created")
    print(f"Documents: {[doc.metadata['product'] for doc in documents]}")

    # %% [3. Document Processing]
    llm = OpenAI(api_key="your-openai-api-key")  # Replace with your OpenAI API key
    prompt = PromptTemplate(
        input_variables=["content"],
        template="You are a retail assistant. Summarize the document: {content}"
    )
    chain = LLMChain(llm=llm, prompt=prompt)

    summaries = [chain.run(content=doc.page_content) for doc in documents]
    print("Document Loaders: Summaries generated")
    for i, (doc, summary) in enumerate(zip(documents, summaries)):
        print(f"Document {i+1}: {doc.metadata['product']}")
        print(f"Summary: {summary.strip()}")

    # %% [4. Visualization]
    document_lengths = [len(nltk.word_tokenize(doc.page_content)) for doc in documents]
    summary_lengths = [len(nltk.word_tokenize(summary)) for summary in summaries]

    plt.figure(figsize=(8, 4))
    x = np.arange(len(documents))
    plt.bar(x - 0.2, document_lengths, 0.4, label='Document Length', color='blue')
    plt.bar(x + 0.2, summary_lengths, 0.4, label='Summary Length', color='green')
    plt.xticks(x, [doc.metadata['product'] for doc in documents])
    plt.title("Document and Summary Lengths")
    plt.xlabel("Product")
    plt.ylabel("Word Count")
    plt.legend()
    plt.savefig("document_loaders_output.png")
    print("Visualization: Document and summary lengths saved as document_loaders_output.png")

    # %% [5. Interview Scenario: Document Loaders]
    """
    Interview Scenario: Document Loaders
    Q: How do document loaders process external data in LangChain?
    A: Document loaders parse text from various sources into Document objects, enabling LLM processing for tasks like summarization.
    Key: Support diverse formats like PDFs, CSVs, or raw text.
    Example: Document(page_content="...", metadata={...})
    """

# Execute the demo
if __name__ == "__main__":
    nltk.download('punkt', quiet=True)
    run_document_loaders_demo()
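
# %% [6. Optional: Loading From Disk Sketch]
# Not part of the original demo: a sketch of a real loader. TextLoader reads a
# file from disk into Document objects; the file path is a hypothetical assumption.
def load_manual_from_disk_sketch():
    from langchain.document_loaders import TextLoader

    loader = TextLoader("techcorp_laptop_manual.txt")  # hypothetical file
    return loader.load()  # returns a list of Document objects ready for the chain above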
--------------------------------------------------------------------------------
/Langchain Fundamentals/04 Advanced Features/03 Agent Optimization/agent_optimization.py:
--------------------------------------------------------------------------------
# %% [1. Introduction to Agent Optimization]
# Learn to optimize agent performance and latency for retail tasks with LangChain.

# Setup: pip install langchain langchain-openai numpy matplotlib
import matplotlib.pyplot as plt
from langchain.agents import initialize_agent, Tool
from langchain.llms import OpenAI
import time
import numpy as np

def run_agent_optimization_demo():
    # %% [2. Synthetic Retail Query Data]
    queries = [
        "Check TechCorp laptop stock.",
        "Calculate TechCorp smartphone discount.",
        "Search TechCorp tablet reviews."
    ]
    print("Synthetic Data: Retail queries created")
    print(f"Queries: {queries}")

    # %% [3. Agent Optimization]
    llm = OpenAI(api_key="your-openai-api-key")  # Replace with your OpenAI API key

    def mock_stock_check(query):
        return "Stock: 10 units."

    def mock_discount_calculator(query):
        return "Discount: 15% off."

    def mock_search_reviews(query):
        return "Reviews: 4.5/5 rating."

    tools = [
        Tool(name="StockCheck", func=mock_stock_check, description="Check product stock"),
        Tool(name="DiscountCalculator", func=mock_discount_calculator, description="Calculate discount"),
        Tool(name="SearchReviews", func=mock_search_reviews, description="Search reviews")
    ]

    # Non-Optimized Agent
    non_optimized_agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
    non_optimized_times = []
    for query in queries:
        start = time.time()
        non_optimized_agent.run(query)
        non_optimized_times.append(time.time() - start)

    # Optimized Agent (less verbose, limited tool calls)
    optimized_agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=False, max_iterations=3)
    optimized_times = []
    for query in queries:
        start = time.time()
        optimized_agent.run(query)
        optimized_times.append(time.time() - start)

    print("Agent Optimization: Execution times recorded")
    for i, (query, non_opt_time, opt_time) in enumerate(zip(queries, non_optimized_times, optimized_times)):
        print(f"Query {i+1}: {query}")
        print(f"Non-Optimized: {non_opt_time:.2f}s, Optimized: {opt_time:.2f}s")

    # %% [4. Visualization]
    plt.figure(figsize=(8, 4))
    x = np.arange(len(queries))
    plt.bar(x - 0.2, non_optimized_times, 0.4, label='Non-Optimized', color='red')
    plt.bar(x + 0.2, optimized_times, 0.4, label='Optimized', color='green')
    plt.xticks(x, [f"Query {i+1}" for i in range(len(queries))])
    plt.title("Agent Execution Times")
    plt.xlabel("Query")
    plt.ylabel("Time (s)")
    plt.legend()
    plt.savefig("agent_optimization_output.png")
    print("Visualization: Execution times saved as agent_optimization_output.png")

    # %% [5. Interview Scenario: Agent Optimization]
    """
    Interview Scenario: Agent Optimization
    Q: How do you optimize agent performance?
    A: Limit iterations, reduce verbosity, and streamline tool calls to lower latency while maintaining accuracy.
    Key: Balances speed and quality.
    Example: initialize_agent(..., max_iterations=3, verbose=False)
    """

# Execute the demo
if __name__ == "__main__":
    run_agent_optimization_demo()
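
# %% [6. Optional: LLM Caching Sketch]
# Not part of the original demo: caching repeated LLM calls is another latency
# lever; this assumes a LangChain version that still exposes the classic
# langchain.llm_cache global (newer releases moved to set_llm_cache).
def enable_llm_cache_sketch():
    import langchain
    from langchain.cache import InMemoryCache

    langchain.llm_cache = InMemoryCache()  # identical prompts now hit the cache instead of the API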
--------------------------------------------------------------------------------
/Langchain Fundamentals/04 Advanced Features/01 Custom Chains/custom_chains.py:
--------------------------------------------------------------------------------
# %% [1. Introduction to Custom Chains]
# Learn to design tailored workflows for retail tasks with LangChain.

# Setup: pip install langchain langchain-openai numpy matplotlib nltk
import matplotlib.pyplot as plt
from langchain.chains import LLMChain, SequentialChain
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
import numpy as np
import nltk

def run_custom_chains_demo():
    # %% [2. Synthetic Retail Query Data]
    queries = [
        "Describe the TechCorp laptop and suggest a use case.",
        "Explain the TechCorp smartphone features and recommend an accessory.",
        "Detail the TechCorp tablet and propose a target audience."
    ]
    print("Synthetic Data: Retail queries created")
    print(f"Queries: {queries}")

    # %% [3. Custom Chain]
    llm = OpenAI(api_key="your-openai-api-key")  # Replace with your OpenAI API key

    # Chain 1: Describe product
    description_prompt = PromptTemplate(
        input_variables=["query"],
        template="You are a retail assistant. Describe the product in the query: {query}"
    )
    description_chain = LLMChain(llm=llm, prompt=description_prompt, output_key="description")

    # Chain 2: Suggest recommendation
    suggestion_prompt = PromptTemplate(
        input_variables=["description"],
        template="Based on the product description: {description}, suggest a use case or recommendation."
    )
    suggestion_chain = LLMChain(llm=llm, prompt=suggestion_prompt, output_key="suggestion")

    custom_chain = SequentialChain(
        chains=[description_chain, suggestion_chain],
        input_variables=["query"],
        output_variables=["description", "suggestion"]
    )

    responses = [custom_chain({"query": query}) for query in queries]
    print("Custom Chain: Responses generated")
    for i, (query, response) in enumerate(zip(queries, responses)):
        print(f"Query {i+1}: {query}")
        print(f"Description: {response['description'].strip()}")
        print(f"Suggestion: {response['suggestion'].strip()}")

    # %% [4. Visualization]
    description_lengths = [len(nltk.word_tokenize(resp["description"])) for resp in responses]
    suggestion_lengths = [len(nltk.word_tokenize(resp["suggestion"])) for resp in responses]

    plt.figure(figsize=(8, 4))
    x = np.arange(len(queries))
    plt.bar(x - 0.2, description_lengths, 0.4, label='Description Length', color='blue')
    plt.bar(x + 0.2, suggestion_lengths, 0.4, label='Suggestion Length', color='green')
    plt.xticks(x, [f"Query {i+1}" for i in range(len(queries))])
    plt.title("Custom Chain Output Lengths")
    plt.xlabel("Query")
    plt.ylabel("Word Count")
    plt.legend()
    plt.savefig("custom_chains_output.png")
    print("Visualization: Output lengths saved as custom_chains_output.png")

    # %% [5. Interview Scenario: Custom Chains]
    """
    Interview Scenario: Custom Chains
    Q: How do custom chains handle complex tasks?
    A: Custom chains combine multiple LLMChain instances in a SequentialChain for multi-step workflows tailored to specific tasks.
    Key: Modular design enhances flexibility.
    Example: SequentialChain(chains=[LLMChain(...), LLMChain(...)], ...)
    """

# Execute the demo
if __name__ == "__main__":
    nltk.download('punkt', quiet=True)
    run_custom_chains_demo()
--------------------------------------------------------------------------------
/Langchain Fundamentals/05 Retail Applications/04 Query Answering/query_answering.py:
--------------------------------------------------------------------------------
# %% [1. Introduction to Query Answering]
# Learn to answer retail customer queries using RAG with LangChain.

# Setup: pip install langchain langchain-openai faiss-cpu numpy matplotlib nltk
import matplotlib.pyplot as plt
from langchain.docstore.document import Document
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
import numpy as np
import nltk

def run_query_answering_demo():
    # %% [2. Synthetic Retail Document and Query Data]
    documents = [
        Document(page_content="TechCorp Laptop: 16GB RAM, Intel i7, 512GB SSD, great for gaming.", metadata={"product": "Laptop"}),
        Document(page_content="TechCorp Smartphone: Long battery, vibrant display, ideal for media.", metadata={"product": "Smartphone"}),
        Document(page_content="TechCorp Tablet: Lightweight, 10-hour battery, perfect for students.", metadata={"product": "Tablet"})
    ]
    queries = [
        "What’s the best product for gaming?",
        "Which product has a long battery life?",
        "Is there a lightweight product?"
    ]
    print("Synthetic Data: Retail documents and queries created")
    print(f"Documents: {[doc.metadata['product'] for doc in documents]}")
    print(f"Queries: {queries}")

    # %% [3. RAG-Based Query Answering]
    embeddings = OpenAIEmbeddings(api_key="your-openai-api-key")  # Replace with your OpenAI API key
    vector_store = FAISS.from_documents(documents, embeddings)
    llm = OpenAI(api_key="your-openai-api-key")
    rag_chain = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=vector_store.as_retriever(search_kwargs={"k": 2})
    )

    rag_responses = [rag_chain.run(query) for query in queries]
    non_rag_responses = [llm(query) for query in queries]

    print("Query Answering: Responses generated")
    for i, (query, rag_resp, non_rag_resp) in enumerate(zip(queries, rag_responses, non_rag_responses)):
        print(f"Query {i+1}: {query}")
        print(f"RAG Response: {rag_resp.strip()}")
        print(f"Non-RAG Response: {non_rag_resp.strip()}")

    # %% [4. Visualization]
    rag_lengths = [len(nltk.word_tokenize(resp)) for resp in rag_responses]
    non_rag_lengths = [len(nltk.word_tokenize(resp)) for resp in non_rag_responses]

    plt.figure(figsize=(8, 4))
    x = np.arange(len(queries))
    plt.bar(x - 0.2, rag_lengths, 0.4, label='RAG Response', color='blue')
    plt.bar(x + 0.2, non_rag_lengths, 0.4, label='Non-RAG Response', color='red')
    plt.xticks(x, [f"Query {i+1}" for i in range(len(queries))])
    plt.title("RAG vs Non-RAG Response Lengths")
    plt.xlabel("Query")
    plt.ylabel("Word Count")
    plt.legend()
    plt.savefig("query_answering_output.png")
    print("Visualization: Response lengths saved as query_answering_output.png")

    # %% [5. Interview Scenario: Query Answering]
    """
    Interview Scenario: Query Answering
    Q: How does RAG improve query answering?
    A: RAG retrieves relevant documents to provide context, improving accuracy and specificity of LLM responses.
    Key: Combines retrieval and generation.
    Example: RetrievalQA.from_chain_type(llm=llm, retriever=...)
    """

# Execute the demo
if __name__ == "__main__":
    nltk.download('punkt', quiet=True)
    run_query_answering_demo()
3 | 4 | # Setup: pip install langchain langchain-openai numpy matplotlib nltk 5 | import matplotlib.pyplot as plt 6 | from langchain.chains import LLMChain, SequentialChain 7 | from langchain.prompts import PromptTemplate 8 | from langchain.llms import OpenAI 9 | import numpy as np 10 | import nltk 11 | 12 | def run_chains_demo(): 13 | # %% [2. Synthetic Retail Query Data] 14 | queries = [ 15 | "What are the features of the TechCorp laptop?", 16 | "Compare TechCorp laptops and smartphones.", 17 | "Is the TechCorp laptop good for gaming?" 18 | ] 19 | print("Synthetic Data: Retail customer queries created") 20 | print(f"Queries: {queries}") 21 | 22 | # %% [3. Single LLMChain] 23 | llm = OpenAI(api_key="your-openai-api-key") # Replace with your OpenAI API key 24 | prompt = PromptTemplate( 25 | input_variables=["query"], 26 | template="You are a retail assistant. Answer the customer query: {query}" 27 | ) 28 | chain = LLMChain(llm=llm, prompt=prompt, output_key="response") 29 | 30 | single_chain_responses = [chain.run(query) for query in queries] 31 | print("Single LLMChain: Responses generated") 32 | for i, (query, response) in enumerate(zip(queries, single_chain_responses)): 33 | print(f"Query {i+1}: {query}") 34 | print(f"Response: {response.strip()}") 35 | 36 | # %% [4. SequentialChain] 37 | summary_prompt = PromptTemplate( 38 | input_variables=["response"], 39 | template="Summarize the following response in one sentence: {response}" 40 | ) 41 | summary_chain = LLMChain(llm=llm, prompt=summary_prompt, output_key="summary") 42 | 43 | sequential_chain = SequentialChain( 44 | chains=[chain, summary_chain], 45 | input_variables=["query"], 46 | output_variables=["response", "summary"] 47 | ) 48 | 49 | sequential_responses = [sequential_chain({"query": query}) for query in queries] 50 | print("SequentialChain: Responses and summaries generated") 51 | for i, (query, result) in enumerate(zip(queries, sequential_responses)): 52 | print(f"Query {i+1}: {query}") 53 | print(f"Response: {result['response'].strip()}") 54 | print(f"Summary: {result['summary'].strip()}") 55 | 56 | # %% [5. Visualization] 57 | response_lengths = [len(nltk.word_tokenize(resp["response"])) for resp in sequential_responses] 58 | summary_lengths = [len(nltk.word_tokenize(resp["summary"])) for resp in sequential_responses] 59 | 60 | plt.figure(figsize=(8, 4)) 61 | x = np.arange(len(queries)) 62 | plt.bar(x - 0.2, response_lengths, 0.4, label='Response Length', color='blue') 63 | plt.bar(x + 0.2, summary_lengths, 0.4, label='Summary Length', color='green') 64 | plt.xticks(x, [f"Query {i+1}" for i in range(len(queries))]) 65 | plt.title("Response and Summary Lengths") 66 | plt.xlabel("Query") 67 | plt.ylabel("Word Count") 68 | plt.legend() 69 | plt.savefig("chains_output.png") 70 | print("Visualization: Response and summary lengths saved as chains_output.png") 71 | 72 | # %% [6. Interview Scenario: Chains] 73 | """ 74 | Interview Scenario: Chains 75 | Q: How do LangChain chains work for LLM workflows? 76 | A: Chains combine LLMs with prompts to create sequential workflows, like LLMChain for single tasks or SequentialChain for multi-step processes. 77 | Key: Modular design enables complex task automation. 78 | Example: SequentialChain(chains=[LLMChain(...), LLMChain(...)], ...) 
79 | """ 80 | 81 | # Execute the demo 82 | if __name__ == "__main__": 83 | nltk.download('punkt', quiet=True) 84 | run_chains_demo() -------------------------------------------------------------------------------- /Langchian Fundamentals/02 Retrieval Augmented Generation/04 Embedding Models/embedding_models.py: -------------------------------------------------------------------------------- 1 | # %% [1. Introduction to Embedding Models] 2 | # Learn to use Hugging Face and OpenAI embeddings for retail RAG with LangChain. 3 | 4 | # Setup: pip install langchain langchain-openai langchain-huggingface faiss-cpu numpy matplotlib 5 | import matplotlib.pyplot as plt 6 | from langchain.docstore.document import Document 7 | from langchain.vectorstores import FAISS 8 | from langchain.embeddings import OpenAIEmbeddings, HuggingFaceEmbeddings 9 | from sklearn.metrics.pairwise import cosine_similarity 10 | import numpy as np 11 | 12 | def run_embedding_models_demo(): 13 | # %% [2. Synthetic Retail Document Data and Query] 14 | documents = [ 15 | Document(page_content="TechCorp Laptop: 16GB RAM, Intel i7, 512GB SSD.", metadata={"product": "Laptop"}), 16 | Document(page_content="TechCorp Smartphone: Long battery, vibrant display.", metadata={"product": "Smartphone"}), 17 | Document(page_content="TechCorp Tablet: Lightweight, 10-hour battery.", metadata={"product": "Tablet"}) 18 | ] 19 | query = "Find a product with a good battery." 20 | print("Synthetic Data: Retail documents and query created") 21 | print(f"Documents: {[doc.metadata['product'] for doc in documents]}") 22 | print(f"Query: {query}") 23 | 24 | # %% [3. Embedding Models Comparison] 25 | openai_embeddings = OpenAIEmbeddings(api_key="your-openai-api-key") # Replace with your OpenAI API key 26 | hf_embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2") 27 | 28 | openai_vector_store = FAISS.from_documents(documents, openai_embeddings) 29 | hf_vector_store = FAISS.from_documents(documents, hf_embeddings) 30 | 31 | openai_retrieved = openai_vector_store.similarity_search(query, k=2) 32 | hf_retrieved = hf_vector_store.similarity_search(query, k=2) 33 | 34 | print("Embedding Models: Documents retrieved") 35 | print("OpenAI Embeddings:") 36 | for i, doc in enumerate(openai_retrieved): 37 | print(f"Retrieved {i+1}: {doc.metadata['product']} - {doc.page_content}") 38 | print("Hugging Face Embeddings:") 39 | for i, doc in enumerate(hf_retrieved): 40 | print(f"Retrieved {i+1}: {doc.metadata['product']} - {doc.page_content}") 41 | 42 | # %% [4. 
Visualization] 43 | query_openai_emb = openai_embeddings.embed_query(query) 44 | query_hf_emb = hf_embeddings.embed_query(query) 45 | doc_openai_embs = [openai_embeddings.embed_query(doc.page_content) for doc in documents] 46 | doc_hf_embs = [hf_embeddings.embed_query(doc.page_content) for doc in documents] 47 | 48 | openai_similarities = [cosine_similarity([query_openai_emb], [emb])[0][0] for emb in doc_openai_embs] 49 | hf_similarities = [cosine_similarity([query_hf_emb], [emb])[0][0] for emb in doc_hf_embs] 50 | 51 | plt.figure(figsize=(10, 4)) 52 | x = np.arange(len(documents)) 53 | plt.bar(x - 0.2, openai_similarities, 0.4, label='OpenAI Embeddings', color='blue') 54 | plt.bar(x + 0.2, hf_similarities, 0.4, label='Hugging Face Embeddings', color='green') 55 | plt.xticks(x, [doc.metadata['product'] for doc in documents]) 56 | plt.title("Embedding Model Similarity Comparison") 57 | plt.xlabel("Product") 58 | plt.ylabel("Cosine Similarity") 59 | plt.legend() 60 | plt.savefig("embedding_models_output.png") 61 | print("Visualization: Embedding similarities saved as embedding_models_output.png") 62 | 63 | # %% [5. Interview Scenario: Embedding Models] 64 | """ 65 | Interview Scenario: Embedding Models 66 | Q: How do embedding models impact RAG performance? 67 | A: Embeddings determine retrieval quality; OpenAI offers high accuracy, while Hugging Face models are cost-effective and open-source. 68 | Key: Model choice balances performance and cost. 69 | Example: HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2") 70 | """ 71 | 72 | # Execute the demo 73 | if __name__ == "__main__": 74 | run_embedding_models_demo() -------------------------------------------------------------------------------- /Langchian Fundamentals/03 AI Agents/README.md: -------------------------------------------------------------------------------- 1 | # 🤖 AI Agents with LangChain 2 | 3 |
Your guide to mastering AI agents with LangChain for AI/ML and retail-focused interviews
10 | 11 | --- 12 | 13 | ## 📖 Introduction 14 | 15 | Welcome to the **AI Agents** subsection of the **LangChain Library Roadmap**! 🚀 This folder explores AI agents, autonomous systems that leverage tools, reasoning, and planning for retail tasks. Designed for hands-on learning and interview success, it builds on your prior roadmaps and supports your retail-themed projects (April 26, 2025). This section equips you with skills for retail AI roles using LangChain. 16 | 17 | ## 🌟 What’s Inside? 18 | 19 | - **Tool Integration**: Use tools like search or APIs for retail tasks. 20 | - **Agent Types**: Reactive, planning, and ReAct agents. 21 | - **Agent Reasoning**: Autonomous decision-making for customer support. 22 | - **Custom Agents**: Build agents for retail scenarios. 23 | - **Hands-on Code**: Four `.py` files with examples using synthetic retail data. 24 | - **Interview Scenarios**: Key questions and answers for LangChain interviews. 25 | 26 | ## 🔍 Who Is This For? 27 | 28 | - AI Engineers building autonomous retail applications. 29 | - Machine Learning Engineers developing agent-based systems. 30 | - AI Researchers mastering LangChain’s agent capabilities. 31 | - Software Engineers deepening expertise in LangChain for retail. 32 | - Anyone preparing for AI/ML interviews in retail or tech. 33 | 34 | ## 🗺️ Learning Roadmap 35 | 36 | This subsection covers four key AI agent components, each with a dedicated `.py` file: 37 | 38 | ### 🔧 Tool Integration (`tool_integration.py`) 39 | - Tool Usage 40 | - Retail Task Automation 41 | - Tool Call Visualization 42 | 43 | ### 🧩 Agent Types (`agent_types.py`) 44 | - Reactive, Planning, ReAct Agents 45 | - Agent Behavior Comparison 46 | - Behavior Visualization 47 | 48 | ### 🧠 Agent Reasoning (`agent_reasoning.py`) 49 | - Autonomous Decision-Making 50 | - Customer Support Scenarios 51 | - Decision Visualization 52 | 53 | ### 🛠️ Custom Agents (`custom_agents.py`) 54 | - Retail-Specific Agents 55 | - Scenario-Based Design 56 | - Agent Action Visualization 57 | 58 | ## 💡 Why Master AI Agents? 59 | 60 | AI agents are critical for intelligent automation: 61 | 1. **Retail Relevance**: Automate customer support and inventory tasks. 62 | 2. **Interview Relevance**: Tested in coding challenges (e.g., agent design). 63 | 3. **Autonomy**: Enable decision-making with minimal human input. 64 | 4. **Industry Demand**: A must-have for 6 LPA+ AI/ML roles. 65 | 66 | ## 📆 Study Plan 67 | 68 | - **Week 1**: 69 | - Day 1-2: Tool Integration 70 | - Day 3-4: Agent Types 71 | - Day 5-6: Agent Reasoning 72 | - Day 7: Custom Agents 73 | - **Week 2**: 74 | - Day 1-7: Review `.py` files and practice interview scenarios. 75 | 76 | ## 🛠️ Setup Instructions 77 | 78 | 1. **Python Environment**: 79 | - Install Python 3.8+ and pip. 80 | - Create a virtual environment: `python -m venv langchain_env; source langchain_env/bin/activate`. 81 | - Install dependencies: `pip install langchain langchain-openai numpy matplotlib pandas`. 82 | 2. **API Keys**: 83 | - Obtain an OpenAI API key (replace `"your-openai-api-key"` in code). 84 | - Set environment variable: `export OPENAI_API_KEY="your-openai-api-key"`. 85 | 3. **Datasets**: 86 | - Uses synthetic retail data (e.g., customer queries, inventory). 87 | - Optional: Download datasets from [Hugging Face Datasets](https://huggingface.co/datasets). 88 | - Note: `.py` files use simulated data to avoid file I/O constraints. 89 | 4. **Running Code**: 90 | - Run `.py` files (e.g., `python tool_integration.py`). 
91 | - Use Google Colab or local setup with GPU support. 92 | - View outputs in terminal and Matplotlib visualizations (PNGs). 93 | - Check terminal for errors; ensure dependencies and API keys are set. 94 | 95 | ## 🏆 Practical Tasks 96 | 97 | 1. **Tool Integration**: 98 | - Integrate a mock API for retail tasks. 99 | - Visualize tool call frequencies. 100 | 2. **Agent Types**: 101 | - Compare reactive and planning agents. 102 | - Analyze agent behavior metrics. 103 | 3. **Agent Reasoning**: 104 | - Build an agent for customer support. 105 | - Visualize decision paths. 106 | 4. **Custom Agents**: 107 | - Design an agent for inventory queries. 108 | - Plot agent action outcomes. 109 | 110 | ## 💡 Interview Tips 111 | 112 | - **Common Questions**: 113 | - How do agents use tools in LangChain? 114 | - What’s the difference between reactive and planning agents? 115 | - How does agent reasoning work for retail tasks? 116 | - How do you build a custom agent? 117 | - **Tips**: 118 | - Explain tool integration with code (e.g., `tools=[Tool(...)]`). 119 | - Demonstrate agent types (e.g., `initialize_agent`). 120 | - Code tasks like custom agent design. 121 | - Discuss trade-offs (e.g., agent complexity vs. performance). 122 | - **Coding Tasks**: 123 | - Build an agent with a mock API tool. 124 | - Implement a ReAct agent for retail queries. 125 | - **Conceptual Clarity**: 126 | - Explain agent autonomy and tool usage. 127 | - Describe ReAct reasoning framework. 128 | 129 | ## 📚 Resources 130 | 131 | - [LangChain Documentation](https://python.langchain.com/docs/) 132 | - [LangChain GitHub](https://github.com/langchain-ai/langchain) 133 | - [OpenAI API Documentation](https://platform.openai.com/docs/) 134 | - [Matplotlib Documentation](https://matplotlib.org/stable/contents.html) 135 | 136 | ## 🤝 Contributions 137 | 138 | 1. Fork the repository. 139 | 2. Create a feature branch (`git checkout -b feature/amazing-addition`). 140 | 3. Commit changes (`git commit -m 'Add some amazing content'`). 141 | 4. Push to the branch (`git push origin feature/amazing-addition`). 142 | 5. Open a Pull Request. 143 | 144 | --- 145 | 146 |Happy Learning and Good Luck with Your Interviews! ✨
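
---

## 🧪 Bonus: Minimal Tool-Using Agent Sketch

As a quick reference for the interview tips above, here is a minimal sketch of the `Tool` + `initialize_agent` pattern. It assumes the classic LangChain agent API used throughout this repo; the `check_stock` helper and its inventory dict are hypothetical stand-ins for a real backend.

```python
# A ReAct-style agent with a single mock retail tool (sketch, not production code).
from langchain.agents import AgentType, Tool, initialize_agent
from langchain.llms import OpenAI

def check_stock(product: str) -> str:
    """Hypothetical inventory lookup backed by a synthetic dict."""
    inventory = {"laptop": 12, "smartphone": 0, "tablet": 7}
    count = inventory.get(product.strip().lower(), 0)
    return f"{product}: {count} units in stock"

llm = OpenAI(api_key="your-openai-api-key")  # Replace with your OpenAI API key
tools = [Tool(
    name="InventoryLookup",
    func=check_stock,
    description="Look up current stock for a TechCorp product by name.",
)]
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
# agent.run("Is the TechCorp smartphone in stock?")
```

The `description` string is what the LLM reads when deciding whether to call the tool, so keep it specific.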
148 |Your guide to mastering advanced LangChain features for AI/ML and retail-focused interviews
10 | 11 | --- 12 | 13 | ## 📖 Introduction 14 | 15 | Welcome to the **Advanced Features** subsection of the **LangChain Library Roadmap**! 🚀 This folder explores advanced LangChain capabilities, including custom chains, evaluation metrics, agent optimization, and API integration. Designed for hands-on learning and interview success, it builds on your prior roadmaps and supports your retail-themed projects (April 26, 2025). This section equips you with skills for retail AI roles using LangChain. 16 | 17 | ## 🌟 What’s Inside? 18 | 19 | - **Custom Chains**: Design tailored workflows for complex retail tasks. 20 | - **Evaluation Metrics**: BLEU, ROUGE, and custom metrics for response quality. 21 | - **Agent Optimization**: Optimize agent performance and latency. 22 | - **Integration with APIs**: Connect LangChain with external retail APIs. 23 | - **Hands-on Code**: Four `.py` files with examples using synthetic retail data. 24 | - **Interview Scenarios**: Key questions and answers for LangChain interviews. 25 | 26 | ## 🔍 Who Is This For? 27 | 28 | - AI Engineers building sophisticated LLM applications. 29 | - Machine Learning Engineers optimizing LangChain workflows. 30 | - AI Researchers mastering advanced LangChain features. 31 | - Software Engineers deepening expertise in LangChain for retail. 32 | - Anyone preparing for AI/ML interviews in retail or tech. 33 | 34 | ## 🗺️ Learning Roadmap 35 | 36 | This subsection covers four key advanced features, each with a dedicated `.py` file: 37 | 38 | ### 🔗 Custom Chains (`custom_chains.py`) 39 | - Tailored Workflows 40 | - Multi-Step Retail Tasks 41 | - Workflow Visualization 42 | 43 | ### 📊 Evaluation Metrics (`evaluation_metrics.py`) 44 | - BLEU and ROUGE Metrics 45 | - Custom Retail Metrics 46 | - Metric Visualization 47 | 48 | ### ⚡ Agent Optimization (`agent_optimization.py`) 49 | - Performance and Latency 50 | - Retail Agent Tuning 51 | - Performance Visualization 52 | 53 | ### 🌐 Integration with APIs (`api_integration.py`) 54 | - External Retail APIs 55 | - API-Driven Retail Tasks 56 | - API Call Visualization 57 | 58 | ## 💡 Why Master Advanced Features? 59 | 60 | Advanced LangChain features are critical for production-grade AI: 61 | 1. **Retail Relevance**: Enable complex workflows and API-driven tasks. 62 | 2. **Interview Relevance**: Tested in coding challenges (e.g., custom chain design). 63 | 3. **Performance**: Optimize accuracy and speed for real-world use. 64 | 4. **Industry Demand**: A must-have for 6 LPA+ AI/ML roles. 65 | 66 | ## 📆 Study Plan 67 | 68 | - **Week 1**: 69 | - Day 1-2: Custom Chains 70 | - Day 3-4: Evaluation Metrics 71 | - Day 5-6: Agent Optimization 72 | - Day 7: Integration with APIs 73 | - **Week 2**: 74 | - Day 1-7: Review `.py` files and practice interview scenarios. 75 | 76 | ## 🛠️ Setup Instructions 77 | 78 | 1. **Python Environment**: 79 | - Install Python 3.8+ and pip. 80 | - Create a virtual environment: `python -m venv langchain_env; source langchain_env/bin/activate`. 81 | - Install dependencies: `pip install langchain langchain-openai numpy matplotlib pandas nltk rouge-score`. 82 | 2. **API Keys**: 83 | - Obtain an OpenAI API key (replace `"your-openai-api-key"` in code). 84 | - Set environment variable: `export OPENAI_API_KEY="your-openai-api-key"`. 85 | 3. **Datasets**: 86 | - Uses synthetic retail data (e.g., customer queries, product data). 87 | - Optional: Download datasets from [Hugging Face Datasets](https://huggingface.co/datasets). 
88 | - Note: `.py` files use simulated data to avoid file I/O constraints. 89 | 4. **Running Code**: 90 | - Run `.py` files (e.g., `python custom_chains.py`). 91 | - Use Google Colab or local setup with GPU support. 92 | - View outputs in terminal and Matplotlib visualizations (PNGs). 93 | - Check terminal for errors; ensure dependencies and API keys are set. 94 | 95 | ## 🏆 Practical Tasks 96 | 97 | 1. **Custom Chains**: 98 | - Build a chain for retail query processing. 99 | - Visualize workflow steps. 100 | 2. **Evaluation Metrics**: 101 | - Evaluate responses with BLEU and ROUGE. 102 | - Plot metric scores. 103 | 3. **Agent Optimization**: 104 | - Optimize an agent for customer support. 105 | - Analyze latency improvements. 106 | 4. **Integration with APIs**: 107 | - Connect to a mock retail API. 108 | - Visualize API call frequencies. 109 | 110 | ## 💡 Interview Tips 111 | 112 | - **Common Questions**: 113 | - How do custom chains handle complex tasks? 114 | - What are BLEU and ROUGE metrics used for? 115 | - How do you optimize agent performance? 116 | - How does LangChain integrate with external APIs? 117 | - **Tips**: 118 | - Explain custom chains with code (e.g., `SequentialChain`). 119 | - Demonstrate metric calculation (e.g., `RougeScorer`). 120 | - Code tasks like agent optimization or API integration. 121 | - Discuss trade-offs (e.g., chain complexity vs. latency). 122 | - **Coding Tasks**: 123 | - Implement a custom chain for retail tasks. 124 | - Calculate BLEU for response evaluation. 125 | - **Conceptual Clarity**: 126 | - Explain chain modularity and API integration. 127 | - Describe optimization techniques for agents. 128 | 129 | ## 📚 Resources 130 | 131 | - [LangChain Documentation](https://python.langchain.com/docs/) 132 | - [LangChain GitHub](https://github.com/langchain-ai/langchain) 133 | - [OpenAI API Documentation](https://platform.openai.com/docs/) 134 | - [Matplotlib Documentation](https://matplotlib.org/stable/contents.html) 135 | - [ROUGE Documentation](https://github.com/pltrdy/rouge) 136 | 137 | ## 🤝 Contributions 138 | 139 | 1. Fork the repository. 140 | 2. Create a feature branch (`git checkout -b feature/amazing-addition`). 141 | 3. Commit changes (`git commit -m 'Add some amazing content'`). 142 | 4. Push to the branch (`git push origin feature/amazing-addition`). 143 | 5. Open a Pull Request. 144 | 145 | --- 146 | 147 |Happy Learning and Good Luck with Your Interviews! ✨
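
---

## 🧪 Bonus: BLEU and ROUGE in a Few Lines

As a quick reference for the evaluation-metrics tips above, here is a minimal sketch using `nltk` and `rouge-score` (both listed in the Setup Instructions). The candidate and reference strings are illustrative only.

```python
# Score a generated answer against a reference with BLEU and ROUGE-L (sketch).
from nltk.translate.bleu_score import SmoothingFunction, sentence_bleu
from rouge_score import rouge_scorer

reference = "The TechCorp laptop has 16GB RAM, an Intel i7, and a 512GB SSD."
candidate = "TechCorp's laptop ships with 16GB RAM, an i7 CPU, and a 512GB SSD."

# BLEU measures n-gram overlap; smoothing avoids zero scores on short texts.
bleu = sentence_bleu([reference.split()], candidate.split(),
                     smoothing_function=SmoothingFunction().method1)

# ROUGE-L scores the longest common subsequence between reference and candidate.
scorer = rouge_scorer.RougeScorer(["rougeL"], use_stemmer=True)
rouge_l = scorer.score(reference, candidate)["rougeL"]

print(f"BLEU: {bleu:.3f}")
print(f"ROUGE-L F1: {rouge_l.fmeasure:.3f}")
```

Both metrics reward surface overlap, so for retail answers pair them with a task-specific check (e.g., whether the named specs are actually correct).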
149 |Your guide to mastering retail applications with LangChain for AI/ML and retail-focused interviews
11 | 12 | --- 13 | 14 | ## 📖 Introduction 15 | 16 | Welcome to the **Retail Applications** subsection of the **LangChain Library Roadmap**! 🚀 This folder explores practical retail use cases, including chatbots, recommendation systems, review analysis, and query answering. Designed for hands-on learning and interview success, it builds on your prior roadmaps and supports your retail-themed projects (April 26, 2025). This section equips you with skills for retail AI roles using LangChain. 17 | 18 | ## 🌟 What’s Inside? 19 | 20 | - **Chatbots**: Conversational agents for customer support with memory. 21 | - **Recommendation Systems**: Product recommendations using embeddings. 22 | - **Review Analysis**: Sentiment and topic extraction from reviews. 23 | - **Query Answering**: Answer customer queries using RAG. 24 | - **Hands-on Code**: Four `.py` files with examples using synthetic retail data. 25 | - **Interview Scenarios**: Key questions and answers for LangChain interviews. 26 | 27 | ## 🔍 Who Is This For? 28 | 29 | - AI Engineers building retail-focused AI applications. 30 | - Machine Learning Engineers developing retail recommendation or analysis systems. 31 | - AI Researchers mastering LangChain for retail use cases. 32 | - Software Engineers deepening expertise in LangChain for retail. 33 | - Anyone preparing for AI/ML interviews in retail or tech. 34 | 35 | ## 🗺️ Learning Roadmap 36 | 37 | This subsection covers four key retail applications, each with a dedicated `.py` file: 38 | 39 | ### 🤖 Chatbots (`chatbots.py`) 40 | - Conversational Agents 41 | - Memory-Based Support 42 | - Interaction Visualization 43 | 44 | ### 🛍️ Recommendation Systems (`recommendation_systems.py`) 45 | - Embedding-Based Recommendations 46 | - Product Matching 47 | - Recommendation Visualization 48 | 49 | ### 📝 Review Analysis (`review_analysis.py`) 50 | - Sentiment Extraction 51 | - Topic Modeling 52 | - Sentiment Visualization 53 | 54 | ### ❓ Query Answering (`query_answering.py`) 55 | - RAG-Based Answers 56 | - Retail Query Handling 57 | - Response Visualization 58 | 59 | ## 💡 Why Master Retail Applications? 60 | 61 | Retail applications are critical for customer-focused AI: 62 | 1. **Retail Relevance**: Enhance customer support, sales, and insights. 63 | 2. **Interview Relevance**: Tested in coding challenges (e.g., chatbot design). 64 | 3. **Practicality**: Directly applicable to retail business needs. 65 | 4. **Industry Demand**: A must-have for 6 LPA+ AI/ML roles. 66 | 67 | ## 📆 Study Plan 68 | 69 | - **Week 1**: 70 | - Day 1-2: Chatbots 71 | - Day 3-4: Recommendation Systems 72 | - Day 5-6: Review Analysis 73 | - Day 7: Query Answering 74 | - **Week 2**: 75 | - Day 1-7: Review `.py` files and practice interview scenarios. 76 | 77 | ## 🛠️ Setup Instructions 78 | 79 | 1. **Python Environment**: 80 | - Install Python 3.8+ and pip. 81 | - Create a virtual environment: `python -m venv langchain_env; source langchain_env/bin/activate`. 82 | - Install dependencies: `pip install langchain langchain-openai faiss-cpu numpy matplotlib pandas nltk scikit-learn`. 83 | 2. **API Keys**: 84 | - Obtain an OpenAI API key (replace `"your-openai-api-key"` in code). 85 | - Set environment variable: `export OPENAI_API_KEY="your-openai-api-key"`. 86 | 3. **Datasets**: 87 | - Uses synthetic retail data (e.g., customer queries, reviews, products). 88 | - Optional: Download datasets from [Hugging Face Datasets](https://huggingface.co/datasets). 89 | - Note: `.py` files use simulated data to avoid file I/O constraints. 90 | 4. 
**Running Code**: 91 | - Run `.py` files (e.g., `python chatbots.py`). 92 | - Use Google Colab or local setup with GPU support. 93 | - View outputs in terminal and Matplotlib visualizations (PNGs). 94 | - Check terminal for errors; ensure dependencies and API keys are set. 95 | 96 | ## 🏆 Practical Tasks 97 | 98 | 1. **Chatbots**: 99 | - Build a customer support chatbot with memory. 100 | - Visualize conversation lengths. 101 | 2. **Recommendation Systems**: 102 | - Create a product recommendation system. 103 | - Plot recommendation similarities. 104 | 3. **Review Analysis**: 105 | - Analyze sentiment in retail reviews. 106 | - Visualize sentiment distribution. 107 | 4. **Query Answering**: 108 | - Implement RAG for customer queries. 109 | - Compare RAG vs. non-RAG responses. 110 | 111 | ## 💡 Interview Tips 112 | 113 | - **Common Questions**: 114 | - How do chatbots use memory in LangChain? 115 | - How do embeddings power recommendation systems? 116 | - How is sentiment extracted from reviews? 117 | - How does RAG improve query answering? 118 | - **Tips**: 119 | - Explain chatbots with code (e.g., `ConversationBufferMemory`). 120 | - Demonstrate recommendation systems (e.g., `FAISS` embeddings). 121 | - Code tasks like sentiment analysis or RAG setup. 122 | - Discuss trade-offs (e.g., chatbot latency vs. context). 123 | - **Coding Tasks**: 124 | - Build a retail chatbot. 125 | - Implement a RAG-based query system. 126 | - **Conceptual Clarity**: 127 | - Explain memory in conversational agents. 128 | - Describe RAG’s role in query answering. 129 | 130 | ## 📚 Resources 131 | 132 | - [LangChain Documentation](https://python.langchain.com/docs/) 133 | - [LangChain GitHub](https://github.com/langchain-ai/langchain) 134 | - [OpenAI API Documentation](https://platform.openai.com/docs/) 135 | - [Faiss Documentation](https://github.com/facebookresearch/faiss) 136 | - [Matplotlib Documentation](https://matplotlib.org/stable/contents.html) 137 | 138 | ## 🤝 Contributions 139 | 140 | 1. Fork the repository. 141 | 2. Create a feature branch (`git checkout -b feature/amazing-addition`). 142 | 3. Commit changes (`git commit -m 'Add some amazing content'`). 143 | 4. Push to the branch (`git push origin feature/amazing-addition`). 144 | 5. Open a Pull Request. 145 | 146 | --- 147 | 148 |Happy Learning and Good Luck with Your Interviews! ✨
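
---

## 🧪 Bonus: Memory-Backed Chatbot Sketch

As a quick reference for the chatbot tips above, here is a minimal sketch of `ConversationChain` with `ConversationBufferMemory`, matching the classic API used in this repo. The sample turns are illustrative.

```python
# A customer-support chatbot that keeps conversational context (sketch).
from langchain.chains import ConversationChain
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory

llm = OpenAI(api_key="your-openai-api-key")  # Replace with your OpenAI API key
memory = ConversationBufferMemory()          # Stores the running transcript verbatim
chatbot = ConversationChain(llm=llm, memory=memory)

# The second turn can resolve "it" only because the first turn is kept in memory.
# chatbot.predict(input="Tell me about the TechCorp laptop.")
# chatbot.predict(input="Does it come with a warranty?")
```

`ConversationBufferMemory` grows with every turn; for long sessions, consider a windowed or summarizing memory to bound prompt size.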
150 |Your guide to mastering Retrieval-Augmented Generation (RAG) with LangChain for AI/ML and retail-focused interviews
11 | 12 | --- 13 | 14 | ## 📖 Introduction 15 | 16 | Welcome to the **Retrieval-Augmented Generation (RAG)** subsection of the **LangChain Library Roadmap**! 🚀 This folder explores RAG, a technique to enhance LLM responses with external knowledge, using vector stores, document loaders, and embedding models. Designed for hands-on learning and interview success, it builds on your prior roadmaps and supports your retail-themed projects (April 26, 2025). This section equips you with skills for retail AI roles using LangChain. 17 | 18 | ## 🌟 What’s Inside? 19 | 20 | - **Vector Stores**: Faiss for efficient document retrieval. 21 | - **Document Loaders and Splitters**: Process large retail documents. 22 | - **RAG Pipeline**: Combine retrieval and generation for accurate responses. 23 | - **Embedding Models**: Use Hugging Face or OpenAI embeddings. 24 | - **Hands-on Code**: Four `.py` files with examples using synthetic retail data. 25 | - **Interview Scenarios**: Key questions and answers for LangChain interviews. 26 | 27 | ## 🔍 Who Is This For? 28 | 29 | - AI Engineers building knowledge-enhanced LLM applications. 30 | - Machine Learning Engineers developing RAG systems. 31 | - AI Researchers mastering LangChain’s retrieval capabilities. 32 | - Software Engineers deepening expertise in LangChain for retail. 33 | - Anyone preparing for AI/ML interviews in retail or tech. 34 | 35 | ## 🗺️ Learning Roadmap 36 | 37 | This subsection covers four key RAG components, each with a dedicated `.py` file: 38 | 39 | ### 📈 Vector Stores (`vector_stores.py`) 40 | - Faiss-Based Retrieval 41 | - Document Indexing 42 | - Retrieval Visualization 43 | 44 | ### 📄 Document Loaders and Splitters (`document_loaders_splitters.py`) 45 | - Document Processing 46 | - Text Splitting 47 | - Chunk Visualization 48 | 49 | ### 🔄 RAG Pipeline (`rag_pipeline.py`) 50 | - Retrieval and Generation 51 | - Retail Query Answering 52 | - Response Visualization 53 | 54 | ### 🧠 Embedding Models (`embedding_models.py`) 55 | - Hugging Face and OpenAI Embeddings 56 | - Embedding Comparison 57 | - Similarity Visualization 58 | 59 | ## 💡 Why Master RAG? 60 | 61 | RAG is critical for accurate, context-aware AI applications: 62 | 1. **Retail Relevance**: Enhances product query answers with manuals. 63 | 2. **Interview Relevance**: Tested in coding challenges (e.g., RAG setup). 64 | 3. **Accuracy**: Combines LLM generation with factual retrieval. 65 | 4. **Industry Demand**: A must-have for 6 LPA+ AI/ML roles. 66 | 67 | ## 📆 Study Plan 68 | 69 | - **Week 1**: 70 | - Day 1-2: Vector Stores 71 | - Day 3-4: Document Loaders and Splitters 72 | - Day 5-6: RAG Pipeline 73 | - Day 7: Embedding Models 74 | - **Week 2**: 75 | - Day 1-7: Review `.py` files and practice interview scenarios. 76 | 77 | ## 🛠️ Setup Instructions 78 | 79 | 1. **Python Environment**: 80 | - Install Python 3.8+ and pip. 81 | - Create a virtual environment: `python -m venv langchain_env; source langchain_env/bin/activate`. 82 | - Install dependencies: `pip install langchain langchain-openai langchain-huggingface faiss-cpu numpy matplotlib pandas nltk`. 83 | 2. **API Keys**: 84 | - Obtain an OpenAI API key (replace `"your-openai-api-key"` in code). 85 | - Set environment variable: `export OPENAI_API_KEY="your-openai-api-key"`. 86 | - Optional: Use Hugging Face models (`langchain-huggingface`). 87 | 3. **Datasets**: 88 | - Uses synthetic retail data (e.g., product manuals, queries). 89 | - Optional: Download datasets from [Hugging Face Datasets](https://huggingface.co/datasets). 
90 | - Note: `.py` files use simulated data to avoid file I/O constraints. 91 | 4. **Running Code**: 92 | - Run `.py` files (e.g., `python vector_stores.py`). 93 | - Use Google Colab or local setup with GPU support. 94 | - View outputs in terminal and Matplotlib visualizations (PNGs). 95 | - Check terminal for errors; ensure dependencies and API keys are set. 96 | 97 | ## 🏆 Practical Tasks 98 | 99 | 1. **Vector Stores**: 100 | - Index retail documents with Faiss. 101 | - Visualize retrieval similarities. 102 | 2. **Document Loaders and Splitters**: 103 | - Process synthetic manuals. 104 | - Analyze chunk sizes. 105 | 3. **RAG Pipeline**: 106 | - Build a RAG system for product queries. 107 | - Compare RAG vs. non-RAG responses. 108 | 4. **Embedding Models**: 109 | - Compare Hugging Face and OpenAI embeddings. 110 | - Visualize embedding similarities. 111 | 112 | ## 💡 Interview Tips 113 | 114 | - **Common Questions**: 115 | - How does RAG improve LLM accuracy? 116 | - What’s the role of vector stores in RAG? 117 | - How do document splitters optimize retrieval? 118 | - How do embedding models impact RAG performance? 119 | - **Tips**: 120 | - Explain RAG with code (e.g., `FAISS.from_texts`). 121 | - Demonstrate document splitting (e.g., `CharacterTextSplitter`). 122 | - Code tasks like RAG pipeline setup. 123 | - Discuss trade-offs (e.g., retrieval speed vs. accuracy). 124 | - **Coding Tasks**: 125 | - Build a Faiss-based vector store. 126 | - Implement a RAG pipeline for retail queries. 127 | - **Conceptual Clarity**: 128 | - Explain RAG’s retrieval-generation synergy. 129 | - Describe embedding model trade-offs. 130 | 131 | ## 📚 Resources 132 | 133 | - [LangChain Documentation](https://python.langchain.com/docs/) 134 | - [LangChain GitHub](https://github.com/langchain-ai/langchain) 135 | - [OpenAI API Documentation](https://platform.openai.com/docs/) 136 | - [Faiss Documentation](https://github.com/facebookresearch/faiss) 137 | - [Hugging Face Documentation](https://huggingface.co/docs) 138 | - [Matplotlib Documentation](https://matplotlib.org/stable/contents.html) 139 | 140 | ## 🤝 Contributions 141 | 142 | 1. Fork the repository. 143 | 2. Create a feature branch (`git checkout -b feature/amazing-addition`). 144 | 3. Commit changes (`git commit -m 'Add some amazing content'`). 145 | 4. Push to the branch (`git push origin feature/amazing-addition`). 146 | 5. Open a Pull Request. 147 | 148 | --- 149 | 150 |Happy Learning and Good Luck with Your Interviews! ✨
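
---

## 🧪 Bonus: Minimal RAG Pipeline Sketch

As a quick reference for the RAG tips above, here is the `FAISS.from_texts` + `RetrievalQA` pattern condensed to its core. The product snippets are synthetic, as elsewhere in this repo.

```python
# Index a few snippets in FAISS, then answer a query with RetrievalQA (sketch).
from langchain.chains import RetrievalQA
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.vectorstores import FAISS

texts = [
    "TechCorp Laptop: 16GB RAM, Intel i7, 512GB SSD.",
    "TechCorp Tablet: lightweight, 10-hour battery.",
]
embeddings = OpenAIEmbeddings(api_key="your-openai-api-key")  # Replace with your key
vector_store = FAISS.from_texts(texts, embeddings)  # Embed and index the snippets

rag = RetrievalQA.from_chain_type(
    llm=OpenAI(api_key="your-openai-api-key"),
    chain_type="stuff",  # Stuff retrieved chunks directly into the prompt
    retriever=vector_store.as_retriever(search_kwargs={"k": 1}),
)
# rag.run("Which product is best for travel?")
```

Raising `k` retrieves more context at the cost of a longer prompt, which is exactly the retrieval speed vs. accuracy trade-off the interview questions above mention.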
152 |Your guide to mastering LangChain's core components for AI/ML and retail-focused interviews
11 | 12 | --- 13 | 14 | ## 📖 Introduction 15 | 16 | Welcome to the **Core Components** subsection of the **LangChain Library Roadmap**! 🚀 This folder dives into the foundational elements of the **LangChain** library, including chains, prompts, memory, and document loaders. Designed for hands-on learning and interview success, it builds on your prior roadmaps—**Python**, **TensorFlow.js**, **GenAI**, **JavaScript**, **Keras**, **Matplotlib**, **Pandas**, **NumPy**, **Computer Vision with OpenCV (cv2)**, **NLP with NLTK**, and **Hugging Face Transformers**—and supports your retail-themed projects (April 26, 2025). Whether tackling coding challenges or technical discussions, this section equips you with the skills to excel in retail AI roles using LangChain. 17 | 18 | ## 🌟 What’s Inside? 19 | 20 | - **Chains**: Sequential workflows for LLM tasks (e.g., LLMChain, SequentialChain). 21 | - **Prompts**: Dynamic prompt engineering for retail queries. 22 | - **Memory**: Contextual conversation history for customer interactions. 23 | - **Document Loaders**: Process retail data like product manuals and reviews. 24 | - **Hands-on Code**: Four `.py` files with practical examples using synthetic retail data (e.g., customer queries, product descriptions). 25 | - **Interview Scenarios**: Key questions and answers to ace LangChain interviews. 26 | 27 | ## 🔍 Who Is This For? 28 | 29 | - AI Engineers building LLM-powered retail applications. 30 | - Machine Learning Engineers creating workflows with LangChain. 31 | - AI Researchers mastering LangChain’s core components. 32 | - Software Engineers deepening expertise in LangChain for retail use cases. 33 | - Anyone preparing for AI/ML interviews in retail or tech. 34 | 35 | ## 🗺️ Learning Roadmap 36 | 37 | This subsection covers four key core components, each with a dedicated `.py` file: 38 | 39 | ### 🔗 Chains (`chains.py`) 40 | - Sequential Workflows 41 | - LLMChain and SequentialChain 42 | - Workflow Visualization 43 | 44 | ### 📝 Prompts (`prompts.py`) 45 | - Prompt Engineering 46 | - Dynamic Prompts for Retail 47 | - Response Visualization 48 | 49 | ### 🧠 Memory (`memory.py`) 50 | - Conversation History 51 | - Contextual Retail Interactions 52 | - Context Visualization 53 | 54 | ### 📚 Document Loaders (`document_loaders.py`) 55 | - Retail Data Processing 56 | - Text Extraction 57 | - Data Visualization 58 | 59 | ## 💡 Why Master Core Components? 60 | 61 | LangChain’s core components are essential for building intelligent AI applications, and here’s why they matter: 62 | 1. **Foundation**: Chains, prompts, memory, and loaders form the backbone of LangChain workflows. 63 | 2. **Retail Relevance**: Enable customer support, product queries, and review processing. 64 | 3. **Interview Relevance**: Tested in coding challenges (e.g., chain design, prompt engineering). 65 | 4. **Flexibility**: Support diverse retail tasks with modular components. 66 | 5. **Industry Demand**: A must-have for 6 LPA+ AI/ML roles. 67 | 68 | This section is your roadmap to mastering LangChain’s core components for technical interviews—let’s dive in! 69 | 70 | ## 📆 Study Plan 71 | 72 | - **Week 1**: 73 | - Day 1-2: Chains 74 | - Day 3-4: Prompts 75 | - Day 5-6: Memory 76 | - Day 7: Document Loaders 77 | - **Week 2**: 78 | - Day 1-7: Review all `.py` files and practice interview scenarios. 79 | 80 | ## 🛠️ Setup Instructions 81 | 82 | 1. **Python Environment**: 83 | - Install Python 3.8+ and pip. 
84 | - Create a virtual environment: `python -m venv langchain_env; source langchain_env/bin/activate`. 85 | - Install dependencies: `pip install langchain langchain-openai numpy matplotlib pandas nltk`. 86 | 2. **API Keys**: 87 | - Obtain an OpenAI API key for LLM access (replace `"your-openai-api-key"` in code). 88 | - Set environment variable: `export OPENAI_API_KEY="your-openai-api-key"`. 89 | - Alternatively, use Hugging Face models with `langchain-huggingface` (`pip install langchain-huggingface huggingface_hub`). 90 | 3. **Datasets**: 91 | - Uses synthetic retail data (e.g., customer queries, product descriptions, reviews). 92 | - Optional: Download datasets from [Hugging Face Datasets](https://huggingface.co/datasets) (e.g., Amazon Reviews). 93 | - Note: `.py` files use simulated data to avoid file I/O constraints. 94 | 4. **Running Code**: 95 | - Run `.py` files in a Python environment (e.g., `python chains.py`). 96 | - Use Google Colab for convenience or local setup with GPU support for faster processing. 97 | - View outputs in terminal (console logs) and Matplotlib visualizations (saved as PNGs). 98 | - Check terminal for errors; ensure dependencies and API keys are configured. 99 | 100 | ## 🏆 Practical Tasks 101 | 102 | 1. **Chains**: 103 | - Build a chain for retail product queries. 104 | - Visualize chain response lengths. 105 | 2. **Prompts**: 106 | - Create dynamic prompts for customer support queries. 107 | - Analyze prompt response quality. 108 | 3. **Memory**: 109 | - Implement a conversational agent with context retention. 110 | - Visualize conversation history length. 111 | 4. **Document Loaders**: 112 | - Process synthetic retail manuals or reviews. 113 | - Visualize extracted text statistics. 114 | 115 | ## 💡 Interview Tips 116 | 117 | - **Common Questions**: 118 | - How do LangChain chains work for LLM workflows? 119 | - What’s the role of prompt engineering in LangChain? 120 | - How does memory enhance conversational agents? 121 | - How do document loaders process external data? 122 | - **Tips**: 123 | - Explain chains with code (e.g., `LLMChain` with `PromptTemplate`). 124 | - Demonstrate prompt engineering (e.g., `PromptTemplate` for retail queries). 125 | - Be ready to code tasks like memory implementation or document loading. 126 | - Discuss trade-offs (e.g., chain complexity vs. performance, memory size vs. latency). 127 | - **Coding Tasks**: 128 | - Implement a chain for retail query processing. 129 | - Create a dynamic prompt for customer support. 130 | - Build a conversational agent with memory. 131 | - **Conceptual Clarity**: 132 | - Explain how chains combine LLMs with prompts. 133 | - Describe the role of memory in maintaining context. 134 | 135 | ## 📚 Resources 136 | 137 | - [LangChain Documentation](https://python.langchain.com/docs/) 138 | - [LangChain GitHub](https://github.com/langchain-ai/langchain) 139 | - [OpenAI API Documentation](https://platform.openai.com/docs/) 140 | - [NumPy Documentation](https://numpy.org/doc/) 141 | - [Matplotlib Documentation](https://matplotlib.org/stable/contents.html) 142 | - [“Deep Learning with Python” by François Chollet](https://www.manning.com/books/deep-learning-with-python) 143 | 144 | ## 🤝 Contributions 145 | 146 | Love to collaborate? Here’s how! 🌟 147 | 1. Fork the repository. 148 | 2. Create a feature branch (`git checkout -b feature/amazing-addition`). 149 | 3. Commit your changes (`git commit -m 'Add some amazing content'`). 150 | 4. Push to the branch (`git push origin feature/amazing-addition`). 
151 | 5. Open a Pull Request. 152 | 153 | --- 154 | 155 |Happy Learning and Good Luck with Your Interviews! ✨
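
---

## 🧪 Bonus: LLMChain in a Nutshell

As a quick reference for the chain and prompt tips above, here is the `PromptTemplate` + `LLMChain` pattern from `chains.py` and `prompts.py`, condensed to its core.

```python
# The smallest useful chain: a prompt template bound to an LLM (sketch).
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

prompt = PromptTemplate(
    input_variables=["product", "question"],
    template="You are a retail assistant. For {product}, answer: {question}",
)
chain = LLMChain(llm=OpenAI(api_key="your-openai-api-key"), prompt=prompt)
# chain.run(product="TechCorp laptop", question="Is it good for gaming?")
```

Chaining a second `LLMChain` behind this one via `SequentialChain` gives the multi-step pattern that `chains.py` demonstrates.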
157 |Your comprehensive guide to mastering the LangChain library for AI/ML and retail-focused interviews
12 | 13 | --- 14 | 15 | ## 📖 Introduction 16 | 17 | Welcome to the **LangChain Library Roadmap** for AI/ML and retail-focused interview preparation! 🚀 This roadmap dives deep into the **LangChain** library, a powerful framework for building applications powered by large language models (LLMs) with external tools, memory, and data retrieval. Covering all major **LangChain components** and retail applications, it’s designed for hands-on learning and interview success, building on your prior roadmaps—**Python**, **TensorFlow.js**, **GenAI**, **JavaScript**, **Keras**, **Matplotlib**, **Pandas**, **NumPy**, **Computer Vision with OpenCV (cv2)**, **NLP with NLTK**, and **Hugging Face Transformers**. Tailored to your retail-themed projects (April 26, 2025), this roadmap equips you with the skills to excel in advanced AI roles, whether tackling coding challenges or technical discussions. 18 | 19 | ## 🌟 What’s Inside? 20 | 21 | - **LangChain Components**: Chains, prompts, memory, and document loaders for LLM workflows. 22 | - **Retrieval-Augmented Generation (RAG)**: Knowledge-enhanced LLM responses with vector stores. 23 | - **AI Agents**: Autonomous agents with tools and reasoning for retail tasks. 24 | - **Advanced Features**: Custom chains, agent optimization, and evaluation metrics. 25 | - **Retail Applications**: Chatbots, recommendation systems, review analysis, and query answering. 26 | - **Hands-on Code**: Subsections with `.py` files using synthetic retail data (e.g., product reviews, customer queries). 27 | - **Interview Scenarios**: Key questions and answers to ace LangChain-related interviews. 28 | 29 | ## 🔍 Who Is This For? 30 | 31 | - AI Engineers building LLM-powered retail applications. 32 | - Machine Learning Engineers developing RAG systems or AI agents. 33 | - AI Researchers exploring LangChain’s capabilities with LLMs. 34 | - Software Engineers deepening expertise in LangChain for retail use cases. 35 | - Anyone preparing for AI/ML interviews in retail or tech. 36 | 37 | ## 🗺️ Learning Roadmap 38 | 39 | This roadmap is organized into subsections, each covering a key aspect of the LangChain library. Each subsection includes a dedicated folder with a `README.md` and `.py` files for practical demos. 40 | 41 | ### 🛠️ Core Components 42 | - **Chains**: Sequential workflows for LLM tasks (e.g., LLMChain, SequentialChain). 43 | - **Prompts**: Dynamic prompt engineering for retail queries. 44 | - **Memory**: Contextual conversation history for customer interactions. 45 | - **Document Loaders**: Process retail data (e.g., product manuals, reviews). 46 | 47 | ### 📚 Retrieval-Augmented Generation (RAG) 48 | - **Vector Stores**: Faiss or Chroma for document retrieval. 49 | - **Document Loaders and Splitters**: Handle large retail documents. 50 | - **RAG Pipeline**: Enhance LLM responses with external knowledge. 51 | - **Embedding Models**: Use Hugging Face or OpenAI embeddings. 52 | 53 | ### 🤖 AI Agents 54 | - **Tool Integration**: Use tools like search, calculators, or APIs for retail tasks. 55 | - **Agent Types**: Reactive, planning, and ReAct agents. 56 | - **Agent Reasoning**: Autonomous decision-making for customer support. 57 | - **Custom Agents**: Build agents for specific retail scenarios. 58 | 59 | ### 🚀 Advanced Features 60 | - **Custom Chains**: Design tailored workflows for complex tasks. 61 | - **Evaluation Metrics**: BLEU, ROUGE, and custom metrics for response quality. 62 | - **Agent Optimization**: Optimize agent performance and latency. 
63 | - **Integration with APIs**: Connect LangChain with external retail APIs.
64 | 
65 | ### 🛒 Retail Applications
66 | - **Chatbots**: Conversational agents for customer support with memory.
67 | - **Recommendation Systems**: Product recommendations using embeddings.
68 | - **Review Analysis**: Sentiment and topic extraction from reviews.
69 | - **Query Answering**: Answer customer queries using RAG.
70 | 
71 | ## 💡 Why Master LangChain?
72 | 
73 | LangChain is a cornerstone for building intelligent, context-aware AI applications, and here’s why it matters:
74 | 1. **Retail Relevance**: Powers customer support chatbots, personalized recommendations, and review analysis.
75 | 2. **Scalability**: Combines LLMs with external data and tools for robust applications.
76 | 3. **Interview Relevance**: Tested in coding challenges (e.g., RAG implementation, agent design).
77 | 4. **Flexibility**: Supports diverse use cases with chains, memory, and agents.
78 | 5. **Industry Demand**: A must-have for 6 LPA+ AI/ML roles in retail and tech.
79 | 
80 | This roadmap is your guide to mastering LangChain for technical interviews and retail AI projects—let’s dive in!
81 | 
82 | ## 📆 Study Plan
83 | 
84 | - **Month 1**:
85 |   - Week 1: Core Components (Chains, Prompts)
86 |   - Week 2: Core Components (Memory, Document Loaders)
87 |   - Week 3: Retrieval-Augmented Generation (Vector Stores, RAG Pipeline)
88 |   - Week 4: Retrieval-Augmented Generation (Embedding Models, Document Splitters)
89 | - **Month 2**:
90 |   - Week 1: AI Agents (Tool Integration, Agent Types)
91 |   - Week 2: AI Agents (Agent Reasoning, Custom Agents)
92 |   - Week 3: Advanced Features (Custom Chains, Evaluation Metrics)
93 |   - Week 4: Advanced Features (Agent Optimization, API Integration)
94 | - **Month 3**:
95 |   - Week 1: Retail Applications (Chatbots, Review Analysis)
96 |   - Week 2: Retail Applications (Recommendation Systems, Query Answering)
97 |   - Week 3: Review all subsections and practice coding tasks
98 |   - Week 4: Prepare for interviews with scenarios and mock coding challenges
99 | 
100 | ## 🛠️ Setup Instructions
101 | 
102 | 1. **Python Environment**:
103 |    - Install Python 3.8+ and pip.
104 |    - Create a virtual environment: `python -m venv langchain_env; source langchain_env/bin/activate`.
105 |    - Install dependencies: `pip install langchain langchain-openai faiss-cpu numpy matplotlib pandas scikit-learn`.
106 | 2. **API Keys**:
107 |    - Obtain an OpenAI API key for LLM access (replace `"your-openai-api-key"` in code).
108 |    - Set environment variable: `export OPENAI_API_KEY="your-openai-api-key"`.
109 |    - Alternatively, use Hugging Face models with `langchain-huggingface` (`pip install langchain-huggingface huggingface_hub`).
110 | 3. **Datasets**:
111 |    - Uses synthetic retail data (e.g., product descriptions, customer queries, reviews).
112 |    - Optional: Download datasets from [Hugging Face Datasets](https://huggingface.co/datasets) (e.g., Amazon Reviews).
113 |    - Note: `.py` files use simulated data to avoid file I/O constraints.
114 | 4. **Running Code**:
115 |    - Run `.py` files in a Python environment (e.g., `python chains.py` in `01 Core Components/01 Chains`).
116 |    - Use Google Colab for convenience or local setup with GPU support for faster processing.
117 |    - View outputs in terminal (console logs) and Matplotlib visualizations (saved as PNGs).
118 |    - Check terminal for errors; ensure dependencies and API keys are configured.
119 | 
120 | ## 🏆 Practical Tasks
121 | 
122 | 1. **Core Components**:
123 |    - Build a chain to answer retail product queries.
124 | - Implement a conversational agent with memory for customer support. 125 | 2. **Retrieval-Augmented Generation (RAG)**: 126 | - Create a RAG system for product manual queries. 127 | - Use Faiss to retrieve relevant documents for customer questions. 128 | 3. **AI Agents**: 129 | - Design an agent to track orders using a mock API tool. 130 | - Build a planning agent for retail inventory queries. 131 | 4. **Advanced Features**: 132 | - Develop a custom chain for multi-step retail workflows. 133 | - Evaluate response quality with BLEU and ROUGE metrics. 134 | 5. **Retail Applications**: 135 | - Build a chatbot for customer queries with RAG and memory. 136 | - Create a recommendation system using product description embeddings. 137 | - Analyze sentiment in retail reviews. 138 | 139 | ## 💡 Interview Tips 140 | 141 | - **Common Questions**: 142 | - What is LangChain, and how does it enhance LLM applications? 143 | - How does RAG improve LLM response accuracy? 144 | - What are the differences between reactive and planning agents in LangChain? 145 | - How can LangChain be applied to retail use cases? 146 | - **Tips**: 147 | - Explain chains with code (e.g., `LLMChain` with `PromptTemplate`). 148 | - Demonstrate RAG with a vector store (e.g., `FAISS.from_texts`). 149 | - Be ready to code tasks like agent tool integration or review analysis. 150 | - Discuss trade-offs (e.g., RAG latency vs. accuracy, agent complexity vs. reliability). 151 | - **Coding Tasks**: 152 | - Implement a simple chain for retail queries. 153 | - Build a RAG system for product information. 154 | - Design an agent for customer support. 155 | - **Conceptual Clarity**: 156 | - Explain how LangChain integrates LLMs with external data and tools. 157 | - Describe the role of memory in maintaining conversational context. 158 | 159 | ## 📚 Resources 160 | 161 | - [LangChain Documentation](https://python.langchain.com/docs/) 162 | - [LangChain GitHub](https://github.com/langchain-ai/langchain) 163 | - [OpenAI API Documentation](https://platform.openai.com/docs/) 164 | - [Faiss Documentation](https://github.com/facebookresearch/faiss) 165 | - [Hugging Face Datasets Documentation](https://huggingface.co/docs/datasets/) 166 | - [NumPy Documentation](https://numpy.org/doc/) 167 | - [Matplotlib Documentation](https://matplotlib.org/stable/contents.html) 168 | - [“Deep Learning with Python” by François Chollet](https://www.manning.com/books/deep-learning-with-python) 169 | 170 | ## 🤝 Contributions 171 | 172 | Love to collaborate? Here’s how! 🌟 173 | 1. Fork the repository. 174 | 2. Create a feature branch (`git checkout -b feature/amazing-addition`). 175 | 3. Commit your changes (`git commit -m 'Add some amazing content'`). 176 | 4. Push to the branch (`git push origin feature/amazing-addition`). 177 | 5. Open a Pull Request. 178 | 179 | --- 180 | 181 |Happy Learning and Good Luck with Your Interviews! ✨
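
---

## 🧪 Bonus: Embedding-Based Recommendations Sketch

As a quick reference for the recommendation task above, here is a minimal sketch: embed product descriptions, then retrieve the closest matches to a free-text customer need. It assumes OpenAI embeddings, as in the rest of this roadmap.

```python
# Recommend products by nearest-neighbor search in embedding space (sketch).
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

products = [
    "TechCorp Laptop: 16GB RAM, Intel i7, great for gaming.",
    "TechCorp Smartphone: long battery, vibrant display.",
    "TechCorp Tablet: lightweight, ideal for students.",
]
embeddings = OpenAIEmbeddings(api_key="your-openai-api-key")  # Replace with your key
store = FAISS.from_texts(products, embeddings)

# Top-2 products whose descriptions sit closest to the query in embedding space.
# for doc in store.similarity_search("something portable for school", k=2):
#     print(doc.page_content)
```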
183 |