├── .gitignore ├── 00_basics.js ├── 01_first_chain.js ├── 02_simplesequentialchain.js ├── 03_sequentialchain.js ├── 04_parsers.js ├── 05_indexes.js ├── 06_usestore.js ├── 07_chat.js ├── 08_agents.js ├── README.md ├── package.json └── restaurant.txt /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | faiss.index 3 | docstore.json 4 | package-lock.json 5 | .env -------------------------------------------------------------------------------- /00_basics.js: -------------------------------------------------------------------------------- 1 | import { config } from "dotenv"; 2 | config(); 3 | 4 | import { Configuration, OpenAIApi } from "openai"; 5 | 6 | const configuration = new Configuration({ 7 | apiKey: process.env.OPENAI_API_KEY, 8 | }); 9 | const openai = new OpenAIApi(configuration); 10 | 11 | async function chat(input) { 12 | const messages = [{ role: "user", content: input }]; 13 | 14 | const response = await openai.createChatCompletion({ 15 | model: "gpt-3.5-turbo", 16 | messages: messages, 17 | temperature: 0, 18 | }); 19 | 20 | return response.data.choices[0].message.content; 21 | } 22 | 23 | const question = "What is the capital of France"; 24 | 25 | chat(question) 26 | .then((response) => console.log(response)) 27 | .catch((error) => console.error(error)); 28 | 29 | const promptTemplate = ` 30 | Be very funny when answering questions 31 | Question: {question} 32 | `; 33 | 34 | const prompt = promptTemplate.replace("{question}", question); 35 | 36 | chat(prompt) 37 | .then((response) => console.log(response)) 38 | .catch((error) => console.error(error)); 39 | -------------------------------------------------------------------------------- /01_first_chain.js: -------------------------------------------------------------------------------- 1 | import { config } from "dotenv"; 2 | config(); 3 | 4 | import { OpenAI } from "langchain/llms/openai"; 5 | import { PromptTemplate } from 
"langchain/prompts"; 6 | import { LLMChain } from "langchain/chains"; 7 | 8 | const model = new OpenAI({ temperature: 0 }); 9 | const template = 10 | "Be very funny when answering questions\n Question: {question}"; 11 | const prompt = new PromptTemplate({ template, inputVariables: ["question"] }); 12 | 13 | const chain = new LLMChain({ llm: model, prompt }); 14 | 15 | const result = await chain.call({ question: "What is the capital of France?" }); 16 | console.log(result); 17 | -------------------------------------------------------------------------------- /02_simplesequentialchain.js: -------------------------------------------------------------------------------- 1 | import { config } from "dotenv"; 2 | config(); 3 | 4 | import { SimpleSequentialChain, LLMChain } from "langchain/chains"; 5 | import { OpenAI } from "langchain/llms/openai"; 6 | import { PromptTemplate } from "langchain/prompts"; 7 | 8 | const llm = new OpenAI({ temperature: 0 }); 9 | 10 | const responseTemplate1 = ` 11 | You are a helpful bot that creates a 'thank you' response text. 12 | If customers are unsatisfied, offer them a real world assistant to talk to. 13 | You will get a sentiment and subject as input and evaluate. 14 | 15 | text: {input} 16 | `; 17 | 18 | const responseTemplate2 = ` 19 | You are an assistant bot. Your job is to make the customer feel heard and understood. 20 | Reflect on the input you receive. 
21 | 22 | text: {input} 23 | `; 24 | 25 | const reviewPromptTemplate1 = new PromptTemplate({ 26 | template: responseTemplate1, 27 | inputVariables: ["input"], 28 | }); 29 | 30 | const reviewPromptTemplate2 = new PromptTemplate({ 31 | template: responseTemplate2, 32 | inputVariables: ["input"], 33 | }); 34 | 35 | const reviewChain1 = new LLMChain({ llm, prompt: reviewPromptTemplate1 }); 36 | const reviewChain2 = new LLMChain({ llm, prompt: reviewPromptTemplate2 }); 37 | 38 | const overallChain = new SimpleSequentialChain({ 39 | chains: [reviewChain1, reviewChain2], 40 | verbose: true, 41 | }); 42 | 43 | const result = await overallChain.run({ 44 | input: "I ordered Pizza Salami and it was awful!", 45 | }); 46 | 47 | console.log(result); 48 | -------------------------------------------------------------------------------- /03_sequentialchain.js: -------------------------------------------------------------------------------- 1 | import { config } from "dotenv"; 2 | config(); 3 | 4 | import { SequentialChain, LLMChain } from "langchain/chains"; 5 | import { OpenAI } from "langchain/llms/openai"; 6 | import { PromptTemplate } from "langchain/prompts"; 7 | 8 | const llm = new OpenAI({ temperature: 0 }); 9 | 10 | let template = 11 | "You ordered {dish_name} and your experience was {experience}. 
Write a review: "; 12 | let promptTemplate = new PromptTemplate({ 13 | template, 14 | inputVariables: ["dish_name", "experience"], 15 | }); 16 | const reviewChain = new LLMChain({ 17 | llm, 18 | prompt: promptTemplate, 19 | outputKey: "review", 20 | }); 21 | 22 | template = "Given the restaurant review: {review}, write a follow-up comment: "; 23 | promptTemplate = new PromptTemplate({ 24 | template, 25 | inputVariables: ["review"], 26 | }); 27 | const commentChain = new LLMChain({ 28 | llm, 29 | prompt: promptTemplate, 30 | outputKey: "comment", 31 | }); 32 | 33 | template = "Summarise the review in one short sentence: \n\n {review}"; 34 | promptTemplate = new PromptTemplate({ 35 | template, 36 | inputVariables: ["review"], 37 | }); 38 | const summaryChain = new LLMChain({ 39 | llm, 40 | prompt: promptTemplate, 41 | outputKey: "summary", 42 | }); 43 | 44 | template = "Translate the summary to german: \n\n {summary}"; 45 | promptTemplate = new PromptTemplate({ 46 | template, 47 | inputVariables: ["summary"], 48 | }); 49 | const translationChain = new LLMChain({ 50 | llm, 51 | prompt: promptTemplate, 52 | outputKey: "german_translation", 53 | }); 54 | 55 | const overallChain = new SequentialChain({ 56 | chains: [reviewChain, commentChain, summaryChain, translationChain], 57 | inputVariables: ["dish_name", "experience"], 58 | outputVariables: ["review", "comment", "summary", "german_translation"], 59 | }); 60 | 61 | const result = await overallChain.call({ 62 | dish_name: "Pizza Salami", 63 | experience: "It was awful!", 64 | }); 65 | console.log(result); 66 | -------------------------------------------------------------------------------- /04_parsers.js: -------------------------------------------------------------------------------- 1 | import { config } from "dotenv"; 2 | config(); 3 | 4 | import { OpenAI } from "langchain/llms/openai"; 5 | import { PromptTemplate } from "langchain/prompts"; 6 | import { StructuredOutputParser } from "langchain/output_parsers"; 7 | 
8 | const parser = StructuredOutputParser.fromNamesAndDescriptions({ 9 | answer: "answer to the user's question", 10 | }); 11 | const formatInstructions = parser.getFormatInstructions(); 12 | 13 | const prompt = new PromptTemplate({ 14 | template: 15 | "Be very funny when answering questions\n{format_instructions}\n Question: {question}", 16 | inputVariables: ["question"], 17 | partialVariables: { format_instructions: formatInstructions }, 18 | }); 19 | 20 | const model = new OpenAI({ temperature: 0 }); 21 | 22 | const input = await prompt.format({ 23 | question: "What is the capital of France?", 24 | }); 25 | console.log(input); 26 | 27 | const response = await model.call(input); 28 | 29 | console.log(response); 30 | 31 | console.log(await parser.parse(response)); 32 | -------------------------------------------------------------------------------- /05_indexes.js: -------------------------------------------------------------------------------- 1 | import { config } from "dotenv"; 2 | config(); 3 | 4 | import { TextLoader } from "langchain/document_loaders/fs/text"; 5 | import { CharacterTextSplitter } from "langchain/text_splitter"; 6 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 7 | import { FaissStore } from "langchain/vectorstores/faiss"; 8 | 9 | const loader = new TextLoader("./restaurant.txt"); 10 | 11 | const docs = await loader.load(); 12 | 13 | const splitter = new CharacterTextSplitter({ 14 | chunkSize: 200, 15 | chunkOverlap: 50, 16 | }); 17 | 18 | const documents = await splitter.splitDocuments(docs); 19 | console.log(documents); 20 | 21 | const embeddings = new OpenAIEmbeddings(); 22 | 23 | const vectorstore = await FaissStore.fromDocuments(documents, embeddings); 24 | await vectorstore.save("./"); 25 | -------------------------------------------------------------------------------- /06_usestore.js: -------------------------------------------------------------------------------- 1 | import { config } from "dotenv"; 2 | config(); 3 | 
4 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 5 | import { FaissStore } from "langchain/vectorstores/faiss"; 6 | import { OpenAI } from "langchain/llms/openai"; 7 | import { RetrievalQAChain, loadQAStuffChain } from "langchain/chains"; 8 | 9 | const embeddings = new OpenAIEmbeddings(); 10 | const vectorStore = await FaissStore.load("./", embeddings); 11 | 12 | const model = new OpenAI({ temperature: 0 }); 13 | 14 | const chain = new RetrievalQAChain({ 15 | combineDocumentsChain: loadQAStuffChain(model), 16 | retriever: vectorStore.asRetriever(), 17 | returnSourceDocuments: true, 18 | }); 19 | 20 | const res = await chain.call({ 21 | query: "When does the restaurant open on friday?", 22 | }); 23 | console.log(res.text); 24 | -------------------------------------------------------------------------------- /07_chat.js: -------------------------------------------------------------------------------- 1 | import { config } from "dotenv"; 2 | config(); 3 | 4 | import { ConversationChain } from "langchain/chains"; 5 | import { ChatOpenAI } from "langchain/chat_models/openai"; 6 | import { 7 | ChatPromptTemplate, 8 | HumanMessagePromptTemplate, 9 | SystemMessagePromptTemplate, 10 | MessagesPlaceholder, 11 | } from "langchain/prompts"; 12 | import { BufferMemory } from "langchain/memory"; 13 | 14 | const chat = new ChatOpenAI({ temperature: 0 }); 15 | 16 | const chatPrompt = ChatPromptTemplate.fromPromptMessages([ 17 | SystemMessagePromptTemplate.fromTemplate( 18 | "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know." 
19 | ), 20 | new MessagesPlaceholder("history"), 21 | HumanMessagePromptTemplate.fromTemplate("{input}"), 22 | ]); 23 | 24 | const chain = new ConversationChain({ 25 | memory: new BufferMemory({ returnMessages: true, memoryKey: "history" }), 26 | prompt: chatPrompt, 27 | llm: chat, 28 | }); 29 | 30 | const response = await chain.call({ 31 | input: "What is the capital of France?", 32 | }); 33 | const response2 = await chain.call({ 34 | input: "What is a great place to see there?", 35 | }); 36 | 37 | console.log(response2); 38 | -------------------------------------------------------------------------------- /08_agents.js: -------------------------------------------------------------------------------- 1 | import { config } from "dotenv"; 2 | config(); 3 | 4 | import { ChatOpenAI } from "langchain/chat_models/openai"; 5 | import { initializeAgentExecutorWithOptions } from "langchain/agents"; 6 | import { Calculator } from "langchain/tools/calculator"; 7 | 8 | process.env.LANGCHAIN_HANDLER = "langchain"; 9 | const model = new ChatOpenAI({ temperature: 0 }); 10 | const tools = [new Calculator()]; 11 | 12 | const executor = await initializeAgentExecutorWithOptions(tools, model, { 13 | agentType: "chat-conversational-react-description", 14 | verbose: true, 15 | }); 16 | 17 | const input0 = "What is the capital of France?"; 18 | 19 | const result0 = await executor.call({ input: input0 }); 20 | console.log(result0); 21 | 22 | const input1 = "What is 100 devided by 25?"; 23 | 24 | const result1 = await executor.call({ input: input1 }); 25 | console.log(result1); 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # LangChain-JS-Crash-course 2 | 3 | This repository contains a series of example scripts showcasing the usage of Langchain, a JavaScript library for creating conversational AI applications. 
4 | 5 | - `00_basics.js`: Introduces the basics of using the OpenAI API without Langchain. 6 | - `01_first_chain.js`: Demonstrates how to create your first conversation chain in Langchain. 7 | - `02_simplesequentialchain.js`: Provides a simple example of creating a sequential conversation chain. 8 | - `03_sequentialchain.js`: Gives a more detailed walkthrough of creating and utilizing a sequential conversation chain in Langchain. 9 | - `04_parsers.js`: Shows how to use parsers to process input and output in a conversation chain. 10 | - `05_indexes.js`: Explains how to create and use indexes in Langchain for efficient retrieval of information. 11 | - `06_usestore.js`: Guides on how to utilize the Vector Databases in Langchain for maintaining and retrieving information which was not trained into the model. 12 | - `07_chat.js`: Showcases how to create a chat bot in Langchain, forming the basis of a conversational AI application. 13 | - `08_agents.js`: Illustrates how to create and use agents in Langchain, which are autonomous entities that can interact within a conversation chain. 14 | 15 | To run these examples, clone the git repository and run `npm install` to install the dependencies. 16 | You need to create a `.env` file and add your API Key for OpenAI like this: `OPENAI_API_KEY=sk-...` 17 | 18 | This code utilizes ES6 modules to allow `import` statements and `async/await` within NodeJS. 
19 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "langchainjs", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "type": "module", 10 | "target": "ES2020", 11 | "module": "nodenext", 12 | "repository": { 13 | "type": "git", 14 | "url": "git+https://github.com/Coding-Crashkurse/LangChain-JS-Full-Course.git" 15 | }, 16 | "keywords": [], 17 | "author": "", 18 | "license": "ISC", 19 | "bugs": { 20 | "url": "https://github.com/Coding-Crashkurse/LangChain-JS-Full-Course/issues" 21 | }, 22 | "homepage": "https://github.com/Coding-Crashkurse/LangChain-JS-Full-Course#readme", 23 | "dependencies": { 24 | "dotenv": "^16.3.1", 25 | "faiss-node": "^0.2.1", 26 | "langchain": "^0.0.96", 27 | "openai": "^3.3.0" 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /restaurant.txt: -------------------------------------------------------------------------------- 1 | Q: Can you tell me about your signature dishes? 2 | A: Our signature dishes include our locally-sourced Steak with Truffle Butter, and our Seasonal Vegetable Pasta, which changes based on what's fresh and in season. 3 | 4 | Q: What's included in your Seafood Platter? 5 | A: Our Seafood Platter includes a selection of the freshest catch of the day, including oysters, prawns, clams, and grilled fish, served with our house special sauce. 6 | 7 | Q: Do you have any specials or promotions running currently? 8 | A: Yes, we have a Happy Hour promotion from 3 to 5 p.m. on weekdays, with special prices on selected appetizers and drinks. 9 | 10 | Q: What are the ingredients in your gluten-free options? 11 | A: Our gluten-free dishes are prepared using a variety of ingredients that don't contain gluten. 
Some options include our Quinoa Salad and our Grilled Chicken with Roasted Vegetables. 12 | 13 | Q: What steps is your restaurant taking to ensure safety amid the ongoing pandemic? 14 | A: We adhere to strict health and safety protocols, including regular sanitization, maintaining physical distance between tables, and providing hand sanitizers for customers. We also offer contactless pickup and delivery options. 15 | 16 | Q: Can I request alterations to a dish due to allergies? 17 | A: Absolutely, we strive to accommodate all of our customers' needs. Please inform our staff about any allergies you have, and we'll do our best to modify the dish accordingly. 18 | 19 | Q: What are the hours of operation for your restaurant? 20 | A: Our restaurant is open from 11 a.m. to 10 p.m. from Monday to Saturday. On Sundays, we open at 12 p.m. and close at 9 p.m. 21 | 22 | Q: What type of cuisine does your restaurant serve? 23 | A: Our restaurant specializes in contemporary American cuisine with an emphasis on local and sustainable ingredients. 24 | 25 | Q: Do you offer vegetarian or vegan options? 26 | A: Yes, we have a range of dishes to cater to vegetarians and vegans. Please let our staff know about any dietary restrictions you have when you order. --------------------------------------------------------------------------------