├── .env.example ├── .gitignore ├── README.md ├── agents ├── ResearchAgent-soln.js └── ResearchAgent.js ├── app ├── Footer.jsx ├── HomeClient.js ├── Navbar.jsx ├── _document.js ├── components │ ├── Button.jsx │ ├── ButtonContainer.jsx │ ├── ChatHistory.jsx │ ├── Gallery.jsx │ ├── HamburgerMenu.jsx │ ├── Input.jsx │ ├── PageContainer.jsx │ ├── PageHeader.jsx │ ├── PromptBox.jsx │ ├── Result.jsx │ ├── ResultStreaming.jsx │ ├── ResultWithSources.jsx │ ├── Results.jsx │ ├── Title.jsx │ ├── TwoColumnLayout.jsx │ └── hamburgerMenu.css ├── content-generator │ └── page.jsx ├── favicon.ico ├── globals.css ├── layout.jsx ├── memory │ ├── memory-solution.jsx │ └── page.jsx ├── page-template │ └── page.jsx ├── page.jsx ├── pdf │ ├── page-namespace.jsx │ └── page.jsx ├── resume-reader │ └── page.jsx ├── streaming │ ├── page.jsx │ └── streaming-solution.jsx ├── styles │ ├── InstrumentSans-VariableFont_wdth,wght.ttf │ ├── burger.css │ └── fonts.js └── video-chat │ └── page.jsx ├── data ├── document_loaders │ ├── bitcoin.pdf │ └── naval-ravikant-book.pdf └── resumes │ ├── resume_aubrey_graham.pdf │ ├── resume_joanna_smith.pdf │ └── resume_kaito_esquivel.pdf ├── jsconfig.json ├── next.config.js ├── package-lock.json ├── package.json ├── pages └── api │ ├── chatcompletions.js │ ├── content-generator.js │ ├── memory.js │ ├── pdf-query.js │ ├── pdf-upload.js │ ├── resume-query-metadata.js │ ├── resume-upload.js │ ├── solutions │ ├── chatcompletions-soln.js │ ├── content-generator-soln.js │ ├── memory-soln.js │ ├── pdf-query-namespace-soln.js │ ├── pdf-query-soln.js │ ├── pdf-upload-namespace-soln.js │ ├── pdf-upload-soln.js │ ├── resume-query-metadata-soln.js │ ├── resume-upload-soln.js │ ├── streaming-soln.js │ └── video-chat-soln.js │ ├── streaming.js │ └── video-chat.js ├── playground ├── quickstart-soln.mjs └── quickstart.mjs ├── postcss.config.js ├── public ├── assets │ └── images │ │ ├── brain.png │ │ ├── chatbot.png │ │ ├── green-square.png │ │ ├── pdf.png │ │ ├── robohr.png │ │ ├── stream.png │ │ ├── tools.png │ │ ├── wizard.png │ │ └── youtube.png ├── hamburger.svg ├── next.svg └── vercel.svg ├── tailwind.config.js ├── tools ├── SerpAPI.js ├── WebBrowser.js └── solutions │ ├── SerpAPI-soln.js │ └── WebBrowser-soln.js ├── tsconfig.json └── utils ├── extractVideoId.js ├── getVideoMetaData.js └── solutions ├── extractVideoId-soln.js └── getVideoMetadata-soln.js /.env.example: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY= 2 | SERPAPI_API_KEY= 3 | PINECONE_API_KEY= 4 | PINECONE_ENVIRONMENT= 5 | PINECONE_INDEX= -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
2 | 
3 | # dependencies
4 | /node_modules
5 | /.pnp
6 | .pnp.js
7 | 
8 | # testing
9 | /coverage
10 | 
11 | # next.js
12 | /.next/
13 | /out/
14 | 
15 | # production
16 | /build
17 | 
18 | # misc
19 | .DS_Store
20 | *.pem
21 | 
22 | # debug
23 | npm-debug.log*
24 | yarn-debug.log*
25 | yarn-error.log*
26 | 
27 | # local env files
28 | .env*.local
29 | .env
30 | # vercel
31 | .vercel
32 | 
33 | # typescript
34 | *.tsbuildinfo
35 | next-env.d.ts
36 | 
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
1 | # Langchain Course
2 | 
3 | This course teaches you how to build AI applications using [Langchain](https://langchain.org/) and [OpenAI's API](https://openai.com/api/) with [Next.js](https://nextjs.org/).
4 | 
5 | ## Getting Started
6 | 
7 | To get started, clone this repository and install the dependencies:
8 | 
9 | Clone the repository
10 | 
11 | ```
12 | git clone https://github.com/shawnesquivel/openai-javascript-course.git
13 | ```
14 | 
15 | Navigate to the folder
16 | 
17 | ```
18 | cd openai-javascript-course
19 | ```
20 | 
21 | Install dependencies from package.json
22 | 
23 | ```
24 | npm install
25 | ```
26 | 
27 | Then, run the development server:
28 | 
29 | ```
30 | npm run dev
31 | # or
32 | yarn dev
33 | ```
34 | 
35 | Open [http://localhost:3000](http://localhost:3000) with your browser to see the app.
36 | 
37 | ## Course Outline
38 | 
39 | This course covers the following topics:
40 | 
41 | Introduction to AI with JavaScript
42 | 
43 | - How to set up the OpenAI API
44 | - Build a PDF chatbot
45 | - Learn to use AI tools from Langchain
46 | - Automate social media content generation
47 | - Deploying a Langchain app for FREE
48 | 
49 | Each topic has code examples and exercises to help you learn!
50 | 
51 | ## Deploy on Vercel
52 | 
53 | The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js.
54 | 
55 | Check out our [Next.js deployment documentation](https://nextjs.org/docs/deployment) for more details.
56 | 
57 | ## Feedback and Contributions
58 | 
59 | Your feedback and contributions are welcome! Please feel free to open an issue or submit a pull request.
60 | 
-------------------------------------------------------------------------------- /agents/ResearchAgent-soln.js: --------------------------------------------------------------------------------
1 | import { ChatOpenAI } from "langchain/chat_models/openai";
2 | import { LLMChain } from "langchain/chains";
3 | import { ZeroShotAgent } from "langchain/agents";
4 | import {
5 |   ChatPromptTemplate,
6 |   HumanMessagePromptTemplate,
7 |   SystemMessagePromptTemplate,
8 | } from "langchain/prompts";
9 | import { AgentExecutor } from "langchain/agents";
10 | import SerpAPITool from "../tools/SerpAPI";
11 | import WebBrowserTool from "../tools/WebBrowser";
12 | 
13 | /**
14 |  *
15 |  * WARNING: THIS IS THE SOLUTION! Please try coding before viewing this.
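 *
 * Overview: this agent wires two tools (Google search via SerpAPI, plus a
 * web browser tool) into a ZeroShotAgent, then wraps that in an AgentExecutor
 * so we can run queries against it. A sketch of the intended usage, from a
 * hypothetical caller such as an API route:
 *
 *   const answer = await ResearchAgent("Ada Lovelace");
 *   // -> a short answer string, or "Error in completing research" on failure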
16 |  *
17 |  */
18 | const ResearchAgent = async (topic) => {
19 |   console.log({ topic });
20 |   try {
21 |     // We'll give it the ability to search Google and also browse the web
22 |     // show the importance of returnDirect
23 |     const SerpAPI = SerpAPITool();
24 |     const WebBrowser = WebBrowserTool();
25 | 
26 |     console.log(SerpAPI.returnDirect);
27 |     console.log(WebBrowser.returnDirect);
28 |     // We put them into an array of tools
29 |     const tools = [SerpAPI, WebBrowser];
30 | 
31 |     // We'll use ZeroShotReactDescription, which is the recommended agent for chat models
32 |     // https://js.langchain.com/docs/modules/agents/agents/
33 |     // The cool thing is that Langchain gives us a method that can create prompts specifically for that agent
34 |     const promptTemplate = ZeroShotAgent.createPrompt(tools, {
35 |       prefix: `Answer the following questions as best you can. You have access to the following tools:`,
36 |       suffix: `Begin! Answer concisely. It's OK to say you don't know.`,
37 |     });
38 | 
39 |     // Then we'll initialize it with a prompt template again
40 |     const chatPrompt = ChatPromptTemplate.fromPromptMessages([
41 |       new SystemMessagePromptTemplate(promptTemplate),
42 |       HumanMessagePromptTemplate.fromTemplate(`{input}`),
43 |     ]);
44 | 
45 |     // And we'll initialize the model, which is ChatOpenAI since we're using a chat agent
46 |     const chat = new ChatOpenAI({});
47 |     // We'll create an LLM chain, which is just a prompt template and an LLM or chat model
48 |     const llmChain = new LLMChain({
49 |       prompt: chatPrompt,
50 |       llm: chat,
51 |     });
52 |     // Then we'll use that LLM chain as the basis of the agent,
53 |     // so basically our agent is made up of: tools, an LLM, and prompt templates, making it highly customizable to our needs!
54 |     const agent = new ZeroShotAgent({
55 |       llmChain,
56 |       allowedTools: tools.map((tool) => tool.name),
57 |     });
58 |     // Now we'll create an Executor instance, which allows us to make queries to the agent
59 | 
60 |     const executor = AgentExecutor.fromAgentAndTools({
61 |       agent,
62 |       tools,
63 |       returnIntermediateSteps: false,
64 |       // Max iterations is important, because sometimes our agent can get confused. This can be from a variety of factors such as:
65 |       // not using the right agent for the tool
66 |       // prompts are not perfect – check out the document for crafting the perfect prompt!
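      // (note: each iteration is one think-act-observe cycle, i.e. the model picks
      // a tool, runs it, and reads the result, so maxIterations: 3 allows at most
      // three tool calls before the executor cuts the run short)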
67 | // always set this to a low number to start, or if you're not going to watch the output 68 | maxIterations: 3, 69 | // Always set verbose to true, there was a case where I didn't do this and realized my agent was going in loops 70 | verbose: true, 71 | }); 72 | 73 | const result = await executor.run(`Who is ${topic}?`); 74 | 75 | return result; 76 | } catch (err) { 77 | console.error(err); 78 | return "Error in completing research"; 79 | } 80 | }; 81 | export default ResearchAgent; 82 | -------------------------------------------------------------------------------- /agents/ResearchAgent.js: -------------------------------------------------------------------------------- 1 | import { ChatOpenAI } from "langchain/chat_models/openai"; 2 | import { LLMChain } from "langchain/chains"; 3 | import { ZeroShotAgent } from "langchain/agents"; 4 | import { 5 | ChatPromptTemplate, 6 | HumanMessagePromptTemplate, 7 | SystemMessagePromptTemplate, 8 | } from "langchain/prompts"; 9 | import { AgentExecutor } from "langchain/agents"; 10 | import SerpAPITool from "../tools/SerpAPI"; 11 | import WebBrowserTool from "../tools/WebBrowser"; 12 | 13 | const ResearchAgent = async (topic) => { 14 | console.log({ topic }); 15 | 16 | try { 17 | // do stuff 18 | 19 | return result; 20 | } catch (err) { 21 | console.error(err); 22 | } 23 | }; 24 | 25 | export default ResearchAgent; 26 | -------------------------------------------------------------------------------- /app/Footer.jsx: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | import { sourceCodePro } from "./styles/fonts"; 3 | 4 | const Footer = () => { 5 | const year = new Date().getFullYear(); 6 | return ( 7 | 20 | ); 21 | }; 22 | 23 | export default Footer; 24 | -------------------------------------------------------------------------------- /app/HomeClient.js: -------------------------------------------------------------------------------- 1 | // import { useState } from "react"; 2 | 3 | // export default function HomeClient() { 4 | // // Define query parameters as an object 5 | // const queryParams = { 6 | // videoId: "O_9JoimRj8w", 7 | // }; 8 | 9 | // // Use URLSearchParams to create a query string 10 | // const queryString = new URLSearchParams(queryParams).toString(); 11 | 12 | // // Append the query string to the URL 13 | // const url = `api/generate?${queryString}`; 14 | 15 | // // Declare a state variable to store fetched data 16 | // const [data, setData] = useState(null); 17 | // const [error, setError] = useState(null); 18 | 19 | // const handleClick = async () => { 20 | // try { 21 | // const res = await fetch(url); 22 | // if (!res.ok) { 23 | // console.error("Error fetching data:", res.statusText); 24 | // setError(res.statusText); 25 | // } else { 26 | // const responseData = await res.json(); 27 | // console.log(responseData); 28 | // setData(responseData); 29 | // setError(null); 30 | // } 31 | // } catch (error) { 32 | // console.error("Error fetching data:", error); 33 | // setError(error.message); 34 | // } 35 | // }; 36 | 37 | // return ( 38 | //
39 | // 40 | // {data && ( 41 | //
42 | //

{data.id}

43 | //
44 | // )} 45 | // {error &&
Error fetching data: {error}
} 46 | //
47 | // ); 48 | // } 49 | -------------------------------------------------------------------------------- /app/Navbar.jsx: -------------------------------------------------------------------------------- 1 | "use client"; 2 | 3 | import React, { useEffect, useState } from "react"; 4 | 5 | import Link from "next/link"; 6 | import { sourceCodePro } from "./styles/fonts"; 7 | import HamburgerMenu from "./components/HamburgerMenu"; 8 | const Navbar = () => { 9 | const [isClient, setIsClient] = useState(false); 10 | 11 | useEffect(() => { 12 | setIsClient(true); 13 | }, []); 14 | 15 | // const Navbar = () => { 16 | return ( 17 | 56 | ); 57 | }; 58 | 59 | export default Navbar; 60 | -------------------------------------------------------------------------------- /app/_document.js: -------------------------------------------------------------------------------- 1 | // pages/_document.js 2 | import Document, { Html, Head, Main, NextScript } from "next/document"; 3 | 4 | class MyDocument extends Document { 5 | render() { 6 | return ( 7 | 8 | 9 | 10 | 11 | 12 | {/* Any other head tags you want to include */} 13 | 14 | 15 |
16 | 
17 | 
18 | 
19 |     );
20 |   }
21 | }
22 | 
23 | export default MyDocument;
24 | 
-------------------------------------------------------------------------------- /app/components/Button.jsx: --------------------------------------------------------------------------------
1 | import React from "react";
2 | 
3 | const Button = ({ color, handleSubmit, endpoint, buttonText }) => {
4 |   const colorClasses = {
5 |     red: "bg-red-500 hover:bg-red-600",
6 |     blue: "bg-blue-500 hover:bg-blue-600",
7 |     green: "bg-green-500 hover:bg-green-600",
8 |     pink: "bg-pink-500 hover:bg-pink-600",
9 |     // Add more colors as needed
10 |   };
11 | 
12 |   const colorClass = colorClasses[color] || "bg-white hover:bg-white"; // Default to white if the color prop isn't recognized
13 | 
14 |   return (
15 | 
21 |   );
22 | };
23 | 
24 | export default Button;
25 | 
-------------------------------------------------------------------------------- /app/components/ButtonContainer.jsx: --------------------------------------------------------------------------------
1 | import React from "react";
2 | 
3 | const ButtonContainer = ({ children }) => {
4 |   return
{children}
; 5 | }; 6 | 7 | export default ButtonContainer; 8 | -------------------------------------------------------------------------------- /app/components/ChatHistory.jsx: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | 3 | const ChatHistory = ({ chatHistory }) => { 4 | return ( 5 | chatHistory && ( 6 |
7 | {chatHistory.map((message, index) => ( 8 |
9 |
10 |
14 |

{message.human}

15 |
16 |
17 |
18 |
21 |

{message.chatbot}

22 |
23 |
24 |
25 | ))} 26 |
27 | ) 28 | ); 29 | }; 30 | 31 | export default ChatHistory; 32 | -------------------------------------------------------------------------------- /app/components/Gallery.jsx: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | import Image from "next/image"; 3 | 4 | const ImageCard = ({ src, alt, title }) => ( 5 |
6 |
7 | {alt} 14 |
15 |
16 |

{title}

17 |
18 |
19 | ); 20 | 21 | const Gallery = () => { 22 | const items = [ 23 | { src: "/assets/images/pdf.png", alt: "PDF GPT", title: "PDF-GPT" }, 24 | { src: "/assets/images/brain.png", alt: "Memory", title: "Memory" }, 25 | { src: "/assets/images/stream.png", alt: "Stream", title: "Stream" }, 26 | { src: "/assets/images/youtube.png", alt: "YT Video", title: "YT Video" }, 27 | { 28 | src: "/assets/images/wizard.png", 29 | alt: "Content Wizard", 30 | title: "Content Wizard", 31 | }, 32 | { src: "/assets/images/robohr.png", alt: "RoboHR", title: "RoboHR" }, 33 | { src: "/assets/images/tools.png", alt: "Tools", title: "Tools" }, 34 | ]; 35 | 36 | return ( 37 |
38 | 44 |
45 | {items.slice(1, 4).map((item, index) => ( 46 | 52 | ))} 53 |
54 | 55 |
56 | {items.slice(5).map((item, index) => ( 57 | 63 | ))} 64 |
65 |
66 | ); 67 | }; 68 | 69 | export default Gallery; 70 | -------------------------------------------------------------------------------- /app/components/HamburgerMenu.jsx: -------------------------------------------------------------------------------- 1 | import React, { useState, useEffect, useRef } from "react"; 2 | import styles from "./hamburgerMenu.css"; 3 | import Image from "next/image"; 4 | import { pressStart2P, sourceCodePro, instrumentSans } from "../styles/fonts"; 5 | 6 | const HamburgerMenu = () => { 7 | const [isOpen, setIsOpen] = useState(false); 8 | const menuRef = useRef(null); 9 | const buttonRef = useRef(null); 10 | 11 | const toggleMenu = () => { 12 | setIsOpen(!isOpen); 13 | }; 14 | 15 | useEffect(() => { 16 | const handleDocumentClick = (event) => { 17 | if ( 18 | !menuRef.current.contains(event.target) && 19 | !buttonRef.current.contains(event.target) 20 | ) { 21 | setIsOpen(false); 22 | } 23 | }; 24 | 25 | const handleKeyDown = (event) => { 26 | if (event.key === "Escape") { 27 | setIsOpen(false); 28 | } 29 | }; 30 | 31 | if (isOpen) { 32 | document.addEventListener("click", handleDocumentClick); 33 | document.addEventListener("keydown", handleKeyDown); 34 | } 35 | 36 | return () => { 37 | document.removeEventListener("click", handleDocumentClick); 38 | document.removeEventListener("keydown", handleKeyDown); 39 | }; 40 | }, [isOpen]); 41 | 42 | return ( 43 |
47 | 60 | 223 |
224 | ); 225 | }; 226 | 227 | export default HamburgerMenu; 228 | -------------------------------------------------------------------------------- /app/components/Input.jsx: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | 3 | const Input = ({ 4 | label, 5 | inputValue, 6 | onChange, 7 | placeHolderText, 8 | inputWidth, 9 | labelWidth, 10 | }) => { 11 | return ( 12 |
13 | 18 | 25 |
26 | ); 27 | }; 28 | 29 | export default Input; 30 | -------------------------------------------------------------------------------- /app/components/PageContainer.jsx: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | 3 | const PageContainer = ({ children }) => { 4 | return
{children}
; 5 | }; 6 | 7 | export default PageContainer; 8 | -------------------------------------------------------------------------------- /app/components/PageHeader.jsx: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | import { pressStart2P, instrumentSans } from "../styles/fonts"; 3 | 4 | const PageHeader = ({ heading, boldText, description }) => { 5 | return ( 6 | <> 7 |

8 | {heading} 9 |

10 |

11 | {boldText} {description} 12 |

{" "} 13 | 14 | ); 15 | }; 16 | 17 | export default PageHeader; 18 | -------------------------------------------------------------------------------- /app/components/PromptBox.jsx: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | import { sourceCodePro } from "../styles/fonts"; 3 | 4 | const PromptBox = ({ 5 | prompt, 6 | handlePromptChange, 7 | handleSubmit, 8 | placeHolderText, 9 | buttonText, 10 | error, 11 | disableButton, 12 | labelText, 13 | }) => { 14 | const handleKeyDown = (e) => { 15 | if (e.key === "Enter") { 16 | handleSubmit(); 17 | } 18 | }; 19 | return ( 20 | <> 21 |
22 | {labelText && ( 23 | 26 | )} 27 | 28 | 36 | 37 | {!disableButton && ( 38 | 44 | )} 45 |
46 |

{error}

47 | 48 | ); 49 | }; 50 | 51 | export default PromptBox; 52 | -------------------------------------------------------------------------------- /app/components/Result.jsx: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | 3 | const Result = ({ data }) => { 4 | return ( 5 |
6 | {/* If data is a string */} 7 | {typeof data === "string" && ( 8 |

{data}

9 | )} 10 | {/* If data is an object */} 11 | {data &&

{data?.output}

} 12 | 13 | {/* If data has source documents (e.g. when querying from a VectorDBQAChain and returnSourceDocuments is true) */} 14 | {data && 15 | data.sourceDocuments && 16 | data.sourceDocuments.map((doc, index) => ( 17 |
18 |

19 | Source {index}: {doc.pageContent} 20 |

21 |

From: {doc.metadata.source}

22 |
23 | ))} 24 |
25 | ); 26 | }; 27 | 28 | export default Result; 29 | -------------------------------------------------------------------------------- /app/components/ResultStreaming.jsx: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | 3 | const ResultStreaming = ({ data }) => { 4 | return ( 5 |
6 | {/* If data is a string */} 7 | {typeof data === "string" && ( 8 |
{data}
9 | )} 10 | {/* If data is an object */} 11 | {data &&

{data?.output}

} 12 | 13 | {/* If data has source documents (e.g. when querying from a VectorDBQAChain and returnSourceDocuments is true) */} 14 | {data && 15 | data.sourceDocuments && 16 | data.sourceDocuments.map((doc, index) => ( 17 |
18 |

19 | Source {index}: {doc.pageContent} 20 |

21 |

From: {doc.metadata.source}

22 |
23 | ))} 24 |
25 | ); 26 | }; 27 | 28 | export default ResultStreaming; 29 | -------------------------------------------------------------------------------- /app/components/ResultWithSources.jsx: -------------------------------------------------------------------------------- 1 | import React, { useState, useEffect, useRef } from "react"; 2 | import Image from "next/image"; 3 | 4 | const MessageItem = ({ message, pngFile, isLast }) => { 5 | const userImage = "/assets/images/green-square.png"; 6 | const botImage = `/assets/images/${pngFile}.png`; 7 | const [showSources, setShowSources] = useState(false); 8 | 9 | return ( 10 |
11 |
12 |
13 | {`${message.type}'s 22 |
23 |

27 | {message.text} 28 |

29 |
30 | 31 | {message.sourceDocuments && ( 32 |
33 | 39 | {showSources && 40 | message.sourceDocuments.map((document, docIndex) => ( 41 |
42 |

43 | Source {docIndex + 1}: 44 |

45 |

46 | {document.pageContent} 47 |

48 |
49 |                   {JSON.stringify(document.metadata, null, 2)}
50 |                 
51 |
52 | ))} 53 |
54 | )} 55 |
56 | ); 57 | }; 58 | 59 | const ResultWithSources = ({ messages, pngFile, maxMsgs }) => { 60 | const messagesContainerRef = useRef(); 61 | 62 | useEffect(() => { 63 | if (messagesContainerRef.current) { 64 | const element = messagesContainerRef.current; 65 | element.scrollTop = element.scrollHeight; 66 | } 67 | }, [messages]); 68 | 69 | // E.g. Before we reach the max messages, we should add the justify-end property, which pushes messages to the bottom 70 | const maxMsgToScroll = maxMsgs || 5; 71 | 72 | return ( 73 |
79 | {messages && 80 | messages.map((message, index) => ( 81 | 82 | ))} 83 |
84 | ); 85 | }; 86 | 87 | export default ResultWithSources; 88 | -------------------------------------------------------------------------------- /app/components/Results.jsx: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | 3 | /** 4 | * For an array of objects, 5 | * [ {}, {}, {}] 6 | * 7 | * We can return a line for each result 8 | * 9 | */ 10 | const Results = ({ data }) => { 11 | return ( 12 |
13 | {data?.map((dataObj, index) => ( 14 |

15 | {dataObj.summary} 16 |

17 | ))} 18 |
19 | ); 20 | }; 21 | 22 | export default Results; 23 | -------------------------------------------------------------------------------- /app/components/Title.jsx: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | 3 | const Title = ({ emoji, headingText }) => { 4 | return ( 5 | <> 6 |

{emoji}

7 |

{headingText.toUpperCase()}

8 | 9 | ); 10 | }; 11 | 12 | export default Title; 13 | -------------------------------------------------------------------------------- /app/components/TwoColumnLayout.jsx: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | 3 | const TwoColumnLayout = ({ leftChildren, rightChildren }) => ( 4 |
5 | {/* Description */} 6 |
{leftChildren}
7 | {/* Chat */} 8 |
{rightChildren}
9 |
10 | ); 11 | 12 | export default TwoColumnLayout; 13 | -------------------------------------------------------------------------------- /app/components/hamburgerMenu.css: -------------------------------------------------------------------------------- 1 | .menu-container { 2 | position: relative; 3 | } 4 | 5 | .menu { 6 | position: absolute; 7 | left: 50%; 8 | top: 3rem; 9 | transform: translate(-50%, 0); 10 | background-color: #181818; 11 | color: #F5F5F5; 12 | display: none; 13 | } 14 | 15 | .menu.open { 16 | display: block; 17 | } 18 | 19 | .menu ul { 20 | list-style: none; 21 | padding: 0; 22 | margin: 0; 23 | } 24 | 25 | .menu li { 26 | margin-bottom: 10px; 27 | } 28 | 29 | .menu li a { 30 | text-decoration: none; 31 | } 32 | 33 | .overlay { 34 | position: fixed; 35 | top: 0; 36 | left: 0; 37 | width: 100%; 38 | height: 100%; 39 | background-color: rgba(0, 0, 0, 0.5); 40 | z-index: 1; 41 | } 42 | 43 | 44 | /* Add any additional styling as needed */ -------------------------------------------------------------------------------- /app/content-generator/page.jsx: -------------------------------------------------------------------------------- 1 | "use client"; 2 | 3 | import React, { useState } from "react"; 4 | import PageHeader from "../components/PageHeader"; 5 | import PromptBox from "../components/PromptBox"; 6 | import ResultWithSources from "../components/ResultWithSources"; 7 | import Title from "../components/Title"; 8 | import TwoColumnLayout from "../components/TwoColumnLayout"; 9 | 10 | /** 11 | * 12 | * Module 5: AI Content Generator 13 | * 14 | * Use this to create new content from a piece of content! 15 | * 16 | */ 17 | const ContentGenerator = () => { 18 | // Follw up: Write me a tweet about pedro pascal. 19 | const [prompt, setPrompt] = useState( 20 | "https://www.youtube.com/watch?v=O_9JoimRj8w" 21 | ); 22 | const [topic, setTopic] = useState("Pedro Pascal"); 23 | const [error, setError] = useState(null); 24 | const [firstMsg, setFirstMsg] = useState(true); 25 | const [messages, setMessages] = useState([ 26 | { 27 | text: "Hi there! I'm your personal YouTube video script generator. If you give me a YouTube URL and topic, I can transform it into a unique video script. Send me a YouTube URL to get started.", 28 | }, 29 | ]); 30 | 31 | const handlePromptChange = (e) => { 32 | setPrompt(e.target.value); 33 | }; 34 | const handleTopicChange = (e) => { 35 | setTopic(e.target.value); 36 | }; 37 | 38 | // Make sure to change the API route 39 | const handleSubmit = async () => { 40 | try { 41 | // Push the user's message into the messages array 42 | setMessages((prevMessages) => [ 43 | ...prevMessages, 44 | { text: prompt, type: "user", sourceDocuments: null }, 45 | ]); 46 | 47 | const response = await fetch(`/api/content-generator`, { 48 | method: "POST", 49 | headers: { 50 | "Content-Type": "application/json", 51 | }, 52 | body: JSON.stringify({ prompt: prompt, topic: topic, firstMsg }), 53 | }); 54 | 55 | const searchRes = await response.json(); 56 | 57 | console.log({ searchRes }); 58 | 59 | if (!response.ok) { 60 | throw new Error(searchRes.error); 61 | } 62 | 63 | // Push the response into the messages array 64 | setMessages((prevMessages) => [ 65 | ...prevMessages, 66 | { 67 | text: searchRes.output.text, 68 | type: "bot", 69 | }, 70 | ]); 71 | setFirstMsg(false); 72 | setPrompt(""); 73 | setError(""); 74 | } catch (err) { 75 | console.error(err); 76 | setError("Error fetching transcript. 
Please try again."); 77 | } 78 | }; 79 | 80 | return ( 81 | <> 82 | 83 | <TwoColumnLayout 84 | leftChildren={ 85 | <> 86 | <PageHeader 87 | heading="Automated Content Generator" 88 | boldText="Doing your own manual research is so 2022. Let's automate it." 89 | description="This tool uses the agents to create a unique video script for you. Head over to Module X to get started!" 90 | /> 91 | </> 92 | } 93 | rightChildren={ 94 | <> 95 | {/* Added max messages */} 96 | <ResultWithSources 97 | messages={messages} 98 | pngFile="wizard" 99 | maxMsgs={3} 100 | /> 101 | <PromptBox 102 | prompt={topic} 103 | handlePromptChange={handleTopicChange} 104 | handleSubmit={handleSubmit} 105 | error={error} 106 | placeHolderText={"Enter a topic"} 107 | disableButton={true} 108 | labelText="Topic" 109 | /> 110 | <PromptBox 111 | prompt={prompt} 112 | handlePromptChange={handlePromptChange} 113 | handleSubmit={handleSubmit} 114 | placeHolderText={ 115 | messages.length === 1 116 | ? "Enter a youtube url, e.g., https://www.youtube.com/watch?v=O_9JoimRj8w" 117 | : "Ask a follow up question" 118 | } 119 | error={error} 120 | labelText="Chat" 121 | /> 122 | </> 123 | } 124 | /> 125 | </> 126 | ); 127 | }; 128 | 129 | export default ContentGenerator; 130 | -------------------------------------------------------------------------------- /app/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/app/favicon.ico -------------------------------------------------------------------------------- /app/globals.css: -------------------------------------------------------------------------------- 1 | @tailwind base; 2 | @tailwind components; 3 | @tailwind utilities; 4 | 5 | 6 | /* @import url('https://fonts.googleapis.com/css2?family=Instrument+Sans:ital,wght@0,400;0,500;1,400&family=Press+Start+2P&family=Source+Code+Pro&display=swap'); */ 7 | /* :root { 8 | --foreground-rgb: 0, 0, 0; 9 | --background-start-rgb: 214, 219, 220; 10 | --background-end-rgb: 255, 255, 255; 11 | } 12 | 13 | @media (prefers-color-scheme: dark) { 14 | :root { 15 | --foreground-rgb: 255, 255, 255; 16 | --background-start-rgb: 0, 0, 0; 17 | --background-end-rgb: 0, 0, 0; 18 | } 19 | } */ 20 | 21 | html, 22 | body { 23 | background-color: #f9fafb; 24 | /* Light gray */ 25 | /* Black text */ 26 | font-family: inter-var, -apple-system, BlinkMacSystemFont, "Segoe UI", Helvetica, "Apple Color Emoji", Arial, sans-serif, "Segoe UI Emoji", "Segoe UI Symbol"; 27 | /* Add any other styles you want to apply globally */ 28 | } 29 | 30 | .codeblock-chatgpt { 31 | font-family: 'Söhne Mono', Monaco, Andale Mono, Ubuntu Mono, monospace; 32 | font-size: 0.875em; 33 | font-weight: 400; 34 | line-height: 1.7142857; 35 | background-color: #282c34; 36 | color: #00a67d; 37 | padding: 1rem; 38 | border-radius: 4px; 39 | overflow-x: auto; 40 | overflow-y: auto; 41 | white-space: pre-wrap; 42 | } 43 | 44 | /* .press-start { 45 | font-family: 'Press Start 2P'; 46 | font-style: normal; 47 | font-weight: 400; 48 | font-size: 50px; 49 | line-height: 60px; 50 | text-transform: uppercase; 51 | } */ 52 | 53 | 54 | h1, 55 | h2, 56 | h3 { 57 | font-family: 'OpenSans', sans-serif; 58 | font-weight: 400; 59 | } 60 | 61 | .Button { 62 | display: flex; 63 | flex-direction: row; 64 | align-items: center; 65 | padding: 16px 32px; 66 | gap: 16px; 67 | width: 239px; 68 | height: 54px; 69 | background: #FFFFFF; 70 | /* 600 */ 71 | border: 1px 
solid #6F6F6F; 72 | /* Drop */ 73 | box-shadow: 4px 4px 20px rgba(0, 0, 0, 0.05); 74 | border-radius: 60px; 75 | /* Inside auto layout */ 76 | flex: none; 77 | order: 0; 78 | flex-grow: 0; 79 | } -------------------------------------------------------------------------------- /app/layout.jsx: -------------------------------------------------------------------------------- 1 | import "./globals.css"; 2 | import Navbar from "./Navbar"; 3 | import Footer from "./Footer"; 4 | import { instrumentSans } from "./styles/fonts"; 5 | 6 | export const metadata = { 7 | title: "Langchain JavaScript", 8 | description: "Learn the latest AI technologies from Shawn Esquivel.", 9 | }; 10 | 11 | export default function RootLayout({ children }) { 12 | return ( 13 | <html lang="en"> 14 | <body className={`${instrumentSans.className} `}> 15 | <Navbar /> 16 | <main className="flex flex-col pt-20 px-20">{children}</main> 17 | <Footer /> 18 | </body> 19 | </html> 20 | ); 21 | } 22 | -------------------------------------------------------------------------------- /app/memory/memory-solution.jsx: -------------------------------------------------------------------------------- 1 | "use client"; 2 | import React, { useState } from "react"; 3 | import PageHeader from "../components/PageHeader"; 4 | import PromptBox from "../components/PromptBox"; 5 | import Title from "../components/Title"; 6 | import TwoColumnLayout from "../components/TwoColumnLayout"; 7 | import ResultWithSources from "../components/ResultWithSources"; 8 | import "../globals.css"; 9 | 10 | /** 11 | * 12 | * WARNING: THIS IS THE SOLUTION! Please try coding before viewing this. 13 | * 14 | */ 15 | 16 | const Memory = () => { 17 | const [prompt, setPrompt] = useState(""); 18 | const [error, setError] = useState(null); 19 | const [messages, setMessages] = useState([ 20 | { 21 | text: "Hi there! What's your name and favourite food?", 22 | type: "bot", 23 | }, 24 | ]); 25 | const [firstMsg, setFirstMsg] = useState(true); 26 | 27 | const handlePromptChange = (e) => { 28 | setPrompt(e.target.value); 29 | }; 30 | 31 | const handleSubmitPrompt = async () => { 32 | console.log("sending ", prompt); 33 | try { 34 | // Update the user message 35 | setMessages((prevMessages) => [ 36 | ...prevMessages, 37 | { text: prompt, type: "user", sourceDocuments: null }, 38 | ]); 39 | 40 | const response = await fetch("/api/memory", { 41 | method: "POST", 42 | headers: { 43 | "Content-Type": "application/json", 44 | }, 45 | body: JSON.stringify({ input: prompt, firstMsg }), 46 | }); 47 | 48 | if (!response.ok) { 49 | throw new Error(`HTTP Error! Status: ${response.status}`); 50 | } 51 | 52 | setPrompt(""); 53 | // So we don't reinitialize the chain 54 | setFirstMsg(false); 55 | const searchRes = await response.json(); 56 | // Add the bot message 57 | setMessages((prevMessages) => [ 58 | ...prevMessages, 59 | { text: searchRes.output.response, type: "bot", sourceDocuments: null }, 60 | ]); 61 | 62 | console.log({ searchRes }); 63 | // Clear any old error messages 64 | setError(""); 65 | } catch (err) { 66 | console.error(err); 67 | setError(err); 68 | } 69 | }; 70 | 71 | return ( 72 | <> 73 | <Title headingText={"Memory"} emoji="🧠" /> 74 | <TwoColumnLayout 75 | leftChildren={ 76 | <> 77 | <PageHeader 78 | heading="I remember everything" 79 | boldText="Let's see if it can remember your name and favourite food. This tool will let you ask anything contained in a PDF document. " 80 | description="This tool uses Buffer Memory and Conversation Chain. 
Head over to Module X to get started!" 81 | /> 82 | </> 83 | } 84 | rightChildren={ 85 | <> 86 | <ResultWithSources messages={messages} pngFile="brain" /> 87 | <PromptBox 88 | prompt={prompt} 89 | handleSubmit={handleSubmitPrompt} 90 | error={error} 91 | handlePromptChange={handlePromptChange} 92 | /> 93 | </> 94 | } 95 | /> 96 | </> 97 | ); 98 | }; 99 | 100 | export default Memory; 101 | -------------------------------------------------------------------------------- /app/memory/page.jsx: -------------------------------------------------------------------------------- 1 | // start here 2 | -------------------------------------------------------------------------------- /app/page-template/page.jsx: -------------------------------------------------------------------------------- 1 | "use client"; 2 | 3 | import React, { useState } from "react"; 4 | import ResultWithSources from "../components/ResultWithSources"; 5 | import PromptBox from "../components/PromptBox"; 6 | import Button from "../components/Button"; 7 | import PageHeader from "../components/PageHeader"; 8 | import Title from "../components/Title"; 9 | import TwoColumnLayout from "../components/TwoColumnLayout"; 10 | import ButtonContainer from "../components/ButtonContainer"; 11 | import "../globals.css"; 12 | 13 | const AnyComponentName = () => { 14 | const [prompt, setPrompt] = useState(""); 15 | const [data, setData] = useState(""); 16 | 17 | const handlePromptChange = (e) => { 18 | setPrompt(e.target.value); 19 | }; 20 | 21 | const handleSubmit = async () => { 22 | console.log(`sending ${prompt}`); 23 | // STEP 1: Modify Endpoint 24 | const response = await fetch("/api/", { 25 | // STEP 2: Check Method 26 | method: "POST", 27 | headers: { 28 | "Content-Type": "application/json", 29 | }, 30 | body: JSON.stringify({ input: prompt }), 31 | }); 32 | 33 | const searchRes = await response.json(); 34 | // Step 3: Double check the console log and setData accordingly 35 | console.log(searchRes); 36 | setData(searchRes.output); 37 | }; 38 | 39 | return ( 40 | <> 41 | <Title emoji="💬" headingText="PDF-GPT" /> 42 | <TwoColumnLayout 43 | leftChildren={ 44 | <> 45 | <PageHeader 46 | heading="Catch Title" 47 | boldText="Bold Text" 48 | description="Description" 49 | /> 50 | <ButtonContainer> 51 | <Button 52 | handleSubmit={handleSubmit} 53 | endpoint="pdfupload-book" 54 | buttonText="Upload" 55 | /> 56 | </ButtonContainer> 57 | </> 58 | } 59 | rightChildren={ 60 | <> 61 | <ResultWithSources messages={messages} /> 62 | <PromptBox 63 | prompt={prompt} 64 | handlePromptChange={handlePromptChange} 65 | handleSubmit={() => handleSubmitQuery("/endpoint")} 66 | placeHolderText={"...."} 67 | error={error} 68 | /> 69 | </> 70 | } 71 | /> 72 | </> 73 | ); 74 | }; 75 | 76 | export default AnyComponentName; 77 | -------------------------------------------------------------------------------- /app/page.jsx: -------------------------------------------------------------------------------- 1 | import Image from "next/image"; 2 | import Gallery from "./components/Gallery"; 3 | import { pressStart2P, sourceCodePro, instrumentSans } from "./styles/fonts"; 4 | 5 | export default function Home() { 6 | return ( 7 | <div className="w-11/12 m-auto flex-col my-6"> 8 | <h1 className={`text-center ${instrumentSans.className}`}> 9 | The Home of Your AI-JS Toolkit 10 | </h1> 11 | <div className="flex flex-row justify-start"> 12 | <div className="flex flex-col items-start justify-center min-h-screen text-gray-800 py-4 px-4 sm:px-6 lg:px-8 w-6/12"> 13 | <h2 14 | 
className={`w-full text-4xl tracking-tight font-extrabold text-gray-900 sm:text-5xl md:text-6xl text-left ${pressStart2P.className}`} 15 | > 16 | Your AI-JS Bootcamp Starts Here 17 | </h2> 18 | <p 19 | className={`w-full mt-6 max-w-2xl text-center text-lg leading-7 sm:text-2xl sm:leading-9 sm:text-left lg:text-3xl ${instrumentSans.className}`} 20 | > 21 | <span className="font-bold"> 22 | Throughout this course, you'll be building stunning AI projects 23 | that are not only impressive but also have real-world 24 | applications. 25 | </span> 26 | Whether you're aiming to generate a passive income, create a 27 | personal assistant to streamline your work, or simply to enhance 28 | your portfolio, the skills and knowledge you acquire here will be 29 | instrumental in achieving your goals. 30 | </p> 31 | </div> 32 | {/* Gallery */} 33 | <Gallery /> 34 | </div> 35 | <p 36 | className={`w-full mt-4 text-center text-10 leading-7 sm:text-2xl sm:leading-9 sm:text-center lg:text-3xl ${sourceCodePro.className}`} 37 | > 38 | Remember, this journey is yours. So let's{" "} 39 | <strong>roll up our sleeves, dive in, and start building</strong>. 🔨 40 | </p> 41 | </div> 42 | ); 43 | } 44 | -------------------------------------------------------------------------------- /app/pdf/page-namespace.jsx: -------------------------------------------------------------------------------- 1 | 'use client' 2 | 3 | import React, { useState } from 'react' 4 | import ResultWithSources from '../components/ResultWithSources' 5 | import PromptBox from '../components/PromptBox' 6 | import Button from '../components/Button' 7 | import PageHeader from '../components/PageHeader' 8 | import Title from '../components/Title' 9 | import TwoColumnLayout from '../components/TwoColumnLayout' 10 | import ButtonContainer from '../components/ButtonContainer' 11 | import '../globals.css' 12 | 13 | // This functional component is responsible for loading PDFs 14 | const PDFLoader = () => { 15 | // Managing prompt, messages, and error states with useState 16 | const [prompt, setPrompt] = useState('How to get rich?') 17 | const [messages, setMessages] = useState([ 18 | { 19 | text: "Hi, I'm a Naval AI. 
What would you like to know?", 20 | type: 'bot', 21 | }, 22 | ]) 23 | const [error, setError] = useState('') 24 | const [bookId, setBookId] = useState('101') 25 | 26 | // This function updates the prompt value when the user types in the prompt box 27 | const handlePromptChange = (e) => { 28 | setPrompt(e.target.value) 29 | } 30 | 31 | const handleBookIdChange = (e) => { 32 | setBookId(e.target.value) 33 | } 34 | 35 | // This function handles the submission of the form when the user hits 'Enter' or 'Submit' 36 | // It sends a GET request to the provided endpoint with the current prompt as the query 37 | const handleSubmit = async (endpoint) => { 38 | try { 39 | console.log(`sending ${prompt}`) 40 | console.log(`using ${endpoint}`) 41 | 42 | // A GET request is sent to the backend 43 | const response = await fetch(`/api/${endpoint}?bookId=${bookId}`, { 44 | method: 'GET', 45 | }) 46 | 47 | // The response from the backend is parsed as JSON 48 | const searchRes = await response.json() 49 | console.log(searchRes) 50 | setError('') // Clear any existing error messages 51 | } catch (error) { 52 | console.log(error) 53 | setError(error.message) 54 | } 55 | } 56 | 57 | // This function handles the submission of the user's prompt when the user hits 'Enter' or 'Submit' 58 | // It sends a POST request to the provided endpoint with the current prompt in the request body 59 | const handleSubmitPrompt = async (endpoint) => { 60 | try { 61 | setPrompt('') 62 | 63 | // Push the user's message into the messages array 64 | setMessages((prevMessages) => [ 65 | ...prevMessages, 66 | { text: prompt, type: 'user', sourceDocuments: null }, 67 | ]) 68 | 69 | // A POST request is sent to the backend with the current prompt in the request body 70 | const response = await fetch(`/api/${endpoint}`, { 71 | method: 'POST', 72 | headers: { 73 | 'Content-Type': 'application/json', 74 | }, 75 | body: JSON.stringify({ input: prompt, bookId }), 76 | }) 77 | 78 | // Throw an error if the HTTP status is not OK 79 | if (!response.ok) { 80 | throw new Error(`HTTP error! status: ${response.status}`) 81 | } 82 | 83 | // Parse the response from the backend as JSON 84 | const searchRes = await response.json() 85 | 86 | console.log({ searchRes }) 87 | 88 | // Push the response into the messages array 89 | setMessages((prevMessages) => [ 90 | ...prevMessages, 91 | { 92 | text: searchRes.result.text, 93 | type: 'bot', 94 | sourceDocuments: searchRes.result.sourceDocuments, 95 | }, 96 | ]) 97 | 98 | setError('') // Clear any existing error messages 99 | } catch (error) { 100 | console.log(error) 101 | setError(error.message) 102 | } 103 | } 104 | 105 | // The component returns a two column layout with various child components 106 | return ( 107 | <> 108 | <Title emoji="💬" headingText="PDF-GPT" /> 109 | <TwoColumnLayout 110 | leftChildren={ 111 | <> 112 | <PageHeader 113 | heading="Ask Naval Anything" 114 | boldText="How to get rich? How to be happy?" 115 | description="This tool will 116 | let you ask anything contained in a PDF document. This tool uses 117 | Embeddings, Pinecone, VectorDBQAChain, and VectorStoreAgents. Head over to Module 1 to 118 | get started!" 
119 | /> 120 | <ButtonContainer> 121 | {/* <Button 122 | handleSubmit={()=>{handleSubmit('pdfupload-book')}} 123 | endpoint="pdfuploadtest" 124 | buttonText="Upload Test Data ☁️" 125 | className="Button" 126 | /> */} 127 | <Button 128 | handleSubmit={handleSubmit} 129 | endpoint="pdf-upload" 130 | buttonText="Upload Book 📚" 131 | className="Button" 132 | /> 133 | </ButtonContainer> 134 | <select value={bookId} onChange={handleBookIdChange}> 135 | <option value={101}>Bitcoin</option> 136 | <option value={102}>Naval</option> 137 | </select> 138 | </> 139 | } 140 | rightChildren={ 141 | <> 142 | <ResultWithSources messages={messages} pngFile="pdf" /> 143 | <PromptBox 144 | prompt={prompt} 145 | handlePromptChange={handlePromptChange} 146 | handleSubmit={() => handleSubmitPrompt('/pdf-query')} 147 | // handleSubmit={() => handleSubmitQuery("/pdfquery-agent")} 148 | placeHolderText={'How to get rich?'} 149 | error={error} 150 | /> 151 | </> 152 | } 153 | /> 154 | </> 155 | ) 156 | } 157 | 158 | export default PDFLoader 159 | -------------------------------------------------------------------------------- /app/pdf/page.jsx: -------------------------------------------------------------------------------- 1 | "use client"; 2 | 3 | import React, { useState } from "react"; 4 | import ResultWithSources from "../components/ResultWithSources"; 5 | import PromptBox from "../components/PromptBox"; 6 | import Button from "../components/Button"; 7 | import PageHeader from "../components/PageHeader"; 8 | import Title from "../components/Title"; 9 | import TwoColumnLayout from "../components/TwoColumnLayout"; 10 | import ButtonContainer from "../components/ButtonContainer"; 11 | import "../globals.css"; 12 | 13 | // This functional component is responsible for loading PDFs 14 | const PDFLoader = () => { 15 | // Managing prompt, messages, and error states with useState 16 | const [prompt, setPrompt] = useState("How to get rich?"); 17 | const [messages, setMessages] = useState([ 18 | { 19 | text: "Hi, I'm a Naval AI. 
What would you like to know?", 20 | type: "bot", 21 | }, 22 | ]); 23 | const [error, setError] = useState(""); 24 | 25 | // This function updates the prompt value when the user types in the prompt box 26 | const handlePromptChange = (e) => { 27 | setPrompt(e.target.value); 28 | }; 29 | 30 | // This function handles the submission of the form when the user hits 'Enter' or 'Submit' 31 | // It sends a GET request to the provided endpoint with the current prompt as the query 32 | const handleSubmit = async (endpoint) => { 33 | try { 34 | console.log(`sending ${prompt}`); 35 | console.log(`using ${endpoint}`); 36 | 37 | // A GET request is sent to the backend 38 | const response = await fetch(`/api/${endpoint}`, { 39 | method: "GET", 40 | }); 41 | 42 | // The response from the backend is parsed as JSON 43 | const searchRes = await response.json(); 44 | console.log(searchRes); 45 | setError(""); // Clear any existing error messages 46 | } catch (error) { 47 | console.log(error); 48 | setError(error.message); 49 | } 50 | }; 51 | 52 | // This function handles the submission of the user's prompt when the user hits 'Enter' or 'Submit' 53 | // It sends a POST request to the provided endpoint with the current prompt in the request body 54 | const handleSubmitPrompt = async (endpoint) => { 55 | try { 56 | setPrompt(""); 57 | 58 | // Push the user's message into the messages array 59 | setMessages((prevMessages) => [ 60 | ...prevMessages, 61 | { text: prompt, type: "user", sourceDocuments: null }, 62 | ]); 63 | 64 | // A POST request is sent to the backend with the current prompt in the request body 65 | const response = await fetch(`/api/${endpoint}`, { 66 | method: "POST", 67 | headers: { 68 | "Content-Type": "application/json", 69 | }, 70 | body: JSON.stringify({ input: prompt }), 71 | }); 72 | 73 | // Throw an error if the HTTP status is not OK 74 | if (!response.ok) { 75 | throw new Error(`HTTP error! status: ${response.status}`); 76 | } 77 | 78 | // Parse the response from the backend as JSON 79 | const searchRes = await response.json(); 80 | 81 | console.log({ searchRes }); 82 | 83 | // Push the response into the messages array 84 | setMessages((prevMessages) => [ 85 | ...prevMessages, 86 | { 87 | text: searchRes.result.text, 88 | type: "bot", 89 | sourceDocuments: searchRes.result.sourceDocuments, 90 | }, 91 | ]); 92 | 93 | setError(""); // Clear any existing error messages 94 | } catch (error) { 95 | console.log(error); 96 | setError(error.message); 97 | } 98 | }; 99 | 100 | // The component returns a two column layout with various child components 101 | return ( 102 | <> 103 | <Title emoji="💬" headingText="PDF-GPT" /> 104 | <TwoColumnLayout 105 | leftChildren={ 106 | <> 107 | <PageHeader 108 | heading="Ask Naval Anything" 109 | boldText="How to get rich? How to be happy?" 110 | description="This tool will 111 | let you ask anything contained in a PDF document. This tool uses 112 | Embeddings, Pinecone, VectorDBQAChain, and VectorStoreAgents. Head over to Module 1 to 113 | get started!" 
114 | /> 115 | <ButtonContainer> 116 | {/* <Button 117 | handleSubmit={()=>{handleSubmit('pdfupload-book')}} 118 | endpoint="pdfuploadtest" 119 | buttonText="Upload Test Data ☁️" 120 | className="Button" 121 | /> */} 122 | <Button 123 | handleSubmit={handleSubmit} 124 | endpoint="pdf-upload" 125 | buttonText="Upload Book 📚" 126 | className="Button" 127 | /> 128 | </ButtonContainer> 129 | </> 130 | } 131 | rightChildren={ 132 | <> 133 | <ResultWithSources messages={messages} pngFile="pdf" /> 134 | <PromptBox 135 | prompt={prompt} 136 | handlePromptChange={handlePromptChange} 137 | handleSubmit={() => handleSubmitPrompt("/pdf-query")} 138 | // handleSubmit={() => handleSubmitQuery("/pdfquery-agent")} 139 | placeHolderText={"How to get rich?"} 140 | error={error} 141 | /> 142 | </> 143 | } 144 | /> 145 | </> 146 | ); 147 | }; 148 | 149 | export default PDFLoader; 150 | -------------------------------------------------------------------------------- /app/resume-reader/page.jsx: -------------------------------------------------------------------------------- 1 | "use client"; 2 | 3 | import React, { useState } from "react"; 4 | import PageHeader from "../components/PageHeader"; 5 | import PromptBox from "../components/PromptBox"; 6 | import Title from "../components/Title"; 7 | import TwoColumnLayout from "../components/TwoColumnLayout"; 8 | import ResultWithSources from "../components/ResultWithSources"; 9 | import ButtonContainer from "../components/ButtonContainer"; 10 | import Button from "../components/Button"; 11 | 12 | const endpoint = "/api/resume-query-metadata"; 13 | 14 | const ResumeReader = () => { 15 | const [prompt, setPrompt] = useState("Who has experience with Python?"); 16 | const [error, setError] = useState(null); 17 | 18 | const [messages, setMessages] = useState([ 19 | { 20 | text: "After loading the vector database, ask me anything about your documents! E.g., Has anyone worked at Meta? Where did Joanna Smith go to school? 
Does Kaito Esquivel have any recommendations?", 21 | type: "bot", 22 | }, 23 | ]); 24 | 25 | const handlePromptChange = (e) => { 26 | setPrompt(e.target.value); 27 | }; 28 | const handleSubmitUpload = async () => { 29 | try { 30 | // Push the response into the messages array 31 | setMessages((prevMessages) => [ 32 | ...prevMessages, 33 | { 34 | text: "Uploading resumes...", 35 | type: "bot", 36 | }, 37 | ]); 38 | 39 | const response = await fetch(`/api/resume-upload`); 40 | const transcriptRes = await response.json(); 41 | 42 | if (!response.ok) { 43 | throw new Error(transcriptRes.error); 44 | } 45 | 46 | console.log({ transcriptRes }); 47 | 48 | // assuming transcriptRes is an object 49 | const summariesArray = JSON.parse(transcriptRes.output); 50 | 51 | const newMessages = summariesArray.map((summary) => ({ 52 | text: summary.summary, 53 | type: "bot", 54 | })); 55 | 56 | setMessages((prevMessages) => [...prevMessages, ...newMessages]); 57 | 58 | setPrompt(""); 59 | } catch (err) { 60 | console.error(err); 61 | setError("Error"); 62 | } 63 | }; 64 | 65 | const handleSubmit = async () => { 66 | try { 67 | // Push the user's message into the messages array 68 | setMessages((prevMessages) => [ 69 | ...prevMessages, 70 | { text: prompt, type: "user", sourceDocuments: null }, 71 | ]); 72 | 73 | // set loading message 74 | setMessages((prevMessages) => [ 75 | ...prevMessages, 76 | { text: "...", type: "bot", sourceDocuments: null }, 77 | ]); 78 | 79 | const response = await fetch(`${endpoint}`, { 80 | method: "POST", 81 | headers: { 82 | "Content-Type": "application/json", 83 | }, 84 | body: JSON.stringify({ prompt }), 85 | }); 86 | 87 | const searchRes = await response.json(); 88 | console.log({ searchRes }); 89 | 90 | // remove loading message 91 | setMessages((prevMessages) => prevMessages.slice(0, -1)); 92 | 93 | // Push the response into the messages array 94 | setMessages((prevMessages) => [ 95 | ...prevMessages, 96 | { 97 | text: searchRes.output, 98 | type: "bot", 99 | sourceDocuments: searchRes.sourceDocuments, 100 | }, 101 | ]); 102 | setPrompt(""); 103 | } catch (err) { 104 | console.error(err); 105 | setError(err); 106 | } 107 | }; 108 | 109 | return ( 110 | <> 111 | <> 112 | <Title emoji="🤖" headingText="RoboHR" /> 113 | <TwoColumnLayout 114 | leftChildren={ 115 | <> 116 | <PageHeader 117 | heading="Your personal HR assistant" 118 | boldText="Get information on a whole lot of documents." 119 | description="This tool uses Document Loaders, OpenAI Embeddings, Summarization Chain, Pinecone, VectorDB QA Chain, Prompt Templates, and the Vector Store Agent." 
120 | /> 121 | 122 | <ButtonContainer> 123 | <Button 124 | handleSubmit={handleSubmitUpload} 125 | endpoint="" 126 | buttonText=" Upload Resumes 📂" 127 | /> 128 | </ButtonContainer> 129 | </> 130 | } 131 | rightChildren={ 132 | <> 133 | <ResultWithSources messages={messages} pngFile="robohr" /> 134 | 135 | <PromptBox 136 | prompt={prompt} 137 | handlePromptChange={handlePromptChange} 138 | handleSubmit={handleSubmit} 139 | error={error} 140 | placeHolderText={"Enter Prompt"} 141 | /> 142 | </> 143 | } 144 | /> 145 | </> 146 | </> 147 | ); 148 | }; 149 | 150 | export default ResumeReader; 151 | -------------------------------------------------------------------------------- /app/streaming/page.jsx: -------------------------------------------------------------------------------- 1 | "use client"; 2 | import React, { useState, useEffect } from "react"; 3 | import PageHeader from "../components/PageHeader"; 4 | import PromptBox from "../components/PromptBox"; 5 | import ResultStreaming from "../components/ResultStreaming"; 6 | import Title from "../components/Title"; 7 | import TwoColumnLayout from "app/components/TwoColumnLayout"; 8 | 9 | const Streaming = () => { 10 | const [prompt, setPrompt] = useState(""); 11 | const [error, setError] = useState(null); 12 | const [data, setData] = useState(""); 13 | // add code 14 | 15 | const processToken = (token) => { 16 | // add code 17 | return; 18 | }; 19 | 20 | const handlePromptChange = (e) => { 21 | setPrompt(e.target.value); 22 | }; 23 | 24 | const handleSubmit = async () => { 25 | try { 26 | // add code 27 | } catch (err) { 28 | console.error(err); 29 | setError(error); 30 | } 31 | }; 32 | 33 | // Clean up the EventSource on component unmount 34 | // add code 35 | return ( 36 | <> 37 | <Title emoji="💭" headingText="Streaming" /> 38 | <TwoColumnLayout 39 | leftChildren={ 40 | <> 41 | <PageHeader 42 | heading="Spit a Rap." 43 | boldText="Nobody likes waiting for APIs to load. Use streaming to improve the user experience of chat bots." 44 | description="This tutorial uses streaming. Head over to Module X to get started!" 45 | /> 46 | </> 47 | } 48 | rightChildren={ 49 | <> 50 | <ResultStreaming data={data} /> 51 | <PromptBox 52 | prompt={prompt} 53 | handlePromptChange={handlePromptChange} 54 | handleSubmit={handleSubmit} 55 | placeHolderText={"Enter your name and city"} 56 | error={error} 57 | pngFile="pdf" 58 | /> 59 | </> 60 | } 61 | /> 62 | </> 63 | ); 64 | }; 65 | 66 | export default Streaming; 67 | -------------------------------------------------------------------------------- /app/streaming/streaming-solution.jsx: -------------------------------------------------------------------------------- 1 | "use client"; 2 | import React, { useState, useEffect } from "react"; 3 | import PageHeader from "../components/PageHeader"; 4 | import PromptBox from "../components/PromptBox"; 5 | import ResultStreaming from "../components/ResultStreaming"; 6 | import Title from "../components/Title"; 7 | import TwoColumnLayout from "app/components/TwoColumnLayout"; 8 | 9 | /** 10 | * 11 | * WARNING: THIS IS THE SOLUTION! Please try coding before viewing this. 
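 *
 * How the flow below works: handleSubmit first POSTs the prompt to
 * /api/streaming, then opens an EventSource against the same route; the
 * server emits "newToken" events as tokens arrive (each token is unescaped
 * by processToken before being appended to the result) and an "end" event
 * when the completion finishes, at which point the connection is closed.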
12 |  *
13 |  */
14 | const Streaming = () => {
15 |   const [prompt, setPrompt] = useState("");
16 |   const [error, setError] = useState(null);
17 |   const [data, setData] = useState("");
18 |   const [source, setSource] = useState(null);
19 | 
20 |   const processToken = (token) => {
21 |     return token.replace(/\\n/g, "\n").replace(/\"/g, "");
22 |   };
23 | 
24 |   const handlePromptChange = (e) => {
25 |     setPrompt(e.target.value);
26 |   };
27 | 
28 |   const handleSubmit = async () => {
29 |     try {
30 |       console.log(`sending ${prompt}`);
31 |       await fetch("/api/streaming", {
32 |         method: "POST",
33 |         headers: {
34 |           "Content-Type": "application/json",
35 |         },
36 |         body: JSON.stringify({ input: prompt }),
37 |       });
38 |       // close any existing event source before opening a new one
39 |       if (source) {
40 |         source.close();
41 |       }
42 |       // create a new EventSource
43 | 
44 |       const newSource = new EventSource("/api/streaming");
45 | 
46 |       setSource(newSource);
47 | 
48 |       newSource.addEventListener("newToken", (event) => {
49 |         const token = processToken(event.data);
50 |         setData((prevData) => prevData + token);
51 |       });
52 | 
53 |       newSource.addEventListener("end", () => {
54 |         newSource.close();
55 |       });
56 |     } catch (err) {
57 |       console.error(err);
58 |       setError(err);
59 |     }
60 |   };
61 | 
62 |   // Clean up the EventSource on component unmount
63 |   useEffect(() => {
64 |     // no setup needed; we only return the cleanup function
65 |     return () => {
66 |       if (source) {
67 |         source.close();
68 |       }
69 |     };
70 |   }, [source]);
71 |   return (
72 |     <>
73 |       <Title emoji="💭" headingText="Streaming" />
74 |       <TwoColumnLayout
75 |         leftChildren={
76 |           <>
77 |             <PageHeader
78 |               heading="Spit a Rap."
79 |               boldText="Nobody likes waiting for APIs to load. Use streaming to improve the user experience of chat bots."
80 |               description="This tutorial uses streaming. 
81 | /> 82 | </> 83 | } 84 | rightChildren={ 85 | <> 86 | <ResultStreaming data={data} /> 87 | <PromptBox 88 | prompt={prompt} 89 | handlePromptChange={handlePromptChange} 90 | handleSubmit={handleSubmit} 91 | placeHolderText={"Enter your name and city"} 92 | error={error} 93 | pngFile="pdf" 94 | /> 95 | </> 96 | } 97 | /> 98 | </> 99 | ); 100 | }; 101 | 102 | export default Streaming; 103 | -------------------------------------------------------------------------------- /app/styles/InstrumentSans-VariableFont_wdth,wght.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/app/styles/InstrumentSans-VariableFont_wdth,wght.ttf -------------------------------------------------------------------------------- /app/styles/burger.css: -------------------------------------------------------------------------------- 1 | .bm-burger-button { 2 | 3 | width: 36px; 4 | height: 30px; 5 | left: 36px; 6 | 7 | top: 36px; 8 | } 9 | 10 | .bm-burger-bars { 11 | background: #373a47; 12 | } 13 | 14 | .bm-burger-bars-hover { 15 | background: #a90000; 16 | 17 | } 18 | 19 | .bm-cross-button { 20 | height: 24px; 21 | width: 24px; 22 | } 23 | 24 | .bm-cross { 25 | background: #bdc3c7; 26 | } 27 | 28 | .bm-menu-wrap { 29 | position: fixed; 30 | height: 100%; 31 | } 32 | 33 | .bm-menu { 34 | background: #373a47; 35 | padding: 0.3rem 0.3rem 0; 36 | font-size: 1.15em; 37 | } 38 | 39 | .bm-morph-shape { 40 | fill: #373a47; 41 | } 42 | 43 | .bm-item-list { 44 | 45 | color: #b8b7ad; 46 | padding: 0.8em; 47 | } 48 | 49 | .bm-item { 50 | display: inline-block; 51 | } 52 | 53 | .bm-overlay { 54 | background: rgba(0, 0, 0, 0.3); 55 | } 56 | 57 | /* styling next link component so that will be a column list of links*/ 58 | .bm-item-list a { 59 | display: flex; 60 | flex-direction: column; 61 | } -------------------------------------------------------------------------------- /app/styles/fonts.js: -------------------------------------------------------------------------------- 1 | import { Press_Start_2P, Source_Code_Pro } from "next/font/google"; 2 | import localFont from "next/font/local"; 3 | 4 | /** 5 | * 6 | * 7 | * GOOGLE FONTS 8 | * 9 | * Automatically self-host any Google Font. Fonts are included in the deployment and served from the same domain as your deployment. No requests are sent to Google by the browser. 10 | * 11 | * Get started by importing the font you would like to use from next/font/google as a function. We recommend using variable fonts for the best performance and flexibility. 
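* (Below, two Google fonts are loaded as functions, and Instrument Sans is loaded from a local variable-font .ttf via next/font/local.)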
12 | * 13 | * https://nextjs.org/docs/app/building-your-application/optimizing/fonts#google-fonts 14 | * 15 | * 16 | */ 17 | 18 | const pressStart2P = Press_Start_2P({ subsets: ["latin"], weight: "400" }); 19 | const sourceCodePro = Source_Code_Pro({ subsets: ["latin"], weight: "400" }); 20 | const instrumentSans = localFont({ 21 | src: "./InstrumentSans-VariableFont_wdth,wght.ttf", 22 | }); 23 | 24 | export { pressStart2P, sourceCodePro, instrumentSans }; 25 | -------------------------------------------------------------------------------- /app/video-chat/page.jsx: -------------------------------------------------------------------------------- 1 | "use client"; 2 | 3 | import React, { useState } from "react"; 4 | import PageHeader from "../components/PageHeader"; 5 | import PromptBox from "../components/PromptBox"; 6 | import ResultWithSources from "../components/ResultWithSources"; 7 | import Title from "../components/Title"; 8 | import TwoColumnLayout from "../components/TwoColumnLayout"; 9 | 10 | /** 11 | * 12 | * MODULE 4: YOUTUBE CHATBOT: 13 | * 14 | * Start with the UI; no need to recreate it! 15 | * 16 | * */ 17 | const VideoChat = () => { 18 | // We'll set a default YouTube video so we don't have to copy and paste this every time 19 | const [prompt, setPrompt] = useState( 20 | "https://www.youtube.com/watch?v=0lJKucu6HJc" 21 | ); 22 | const [error, setError] = useState(null); 23 | const [firstMsg, setFirstMsg] = useState(true); 24 | 25 | // And we'll set an initial message as well, to make the UI look a little nicer. 26 | const [messages, setMessages] = useState([ 27 | { 28 | text: "Hi there! I'm the YT chatbot. Please provide a YouTube video URL and I'll answer any questions you have.", 29 | type: "bot", 30 | }, 31 | ]); 32 | 33 | const handlePromptChange = (e) => { 34 | setPrompt(e.target.value); 35 | }; 36 | 37 | // The only differences here will be the "URL" for the api call 38 | // And the body will send a prompt as well as a firstMsg, which tells us if it's the first message in the chat or not 39 | // Because the first message will tell us to create the YouTube chatbot 40 | const handleSubmit = async () => { 41 | try { 42 | // Push the user's message into the messages array 43 | setMessages((prevMessages) => [ 44 | ...prevMessages, 45 | { text: prompt, type: "user", sourceDocuments: null }, 46 | ]); 47 | 48 | const response = await fetch(`/api/video-chat`, { 49 | method: "POST", 50 | headers: { 51 | "Content-Type": "application/json", 52 | }, 53 | body: JSON.stringify({ prompt: prompt, firstMsg }), 54 | }); 55 | 56 | console.log({ response }); 57 | if (!response.ok) { 58 | throw new Error(`HTTP error! status: ${response.status}`); 59 | } 60 | 61 | const searchRes = await response.json(); 62 | 63 | // Push the response into the messages array 64 | setMessages((prevMessages) => [ 65 | ...prevMessages, 66 | { 67 | text: searchRes.output.text, 68 | type: "bot", 69 | }, 70 | ]); 71 | 72 | setPrompt(""); 73 | setFirstMsg(false); 74 | setError(""); 75 | } catch (err) { 76 | console.error(err); 77 | setError("Error fetching transcript. Please try again."); 78 | } 79 | }; 80 | 81 | return ( 82 | <> 83 | <Title emoji="💬" headingText="YouTube Video Chat" /> 84 | <TwoColumnLayout 85 | leftChildren={ 86 | <> 87 | <PageHeader 88 | heading="Talk to Your Videos" 89 | boldText="This tool lets you chat with your YouTube videos." 90 | description="This tool uses the YouTube API, Text Splitters, and the Conversational Retrieval QA Chain. Head over to Module X to get started!"
91 | /> 92 | </> 93 | } 94 | rightChildren={ 95 | <> 96 | <ResultWithSources messages={messages} pngFile="youtube" /> 97 | <PromptBox 98 | prompt={prompt} 99 | handlePromptChange={handlePromptChange} 100 | handleSubmit={handleSubmit} 101 | placeHolderText={ 102 | messages.length === 1 103 | ? "Enter a youtube url, e.g., https://www.youtube.com/watch?v=O_9JoimRj8w" 104 | : "Ask a follow up question" 105 | } 106 | error={error} 107 | /> 108 | </> 109 | } 110 | /> 111 | </> 112 | ); 113 | }; 114 | 115 | export default VideoChat; 116 | -------------------------------------------------------------------------------- /data/document_loaders/bitcoin.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/data/document_loaders/bitcoin.pdf -------------------------------------------------------------------------------- /data/document_loaders/naval-ravikant-book.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/data/document_loaders/naval-ravikant-book.pdf -------------------------------------------------------------------------------- /data/resumes/resume_aubrey_graham.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/data/resumes/resume_aubrey_graham.pdf -------------------------------------------------------------------------------- /data/resumes/resume_joanna_smith.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/data/resumes/resume_joanna_smith.pdf -------------------------------------------------------------------------------- /data/resumes/resume_kaito_esquivel.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/data/resumes/resume_kaito_esquivel.pdf -------------------------------------------------------------------------------- /jsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "baseUrl": ".", 4 | "paths": { 5 | "@/utils/*": ["utils/*"] 6 | } 7 | }, 8 | "exclude": ["node_modules"] 9 | } 10 | -------------------------------------------------------------------------------- /next.config.js: -------------------------------------------------------------------------------- 1 | /** @type {import('next').NextConfig} */ 2 | // https://js.langchain.com/docs/getting-started/install#vercel--nextjs 3 | // To use LangChain with Next.js (either with app/ or pages/), add the following to your next.config.js to enable support for WebAssembly modules (which is required by the tokenizer library @dqbd/tiktoken): 4 | const nextConfig = { 5 | webpack(config) { 6 | config.experiments = { 7 | asyncWebAssembly: true, 8 | layers: true, 9 | }; 10 | 11 | return config; 12 | }, 13 | // Add env { API_KEY: process.env.API_KEY} 14 | }; 15 | 16 | module.exports = nextConfig; 17 | -------------------------------------------------------------------------------- /package.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "name": "yt-script-generator", 3 | "version": "0.1.0", 4 | "private": true, 5 | "scripts": { 6 | "dev": "next dev", 7 | "build": "next build", 8 | "start": "next start", 9 | "lint": "next lint" 10 | }, 11 | "dependencies": { 12 | "@pinecone-database/pinecone": "^0.1.5", 13 | "autoprefixer": "10.4.14", 14 | "axios": "^1.4.0", 15 | "cheerio": "^1.0.0-rc.12", 16 | "cors": "^2.8.5", 17 | "debug": "^4.3.4", 18 | "dotenv": "^16.0.3", 19 | "hnswlib-node": "^1.4.2", 20 | "langchain": "^0.0.75", 21 | "next": "13.4.1", 22 | "openai": "^3.2.1", 23 | "pdf-parse": "^1.1.1", 24 | "postcss": "8.4.23", 25 | "react": "18.2.0", 26 | "react-burger-menu": "^3.0.9", 27 | "react-dom": "18.2.0", 28 | "serpapi": "^1.1.1", 29 | "supports-color": "^9.3.1", 30 | "tailwindcss": "3.3.2", 31 | "youtube-transcript": "^1.0.6" 32 | }, 33 | "devDependencies": { 34 | "@types/cors": "^2.8.13", 35 | "@types/node": "20.1.0", 36 | "@types/react": "18.2.6", 37 | "express-sse": "^0.5.3", 38 | "typescript": "5.0.4" 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /pages/api/chatcompletions.js: -------------------------------------------------------------------------------- 1 | import { ChatOpenAI } from "langchain/chat_models/openai"; 2 | import { HumanChatMessage } from "langchain/schema"; 3 | 4 | // create instance of chatOpenAI 5 | 6 | export default async function handler(req, res) { 7 | if (req.method === "POST") { 8 | // Grab the user prompt 9 | // console.log(process.env.OPENAI_API_KEY); 10 | // console.log(process.env.SERPAPI_API_KEY); 11 | 12 | // Enter your code here 13 | 14 | // Modify output as needed 15 | return res.status(200).json({ result: response }); 16 | } else { 17 | res.status(405).json({ message: "Method not allowed" }); 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /pages/api/content-generator.js: -------------------------------------------------------------------------------- 1 | // /pages/api/transcript_chat.js 2 | import { YoutubeTranscript } from "youtube-transcript"; 3 | import { ChatOpenAI } from "langchain/chat_models/openai"; 4 | import { LLMChain } from "langchain/chains"; 5 | import { 6 | ChatPromptTemplate, 7 | HumanMessagePromptTemplate, 8 | SystemMessagePromptTemplate, 9 | } from "langchain/prompts"; 10 | import extractVideoId from "../../utils/extractVideoId"; 11 | import getVideoMetaData from "../../utils/getVideoMetaData"; 12 | import ResearchAgent from "../../agents/ResearchAgent"; 13 | 14 | // Global Variables 15 | 16 | // Initialize Chain with Data 17 | const initChain = async (transcript, metadataString, research, topic) => { 18 | try { 19 | // do stuff 20 | 21 | return response; 22 | } catch (error) { 23 | console.error( 24 | `An error occurred during the initialization of the Chat Prompt: ${error.message}` 25 | ); 26 | throw error; // rethrow the error to let the calling function know that an error occurred 27 | } 28 | }; 29 | 30 | export default async function handler(req, res) { 31 | const { prompt, topic, firstMsg } = req.body; 32 | console.log(`Prompt: ${prompt} Topic: ${topic}`); 33 | 34 | if ( 35 | chain === undefined && 36 | !prompt.includes("https://www.youtube.com/watch?v=") 37 | ) { 38 | return res.status(400).json({ 39 | error: 40 | "Chain not initialized. 
Please send a YouTube URL to initialize the chain.", 41 | }); 42 | } 43 | 44 | chatHistory.push({ 45 | role: "user", 46 | content: prompt, 47 | }); 48 | 49 | // Just like in the previous section, if we have a firstMsg set to true, we need to initialize the chain with the context 50 | if (firstMsg) { 51 | console.log("Received URL"); 52 | try { 53 | // Initialize chain with transcript, metadata, research, and topic 54 | 55 | // return res.status(200).json({ output: research }); 56 | return res.status(200).json({ 57 | output: response, 58 | chatHistory, 59 | transcript, 60 | metadata, 61 | research, 62 | }); 63 | } catch (err) { 64 | console.error(err); 65 | return res 66 | .status(500) 67 | .json({ error: "An error occurred while fetching transcript" }); 68 | } 69 | } else { 70 | // Very similar to the previous section; don't worry too much about this, just copy and paste it from the previous section! 71 | console.log("Received question"); 72 | try { 73 | // do stuff 74 | 75 | // just make sure to modify this response as necessary. 76 | return res.status(200).json({ 77 | output: response, 78 | metadata: metadataString, 79 | transcript, 80 | chatHistory, 81 | }); 82 | } catch (error) { 83 | console.error(error); 84 | res 85 | .status(500) 86 | .json({ error: "An error occurred during the conversation." }); 87 | } 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /pages/api/memory.js: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/pages/api/memory.js -------------------------------------------------------------------------------- /pages/api/pdf-query.js: -------------------------------------------------------------------------------- 1 | import { PineconeClient } from "@pinecone-database/pinecone"; 2 | import { VectorDBQAChain } from "langchain/chains"; 3 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 4 | import { OpenAI } from "langchain/llms/openai"; 5 | import { PineconeStore } from "langchain/vectorstores/pinecone"; 6 | 7 | // Example: https://js.langchain.com/docs/modules/indexes/document_loaders/examples/file_loaders/pdf 8 | export default async function handler(req, res) { 9 | try { 10 | if (req.method !== "POST") { 11 | throw new Error("Method not allowed"); 12 | } 13 | 14 | console.log("Query PDF"); 15 | 16 | // Grab the user prompt 17 | const { input } = req.body; 18 | 19 | if (!input) { 20 | throw new Error("No input"); 21 | } 22 | 23 | console.log("input received:", input); 24 | 25 | /* Use as part of a chain (currently no metadata filters) */ 26 | 27 | // Initialize Pinecone 28 | 29 | // Search!
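// One possible completion, mirroring pages/api/solutions/pdf-query-soln.js:
// connect to the existing Pinecone index, wrap it in a vector store, and
// answer the query with a VectorDBQAChain (all imports are already at the top).
const client = new PineconeClient();
await client.init({
  apiKey: process.env.PINECONE_API_KEY,
  environment: process.env.PINECONE_ENVIRONMENT,
});
const pineconeIndex = client.Index(process.env.PINECONE_INDEX);

const vectorStore = await PineconeStore.fromExistingIndex(
  new OpenAIEmbeddings(),
  { pineconeIndex }
);

const model = new OpenAI();
const chain = VectorDBQAChain.fromLLM(model, vectorStore, {
  k: 1,
  returnSourceDocuments: true,
});
const response = await chain.call({ query: input });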
30 | 31 | return res.status(200).json({ result: response }); 32 | } catch (error) { 33 | console.error(error); 34 | res.status(500).json({ message: error.message }); 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /pages/api/pdf-upload.js: -------------------------------------------------------------------------------- 1 | import { PDFLoader } from "langchain/document_loaders/fs/pdf"; 2 | import { PineconeClient } from "@pinecone-database/pinecone"; 3 | import { Document } from "langchain/document"; 4 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 5 | import { PineconeStore } from "langchain/vectorstores/pinecone"; 6 | import { CharacterTextSplitter } from "langchain/text_splitter"; 7 | 8 | // Example: https://js.langchain.com/docs/modules/indexes/document_loaders/examples/file_loaders/pdf 9 | export default async function handler(req, res) { 10 | if (req.method === "GET") { 11 | console.log("Inside the PDF handler"); 12 | // Enter your code here 13 | /** STEP ONE: LOAD DOCUMENT */ 14 | 15 | // Chunk it 16 | 17 | // Reduce the size of the metadata 18 | 19 | /** STEP TWO: UPLOAD TO DATABASE */ 20 | 21 | // upload documents to Pinecone 22 | return res.status(200).json({ result: docs }); 23 | } else { 24 | res.status(405).json({ message: "Method not allowed" }); 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /pages/api/resume-query-metadata.js: -------------------------------------------------------------------------------- 1 | /** 2 | * This endpoint is used to load the resumes into the chain, then upload them to the Pinecone database. 3 | * Tutorial: https://js.langchain.com/docs/modules/indexes/document_loaders/examples/file_loaders/directory 4 | * Summarization: https://js.langchain.com/docs/modules/chains/other_chains/summarization 5 | * Dependencies: npm install pdf-parse 6 | */ 7 | 8 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 9 | import { PineconeStore } from "langchain/vectorstores/pinecone"; 10 | import { PineconeClient } from "@pinecone-database/pinecone"; 11 | import { OpenAI } from "langchain/llms/openai"; 12 | import { VectorDBQAChain } from "langchain/chains"; 13 | import { PromptTemplate } from "langchain/prompts"; 14 | 15 | export default async function handler(req, res) { 16 | try { 17 | // do stuff 18 | 19 | return res.status(200).json({ 20 | output: response.text, 21 | sourceDocuments: response.sourceDocuments, 22 | }); 23 | } catch (err) { 24 | console.error(err); 25 | return res.status(500).json({ error: "Error" }); 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /pages/api/resume-upload.js: -------------------------------------------------------------------------------- 1 | // /pages/api/resume_upload.js 2 | // Import dependencies 3 | 4 | /** 5 | * This endpoint is used to load the resumes into the chain, then upload them to the Pinecone database. 
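* (Per the solution file, the flow is: DirectoryLoader -> CharacterTextSplitter -> reduced metadata -> summarization chain -> PineconeStore upsert.)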
6 | * Tutorial: https://js.langchain.com/docs/modules/indexes/document_loaders/examples/file_loaders/directory 7 | * Summarization: https://js.langchain.com/docs/modules/chains/other_chains/summarization 8 | * Dependencies: npm install pdf-parse 9 | */ 10 | 11 | import { DirectoryLoader } from "langchain/document_loaders/fs/directory"; 12 | import { PDFLoader } from "langchain/document_loaders/fs/pdf"; 13 | import { CharacterTextSplitter } from "langchain/text_splitter"; 14 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 15 | import { PineconeStore } from "langchain/vectorstores/pinecone"; 16 | import { PineconeClient } from "@pinecone-database/pinecone"; 17 | import { loadSummarizationChain } from "langchain/chains"; 18 | import { OpenAI } from "langchain/llms/openai"; 19 | 20 | export default async function handler(req, res) { 21 | // Grab the prompt from the url (?prompt=[value]) 22 | // console.log(process.env.PINECONE_API_KEY); 23 | // console.log(process.env.PINECONE_ENVIRONMENT); 24 | // console.log(process.env.PINECONE_INDEX); 25 | // Always use a try catch block to do asynchronous requests and catch any errors 26 | try { 27 | // do stuff 28 | } catch (err) { 29 | // If we have an error 30 | 31 | console.error(err); 32 | return res.status(500).json({ error: err }); 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /pages/api/solutions/chatcompletions-soln.js: -------------------------------------------------------------------------------- 1 | import { ChatOpenAI } from "langchain/chat_models/openai"; 2 | import { HumanChatMessage, SystemChatMessage } from "langchain/schema"; 3 | 4 | /** 5 | * 6 | * WARNING: THIS IS THE SOLUTION! Please try coding before viewing this. 7 | * 8 | */ 9 | 10 | const chat = new ChatOpenAI({ temperature: 0, modelName: "gpt-3.5-turbo" }); 11 | 12 | export default async function handler(req, res) { 13 | if (req.method === "POST") { 14 | // Grab the user prompt 15 | const { input } = req.body; 16 | 17 | if (!input) { 18 | throw new Error("No input"); 19 | } 20 | 21 | // Enter your code here 22 | const response = await chat.call([ 23 | new HumanChatMessage(`How do I write a for loop in ${input}?`), 24 | ]); 25 | 26 | console.log(response); 27 | 28 | // Modify output as needed 29 | return res.status(200).json({ result: response }); 30 | } else { 31 | res.status(405).json({ message: "Method not allowed" }); 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /pages/api/solutions/content-generator-soln.js: -------------------------------------------------------------------------------- 1 | // /pages/api/transcript_chat.js 2 | 3 | import { YoutubeTranscript } from "youtube-transcript"; 4 | import extractVideoId from "../../utils/extractVideoId"; 5 | import getVideoMetaData from "../../utils/getVideoMetaData"; 6 | import { ChatOpenAI } from "langchain/chat_models/openai"; 7 | import { LLMChain } from "langchain/chains"; 8 | import ResearchAgent from "../../agents/ResearchAgent"; 9 | import { 10 | ChatPromptTemplate, 11 | HumanMessagePromptTemplate, 12 | SystemMessagePromptTemplate, 13 | } from "langchain/prompts"; 14 | 15 | /** 16 | * 17 | * WARNING: THIS IS THE SOLUTION! Please try coding before viewing this. 
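* In short: the solution fetches the YouTube transcript and metadata, runs the ResearchAgent on the topic, and feeds all three into a ChatPromptTemplate-backed LLMChain.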
18 | * 19 | */ 20 | 21 | // Global Variables 22 | let chain; 23 | let chatHistory = []; 24 | let transcript = ""; 25 | let metadataString = ""; 26 | let research; 27 | 28 | // Initialize Chain with Data 29 | const initChain = async (transcript, metadataString, research, topic) => { 30 | try { 31 | // For chat models, we provide a `ChatPromptTemplate` class that can be used to format chat prompts. 32 | const llm = new ChatOpenAI({ 33 | temperature: 0.7, 34 | modelName: "gpt-3.5-turbo", 35 | }); 36 | 37 | console.log(`Initializing Chat Prompt`); 38 | 39 | // For chat models, we provide a `ChatPromptTemplate` class that can be used to format chat prompts. 40 | // This allows us to set the template that the bot sees every time 41 | const chatPrompt = ChatPromptTemplate.fromPromptMessages([ 42 | SystemMessagePromptTemplate.fromTemplate( 43 | "You are a helpful social media assistant that provides research, new content, and advice to me. \n You are given the transcript of the video: {transcript} \n and video metadata: {metadata} as well as additional research: {research}" 44 | ), 45 | HumanMessagePromptTemplate.fromTemplate( 46 | "{input}. Remember to use the video transcript and research as reference." 47 | ), 48 | ]); 49 | 50 | const question = `Write me a script for a new video that provides commentary on this video in a lighthearted, joking manner. It should compliment ${topic} with puns.`; 51 | console.log(question); 52 | 53 | chain = new LLMChain({ 54 | prompt: chatPrompt, 55 | llm: llm, 56 | // memory, 57 | }); 58 | 59 | const response = await chain.call({ 60 | transcript, 61 | metadata: metadataString, 62 | research, 63 | input: question, 64 | }); 65 | 66 | console.log({ response }); 67 | 68 | chatHistory.push({ 69 | role: "assistant", 70 | content: response.text, 71 | }); 72 | 73 | return response; 74 | } catch (error) { 75 | console.error( 76 | `An error occurred during the initialization of the Chat Prompt: ${error.message}` 77 | ); 78 | throw error; // rethrow the error to let the calling function know that an error occurred 79 | } 80 | }; 81 | 82 | export default async function handler(req, res) { 83 | const { prompt, topic, firstMsg } = req.body; 84 | console.log(`Prompt: ${prompt} Topic: ${topic}`); 85 | 86 | if ( 87 | chain === undefined && 88 | !prompt.includes("https://www.youtube.com/watch?v=") 89 | ) { 90 | return res.status(400).json({ 91 | error: 92 | "Chain not initialized. 
Please send a YouTube URL to initialize the chain.", 93 | }); 94 | } 95 | 96 | chatHistory.push({ 97 | role: "user", 98 | content: prompt, 99 | }); 100 | 101 | // Just like in the previous section, if we have a firstMsg set to true, we need to initialize the chain with the context 102 | if (firstMsg) { 103 | console.log("Received URL"); 104 | try { 105 | const videoId = extractVideoId(prompt); 106 | // API call for video transcript (same as last video, but we just grab the array and flatten it into a variable) [{text: " "}, {text: ""}] 107 | const transcriptResponse = await YoutubeTranscript.fetchTranscript( 108 | prompt 109 | ); 110 | // Some error handling -- check the response before using it 111 | if (!transcriptResponse) { 112 | return res.status(400).json({ error: "Failed to get transcript" }); 113 | } 114 | transcriptResponse.forEach((line) => { 115 | transcript += line.text; 116 | }); 117 | 118 | // API call for video metadata –– go to VideoMetaData and explain this 119 | const metadata = await getVideoMetaData(videoId); 120 | 121 | // JSON object { [], [], [] } , null (no characters between), and use 2 spaces for indentation 122 | metadataString = JSON.stringify(metadata, null, 2); 123 | console.log({ metadataString }); 124 | 125 | // ResearchAgent 126 | research = await ResearchAgent(topic); 127 | 128 | console.log({ research }); 129 | 130 | // Alright, finally we have all the context and we can initialize the chain! 131 | const response = await initChain( 132 | transcript, 133 | metadataString, 134 | research, 135 | topic 136 | ); 137 | 138 | // return res.status(200).json({ output: research }); 139 | return res.status(200).json({ 140 | output: response, 141 | chatHistory, 142 | transcript, 143 | metadata, 144 | research, 145 | }); 146 | } catch (err) { 147 | console.error(err); 148 | return res 149 | .status(500) 150 | .json({ error: "An error occurred while fetching transcript" }); 151 | } 152 | } else { 153 | // Very similar to the previous section; don't worry too much about this, just copy and paste it from the previous section! 154 | console.log("Received question"); 155 | try { 156 | const question = prompt; 157 | 158 | console.log("Asking:", question); 159 | console.log("Using old chain:", chain); 160 | // Every time we call the chain, we need to pass all the context back so that it can fill in the prompt template appropriately 161 | const response = await chain.call({ 162 | transcript, 163 | metadata: metadataString, 164 | research, 165 | input: question, 166 | }); 167 | 168 | // update chat history 169 | chatHistory.push({ 170 | role: "assistant", 171 | content: response.text, 172 | }); 173 | // just make sure to modify this response as necessary. 174 | return res.status(200).json({ 175 | output: response, 176 | metadata: metadataString, 177 | transcript, 178 | chatHistory, 179 | }); 180 | } catch (error) { 181 | console.error(error); 182 | res 183 | .status(500) 184 | .json({ error: "An error occurred during the conversation." }); 185 | } 186 | } 187 | } 188 | -------------------------------------------------------------------------------- /pages/api/solutions/memory-soln.js: -------------------------------------------------------------------------------- 1 | // Solution 2 | import { OpenAI } from "langchain/llms/openai"; 3 | import { BufferMemory } from "langchain/memory"; 4 | import { ConversationChain } from "langchain/chains"; 5 | 6 | /** 7 | * 8 | * WARNING: THIS IS THE SOLUTION! Please try coding before viewing this.
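* In short: a ConversationChain with BufferMemory is kept in module scope, so the chat history survives across API calls until the chain is re-initialized.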
9 | * 10 | */ 11 | 12 | let model; 13 | let memory; 14 | let chain; 15 | export default async function handler(req, res) { 16 | if (req.method === "POST") { 17 | const { input, firstMsg } = req.body; 18 | 19 | if (!input) { 20 | throw new Error("No input!"); 21 | } 22 | 23 | if (firstMsg) { 24 | console.log("initializing chain"); 25 | model = new OpenAI({ modelName: "gpt-3.5-turbo" }); 26 | memory = new BufferMemory(); 27 | chain = new ConversationChain({ llm: model, memory: memory }); 28 | } 29 | 30 | console.log({ input }); 31 | const response = await chain.call({ input }); 32 | console.log({ response }); 33 | return res.status(200).json({ output: response }); 34 | } else { 35 | res.status(405).json({ message: "Only POST is allowed" }); 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /pages/api/solutions/pdf-query-namespace-soln.js: -------------------------------------------------------------------------------- 1 | import { PineconeClient } from '@pinecone-database/pinecone' 2 | import { VectorDBQAChain } from 'langchain/chains' 3 | import { OpenAIEmbeddings } from 'langchain/embeddings/openai' 4 | import { OpenAI } from 'langchain/llms/openai' 5 | import { PineconeStore } from 'langchain/vectorstores/pinecone' 6 | 7 | export default async function handler(req, res) { 8 | try { 9 | if (req.method !== 'POST') { 10 | throw new Error('Method not allowed') 11 | } 12 | 13 | console.log('Query PDF') 14 | 15 | // Grab the user prompt 16 | const { input, bookId } = req.body 17 | 18 | if (!input) { 19 | throw new Error('No input') 20 | } 21 | 22 | console.log('input received:', input) 23 | 24 | const client = new PineconeClient() 25 | await client.init({ 26 | apiKey: process.env.PINECONE_API_KEY, 27 | environment: process.env.PINECONE_ENVIRONMENT, 28 | }) 29 | const pineconeIndex = client.Index(process.env.PINECONE_INDEX) 30 | 31 | const vectorStore = await PineconeStore.fromExistingIndex(new OpenAIEmbeddings(), { 32 | pineconeIndex, 33 | namespace: bookId.toString(), 34 | }) 35 | 36 | /* Part Two: Use as part of a chain (currently no metadata filters) */ 37 | 38 | const model = new OpenAI() 39 | const chain = VectorDBQAChain.fromLLM(model, vectorStore, { 40 | k: 1, 41 | returnSourceDocuments: true, 42 | }) 43 | const response = await chain.call({ query: input }) 44 | 45 | console.log(response) 46 | 47 | return res.status(200).json({ result: response }) 48 | } catch (error) { 49 | console.error(error) 50 | res.status(500).json({ message: error.message }) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /pages/api/solutions/pdf-query-soln.js: -------------------------------------------------------------------------------- 1 | import { PineconeClient } from "@pinecone-database/pinecone"; 2 | import { VectorDBQAChain } from "langchain/chains"; 3 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 4 | import { OpenAI } from "langchain/llms/openai"; 5 | import { PineconeStore } from "langchain/vectorstores/pinecone"; 6 | /** 7 | * 8 | * WARNING: THIS IS THE SOLUTION! Please try coding before viewing this. 
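* In short: connect to the existing Pinecone index, wrap it in a PineconeStore, and answer the query with a VectorDBQAChain that also returns source documents.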
9 | * 10 | */ 11 | 12 | // Example: https://js.langchain.com/docs/modules/indexes/document_loaders/examples/file_loaders/pdf 13 | export default async function handler(req, res) { 14 | try { 15 | if (req.method !== "POST") { 16 | throw new Error("Method not allowed"); 17 | } 18 | 19 | console.log("Query PDF"); 20 | 21 | // Grab the user prompt 22 | const { input } = req.body; 23 | 24 | if (!input) { 25 | throw new Error("No input"); 26 | } 27 | 28 | console.log("input received:", input); 29 | 30 | const client = new PineconeClient(); 31 | await client.init({ 32 | apiKey: process.env.PINECONE_API_KEY, 33 | environment: process.env.PINECONE_ENVIRONMENT, 34 | }); 35 | const pineconeIndex = client.Index(process.env.PINECONE_INDEX); 36 | 37 | const vectorStore = await PineconeStore.fromExistingIndex( 38 | new OpenAIEmbeddings(), 39 | { pineconeIndex } 40 | ); 41 | 42 | /* Part Two: Use as part of a chain (currently no metadata filters) */ 43 | 44 | const model = new OpenAI(); 45 | const chain = VectorDBQAChain.fromLLM(model, vectorStore, { 46 | k: 1, 47 | returnSourceDocuments: true, 48 | }); 49 | const response = await chain.call({ query: input }); 50 | 51 | console.log(response); 52 | 53 | return res.status(200).json({ result: response }); 54 | } catch (error) { 55 | console.error(error); 56 | res.status(500).json({ message: error.message }); 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /pages/api/solutions/pdf-upload-namespace-soln.js: -------------------------------------------------------------------------------- 1 | import { PDFLoader } from 'langchain/document_loaders/fs/pdf' 2 | import { PineconeClient } from '@pinecone-database/pinecone' 3 | import { Document } from 'langchain/document' 4 | import { OpenAIEmbeddings } from 'langchain/embeddings/openai' 5 | import { PineconeStore } from 'langchain/vectorstores/pinecone' 6 | import { CharacterTextSplitter } from 'langchain/text_splitter' 7 | 8 | export default async function handler(req, res) { 9 | if (req.method === 'GET') { 10 | console.log('Uploading book') 11 | /** STEP ONE: LOAD DOCUMENT */ 12 | const { bookId } = req.query 13 | const bookDb = { 14 | 101: 'c:/bitcoin.pdf', 15 | 102: 'c:/naval.pdf', 16 | } 17 | const bookPath = bookDb[bookId] 18 | const loader = new PDFLoader(bookPath) 19 | 20 | const docs = await loader.load() 21 | 22 | if (docs.length === 0) { 23 | console.log('No documents found.') 24 | return 25 | } 26 | 27 | const splitter = new CharacterTextSplitter({ 28 | separator: ' ', 29 | chunkSize: 250, 30 | chunkOverlap: 10, 31 | }) 32 | 33 | const splitDocs = await splitter.splitDocuments(docs) 34 | 35 | // Reduce the size of the metadata for each document -- lots of useless pdf information 36 | const reducedDocs = splitDocs.map((doc) => { 37 | const reducedMetadata = { ...doc.metadata } 38 | delete reducedMetadata.pdf // Remove the 'pdf' field 39 | return new Document({ 40 | pageContent: doc.pageContent, 41 | metadata: reducedMetadata, 42 | }) 43 | }) 44 | 45 | /** STEP TWO: UPLOAD TO DATABASE */ 46 | 47 | const client = new PineconeClient() 48 | 49 | await client.init({ 50 | apiKey: process.env.PINECONE_API_KEY, 51 | environment: process.env.PINECONE_ENVIRONMENT, 52 | }) 53 | 54 | const pineconeIndex = client.Index(process.env.PINECONE_INDEX) 55 | 56 | await PineconeStore.fromDocuments(reducedDocs, new OpenAIEmbeddings(), { 57 | pineconeIndex, 58 | namespace: bookId.toString(), 59 | }) 60 | 61 | console.log('Successfully uploaded to DB') 62 | // Modify output as 
needed 63 | return res.status(200).json({ 64 | result: `Uploaded to Pinecone! Before splitting: ${docs.length}, After splitting: ${splitDocs.length}`, 65 | }) 66 | } else { 67 | res.status(405).json({ message: 'Method not allowed' }) 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /pages/api/solutions/pdf-upload-soln.js: -------------------------------------------------------------------------------- 1 | import { PDFLoader } from "langchain/document_loaders/fs/pdf"; 2 | import { PineconeClient } from "@pinecone-database/pinecone"; 3 | import { Document } from "langchain/document"; 4 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 5 | import { PineconeStore } from "langchain/vectorstores/pinecone"; 6 | import { CharacterTextSplitter } from "langchain/text_splitter"; 7 | 8 | /** 9 | * 10 | * WARNING: THIS IS THE SOLUTION! Please try coding before viewing this. 11 | * 12 | */ 13 | 14 | // Example: https://js.langchain.com/docs/modules/indexes/document_loaders/examples/file_loaders/pdf 15 | 16 | /** 17 | * 18 | * INSTRUCTIONS 19 | * 1. Run with book 20 | error { 21 | name: 'PineconeError', 22 | source: 'server', 23 | message: 'PineconeClient: Error calling upsert: PineconeError: metadata size is 140052 bytes, which exceeds the limit of 40960 bytes per vector', 24 | stack: '' 25 | } 26 | * 2. Explain why -- the vector metadata size is too big. 27 | Language models are often limited by the amount of text that you can pass to them. Therefore, it is necessary to split documents up into smaller chunks. LangChain provides several utilities for doing so. 28 | https://js.langchain.com/docs/modules/indexes/text_splitters/ 29 | 30 | Play with chunk sizes... too small and each chunk loses the context needed to be understood. 31 | Fine-tune this to your liking. 32 | More vectors = more $$ 33 | 34 | 35 | 3. Pinecone size 1536 36 | https://platform.openai.com/docs/guides/embeddings/second-generation-models 37 | 38 | 4.
Upsert metadata size -- add this after split Docs 39 | 40 | // Reduce the size of the metadata for each document 41 | const reducedDocs = splitDocs.map(doc => { 42 | const reducedMetadata = { ...doc.metadata }; 43 | delete reducedMetadata.pdf; // Remove the 'pdf' field 44 | return new Document({ 45 | pageContent: doc.pageContent, 46 | metadata: reducedMetadata, 47 | }); 48 | }); 49 | 50 | 51 | 52 | 53 | 54 | * */ 55 | 56 | export default async function handler(req, res) { 57 | if (req.method === "GET") { 58 | console.log("Uploading book"); 59 | // Enter your code here 60 | /** STEP ONE: LOAD DOCUMENT */ 61 | const bookPath = 62 | "/Users/shawnesquivel/GitHub/yt-script-generator/data/document_loaders/naval-ravikant-book.pdf"; 63 | const loader = new PDFLoader(bookPath); 64 | 65 | const docs = await loader.load(); 66 | 67 | if (docs.length === 0) { 68 | console.log("No documents found."); 69 | return; 70 | } 71 | 72 | const splitter = new CharacterTextSplitter({ 73 | separator: " ", 74 | chunkSize: 250, 75 | chunkOverlap: 10, 76 | }); 77 | 78 | const splitDocs = await splitter.splitDocuments(docs); 79 | 80 | // Reduce the size of the metadata for each document -- lots of useless pdf information 81 | const reducedDocs = splitDocs.map((doc) => { 82 | const reducedMetadata = { ...doc.metadata }; 83 | delete reducedMetadata.pdf; // Remove the 'pdf' field 84 | return new Document({ 85 | pageContent: doc.pageContent, 86 | metadata: reducedMetadata, 87 | }); 88 | }); 89 | 90 | // docs.forEach((doc) => { 91 | // console.log(doc); 92 | // }); 93 | 94 | // console.log(`Uploading documents to Pinecone: ${docs}`); 95 | 96 | console.log(docs[100]); 97 | console.log(splitDocs[100].metadata); 98 | console.log(reducedDocs[100].metadata); 99 | 100 | /** STEP TWO: UPLOAD TO DATABASE */ 101 | 102 | const client = new PineconeClient(); 103 | 104 | await client.init({ 105 | apiKey: process.env.PINECONE_API_KEY, 106 | environment: process.env.PINECONE_ENVIRONMENT, 107 | }); 108 | 109 | const pineconeIndex = client.Index(process.env.PINECONE_INDEX); 110 | 111 | await PineconeStore.fromDocuments(reducedDocs, new OpenAIEmbeddings(), { 112 | pineconeIndex, 113 | }); 114 | 115 | console.log("Successfully uploaded to DB"); 116 | // Modify output as needed 117 | return res.status(200).json({ 118 | result: `Uploaded to Pinecone! Before splitting: ${docs.length}, After splitting: ${splitDocs.length}`, 119 | }); 120 | } else { 121 | res.status(405).json({ message: "Method not allowed" }); 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /pages/api/solutions/resume-query-metadata-soln.js: -------------------------------------------------------------------------------- 1 | /** 2 | * This endpoint is used to load the resumes into the chain, then upload them to the Pinecone database. 3 | * Tutorial: https://js.langchain.com/docs/modules/indexes/document_loaders/examples/file_loaders/directory 4 | * Summarization: https://js.langchain.com/docs/modules/chains/other_chains/summarization 5 | * Dependencies: npm install pdf-parse 6 | */ 7 | 8 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 9 | import { PineconeStore } from "langchain/vectorstores/pinecone"; 10 | import { PineconeClient } from "@pinecone-database/pinecone"; 11 | import { OpenAI } from "langchain/llms/openai"; 12 | import { VectorDBQAChain } from "langchain/chains"; 13 | import { PromptTemplate } from "langchain/prompts"; 14 | 15 | /** 16 | * 17 | * WARNING: THIS IS THE SOLUTION! 
Please try coding before viewing this. 18 | * 19 | */ 20 | 21 | export default async function handler(req, res) { 22 | try { 23 | // do stuff 24 | const { prompt } = req.body; 25 | 26 | /** Load vector database */ 27 | const client = new PineconeClient(); 28 | await client.init({ 29 | apiKey: process.env.PINECONE_API_KEY, 30 | environment: process.env.PINECONE_ENVIRONMENT, 31 | }); 32 | 33 | const pineconeIndex = client.Index(process.env.PINECONE_INDEX); 34 | 35 | const vectorStore = await PineconeStore.fromExistingIndex( 36 | new OpenAIEmbeddings(), 37 | { pineconeIndex } 38 | ); 39 | 40 | // Create Vector DBQA CHain 41 | const model = new OpenAI(); 42 | const chain = VectorDBQAChain.fromLLM(model, vectorStore, { 43 | k: 1, 44 | returnSourceDocuments: true, 45 | }); 46 | 47 | // Prompt Template 48 | const promptTemplate = new PromptTemplate({ 49 | template: `Assume you are a Human Resources Director. According to the resumes, answer this question: {question}`, 50 | inputVariables: ["question"], 51 | }); 52 | 53 | const formattedPrompt = await promptTemplate.format({ 54 | question: prompt, 55 | }); 56 | 57 | // console.log({ formattedPrompt }); 58 | 59 | const response = await chain.call({ 60 | query: formattedPrompt, 61 | }); 62 | 63 | console.log({ response }); 64 | 65 | return res.status(200).json({ 66 | // String 67 | output: response.text, 68 | // [Document, Document] 69 | sourceDocuments: response.sourceDocuments, 70 | }); 71 | } catch (err) { 72 | console.error(err); 73 | return res.status(500).json({ error: "Error" }); 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /pages/api/solutions/resume-upload-soln.js: -------------------------------------------------------------------------------- 1 | // /pages/api/resume_upload.js 2 | // Import dependencies 3 | 4 | /** 5 | * This endpoint is used to load the resumes into the chain, then upload them to the Pinecone database. 6 | * Tutorial: https://js.langchain.com/docs/modules/indexes/document_loaders/examples/file_loaders/directory 7 | * Summarization: https://js.langchain.com/docs/modules/chains/other_chains/summarization 8 | * Dependencies: npm install pdf-parse 9 | */ 10 | 11 | import { DirectoryLoader } from "langchain/document_loaders/fs/directory"; 12 | import { PDFLoader } from "langchain/document_loaders/fs/pdf"; 13 | import { CharacterTextSplitter } from "langchain/text_splitter"; 14 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 15 | import { PineconeStore } from "langchain/vectorstores/pinecone"; 16 | import { PineconeClient } from "@pinecone-database/pinecone"; 17 | import { loadSummarizationChain } from "langchain/chains"; 18 | import { OpenAI } from "langchain/llms/openai"; 19 | 20 | /** 21 | * 22 | * WARNING: THIS IS THE SOLUTION! Please try coding before viewing this. 
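* In short: load every PDF in the resumes folder, split the documents, replace the bulky PDF metadata with candidate names, summarize each resume with a map_reduce chain, and upsert the chunks to Pinecone.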
23 | * 24 | */ 25 | 26 | export default async function handler(req, res) { 27 | // Grab the prompt from the url (?prompt=[value]) 28 | // console.log(process.env.PINECONE_API_KEY); 29 | // console.log(process.env.PINECONE_ENVIRONMENT); 30 | // console.log(process.env.PINECONE_INDEX); 31 | // Always use a try catch block to do asynchronous requests and catch any errors 32 | try { 33 | // Load the directory 34 | const loader = new DirectoryLoader( 35 | "/Users/shawnesquivel/Desktop/openai-javascript-course/data/resumes", 36 | { 37 | ".pdf": (path) => new PDFLoader(path, "/pdf"), 38 | } 39 | ); 40 | 41 | const docs = await loader.load(); 42 | // 3 43 | // console.log(`Loaded ${docs.length}`); 44 | 45 | // Split the documents with their metadata 46 | const splitter = new CharacterTextSplitter({ 47 | separator: " ", 48 | chunkSize: 200, 49 | chunkOverlap: 20, 50 | }); 51 | 52 | // 53 | const splitDocs = await splitter.splitDocuments(docs); 54 | 55 | // console.log(`Split Docs: ${splitDocs.length}`); 56 | 57 | // console.log(docs[0]); 58 | // console.log(splitDocs[0]); 59 | 60 | // reduce the metadata and make it more searchable 61 | const reducedDocs = splitDocs.map((doc) => { 62 | // ["Users", "shawnesquivel", ... "resume_aubrey_graham.pdf"] 63 | const fileName = doc.metadata.source.split("/").pop(); 64 | // ["resume", "aubrey", "graham.pdf"] 65 | const [_, firstName, lastName] = fileName.split("_"); 66 | 67 | return { 68 | ...doc, 69 | metadata: { 70 | first_name: firstName, 71 | last_name: lastName.slice(0, -4), 72 | docType: "resume", 73 | }, 74 | }; 75 | }); 76 | 77 | // console.log(reducedDocs[4]); 78 | let summaries = []; 79 | const model = new OpenAI({ temperature: 0 }); 80 | const summarizeAllChain = loadSummarizationChain(model, { 81 | type: "map_reduce", 82 | }); 83 | 84 | // raw documents 85 | const summarizeRes = await summarizeAllChain.call({ 86 | input_documents: docs, 87 | }); 88 | summaries.push({ summary: summarizeRes.text }); 89 | 90 | /** Summarize each candidate */ 91 | for (let doc of docs) { 92 | const summarizeOneChain = loadSummarizationChain(model, { 93 | type: "map_reduce", 94 | }); 95 | const summarizeOneRes = await summarizeOneChain.call({ 96 | input_documents: [doc], 97 | }); 98 | 99 | console.log({ summarizeOneRes }); 100 | summaries.push({ summary: summarizeOneRes.text }); 101 | } 102 | 103 | /** Upload the reducedDocs */ 104 | const client = new PineconeClient(); 105 | await client.init({ 106 | apiKey: process.env.PINECONE_API_KEY, 107 | environment: process.env.PINECONE_ENVIRONMENT, 108 | }); 109 | 110 | const pineconeIndex = client.Index(process.env.PINECONE_INDEX); 111 | 112 | await PineconeStore.fromDocuments(reducedDocs, new OpenAIEmbeddings(), { 113 | pineconeIndex, 114 | }); 115 | 116 | console.log("Uploaded to Pinecone"); 117 | 118 | console.log({ summaries }); 119 | // [{summary: 'gdajkljgadkl'}, {summary: 'gdjaklgkadl'}] 120 | const summaryStr = JSON.stringify(summaries, null, 2); 121 | 122 | return res.status(200).json({ output: summaryStr }); 123 | } catch (err) { 124 | // If we have an error 125 | 126 | console.error(err); 127 | return res.status(500).json({ error: err }); 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /pages/api/solutions/streaming-soln.js: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain/llms/openai"; 2 | import SSE from "express-sse"; 3 | 4 | /** 5 | * 6 | * WARNING: THIS IS THE SOLUTION! 
Please try coding before viewing this. 7 | * 8 | */ 9 | 10 | const sse = new SSE(); 11 | 12 | export default function handler(req, res) { 13 | if (req.method === "POST") { 14 | const { input } = req.body; 15 | 16 | if (!input) { 17 | throw new Error("No input"); 18 | } 19 | // Initialize model 20 | const chat = new OpenAI({ 21 | streaming: true, 22 | callbacks: [ 23 | { 24 | handleLLMNewToken(token) { 25 | sse.send(token, "newToken"); 26 | }, 27 | }, 28 | ], 29 | }); 30 | 31 | // create the prompt 32 | const prompt = `Create me a short rap about my name and city. Make it funny and punny. Name: ${input}`; 33 | 34 | console.log({ prompt }); 35 | // call frontend to backend 36 | chat.call(prompt).then(() => { 37 | sse.send(null, "end"); 38 | }); 39 | 40 | return res.status(200).json({ result: "Streaming complete" }); 41 | } else if (req.method === "GET") { 42 | sse.init(req, res); 43 | } else { 44 | res.status(405).json({ message: "Method not allowed" }); 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /pages/api/solutions/video-chat-soln.js: -------------------------------------------------------------------------------- 1 | // /pages/api/transcript.js 2 | import { YoutubeTranscript } from "youtube-transcript"; 3 | import { ChatOpenAI } from "langchain/chat_models/openai"; 4 | import { ConversationalRetrievalQAChain } from "langchain/chains"; 5 | import { HNSWLib } from "langchain/vectorstores/hnswlib"; 6 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 7 | import { CharacterTextSplitter } from "langchain/text_splitter"; 8 | 9 | /** 10 | * 11 | * WARNING: THIS IS THE SOLUTION! Please try coding before viewing this. 12 | * 13 | */ 14 | 15 | // First, we'll initialize the chain and the chat history so that they can be preserved on multiple calls to the API 16 | let chain; 17 | // Remember, the chat history is where we store each human/chatbot message. 18 | let chatHistory = []; 19 | 20 | // DO THIS SECOND 21 | const initializeChain = async (initialPrompt, transcript) => { 22 | try { 23 | // Initialize model with GPT-3.5 24 | const model = new ChatOpenAI({ 25 | temperature: 0.8, 26 | modelName: "gpt-3.5-turbo", 27 | }); 28 | // Create a text splitter, we use a smaller chunk size and chunk overlap since we are working with small sentences 29 | const splitter = new CharacterTextSplitter({ 30 | separator: " ", 31 | chunkSize: 7, 32 | chunkOverlap: 3, 33 | }); 34 | 35 | // Using the splitter, we create documents from a bigger document, in this case the YouTube Transcript 36 | const docs = await splitter.createDocuments([transcript]); 37 | 38 | console.log(`Loading data ${docs[0]}`); 39 | 40 | // Upload chunks to database as documents 41 | // We'll be using HNSWLib for this one. 42 | // The nice thing about this one is that we don't need to create any accounts or get any API keys besides our OpenAI key to use this library 43 | // So I find that it's nice for doing some quick prototyping. 44 | // But the downside is that you don't get the nice dashboard like we had in Pinecone. 45 | const vectorStore = await HNSWLib.fromDocuments( 46 | [{ pageContent: transcript }], 47 | new OpenAIEmbeddings() 48 | ); 49 | 50 | // Just to show you, we'll also save the vector store as a file in case you want to retrieve it later. 
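// (Persisting to disk is optional -- the in-memory vectorStore above is enough for a single session.)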
51 | // We'll copy our root directory and save it as a variable 52 | const directory = "/Users/shawnesquivel/GitHub/yt-script-generator/"; 53 | await vectorStore.save(directory); 54 | // It will create some files for us, including a way for us to view the vector store documents, which is helpful. 55 | // Then you can access it like this: 56 | const loadedVectorStore = await HNSWLib.load( 57 | directory, 58 | new OpenAIEmbeddings() 59 | ); 60 | 61 | // The ConversationalRetrievalQA chain builds on RetrievalQAChain to provide a chat history component. 62 | 63 | // To create one, you will need a retriever. In the below example, we will create one from a vectorstore, which can be created from embeddings. 64 | 65 | // Remember we can use the loadedVectorStore or the vectorStore, in case for example you want to scale this application up and use the same vector store to store multiple YouTube transcripts. 66 | chain = ConversationalRetrievalQAChain.fromLLM( 67 | model, 68 | vectorStore.asRetriever(), 69 | { verbose: true } // Add verbose option here 70 | ); 71 | 72 | // It requires two inputs: a question and the chat history. It first combines the chat history and the question into a standalone question, then looks up relevant documents from the retriever, and then passes those documents and the question to a question answering chain to return a response. 73 | const response = await chain.call({ 74 | question: initialPrompt, 75 | chat_history: chatHistory, 76 | }); 77 | 78 | // Update history 79 | chatHistory.push({ 80 | role: "assistant", 81 | content: response.text, 82 | }); 83 | 84 | console.log({ chatHistory }); 85 | return response; 86 | } catch (error) { 87 | console.error(error); 88 | } 89 | }; 90 | 91 | export default async function handler(req, res) { 92 | if (req.method === "POST") { 93 | // DO THIS FIRST 94 | // First we'll destructure the prompt and firstMsg from the POST request body 95 | const { prompt } = req.body; 96 | const { firstMsg } = req.body; 97 | 98 | // Then if it's the first message, we want to initialize the chain, since it doesn't exist yet 99 | if (firstMsg) { 100 | console.log("Initializing chain"); 101 | 102 | try { 103 | // So first of all, we want to give it our human message, which was to ask for a summary of the YouTube URL 104 | const initialPrompt = `Give me a summary of the transcript: ${prompt}`; 105 | 106 | chatHistory.push({ 107 | role: "user", 108 | content: initialPrompt, 109 | }); 110 | 111 | // Here, we'll use a generic YouTube Transcript API to get the transcript of a YouTube video 112 | // As you can see, the Transcript takes videoId/videoURL as the first argument to the function 113 | const transcriptResponse = await YoutubeTranscript.fetchTranscript( 114 | prompt 115 | ); 116 | 117 | // and we'll just add some error handling in case the API fails 118 | if (!transcriptResponse) { 119 | return res.status(400).json({ error: "Failed to get transcript" }); 120 | } 121 | 122 | // Now let's see what that transcriptResponse looks like 123 | 124 | console.log({ transcriptResponse }); 125 | 126 | // We can see that it's a big array of lines. Let's squish it down into one string first to make it easier to use. 127 | 128 | // We initialize the transcript string 129 | let transcript = ""; 130 | 131 | // Then the forEach method calls each element in the array, e.g.
line = element, and we can do something with that value 132 | 133 | // in this case, we'll add each line of text to the empty string variable to get a single string with the entire transcript 134 | transcriptResponse.forEach((line) => { 135 | transcript += line.text; 136 | }); 137 | 138 | // Now, let's create a separate function called initializeChain 139 | // We'll pass in the first prompt and the context, in this case the transcript 140 | const response = await initializeChain(initialPrompt, transcript); 141 | console.log("Chain:", chain); 142 | console.log(response); 143 | 144 | // And then we'll just get the response back and the chatHistory 145 | return res.status(200).json({ output: response, chatHistory }); 146 | } catch (err) { 147 | console.error(err); 148 | return res 149 | .status(500) 150 | .json({ error: "An error occurred while fetching transcript" }); 151 | } 152 | 153 | // DO THIS THIRD 154 | } else { 155 | // If it's not the first message, we can chat with the bot 156 | console.log("Received question"); 157 | try { 158 | console.log("Asking:", prompt); 159 | console.log("Chain:", chain); 160 | 161 | // First we'll add the user message 162 | chatHistory.push({ 163 | role: "user", 164 | content: prompt, 165 | }); 166 | // Then we'll pass the entire chat history with all the previous messages back 167 | const response = await chain.call({ 168 | question: prompt, 169 | chat_history: chatHistory, 170 | }); 171 | // And we'll add the response back as well 172 | chatHistory.push({ 173 | role: "assistant", 174 | content: response.text, 175 | }); 176 | 177 | return res.status(200).json({ output: response, chatHistory }); 178 | } catch (error) { 179 | // Generic error handling 180 | console.error(error); 181 | res 182 | .status(500) 183 | .json({ error: "An error occurred during the conversation."
}); 184 | } 185 | } 186 | } 187 | } 188 | -------------------------------------------------------------------------------- /pages/api/streaming.js: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain/llms/openai"; 2 | import SSE from "express-sse"; 3 | 4 | const sse = new SSE(); 5 | 6 | export default function handler(req, res) { 7 | if (req.method === "POST") { 8 | const { input } = req.body; 9 | 10 | if (!input) { 11 | throw new Error("No input"); 12 | } 13 | // Initialize model 14 | 15 | // create the prompt 16 | 17 | // call frontend to backend 18 | 19 | return res.status(200).json({ result: "OK" }); 20 | } else if (req.method === "GET") { 21 | sse.init(req, res); 22 | } else { 23 | res.status(405).json({ message: "Method not allowed" }); 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /pages/api/video-chat.js: -------------------------------------------------------------------------------- 1 | // /pages/api/transcript.js 2 | import { YoutubeTranscript } from "youtube-transcript"; 3 | import { ChatOpenAI } from "langchain/chat_models/openai"; 4 | import { ConversationalRetrievalQAChain } from "langchain/chains"; 5 | import { HNSWLib } from "langchain/vectorstores/hnswlib"; 6 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 7 | import { CharacterTextSplitter } from "langchain/text_splitter"; 8 | import { OpenAI } from "langchain"; 9 | 10 | // Global variables 11 | 12 | // DO THIS SECOND 13 | const initializeChain = async (initialPrompt, transcript) => { 14 | try { 15 | console.log({ chatHistory }); 16 | return response; 17 | } catch (error) { 18 | console.error(error); 19 | } 20 | }; 21 | 22 | export default async function handler(req, res) { 23 | if (req.method === "POST") { 24 | // DO THIS FIRST 25 | 26 | // Then if it's the first message, we want to initialize the chain, since it doesn't exist yet 27 | if (x) { 28 | try { 29 | // And then we'll jsut get the response back and the chatHistory 30 | return res.status(200).json({ output: response, chatHistory }); 31 | } catch (err) { 32 | console.error(err); 33 | return res 34 | .status(500) 35 | .json({ error: "An error occurred while fetching transcript" }); 36 | } 37 | 38 | // do this third! 39 | } else { 40 | // If it's not the first message, we can chat with the bot 41 | 42 | try { 43 | return res.status(200).json({ output: response, chatHistory }); 44 | } catch (error) { 45 | // Generic error handling 46 | console.error(error); 47 | res 48 | .status(500) 49 | .json({ error: "An error occurred during the conversation." }); 50 | } 51 | } 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /playground/quickstart-soln.mjs: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain/llms/openai"; 2 | import { ChatOpenAI } from "langchain/chat_models/openai"; 3 | import { PromptTemplate } from "langchain/prompts"; 4 | import { LLMChain } from "langchain/chains"; 5 | import { initializeAgentExecutorWithOptions } from "langchain/agents"; 6 | import { SerpAPI } from "langchain/tools"; 7 | import { Calculator } from "langchain/tools/calculator"; 8 | import { BufferMemory } from "langchain/memory"; 9 | import { ConversationChain } from "langchain/chains"; 10 | import { PlanAndExecuteAgentExecutor } from "langchain/experimental/plan_and_execute"; 11 | 12 | /** 13 | * 14 | * WARNING: THIS IS THE SOLUTION! 
Please try coding before viewing this. 15 | * 16 | */ 17 | 18 | // First, run this in your terminal: 19 | 20 | // export OPENAI_API_KEY=sk-12345 21 | // export SERPAPI_API_KEY=<your-serpapi-key> 22 | // Replace the placeholder values with your own API keys 23 | 24 | // https://platform.openai.com/account/api-keys 25 | 26 | /** 27 | * 28 | * Prompt Templates: Manage Prompts for LLMs 29 | 30 | 31 | When you make a call to ChatGPT, the LLM does not just see your message as-is. 32 | 33 | Your message is deconstructed and then fed into a template, so that the chatbot can better help you. 34 | 35 | They take your message and feed it into a template, like this: 36 | 37 | const messageTemplate = `You've been speaking with a user, and they just said: "${userMessage}". How would you respond?`; 38 | 39 | Langchain has already pre-defined some templates to make this SUPER easy! 40 | * 41 | * 42 | */ 43 | 44 | const template = 45 | "Please give me some ideas for content I should write about regarding {topic}? The content is for {socialplatform}. Translate to {language}."; 46 | const prompt = new PromptTemplate({ 47 | template: template, 48 | inputVariables: ["topic", "socialplatform", "language"], 49 | }); 50 | 51 | // This allows us to format the template into a string, which is finally passed to the LLM 52 | // const formattedTemplate = await prompt.format({ 53 | // topic: "artificial intelligence", 54 | // socialplatform: "twitter", 55 | // language: "spanish", 56 | // }); 57 | // console.log(formattedTemplate); 58 | 59 | /** 60 | * 61 | * To properly use the LLM, we generate chains. 62 | * 63 | * Chains are "chains" of complex tasks, linked together. Hence the name Langchain - it chains large language model tasks! 64 | * 65 | * The first "task" we need to put together combines a Prompt with a call to the OpenAI model 66 | * 67 | * This is the simplest task since it's basically only two steps. 68 | * 69 | * 70 | */ 71 | 72 | const model = new OpenAI({ temperature: 0.9 }); 73 | const chain = new LLMChain({ llm: model, prompt: prompt }); 74 | 75 | // Now that we've defined the chain, we can call the LLMChain, which does two steps: 76 | 77 | // First it properly formats the prompt according to the user input variables 78 | 79 | // Then it makes the call to OpenAI's API! 80 | // const resChain = await chain.call({ 81 | // topic: "artificial intelligence", 82 | // socialplatform: "twitter", 83 | // language: "english", 84 | // }); 85 | 86 | // console.log({ resChain }); 87 | 88 | /** 89 | * 90 | * The second big topic in Langchain is Agents. 91 | * 92 | * We'll cover this in detail in Module 5: AI Content Generator! 93 | * 94 | * 95 | * The biggest difference between a Chain and an Agent? 96 | * 97 | * A chain must be predefined (like configuring a robot) 98 | * 99 | * An agent is given a task and tools, then it figures out how to do the job. 100 | * 101 | * 102 | * E.g. if we want to do research on the internet, a chain will be set up like this: 103 | * 104 | * Chain: First, search the internet using the query. Then summarize it for me. 105 | * 106 | * Agent: First, we give it tools to search the internet. Then we ask it - "Who is Pedro Pascal?" 107 | * 108 | * Then the agent will check its toolbox, figure out how it can get the job done, and do the steps in order. 109 | * 110 | * For example, let's ask ChatGPT what Langchain is.
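* (ChatGPT's training data predates Langchain, so on its own it likely can't answer; that gap is exactly why the agent below is handed a search tool.)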
111 | * 112 | * 113 | * 114 | */ 115 | // 0 = deterministic, 1 = creative 116 | // https://platform.openai.com/docs/models/ 117 | // gpt-3.5-turbo = fast, 1/10th the cost of davinci 118 | // davinci - default 119 | const agentModel = new OpenAI({ 120 | temperature: 0, 121 | modelName: "text-davinci-003", 122 | }); 123 | 124 | // serpTool.returnDirect = true; 125 | 126 | const tools = [ 127 | new SerpAPI(process.env.SERPAPI_API_KEY, { 128 | location: "Dallas,Texas,United States", 129 | hl: "en", 130 | gl: "us", 131 | }), 132 | new Calculator(), 133 | ]; 134 | 135 | // const executor = await initializeAgentExecutorWithOptions(tools, agentModel, { 136 | // agentType: "zero-shot-react-description", 137 | // verbose: true, 138 | // maxIterations: 5, 139 | // }); 140 | console.log("Loaded agent."); 141 | const input = "What is Langchain?"; 142 | 143 | console.log(`Executing with input "${input}"...`); 144 | // Awesome, so we can see it figured out that it needed to use a search engine. 145 | // const result = await executor.call({ input }); 146 | 147 | // console.log(`Got output ${result.output}`); 148 | 149 | /** 150 | * 151 | * Plan and Execute Agents 152 | * 153 | * Unlike the regular agents, which just evaluate their tools and then act, Plan and Execute works a little differently. 154 | * 155 | * 156 | * This example shows how to use an agent that uses the Plan-and-Execute framework to answer a query. This framework works differently from the other currently supported agents (which are all classified as Action Agents) in that it uses a two-step process: 157 | 158 | First, the agent uses an LLM to create a plan to answer the query with clear steps. 159 | 160 | Once it has a plan, it uses an embedded traditional Action Agent to solve each step. 161 | 162 | The idea is that the planning step keeps the LLM more "on track" by breaking up a larger task into simpler subtasks. However, this method requires more individual LLM queries and has higher latency compared to Action Agents. 163 | 164 | * 165 | * 166 | * 167 | */ 168 | 169 | const agentTools = [new Calculator(), new SerpAPI()]; 170 | // only works with Chat models 171 | const chatModel = new ChatOpenAI({ 172 | temperature: 0, 173 | modelName: "gpt-3.5-turbo", 174 | verbose: true, 175 | }); 176 | const executor = PlanAndExecuteAgentExecutor.fromLLMAndTools({ 177 | llm: chatModel, 178 | tools: agentTools, 179 | }); 180 | 181 | const result = await executor.call({ 182 | input: `Who is the current president of the United States? What is their current age raised to the second power?`, 183 | }); 184 | 185 | console.log({ result }); 186 | 187 | // const llm = new OpenAI({}); 188 | // const memory = new BufferMemory(); 189 | // const conversationChain = new ConversationChain({ llm: llm, memory: memory }); 190 | // const res1 = await conversationChain.call({ 191 | // input: "Hey.
The president of the US is currently Lebron James.", 192 | // }); 193 | // console.log(res1); 194 | 195 | // const res2 = await conversationChain.call({ 196 | // input: "Who is the president of the US?", 197 | // }); 198 | // console.log(res2); 199 | -------------------------------------------------------------------------------- /playground/quickstart.mjs: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain/llms/openai"; 2 | import { ChatOpenAI } from "langchain/chat_models/openai"; 3 | import { PromptTemplate } from "langchain/prompts"; 4 | import { LLMChain } from "langchain/chains"; 5 | import { initializeAgentExecutorWithOptions } from "langchain/agents"; 6 | import { SerpAPI } from "langchain/tools"; 7 | import { Calculator } from "langchain/tools/calculator"; 8 | import { BufferMemory } from "langchain/memory"; 9 | import { ConversationChain } from "langchain/chains"; 10 | import { PlanAndExecuteAgentExecutor } from "langchain/experimental/plan_and_execute"; 11 | import { exec } from "child_process"; 12 | 13 | // export OPENAI_API_KEY=<> 14 | // export SERPAPI_API_KEY=<> 15 | // Replace with your API keys! 16 | 17 | // to run, go to terminal and enter: cd playground 18 | // then enter: node quickstart.mjs 19 | console.log("Welcome to the LangChain Quickstart Module!"); 20 | -------------------------------------------------------------------------------- /postcss.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | plugins: { 3 | tailwindcss: {}, 4 | autoprefixer: {}, 5 | }, 6 | } 7 | -------------------------------------------------------------------------------- /public/assets/images/brain.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/public/assets/images/brain.png -------------------------------------------------------------------------------- /public/assets/images/chatbot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/public/assets/images/chatbot.png -------------------------------------------------------------------------------- /public/assets/images/green-square.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/public/assets/images/green-square.png -------------------------------------------------------------------------------- /public/assets/images/pdf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/public/assets/images/pdf.png -------------------------------------------------------------------------------- /public/assets/images/robohr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/public/assets/images/robohr.png -------------------------------------------------------------------------------- /public/assets/images/stream.png:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/public/assets/images/stream.png -------------------------------------------------------------------------------- /public/assets/images/tools.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/public/assets/images/tools.png -------------------------------------------------------------------------------- /public/assets/images/wizard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/public/assets/images/wizard.png -------------------------------------------------------------------------------- /public/assets/images/youtube.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/public/assets/images/youtube.png -------------------------------------------------------------------------------- /public/hamburger.svg: -------------------------------------------------------------------------------- 1 | <svg width="20" height="20" viewBox="0 0 20 20" fill="none" xmlns="http://www.w3.org/2000/svg"> 2 | <path d="M18.3334 9.1665C18.3334 5.02442 14.6025 1.6665 10 1.6665C5.39752 1.6665 1.66669 5.02442 1.66669 9.1665H18.3334Z" stroke="#181818" stroke-linecap="round" stroke-linejoin="round"/> 3 | <path d="M1.66669 11.6665L3.9396 13.3332L6.9696 11.6665L10 13.3332L13.0304 11.6665L16.0604 13.3332L18.3334 11.6665M1.66669 15.8332H18.3334V18.3332H1.66669V15.8332Z" stroke="#181818" stroke-linecap="round" stroke-linejoin="round"/> 4 | </svg> 5 | -------------------------------------------------------------------------------- /public/next.svg: -------------------------------------------------------------------------------- 1 | <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 394 80"><path fill="#000" d="M262 0h68.5v12.7h-27.2v66.6h-13.6V12.7H262V0ZM149 0v12.7H94v20.4h44.3v12.6H94v21h55v12.6H80.5V0h68.7zm34.3 0h-17.8l63.8 79.4h17.9l-32-39.7 32-39.6h-17.9l-23 28.6-23-28.6zm18.3 56.7-9-11-27.1 33.7h17.8l18.3-22.7z"/><path fill="#000" d="M81 79.3 17 0H0v79.3h13.6V17l50.2 62.3H81Zm252.6-.4c-1 0-1.8-.4-2.5-1s-1.1-1.6-1.1-2.6.3-1.8 1-2.5 1.6-1 2.6-1 1.8.3 2.5 1a3.4 3.4 0 0 1 .6 4.3 3.7 3.7 0 0 1-3 1.8zm23.2-33.5h6v23.3c0 2.1-.4 4-1.3 5.5a9.1 9.1 0 0 1-3.8 3.5c-1.6.8-3.5 1.3-5.7 1.3-2 0-3.7-.4-5.3-1s-2.8-1.8-3.7-3.2c-.9-1.3-1.4-3-1.4-5h6c.1.8.3 1.6.7 2.2s1 1.2 1.6 1.5c.7.4 1.5.5 2.4.5 1 0 1.8-.2 2.4-.6a4 4 0 0 0 1.6-1.8c.3-.8.5-1.8.5-3V45.5zm30.9 9.1a4.4 4.4 0 0 0-2-3.3 7.5 7.5 0 0 0-4.3-1.1c-1.3 0-2.4.2-3.3.5-.9.4-1.6 1-2 1.6a3.5 3.5 0 0 0-.3 4c.3.5.7.9 1.3 1.2l1.8 1 2 .5 3.2.8c1.3.3 2.5.7 3.7 1.2a13 13 0 0 1 3.2 1.8 8.1 8.1 0 0 1 3 6.5c0 2-.5 3.7-1.5 5.1a10 10 0 0 1-4.4 3.5c-1.8.8-4.1 1.2-6.8 1.2-2.6 0-4.9-.4-6.8-1.2-2-.8-3.4-2-4.5-3.5a10 10 0 0 1-1.7-5.6h6a5 5 0 0 0 3.5 4.6c1 .4 2.2.6 3.4.6 1.3 0 2.5-.2 3.5-.6 1-.4 1.8-1 2.4-1.7a4 4 0 0 0 .8-2.4c0-.9-.2-1.6-.7-2.2a11 11 0 0 0-2.1-1.4l-3.2-1-3.8-1c-2.8-.7-5-1.7-6.6-3.2a7.2 7.2 0 0 1-2.4-5.7 8 8 0 0 1 1.7-5 10 10 0 0 1 4.3-3.5c2-.8 4-1.2 6.4-1.2 2.3 0 4.4.4 6.2 1.2 1.8.8 3.2 2 4.3 3.4 1 1.4 1.5 3 1.5 5h-5.8z"/></svg> 
-------------------------------------------------------------------------------- /public/vercel.svg: -------------------------------------------------------------------------------- 1 | <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 283 64"><path fill="black" d="M141 16c-11 0-19 7-19 18s9 18 20 18c7 0 13-3 16-7l-7-5c-2 3-6 4-9 4-5 0-9-3-10-7h28v-3c0-11-8-18-19-18zm-9 15c1-4 4-7 9-7s8 3 9 7h-18zm117-15c-11 0-19 7-19 18s9 18 20 18c6 0 12-3 16-7l-8-5c-2 3-5 4-8 4-5 0-9-3-11-7h28l1-3c0-11-8-18-19-18zm-10 15c2-4 5-7 10-7s8 3 9 7h-19zm-39 3c0 6 4 10 10 10 4 0 7-2 9-5l8 5c-3 5-9 8-17 8-11 0-19-7-19-18s8-18 19-18c8 0 14 3 17 8l-8 5c-2-3-5-5-9-5-6 0-10 4-10 10zm83-29v46h-9V5h9zM37 0l37 64H0L37 0zm92 5-27 48L74 5h10l18 30 17-30h10zm59 12v10l-3-1c-6 0-10 4-10 10v15h-9V17h9v9c0-5 6-9 13-9z"/></svg> -------------------------------------------------------------------------------- /tailwind.config.js: -------------------------------------------------------------------------------- 1 | /** @type {import('tailwindcss').Config} */ 2 | const { fontFamily } = require("tailwindcss/defaultTheme"); 3 | module.exports = { 4 | content: [ 5 | "./pages/**/*.{js,ts,jsx,tsx,mdx}", 6 | "./components/**/*.{js,ts,jsx,tsx,mdx}", 7 | "./app/**/*.{js,ts,jsx,tsx,mdx}", 8 | ], 9 | theme: { 10 | extend: { 11 | backgroundImage: { 12 | "gradient-radial": "radial-gradient(var(--tw-gradient-stops))", 13 | "gradient-conic": 14 | "conic-gradient(from 180deg at 50% 50%, var(--tw-gradient-stops))", 15 | }, 16 | fontFamily: { 17 | heading: ["var(--font-press-start)", ...fontFamily.sans], 18 | }, 19 | fontSize: { 20 | 10: "10px", // Add this line 21 | }, 22 | }, 23 | }, 24 | plugins: [], 25 | }; 26 | -------------------------------------------------------------------------------- /tools/SerpAPI.js: -------------------------------------------------------------------------------- 1 | import { SerpAPI } from "langchain/tools"; 2 | 3 | const SerpAPITool = () => {}; 4 | 5 | export default SerpAPITool; 6 | -------------------------------------------------------------------------------- /tools/WebBrowser.js: -------------------------------------------------------------------------------- 1 | import { WebBrowser } from "langchain/tools/webbrowser"; 2 | import { ChatOpenAI } from "langchain/chat_models/openai"; 3 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 4 | 5 | const WebBrowserTool = () => { 6 | // do stuff! 7 | }; 8 | 9 | export default WebBrowserTool; 10 | -------------------------------------------------------------------------------- /tools/solutions/SerpAPI-soln.js: -------------------------------------------------------------------------------- 1 | import { SerpAPI } from "langchain/tools"; 2 | 3 | /** 4 | * 5 | * WARNING: THIS IS THE SOLUTION! Please try coding before viewing this. 
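* (This solution pins the search to a fixed location and sets returnDirect = true, so the agent hands the raw search result back as its final answer instead of reasoning over it further.)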
6 | * 7 | */ 8 | 9 | const SerpAPITool = () => { 10 | const serpAPI = new SerpAPI(process.env.SERPAPI_API_KEY, { 11 | baseUrl: "http://localhost:3000/agents", 12 | location: "Vancouver,British Columbia, Canada", 13 | hl: "en", 14 | gl: "us", 15 | }); 16 | serpAPI.returnDirect = true; 17 | 18 | return serpAPI; 19 | }; 20 | 21 | export default SerpAPITool; 22 | -------------------------------------------------------------------------------- /tools/solutions/WebBrowser-soln.js: -------------------------------------------------------------------------------- 1 | import { WebBrowser } from "langchain/tools/webbrowser"; 2 | import { ChatOpenAI } from "langchain/chat_models/openai"; 3 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 4 | 5 | /** 6 | * 7 | * WARNING: THIS IS THE SOLUTION! Please try coding before viewing this. 8 | * 9 | */ 10 | const WebBrowserTool = () => { 11 | const model = new ChatOpenAI({ temperature: 0 }); 12 | const embeddings = new OpenAIEmbeddings({}); 13 | 14 | const browser = new WebBrowser({ model, embeddings }); 15 | browser.returnDirect = true; 16 | 17 | return browser; 18 | }; 19 | 20 | export default WebBrowserTool; 21 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "lib": [ 4 | "dom", 5 | "dom.iterable", 6 | "esnext" 7 | ], 8 | "allowJs": true, 9 | "skipLibCheck": true, 10 | "strict": false, 11 | "forceConsistentCasingInFileNames": true, 12 | "noEmit": true, 13 | "incremental": true, 14 | "esModuleInterop": true, 15 | "module": "esnext", 16 | "moduleResolution": "node", 17 | "resolveJsonModule": true, 18 | "isolatedModules": true, 19 | "jsx": "preserve", 20 | "plugins": [ 21 | { 22 | "name": "next" 23 | } 24 | ], 25 | "strictNullChecks": true, 26 | "baseUrl": ".", 27 | "paths": { 28 | "@/utils/*": ["utils/*"] 29 | } 30 | }, 31 | "include": [ 32 | "next-env.d.ts", 33 | ".next/types/**/*.ts", 34 | "**/*.ts", 35 | "**/*.tsx" 36 | ], 37 | "exclude": [ 38 | "node_modules" 39 | ] 40 | } 41 | -------------------------------------------------------------------------------- /utils/extractVideoId.js: -------------------------------------------------------------------------------- 1 | export default function extractVideoId(url) { 2 | // do stuff 3 | } 4 | -------------------------------------------------------------------------------- /utils/getVideoMetaData.js: -------------------------------------------------------------------------------- 1 | import axios from "axios"; 2 | 3 | export default async function getVideoMetaData(videoId) { 4 | // enable api key and setup next.config.js 5 | const url = `https://www.googleapis.com/youtube/v3/videos?id=${videoId}&key=${process.env.GOOGLE_API_KEY}&part=snippet,contentDetails,statistics,status`; 6 | 7 | try { 8 | // { data: {items: [metadata]}} 9 | // Clean up the response 10 | } catch (err) { 11 | console.error(`Failed to get metadata: ${err}`); 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /utils/solutions/extractVideoId-soln.js: -------------------------------------------------------------------------------- 1 | export default function extractVideoId(url) { 2 | const urlParams = new URLSearchParams(new URL(url).search); 3 | return urlParams.get("v"); 4 | } 5 | -------------------------------------------------------------------------------- /utils/solutions/getVideoMetadata-soln.js: 
-------------------------------------------------------------------------------- 1 | import axios from "axios"; 2 | 3 | export default async function getVideoMetaData(videoId) { 4 | // First, we need to make sure we have our GOOGLE_API_KEY set up 5 | // https://console.cloud.google.com/apis/ 6 | // Look up the YouTube Data API V3 7 | // Enable API key 8 | // Copy into .env as GOOGLE_API_KEY 9 | // Configure next.config.js 10 | const url = `https://www.googleapis.com/youtube/v3/videos?id=${videoId}&key=${process.env.GOOGLE_API_KEY}&part=snippet,contentDetails,statistics,status`; 11 | 12 | try { 13 | // HTTP request { data: {items: [metadata]}} 14 | const response = await axios.get(url); 15 | const data = response.data; 16 | const metadata = data.items[0]; // the first item should be the video if the id is valid 17 | 18 | console.log("GetMetadata", { metadata }); 19 | 20 | // Clean up the response 21 | const videoTitle = metadata.snippet.title; 22 | const videoDescription = metadata.snippet.description; 23 | const shortenedDescription = videoDescription.split(".")[0]; 24 | 25 | // Note: the videoId parameter is already in scope, so we don't redeclare it here 26 | // Create a small metadata object to return 27 | const shortMetadata = { 28 | videoTitle, 29 | videoDescription: shortenedDescription, 30 | videoId: metadata.id, 31 | }; 32 | return shortMetadata; 33 | } catch (error) { 34 | console.error(`Failed to fetch video metadata: ${error}`); 35 | } 36 | } 37 | --------------------------------------------------------------------------------
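A quick way to smoke-test the two video utilities above end to end. This is a minimal sketch, not part of the repo: the file name `playground/video-demo.mjs` is hypothetical, the relative import paths assume it lives in `playground/`, and it expects `GOOGLE_API_KEY` to be exported first (see the setup comments in getVideoMetadata-soln.js).

```
// playground/video-demo.mjs (hypothetical demo file, not part of the repo)
// Run with: node playground/video-demo.mjs
import extractVideoId from "../utils/solutions/extractVideoId-soln.js";
import getVideoMetaData from "../utils/solutions/getVideoMetadata-soln.js";

// Any standard watch URL works; extractVideoId pulls the ?v= query param out of it
const url = "https://www.youtube.com/watch?v=dQw4w9WgXcQ";
const videoId = extractVideoId(url); // "dQw4w9WgXcQ"

// Hits the YouTube Data API v3, then trims the response to a small object
const metadata = await getVideoMetaData(videoId);
console.log(metadata); // { videoTitle, videoDescription, videoId }
```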