6 | {/* If data is a string */}
7 | {typeof data === "string" && (
8 | <p>{data}</p>
9 | )}
10 | {/* If data is an object */}
11 | {data && <p>{data?.output}</p>}
12 |
13 | {/* If data has source documents (e.g. when querying from a VectorDBQAChain and returnSourceDocuments is true) */}
14 | {data &&
15 | data.sourceDocuments &&
16 | data.sourceDocuments.map((doc, index) => (
17 |
21 |
22 | Throughout this course, you'll be building stunning AI projects
23 | that are not only impressive but also have real-world
24 | applications.
25 |
26 | Whether you're aiming to generate a passive income, create a
27 | personal assistant to streamline your work, or simply to enhance
28 | your portfolio, the skills and knowledge you acquire here will be
29 | instrumental in achieving your goals.
30 |
31 |
32 | {/* Gallery */}
33 |
34 |
35 |
38 | Remember, this journey is yours. So let's{" "}
39 | roll up our sleeves, dive in, and start building. 🔨
40 |
41 |
42 | );
43 | }
44 |
--------------------------------------------------------------------------------
/app/pdf/page-namespace.jsx:
--------------------------------------------------------------------------------
1 | 'use client'
2 |
3 | import React, { useState } from 'react'
4 | import ResultWithSources from '../components/ResultWithSources'
5 | import PromptBox from '../components/PromptBox'
6 | import Button from '../components/Button'
7 | import PageHeader from '../components/PageHeader'
8 | import Title from '../components/Title'
9 | import TwoColumnLayout from '../components/TwoColumnLayout'
10 | import ButtonContainer from '../components/ButtonContainer'
11 | import '../globals.css'
12 |
13 | // This functional component is responsible for loading PDFs
14 | const PDFLoader = () => {
15 | // Managing prompt, messages, and error states with useState
16 | const [prompt, setPrompt] = useState('How to get rich?')
17 | const [messages, setMessages] = useState([
18 | {
19 | text: "Hi, I'm a Naval AI. What would you like to know?",
20 | type: 'bot',
21 | },
22 | ])
23 | const [error, setError] = useState('')
24 | const [bookId, setBookId] = useState('101')
25 |
26 | // This function updates the prompt value when the user types in the prompt box
27 | const handlePromptChange = (e) => {
28 | setPrompt(e.target.value)
29 | }
30 |
31 | const handleBookIdChange = (e) => {
32 | setBookId(e.target.value)
33 | }
34 |
35 | // This function handles the submission of the form when the user hits 'Enter' or 'Submit'
36 | // It sends a GET request to the provided endpoint with the current prompt as the query
37 | const handleSubmit = async (endpoint) => {
38 | try {
39 | console.log(`sending ${prompt}`)
40 | console.log(`using ${endpoint}`)
41 |
42 | // A GET request is sent to the backend
43 | const response = await fetch(`/api/${endpoint}?bookId=${bookId}`, {
44 | method: 'GET',
45 | })
46 |
47 | // The response from the backend is parsed as JSON
48 | const searchRes = await response.json()
49 | console.log(searchRes)
50 | setError('') // Clear any existing error messages
51 | } catch (error) {
52 | console.log(error)
53 | setError(error.message)
54 | }
55 | }
56 |
57 | // This function handles the submission of the user's prompt when the user hits 'Enter' or 'Submit'
58 | // It sends a POST request to the provided endpoint with the current prompt in the request body
59 | const handleSubmitPrompt = async (endpoint) => {
60 | try {
61 | setPrompt('')
62 |
63 | // Push the user's message into the messages array
64 | setMessages((prevMessages) => [
65 | ...prevMessages,
66 | { text: prompt, type: 'user', sourceDocuments: null },
67 | ])
68 |
69 | // A POST request is sent to the backend with the current prompt in the request body
70 | const response = await fetch(`/api/${endpoint}`, {
71 | method: 'POST',
72 | headers: {
73 | 'Content-Type': 'application/json',
74 | },
75 | body: JSON.stringify({ input: prompt, bookId }),
76 | })
77 |
78 | // Throw an error if the HTTP status is not OK
79 | if (!response.ok) {
80 | throw new Error(`HTTP error! status: ${response.status}`)
81 | }
82 |
83 | // Parse the response from the backend as JSON
84 | const searchRes = await response.json()
85 |
86 | console.log({ searchRes })
87 |
88 | // Push the response into the messages array
89 | setMessages((prevMessages) => [
90 | ...prevMessages,
91 | {
92 | text: searchRes.result.text,
93 | type: 'bot',
94 | sourceDocuments: searchRes.result.sourceDocuments,
95 | },
96 | ])
97 |
98 | setError('') // Clear any existing error messages
99 | } catch (error) {
100 | console.log(error)
101 | setError(error.message)
102 | }
103 | }
104 |
105 | // The component returns a two column layout with various child components
106 | return (
107 | <>
108 | <TwoColumnLayout
109 | leftChildren={
110 | <>
111 | {/* PageHeader, Title, and ButtonContainer with per-book upload Buttons */}
120 |
121 | {/* commented-out "Upload Test Data" Button */}
134 |
138 | </>
139 | }
140 | rightChildren={
141 | <>
142 | <ResultWithSources messages={messages} />
143 | <PromptBox
144 | prompt={prompt}
145 | handlePromptChange={handlePromptChange}
146 | handleSubmit={() => handleSubmitPrompt('/pdf-query')}
147 | // handleSubmit={() => handleSubmitQuery("/pdfquery-agent")}
148 | placeHolderText={'How to get rich?'}
149 | error={error}
150 | />
151 | </>
152 | }
153 | />
154 | </>
155 | )
156 | }
157 |
158 | export default PDFLoader
159 |
--------------------------------------------------------------------------------
/app/pdf/page.jsx:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | import React, { useState } from "react";
4 | import ResultWithSources from "../components/ResultWithSources";
5 | import PromptBox from "../components/PromptBox";
6 | import Button from "../components/Button";
7 | import PageHeader from "../components/PageHeader";
8 | import Title from "../components/Title";
9 | import TwoColumnLayout from "../components/TwoColumnLayout";
10 | import ButtonContainer from "../components/ButtonContainer";
11 | import "../globals.css";
12 |
13 | // This functional component is responsible for loading PDFs
14 | const PDFLoader = () => {
15 | // Managing prompt, messages, and error states with useState
16 | const [prompt, setPrompt] = useState("How to get rich?");
17 | const [messages, setMessages] = useState([
18 | {
19 | text: "Hi, I'm a Naval AI. What would you like to know?",
20 | type: "bot",
21 | },
22 | ]);
23 | const [error, setError] = useState("");
24 |
25 | // This function updates the prompt value when the user types in the prompt box
26 | const handlePromptChange = (e) => {
27 | setPrompt(e.target.value);
28 | };
29 |
30 | // This function handles the submission of the form when the user hits 'Enter' or 'Submit'
31 | // It sends a GET request to the provided endpoint with the current prompt as the query
32 | const handleSubmit = async (endpoint) => {
33 | try {
34 | console.log(`sending ${prompt}`);
35 | console.log(`using ${endpoint}`);
36 |
37 | // A GET request is sent to the backend
38 | const response = await fetch(`/api/${endpoint}`, {
39 | method: "GET",
40 | });
41 |
42 | // The response from the backend is parsed as JSON
43 | const searchRes = await response.json();
44 | console.log(searchRes);
45 | setError(""); // Clear any existing error messages
46 | } catch (error) {
47 | console.log(error);
48 | setError(error.message);
49 | }
50 | };
51 |
52 | // This function handles the submission of the user's prompt when the user hits 'Enter' or 'Submit'
53 | // It sends a POST request to the provided endpoint with the current prompt in the request body
54 | const handleSubmitPrompt = async (endpoint) => {
55 | try {
56 | setPrompt("");
57 |
58 | // Push the user's message into the messages array
59 | setMessages((prevMessages) => [
60 | ...prevMessages,
61 | { text: prompt, type: "user", sourceDocuments: null },
62 | ]);
63 |
64 | // A POST request is sent to the backend with the current prompt in the request body
65 | const response = await fetch(`/api/${endpoint}`, {
66 | method: "POST",
67 | headers: {
68 | "Content-Type": "application/json",
69 | },
70 | body: JSON.stringify({ input: prompt }),
71 | });
72 |
73 | // Throw an error if the HTTP status is not OK
74 | if (!response.ok) {
75 | throw new Error(`HTTP error! status: ${response.status}`);
76 | }
77 |
78 | // Parse the response from the backend as JSON
79 | const searchRes = await response.json();
80 |
81 | console.log({ searchRes });
82 |
83 | // Push the response into the messages array
84 | setMessages((prevMessages) => [
85 | ...prevMessages,
86 | {
87 | text: searchRes.result.text,
88 | type: "bot",
89 | sourceDocuments: searchRes.result.sourceDocuments,
90 | },
91 | ]);
92 |
93 | setError(""); // Clear any existing error messages
94 | } catch (error) {
95 | console.log(error);
96 | setError(error.message);
97 | }
98 | };
99 |
100 | // The component returns a two column layout with various child components
101 | return (
102 | <>
103 | <TwoColumnLayout
104 | leftChildren={
105 | <>
106 | {/* PageHeader, Title, and ButtonContainer with an upload Button */}
115 |
116 | {/* <Button
117 | handleSubmit={() => {handleSubmit('pdfupload-book')}}
118 | endpoint="pdfuploadtest"
119 | buttonText="Upload Test Data ☁️"
120 | className="Button"
121 | /> */}
122 |
128 | </>
129 | }
130 | rightChildren={
131 | <>
132 | <ResultWithSources messages={messages} />
133 | <PromptBox
134 | prompt={prompt}
135 | handlePromptChange={handlePromptChange}
136 | handleSubmit={() => handleSubmitPrompt("/pdf-query")}
138 | // handleSubmit={() => handleSubmitQuery("/pdfquery-agent")}
139 | placeHolderText={"How to get rich?"}
140 | error={error}
141 | />
142 | </>
143 | }
144 | />
145 | </>
146 | );
147 | };
148 |
149 | export default PDFLoader;
150 |
--------------------------------------------------------------------------------
/app/resume-reader/page.jsx:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | import React, { useState } from "react";
4 | import PageHeader from "../components/PageHeader";
5 | import PromptBox from "../components/PromptBox";
6 | import Title from "../components/Title";
7 | import TwoColumnLayout from "../components/TwoColumnLayout";
8 | import ResultWithSources from "../components/ResultWithSources";
9 | import ButtonContainer from "../components/ButtonContainer";
10 | import Button from "../components/Button";
11 |
12 | const endpoint = "/api/resume-query-metadata";
13 |
14 | const ResumeReader = () => {
15 | const [prompt, setPrompt] = useState("Who has experience with Python?");
16 | const [error, setError] = useState(null);
17 |
18 | const [messages, setMessages] = useState([
19 | {
20 | text: "After loading the vector database, ask me anything about your documents! E.g., Has anyone worked at Meta? Where did Joanna Smith go to school? Does Kaito Esquivel have any recommendations?",
21 | type: "bot",
22 | },
23 | ]);
24 |
25 | const handlePromptChange = (e) => {
26 | setPrompt(e.target.value);
27 | };
28 | const handleSubmitUpload = async () => {
29 | try {
30 | // Push the response into the messages array
31 | setMessages((prevMessages) => [
32 | ...prevMessages,
33 | {
34 | text: "Uploading resumes...",
35 | type: "bot",
36 | },
37 | ]);
38 |
39 | const response = await fetch(`/api/resume-upload`);
40 | const transcriptRes = await response.json();
41 |
42 | if (!response.ok) {
43 | throw new Error(transcriptRes.error);
44 | }
45 |
46 | console.log({ transcriptRes });
47 |
48 | // transcriptRes.output is a JSON string containing an array of { summary } objects
49 | const summariesArray = JSON.parse(transcriptRes.output);
50 |
51 | const newMessages = summariesArray.map((summary) => ({
52 | text: summary.summary,
53 | type: "bot",
54 | }));
55 |
56 | setMessages((prevMessages) => [...prevMessages, ...newMessages]);
57 |
58 | setPrompt("");
59 | } catch (err) {
60 | console.error(err);
61 | setError("Error");
62 | }
63 | };
64 |
65 | const handleSubmit = async () => {
66 | try {
67 | // Push the user's message into the messages array
68 | setMessages((prevMessages) => [
69 | ...prevMessages,
70 | { text: prompt, type: "user", sourceDocuments: null },
71 | ]);
72 |
73 | // set loading message
74 | setMessages((prevMessages) => [
75 | ...prevMessages,
76 | { text: "...", type: "bot", sourceDocuments: null },
77 | ]);
78 |
79 | const response = await fetch(`${endpoint}`, {
80 | method: "POST",
81 | headers: {
82 | "Content-Type": "application/json",
83 | },
84 | body: JSON.stringify({ prompt }),
85 | });
86 |
87 | const searchRes = await response.json();
88 | console.log({ searchRes });
89 |
90 | // remove loading message
91 | setMessages((prevMessages) => prevMessages.slice(0, -1));
92 |
93 | // Push the response into the messages array
94 | setMessages((prevMessages) => [
95 | ...prevMessages,
96 | {
97 | text: searchRes.output,
98 | type: "bot",
99 | sourceDocuments: searchRes.sourceDocuments,
100 | },
101 | ]);
102 | setPrompt("");
103 | } catch (err) {
104 | console.error(err);
105 | setError(err.message);
106 | }
107 | };
108 |
109 | return (
110 | <>
111 | <>
112 | <TwoColumnLayout
113 | leftChildren={
114 | <>
115 | {/* PageHeader, Title, and a Button wired to handleSubmitUpload */}
128 | </>
129 | }
130 | rightChildren={
131 | <>
132 | <ResultWithSources messages={messages} />
133 | <PromptBox
134 | prompt={prompt}
135 | handlePromptChange={handlePromptChange}
136 | handleSubmit={handleSubmit}
137 | placeHolderText={"Who has experience with Python?"}
138 | error={error}
141 | />
142 | </>
143 | }
144 | />
145 | </>
146 | </>
147 | );
148 | };
149 |
150 | export default ResumeReader;
151 |
--------------------------------------------------------------------------------
/app/streaming/page.jsx:
--------------------------------------------------------------------------------
1 | "use client";
2 | import React, { useState, useEffect } from "react";
3 | import PageHeader from "../components/PageHeader";
4 | import PromptBox from "../components/PromptBox";
5 | import ResultStreaming from "../components/ResultStreaming";
6 | import Title from "../components/Title";
7 | import TwoColumnLayout from "app/components/TwoColumnLayout";
8 |
9 | const Streaming = () => {
10 | const [prompt, setPrompt] = useState("");
11 | const [error, setError] = useState(null);
12 | const [data, setData] = useState("");
13 | // add code
14 |
15 | const processToken = (token) => {
16 | // add code
17 | return;
18 | };
19 |
20 | const handlePromptChange = (e) => {
21 | setPrompt(e.target.value);
22 | };
23 |
24 | const handleSubmit = async () => {
25 | try {
26 | // add code
27 | } catch (err) {
28 | console.error(err);
29 | setError(err.message);
30 | }
31 | };
32 |
33 | // Clean up the EventSource on component unmount
34 | // add code
35 | return (
36 | <>
37 | <TwoColumnLayout
38 | leftChildren={
39 | <>
40 | {/* PageHeader and Title */}
46 | </>
47 | }
48 | rightChildren={
49 | <>
50 | <ResultStreaming data={data} />
51 | <PromptBox
52 | prompt={prompt}
53 | handlePromptChange={handlePromptChange}
54 | handleSubmit={handleSubmit}
55 | error={error}
58 | />
59 | </>
60 | }
61 | />
62 | </>
63 | );
64 | };
65 |
66 | export default Streaming;
67 |
--------------------------------------------------------------------------------
/app/streaming/streaming-solution.jsx:
--------------------------------------------------------------------------------
1 | "use client";
2 | import React, { useState, useEffect } from "react";
3 | import PageHeader from "../components/PageHeader";
4 | import PromptBox from "../components/PromptBox";
5 | import ResultStreaming from "../components/ResultStreaming";
6 | import Title from "../components/Title";
7 | import TwoColumnLayout from "app/components/TwoColumnLayout";
8 |
9 | /**
10 | *
11 | * WARNING: THIS IS THE SOLUTION! Please try coding before viewing this.
12 | *
13 | */
14 | const Streaming = () => {
15 | const [prompt, setPrompt] = useState("");
16 | const [error, setError] = useState(null);
17 | const [data, setData] = useState("");
18 | const [source, setSource] = useState(null);
19 |
20 | const processToken = (token) => {
21 | return token.replace(/\\n/g, "\n").replace(/\"/g, "");
22 | };
23 |
24 | const handlePromptChange = (e) => {
25 | setPrompt(e.target.value);
26 | };
27 |
28 | const handleSubmit = async () => {
29 | try {
30 | console.log(`sending ${prompt}`);
31 | await fetch("/api/streaming", {
32 | method: "POST",
33 | headers: {
34 | "Content-Type": "application/json",
35 | },
36 | body: JSON.stringify({ input: prompt }),
37 | });
38 | // close existing sources
39 | if (source) {
40 | source.close();
41 | }
42 | // create new eventsource
43 |
44 | const newSource = new EventSource("/api/streaming");
45 |
46 | setSource(newSource);
47 |
48 | newSource.addEventListener("newToken", (event) => {
49 | const token = processToken(event.data);
50 | setData((prevData) => prevData + token);
51 | });
52 |
53 | newSource.addEventListener("end", () => {
54 | newSource.close();
55 | });
56 | } catch (err) {
57 | console.error(err);
58 | setError(err.message);
59 | }
60 | };
61 |
62 | // Clean up the EventSource on component unmount
63 | useEffect(() => {
64 | // Nothing to do on mount; just return the cleanup function
65 | return () => {
66 | if (source) {
67 | source.close();
68 | }
69 | };
70 | }, [source]);
71 | return (
72 | <>
73 | <TwoColumnLayout
74 | leftChildren={
75 | <>
76 | {/* PageHeader and Title */}
82 | </>
83 | }
84 | rightChildren={
85 | <>
86 | <ResultStreaming data={data} />
87 | <PromptBox
88 | prompt={prompt}
89 | handlePromptChange={handlePromptChange}
90 | handleSubmit={handleSubmit}
91 | error={error}
94 | />
95 | </>
96 | }
97 | />
98 | </>
99 | );
100 | };
101 |
102 | export default Streaming;
103 |
--------------------------------------------------------------------------------
/app/styles/InstrumentSans-VariableFont_wdth,wght.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/app/styles/InstrumentSans-VariableFont_wdth,wght.ttf
--------------------------------------------------------------------------------
/app/styles/burger.css:
--------------------------------------------------------------------------------
1 | .bm-burger-button {
2 |
3 | width: 36px;
4 | height: 30px;
5 | left: 36px;
6 |
7 | top: 36px;
8 | }
9 |
10 | .bm-burger-bars {
11 | background: #373a47;
12 | }
13 |
14 | .bm-burger-bars-hover {
15 | background: #a90000;
16 |
17 | }
18 |
19 | .bm-cross-button {
20 | height: 24px;
21 | width: 24px;
22 | }
23 |
24 | .bm-cross {
25 | background: #bdc3c7;
26 | }
27 |
28 | .bm-menu-wrap {
29 | position: fixed;
30 | height: 100%;
31 | }
32 |
33 | .bm-menu {
34 | background: #373a47;
35 | padding: 0.3rem 0.3rem 0;
36 | font-size: 1.15em;
37 | }
38 |
39 | .bm-morph-shape {
40 | fill: #373a47;
41 | }
42 |
43 | .bm-item-list {
44 |
45 | color: #b8b7ad;
46 | padding: 0.8em;
47 | }
48 |
49 | .bm-item {
50 | display: inline-block;
51 | }
52 |
53 | .bm-overlay {
54 | background: rgba(0, 0, 0, 0.3);
55 | }
56 |
57 | /* Style the Next.js Link components so they stack as a column of links */
58 | .bm-item-list a {
59 | display: flex;
60 | flex-direction: column;
61 | }
--------------------------------------------------------------------------------
/app/styles/fonts.js:
--------------------------------------------------------------------------------
1 | import { Press_Start_2P, Source_Code_Pro } from "next/font/google";
2 | import localFont from "next/font/local";
3 |
4 | /**
5 | *
6 | *
7 | * GOOGLE FONTS
8 | *
9 | * Automatically self-host any Google Font. Fonts are included in the deployment and served from the same domain as your deployment. No requests are sent to Google by the browser.
10 | *
11 | * Get started by importing the font you would like to use from next/font/google as a function. We recommend using variable fonts for the best performance and flexibility.
12 | *
13 | * https://nextjs.org/docs/app/building-your-application/optimizing/fonts#google-fonts
14 | *
15 | *
16 | */
17 |
18 | const pressStart2P = Press_Start_2P({ subsets: ["latin"], weight: "400" });
19 | const sourceCodePro = Source_Code_Pro({ subsets: ["latin"], weight: "400" });
20 | const instrumentSans = localFont({
21 | src: "./InstrumentSans-VariableFont_wdth,wght.ttf",
22 | });
23 |
24 | export { pressStart2P, sourceCodePro, instrumentSans };
25 |
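// Example: applying one of the exported fonts in a root layout. This is a
// minimal sketch -- the actual layout file is not shown here, so the wiring
// below is an assumption, not the course's implementation.
import { instrumentSans } from "./styles/fonts";

export default function RootLayout({ children }) {
  return (
    <html lang="en">
      <body className={instrumentSans.className}>{children}</body>
    </html>
  );
}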
--------------------------------------------------------------------------------
/app/video-chat/page.jsx:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | import React, { useState } from "react";
4 | import PageHeader from "../components/PageHeader";
5 | import PromptBox from "../components/PromptBox";
6 | import ResultWithSources from "../components/ResultWithSources";
7 | import Title from "../components/Title";
8 | import TwoColumnLayout from "../components/TwoColumnLayout";
9 |
10 | /**
11 | *
12 | * MODULE 4: YOUTUBE CHATBOT:
13 | *
14 | * Start with the UI.. no need to recreate!
15 | *
16 | * */
17 | const VideoChat = () => {
18 | // We'll set a default YouTube video so we don't have to copy and paste this every time
19 | const [prompt, setPrompt] = useState(
20 | "https://www.youtube.com/watch?v=0lJKucu6HJc"
21 | );
22 | const [error, setError] = useState(null);
23 | const [firstMsg, setFirstMsg] = useState(true);
24 |
25 | // And we'll set an initial message as well, to make the UI look a little nicer.
26 | const [messages, setMessages] = useState([
27 | {
28 | text: "Hi there! I'm YT chatbot. Please provide a YouTube video URL and I'll answer any questions you have.",
29 | type: "bot",
30 | },
31 | ]);
32 |
33 | const handlePromptChange = (e) => {
34 | setPrompt(e.target.value);
35 | };
36 |
37 | // The only differences here are the URL for the API call,
38 | // and the body sends the prompt along with a firstMsg flag, which tells us whether it's the first message in the chat,
39 | // because the first message tells the backend to create the YouTube chatbot
40 | const handleSubmit = async () => {
41 | try {
42 | // Push the user's message into the messages array
43 | setMessages((prevMessages) => [
44 | ...prevMessages,
45 | { text: prompt, type: "user", sourceDocuments: null },
46 | ]);
47 |
48 | const response = await fetch(`/api/video-chat`, {
49 | method: "POST",
50 | headers: {
51 | "Content-Type": "application/json",
52 | },
53 | body: JSON.stringify({ prompt: prompt, firstMsg }),
54 | });
55 |
56 | console.log({ response });
57 | if (!response.ok) {
58 | throw new Error(`HTTP error! status: ${response.status}`);
59 | }
60 |
61 | const searchRes = await response.json();
62 |
63 | // Push the response into the messages array
64 | setMessages((prevMessages) => [
65 | ...prevMessages,
66 | {
67 | text: searchRes.output.text,
68 | type: "bot",
69 | },
70 | ]);
71 |
72 | setPrompt("");
73 | setFirstMsg(false);
74 | setError("");
75 | } catch (err) {
76 | console.error(err);
77 | setError("Error fetching transcript. Please try again.");
78 | }
79 | };
80 |
81 | return (
82 | <>
83 | <TwoColumnLayout
84 | leftChildren={
85 | <>
86 | {/* PageHeader and Title */}
92 | </>
93 | }
94 | rightChildren={
95 | <>
96 | <ResultWithSources messages={messages} />
97 | <PromptBox
98 | prompt={prompt}
99 | handlePromptChange={handlePromptChange}
100 | handleSubmit={handleSubmit}
101 | error={error}
107 | />
108 | </>
109 | }
110 | />
111 | </>
112 | );
113 | };
114 |
115 | export default VideoChat;
116 |
--------------------------------------------------------------------------------
/data/document_loaders/bitcoin.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/data/document_loaders/bitcoin.pdf
--------------------------------------------------------------------------------
/data/document_loaders/naval-ravikant-book.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/data/document_loaders/naval-ravikant-book.pdf
--------------------------------------------------------------------------------
/data/resumes/resume_aubrey_graham.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/data/resumes/resume_aubrey_graham.pdf
--------------------------------------------------------------------------------
/data/resumes/resume_joanna_smith.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/data/resumes/resume_joanna_smith.pdf
--------------------------------------------------------------------------------
/data/resumes/resume_kaito_esquivel.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/data/resumes/resume_kaito_esquivel.pdf
--------------------------------------------------------------------------------
/jsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "baseUrl": ".",
4 | "paths": {
5 | "@/utils/*": ["utils/*"]
6 | }
7 | },
8 | "exclude": ["node_modules"]
9 | }
10 |
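// With the "@/utils/*" alias above, helpers can be imported without long relative
// paths, e.g. (sketch using a util that exists in this repo):
// import extractVideoId from "@/utils/extractVideoId";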
--------------------------------------------------------------------------------
/next.config.js:
--------------------------------------------------------------------------------
1 | /** @type {import('next').NextConfig} */
2 | // https://js.langchain.com/docs/getting-started/install#vercel--nextjs
3 | // To use LangChain with Next.js (either with app/ or pages/), add the following to your next.config.js to enable support for WebAssembly modules (which is required by the tokenizer library @dqbd/tiktoken):
4 | const nextConfig = {
5 | webpack(config) {
6 | config.experiments = {
7 | asyncWebAssembly: true,
8 | layers: true,
9 | };
10 |
11 | return config;
12 | },
13 | // To expose variables to the browser, add: env: { API_KEY: process.env.API_KEY }
14 | };
15 |
16 | module.exports = nextConfig;
17 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "yt-script-generator",
3 | "version": "0.1.0",
4 | "private": true,
5 | "scripts": {
6 | "dev": "next dev",
7 | "build": "next build",
8 | "start": "next start",
9 | "lint": "next lint"
10 | },
11 | "dependencies": {
12 | "@pinecone-database/pinecone": "^0.1.5",
13 | "autoprefixer": "10.4.14",
14 | "axios": "^1.4.0",
15 | "cheerio": "^1.0.0-rc.12",
16 | "cors": "^2.8.5",
17 | "debug": "^4.3.4",
18 | "dotenv": "^16.0.3",
19 | "hnswlib-node": "^1.4.2",
20 | "langchain": "^0.0.75",
21 | "next": "13.4.1",
22 | "openai": "^3.2.1",
23 | "pdf-parse": "^1.1.1",
24 | "postcss": "8.4.23",
25 | "react": "18.2.0",
26 | "react-burger-menu": "^3.0.9",
27 | "react-dom": "18.2.0",
28 | "serpapi": "^1.1.1",
29 | "supports-color": "^9.3.1",
30 | "tailwindcss": "3.3.2",
31 | "youtube-transcript": "^1.0.6"
32 | },
33 | "devDependencies": {
34 | "@types/cors": "^2.8.13",
35 | "@types/node": "20.1.0",
36 | "@types/react": "18.2.6",
37 | "express-sse": "^0.5.3",
38 | "typescript": "5.0.4"
39 | }
40 | }
41 |
--------------------------------------------------------------------------------
/pages/api/chatcompletions.js:
--------------------------------------------------------------------------------
1 | import { ChatOpenAI } from "langchain/chat_models/openai";
2 | import { HumanChatMessage } from "langchain/schema";
3 |
4 | // Create an instance of ChatOpenAI
5 |
6 | export default async function handler(req, res) {
7 | if (req.method === "POST") {
8 | // Grab the user prompt
9 | // console.log(process.env.OPENAI_API_KEY);
10 | // console.log(process.env.SERPAPI_API_KEY);
11 |
12 | // Enter your code here
13 |
14 | // Modify output as needed
15 | return res.status(200).json({ result: response });
16 | } else {
17 | res.status(405).json({ message: "Method not allowed" });
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/pages/api/content-generator.js:
--------------------------------------------------------------------------------
1 | // /pages/api/transcript_chat.js
2 | import { YoutubeTranscript } from "youtube-transcript";
3 | import { ChatOpenAI } from "langchain/chat_models/openai";
4 | import { LLMChain } from "langchain/chains";
5 | import {
6 | ChatPromptTemplate,
7 | HumanMessagePromptTemplate,
8 | SystemMessagePromptTemplate,
9 | } from "langchain/prompts";
10 | import extractVideoId from "../../utils/extractVideoId";
11 | import getVideoMetaData from "../../utils/getVideoMetaData";
12 | import ResearchAgent from "../../agents/ResearchAgent";
13 |
14 | // Global Variables
15 | let chain, chatHistory = [], transcript = "", metadataString = "", research;
16 | // Initialize Chain with Data
17 | const initChain = async (transcript, metadataString, research, topic) => {
18 | try {
19 | // do stuff
20 |
21 | return response;
22 | } catch (error) {
23 | console.error(
24 | `An error occurred during the initialization of the Chat Prompt: ${error.message}`
25 | );
26 | throw error; // rethrow the error to let the calling function know that an error occurred
27 | }
28 | };
29 |
30 | export default async function handler(req, res) {
31 | const { prompt, topic, firstMsg } = req.body;
32 | console.log(`Prompt: ${prompt} Topic: ${topic}`);
33 |
34 | if (
35 | chain === undefined &&
36 | !prompt.includes("https://www.youtube.com/watch?v=")
37 | ) {
38 | return res.status(400).json({
39 | error:
40 | "Chain not initialized. Please send a YouTube URL to initialize the chain.",
41 | });
42 | }
43 |
44 | chatHistory.push({
45 | role: "user",
46 | content: prompt,
47 | });
48 |
49 | // Just like in the previous section, if firstMsg is true, we need to initialize the chain with the context
50 | if (firstMsg) {
51 | console.log("Received URL");
52 | try {
53 | // Initialize chain with transcript, metadata, research, and topic
54 |
55 | // return res.status(200).json({ output: research });
56 | return res.status(200).json({
57 | output: response,
58 | chatHistory,
59 | transcript,
60 | metadata,
61 | research,
62 | });
63 | } catch (err) {
64 | console.error(err);
65 | return res
66 | .status(500)
67 | .json({ error: "An error occurred while fetching transcript" });
68 | }
69 | } else {
70 | // Very similar to previous section, don't worry too much about this just copy and paste it from the previous section!
71 | console.log("Received question");
72 | try {
73 | // do stuff
74 |
75 | // just make sure to modify this response as necessary.
76 | return res.status(200).json({
77 | output: response,
78 | metadata: metadataString,
79 | transcript,
80 | chatHistory,
81 | });
82 | } catch (error) {
83 | console.error(error);
84 | res
85 | .status(500)
86 | .json({ error: "An error occurred during the conversation." });
87 | }
88 | }
89 | }
90 |
--------------------------------------------------------------------------------
/pages/api/memory.js:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/pages/api/memory.js
--------------------------------------------------------------------------------
/pages/api/pdf-query.js:
--------------------------------------------------------------------------------
1 | import { PineconeClient } from "@pinecone-database/pinecone";
2 | import { VectorDBQAChain } from "langchain/chains";
3 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
4 | import { OpenAI } from "langchain/llms/openai";
5 | import { PineconeStore } from "langchain/vectorstores/pinecone";
6 |
7 | // Example: https://js.langchain.com/docs/modules/indexes/document_loaders/examples/file_loaders/pdf
8 | export default async function handler(req, res) {
9 | try {
10 | if (req.method !== "POST") {
11 | throw new Error("Method not allowed");
12 | }
13 |
14 | console.log("Query PDF");
15 |
16 | // Grab the user prompt
17 | const { input } = req.body;
18 |
19 | if (!input) {
20 | throw new Error("No input");
21 | }
22 |
23 | console.log("input received:", input);
24 |
25 | /* Use as part of a chain (currently no metadata filters) */
26 |
27 | // Initialize Pinecone
28 |
29 | // Search!
30 |
31 | return res.status(200).json({ result: response });
32 | } catch (error) {
33 | console.error(error);
34 | res.status(500).json({ message: error.message });
35 | }
36 | }
37 |
--------------------------------------------------------------------------------
/pages/api/pdf-upload.js:
--------------------------------------------------------------------------------
1 | import { PDFLoader } from "langchain/document_loaders/fs/pdf";
2 | import { PineconeClient } from "@pinecone-database/pinecone";
3 | import { Document } from "langchain/document";
4 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
5 | import { PineconeStore } from "langchain/vectorstores/pinecone";
6 | import { CharacterTextSplitter } from "langchain/text_splitter";
7 |
8 | // Example: https://js.langchain.com/docs/modules/indexes/document_loaders/examples/file_loaders/pdf
9 | export default async function handler(req, res) {
10 | if (req.method === "GET") {
11 | console.log("Inside the PDF handler");
12 | // Enter your code here
13 | /** STEP ONE: LOAD DOCUMENT */
14 |
15 | // Chunk it
16 |
17 | // Reduce the size of the metadata
18 |
19 | /** STEP TWO: UPLOAD TO DATABASE */
20 |
21 | // upload documents to Pinecone
22 | return res.status(200).json({ result: docs });
23 | } else {
24 | res.status(405).json({ message: "Method not allowed" });
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/pages/api/resume-query-metadata.js:
--------------------------------------------------------------------------------
1 | /**
2 | * This endpoint is used to load the resumes into the chain, then upload them to the Pinecone database.
3 | * Tutorial: https://js.langchain.com/docs/modules/indexes/document_loaders/examples/file_loaders/directory
4 | * Summarization: https://js.langchain.com/docs/modules/chains/other_chains/summarization
5 | * Dependencies: npm install pdf-parse
6 | */
7 |
8 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
9 | import { PineconeStore } from "langchain/vectorstores/pinecone";
10 | import { PineconeClient } from "@pinecone-database/pinecone";
11 | import { OpenAI } from "langchain/llms/openai";
12 | import { VectorDBQAChain } from "langchain/chains";
13 | import { PromptTemplate } from "langchain/prompts";
14 |
15 | export default async function handler(req, res) {
16 | try {
17 | // do stuff
18 |
19 | return res.status(200).json({
20 | output: response.text,
21 | sourceDocuments: response.sourceDocuments,
22 | });
23 | } catch (err) {
24 | console.error(err);
25 | return res.status(500).json({ error: "Error" });
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/pages/api/resume-upload.js:
--------------------------------------------------------------------------------
1 | // /pages/api/resume_upload.js
2 | // Import dependencies
3 |
4 | /**
5 | * This endpoint is used to load the resumes into the chain, then upload them to the Pinecone database.
6 | * Tutorial: https://js.langchain.com/docs/modules/indexes/document_loaders/examples/file_loaders/directory
7 | * Summarization: https://js.langchain.com/docs/modules/chains/other_chains/summarization
8 | * Dependencies: npm install pdf-parse
9 | */
10 |
11 | import { DirectoryLoader } from "langchain/document_loaders/fs/directory";
12 | import { PDFLoader } from "langchain/document_loaders/fs/pdf";
13 | import { CharacterTextSplitter } from "langchain/text_splitter";
14 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
15 | import { PineconeStore } from "langchain/vectorstores/pinecone";
16 | import { PineconeClient } from "@pinecone-database/pinecone";
17 | import { loadSummarizationChain } from "langchain/chains";
18 | import { OpenAI } from "langchain/llms/openai";
19 |
20 | export default async function handler(req, res) {
21 | // Grab the prompt from the url (?prompt=[value])
22 | // console.log(process.env.PINECONE_API_KEY);
23 | // console.log(process.env.PINECONE_ENVIRONMENT);
24 | // console.log(process.env.PINECONE_INDEX);
25 | // Always use a try catch block to do asynchronous requests and catch any errors
26 | try {
27 | // do stuff
28 | } catch (err) {
29 | // If we have an error
30 |
31 | console.error(err);
32 | return res.status(500).json({ error: err });
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/pages/api/solutions/chatcompletions-soln.js:
--------------------------------------------------------------------------------
1 | import { ChatOpenAI } from "langchain/chat_models/openai";
2 | import { HumanChatMessage, SystemChatMessage } from "langchain/schema";
3 |
4 | /**
5 | *
6 | * WARNING: THIS IS THE SOLUTION! Please try coding before viewing this.
7 | *
8 | */
9 |
10 | const chat = new ChatOpenAI({ temperature: 0, modelName: "gpt-3.5-turbo" });
11 |
12 | export default async function handler(req, res) {
13 | if (req.method === "POST") {
14 | // Grab the user prompt
15 | const { input } = req.body;
16 |
17 | if (!input) {
18 | throw new Error("No input");
19 | }
20 |
21 | // Enter your code here
22 | const response = await chat.call([
23 | new HumanChatMessage(`How do I write a for loop in ${input}?`),
24 | ]);
25 |
26 | console.log(response);
27 |
28 | // Modify output as needed
29 | return res.status(200).json({ result: response });
30 | } else {
31 | res.status(405).json({ message: "Method not allowed" });
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/pages/api/solutions/content-generator-soln.js:
--------------------------------------------------------------------------------
1 | // /pages/api/transcript_chat.js
2 |
3 | import { YoutubeTranscript } from "youtube-transcript";
4 | import extractVideoId from "../../utils/extractVideoId";
5 | import getVideoMetaData from "../../utils/getVideoMetaData";
6 | import { ChatOpenAI } from "langchain/chat_models/openai";
7 | import { LLMChain } from "langchain/chains";
8 | import ResearchAgent from "../../agents/ResearchAgent";
9 | import {
10 | ChatPromptTemplate,
11 | HumanMessagePromptTemplate,
12 | SystemMessagePromptTemplate,
13 | } from "langchain/prompts";
14 |
15 | /**
16 | *
17 | * WARNING: THIS IS THE SOLUTION! Please try coding before viewing this.
18 | *
19 | */
20 |
21 | // Global Variables
22 | let chain;
23 | let chatHistory = [];
24 | let transcript = "";
25 | let metadataString = "";
26 | let research;
27 |
28 | // Initialize Chain with Data
29 | const initChain = async (transcript, metadataString, research, topic) => {
30 | try {
31 | // For chat models, we provide a `ChatPromptTemplate` class that can be used to format chat prompts.
32 | const llm = new ChatOpenAI({
33 | temperature: 0.7,
34 | modelName: "gpt-3.5-turbo",
35 | });
36 |
37 | console.log(`Initializing Chat Prompt`);
38 |
39 | // For chat models, we provide a `ChatPromptTemplate` class that can be used to format chat prompts.
40 | // This allows us to set the template that the bot sees every time
41 | const chatPrompt = ChatPromptTemplate.fromPromptMessages([
42 | SystemMessagePromptTemplate.fromTemplate(
43 | "You are a helpful social media assistant that provides research, new content, and advice to me. \n You are given the transcript of the video: {transcript} \n and video metadata: {metadata} as well as additional research: {research}"
44 | ),
45 | HumanMessagePromptTemplate.fromTemplate(
46 | "{input}. Remember to use the video transcript and research as reference."
47 | ),
48 | ]);
49 |
50 | const question = `Write me a script for a new video that provides commentary on this video in a lighthearted, joking manner. It should compliment ${topic} with puns.`;
51 | console.log(question);
52 |
53 | chain = new LLMChain({
54 | prompt: chatPrompt,
55 | llm: llm,
56 | // memory,
57 | });
58 |
59 | const response = await chain.call({
60 | transcript,
61 | metadata: metadataString,
62 | research,
63 | input: question,
64 | });
65 |
66 | console.log({ response });
67 |
68 | chatHistory.push({
69 | role: "assistant",
70 | content: response.text,
71 | });
72 |
73 | return response;
74 | } catch (error) {
75 | console.error(
76 | `An error occurred during the initialization of the Chat Prompt: ${error.message}`
77 | );
78 | throw error; // rethrow the error to let the calling function know that an error occurred
79 | }
80 | };
81 |
82 | export default async function handler(req, res) {
83 | const { prompt, topic, firstMsg } = req.body;
84 | console.log(`Prompt: ${prompt} Topic: ${topic}`);
85 |
86 | if (
87 | chain === undefined &&
88 | !prompt.includes("https://www.youtube.com/watch?v=")
89 | ) {
90 | return res.status(400).json({
91 | error:
92 | "Chain not initialized. Please send a YouTube URL to initialize the chain.",
93 | });
94 | }
95 |
96 | chatHistory.push({
97 | role: "user",
98 | content: prompt,
99 | });
100 |
101 | // Just like in the previous section, if firstMsg is true, we need to initialize the chain with the context
102 | if (firstMsg) {
103 | console.log("Received URL");
104 | try {
105 | const videoId = extractVideoId(prompt);
106 | // API call for the video transcript; the response is an array like [{ text: "..." }, ...] that we flatten into a single string
107 | const transcriptResponse = await YoutubeTranscript.fetchTranscript(
108 | prompt
109 | );
110 | // Some error handling -- bail out before using the response
111 | if (!transcriptResponse) {
112 | return res.status(400).json({ error: "Failed to get transcript" });
113 | }
114 | transcriptResponse.forEach((line) => {
115 | transcript += line.text;
116 | });
117 |
118 | // API call for video metadata (see utils/getVideoMetaData)
119 | const metadata = await getVideoMetaData(videoId);
120 |
121 | // Stringify the metadata object: null = no replacer function, 2 = two-space indentation
122 | metadataString = JSON.stringify(metadata, null, 2);
123 | console.log({ metadataString });
124 |
125 | // ResearchAgent
126 | research = await ResearchAgent(topic);
127 |
128 | console.log({ research });
129 |
130 | // Alright, finally we have all the context and we can initialize the chain!
131 | const response = await initChain(
132 | transcript,
133 | metadataString,
134 | research,
135 | topic
136 | );
137 |
138 | // return res.status(200).json({ output: research });
139 | return res.status(200).json({
140 | output: response,
141 | chatHistory,
142 | transcript,
143 | metadata,
144 | research,
145 | });
146 | } catch (err) {
147 | console.error(err);
148 | return res
149 | .status(500)
150 | .json({ error: "An error occurred while fetching transcript" });
151 | }
152 | } else {
153 | // Very similar to previous section, don't worry too much about this just copy and paste it from the previous section!
154 | console.log("Received question");
155 | try {
156 | const question = prompt;
157 |
158 | console.log("Asking:", question);
159 | console.log("Using old chain:", chain);
160 | // Every time we call the chain, we need to pass all the context back so it can fill in the prompt template appropriately
161 | const response = await chain.call({
162 | transcript,
163 | metadata: metadataString,
164 | research,
165 | input: question,
166 | });
167 |
168 | // update chat history
169 | chatHistory.push({
170 | role: "assistant",
171 | content: response.text,
172 | });
173 | // just make sure to modify this response as necessary.
174 | return res.status(200).json({
175 | output: response,
176 | metadata: metadataString,
177 | transcript,
178 | chatHistory,
179 | });
180 | } catch (error) {
181 | console.error(error);
182 | res
183 | .status(500)
184 | .json({ error: "An error occurred during the conversation." });
185 | }
186 | }
187 | }
188 |
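// For reference, a plausible shape for utils/extractVideoId (the real helper lives
// in /utils/extractVideoId and is not shown here, so this is an assumption):
export default function extractVideoId(url) {
  // Capture the 11-character video id from watch?v=<id> or youtu.be/<id> URLs
  const match = url.match(/(?:v=|youtu\.be\/)([\w-]{11})/);
  return match ? match[1] : null;
}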
--------------------------------------------------------------------------------
/pages/api/solutions/memory-soln.js:
--------------------------------------------------------------------------------
1 | // Solution
2 | import { OpenAI } from "langchain/llms/openai";
3 | import { BufferMemory } from "langchain/memory";
4 | import { ConversationChain } from "langchain/chains";
5 |
6 | /**
7 | *
8 | * WARNING: THIS IS THE SOLUTION! Please try coding before viewing this.
9 | *
10 | */
11 |
12 | let model;
13 | let memory;
14 | let chain;
15 | export default async function handler(req, res) {
16 | if (req.method === "POST") {
17 | const { input, firstMsg } = req.body;
18 |
19 | if (!input) {
20 | throw new Error("No input!");
21 | }
22 |
23 | if (firstMsg) {
24 | console.log("initializing chain");
25 | model = new OpenAI({ modelName: "gpt-3.5-turbo" });
26 | memory = new BufferMemory();
27 | chain = new ConversationChain({ llm: model, memory: memory });
28 | }
29 |
30 | console.log({ input });
31 | const response = await chain.call({ input });
32 | console.log({ response });
33 | return res.status(200).json({ output: response });
34 | } else {
35 | res.status(405).json({ message: "Only POST is allowed" });
36 | }
37 | }
38 |
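// Example request against this endpoint (hypothetical local-dev usage):
//   curl -X POST http://localhost:3000/api/solutions/memory-soln \
//     -H "Content-Type: application/json" \
//     -d '{ "input": "Hi, my name is Sam", "firstMsg": true }'
// A follow-up request with firstMsg=false reuses the same BufferMemory, so the
// model can answer questions like "What is my name?" from the conversation history.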
--------------------------------------------------------------------------------
/pages/api/solutions/pdf-query-namespace-soln.js:
--------------------------------------------------------------------------------
1 | import { PineconeClient } from '@pinecone-database/pinecone'
2 | import { VectorDBQAChain } from 'langchain/chains'
3 | import { OpenAIEmbeddings } from 'langchain/embeddings/openai'
4 | import { OpenAI } from 'langchain/llms/openai'
5 | import { PineconeStore } from 'langchain/vectorstores/pinecone'
6 |
7 | export default async function handler(req, res) {
8 | try {
9 | if (req.method !== 'POST') {
10 | throw new Error('Method not allowed')
11 | }
12 |
13 | console.log('Query PDF')
14 |
15 | // Grab the user prompt
16 | const { input, bookId } = req.body
17 |
18 | if (!input) {
19 | throw new Error('No input')
20 | }
21 |
22 | console.log('input received:', input)
23 |
24 | const client = new PineconeClient()
25 | await client.init({
26 | apiKey: process.env.PINECONE_API_KEY,
27 | environment: process.env.PINECONE_ENVIRONMENT,
28 | })
29 | const pineconeIndex = client.Index(process.env.PINECONE_INDEX)
30 |
31 | const vectorStore = await PineconeStore.fromExistingIndex(new OpenAIEmbeddings(), {
32 | pineconeIndex,
33 | namespace: bookId.toString(),
34 | })
35 |
36 | /* Part Two: Use as part of a chain (currently no metadata filters) */
37 |
38 | const model = new OpenAI()
39 | const chain = VectorDBQAChain.fromLLM(model, vectorStore, {
40 | k: 1,
41 | returnSourceDocuments: true,
42 | })
43 | const response = await chain.call({ query: input })
44 |
45 | console.log(response)
46 |
47 | return res.status(200).json({ result: response })
48 | } catch (error) {
49 | console.error(error)
50 | res.status(500).json({ message: error.message })
51 | }
52 | }
53 |
--------------------------------------------------------------------------------
/pages/api/solutions/pdf-query-soln.js:
--------------------------------------------------------------------------------
1 | import { PineconeClient } from "@pinecone-database/pinecone";
2 | import { VectorDBQAChain } from "langchain/chains";
3 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
4 | import { OpenAI } from "langchain/llms/openai";
5 | import { PineconeStore } from "langchain/vectorstores/pinecone";
6 | /**
7 | *
8 | * WARNING: THIS IS THE SOLUTION! Please try coding before viewing this.
9 | *
10 | */
11 |
12 | // Example: https://js.langchain.com/docs/modules/indexes/document_loaders/examples/file_loaders/pdf
13 | export default async function handler(req, res) {
14 | try {
15 | if (req.method !== "POST") {
16 | throw new Error("Method not allowed");
17 | }
18 |
19 | console.log("Query PDF");
20 |
21 | // Grab the user prompt
22 | const { input } = req.body;
23 |
24 | if (!input) {
25 | throw new Error("No input");
26 | }
27 |
28 | console.log("input received:", input);
29 |
30 | const client = new PineconeClient();
31 | await client.init({
32 | apiKey: process.env.PINECONE_API_KEY,
33 | environment: process.env.PINECONE_ENVIRONMENT,
34 | });
35 | const pineconeIndex = client.Index(process.env.PINECONE_INDEX);
36 |
37 | const vectorStore = await PineconeStore.fromExistingIndex(
38 | new OpenAIEmbeddings(),
39 | { pineconeIndex }
40 | );
41 |
42 | /* Part Two: Use as part of a chain (currently no metadata filters) */
43 |
44 | const model = new OpenAI();
45 | const chain = VectorDBQAChain.fromLLM(model, vectorStore, {
46 | k: 1,
47 | returnSourceDocuments: true,
48 | });
49 | const response = await chain.call({ query: input });
50 |
51 | console.log(response);
52 |
53 | return res.status(200).json({ result: response });
54 | } catch (error) {
55 | console.error(error);
56 | res.status(500).json({ message: error.message });
57 | }
58 | }
59 |
--------------------------------------------------------------------------------
/pages/api/solutions/pdf-upload-namespace-soln.js:
--------------------------------------------------------------------------------
1 | import { PDFLoader } from 'langchain/document_loaders/fs/pdf'
2 | import { PineconeClient } from '@pinecone-database/pinecone'
3 | import { Document } from 'langchain/document'
4 | import { OpenAIEmbeddings } from 'langchain/embeddings/openai'
5 | import { PineconeStore } from 'langchain/vectorstores/pinecone'
6 | import { CharacterTextSplitter } from 'langchain/text_splitter'
7 |
8 | export default async function handler(req, res) {
9 | if (req.method === 'GET') {
10 | console.log('Uploading book')
11 | /** STEP ONE: LOAD DOCUMENT */
12 | const { bookId } = req.query
13 | const bookDb = {
14 | 101: 'c:/bitcoin.pdf',
15 | 102: 'c:/naval.pdf',
16 | }
17 | const bookPath = bookDb[bookId]
18 | const loader = new PDFLoader(bookPath)
19 |
20 | const docs = await loader.load()
21 |
22 | if (docs.length === 0) {
23 | console.log('No documents found.')
24 | return
25 | }
26 |
27 | const splitter = new CharacterTextSplitter({
28 | separator: ' ',
29 | chunkSize: 250,
30 | chunkOverlap: 10,
31 | })
32 |
33 | const splitDocs = await splitter.splitDocuments(docs)
34 |
35 | // Reduce the size of the metadata for each document -- lots of useless pdf information
36 | const reducedDocs = splitDocs.map((doc) => {
37 | const reducedMetadata = { ...doc.metadata }
38 | delete reducedMetadata.pdf // Remove the 'pdf' field
39 | return new Document({
40 | pageContent: doc.pageContent,
41 | metadata: reducedMetadata,
42 | })
43 | })
44 |
45 | /** STEP TWO: UPLOAD TO DATABASE */
46 |
47 | const client = new PineconeClient()
48 |
49 | await client.init({
50 | apiKey: process.env.PINECONE_API_KEY,
51 | environment: process.env.PINECONE_ENVIRONMENT,
52 | })
53 |
54 | const pineconeIndex = client.Index(process.env.PINECONE_INDEX)
55 |
56 | await PineconeStore.fromDocuments(reducedDocs, new OpenAIEmbeddings(), {
57 | pineconeIndex,
58 | namespace: bookId.toString(),
59 | })
60 |
61 | console.log('Successfully uploaded to DB')
62 | // Modify output as needed
63 | return res.status(200).json({
64 | result: `Uploaded to Pinecone! Before splitting: ${docs.length}, After splitting: ${splitDocs.length}`,
65 | })
66 | } else {
67 | res.status(405).json({ message: 'Method not allowed' })
68 | }
69 | }
70 |
--------------------------------------------------------------------------------
/pages/api/solutions/pdf-upload-soln.js:
--------------------------------------------------------------------------------
1 | import { PDFLoader } from "langchain/document_loaders/fs/pdf";
2 | import { PineconeClient } from "@pinecone-database/pinecone";
3 | import { Document } from "langchain/document";
4 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
5 | import { PineconeStore } from "langchain/vectorstores/pinecone";
6 | import { CharacterTextSplitter } from "langchain/text_splitter";
7 |
8 | /**
9 | *
10 | * WARNING: THIS IS THE SOLUTION! Please try coding before viewing this.
11 | *
12 | */
13 |
14 | // Example: https://js.langchain.com/docs/modules/indexes/document_loaders/examples/file_loaders/pdf
15 |
16 | /**
17 | *
18 | * INSTRUCTIONS
19 | * 1. Run with book
20 | error {
21 | name: 'PineconeError',
22 | source: 'server',
23 | message: 'PineconeClient: Error calling upsert: PineconeError: metadata size is 140052 bytes, which exceeds the limit of 40960 bytes per vector',
24 | stack: ''
25 | }
26 | * 2. Explain why -- the vector metadata size is too big.
27 | Language models are often limited by the amount of text you can pass to them, so it is necessary to split documents into smaller chunks. LangChain provides several utilities for doing so.
28 | https://js.langchain.com/docs/modules/indexes/text_splitters/
29 |
30 | Play with chunk sizes... too small and you can't understand.
31 | Fine tune this to your liking.
32 | More vectors = more $$
33 |
34 |
35 | 3. Pinecone size 1536
36 | https://platform.openai.com/docs/guides/embeddings/second-generation-models
37 |
38 | 4. Upsert metadata size -- add this after split Docs
39 |
40 | // Reduce the size of the metadata for each document
41 | const reducedDocs = splitDocs.map(doc => {
42 | const reducedMetadata = { ...doc.metadata };
43 | delete reducedMetadata.pdf; // Remove the 'pdf' field
44 | return new Document({
45 | pageContent: doc.pageContent,
46 | metadata: reducedMetadata,
47 | });
48 | });
49 |
50 |
51 |
52 |
53 |
54 | * */
55 |
56 | export default async function handler(req, res) {
57 | if (req.method === "GET") {
58 | console.log("Uploading book");
59 | // Enter your code here
60 | /** STEP ONE: LOAD DOCUMENT */
61 | const bookPath =
62 | "/Users/shawnesquivel/GitHub/yt-script-generator/data/document_loaders/naval-ravikant-book.pdf";
63 | const loader = new PDFLoader(bookPath);
64 |
65 | const docs = await loader.load();
66 |
67 | if (docs.length === 0) {
68 | console.log("No documents found.");
69 | return;
70 | }
71 |
72 | const splitter = new CharacterTextSplitter({
73 | separator: " ",
74 | chunkSize: 250,
75 | chunkOverlap: 10,
76 | });
77 |
78 | const splitDocs = await splitter.splitDocuments(docs);
79 |
80 | // Reduce the size of the metadata for each document -- lots of useless pdf information
81 | const reducedDocs = splitDocs.map((doc) => {
82 | const reducedMetadata = { ...doc.metadata };
83 | delete reducedMetadata.pdf; // Remove the 'pdf' field
84 | return new Document({
85 | pageContent: doc.pageContent,
86 | metadata: reducedMetadata,
87 | });
88 | });
89 |
90 | // docs.forEach((doc) => {
91 | // console.log(doc);
92 | // });
93 |
94 | // console.log(`Uploading documents to Pinecone: ${docs}`);
95 |
96 | console.log(docs[100]);
97 | console.log(splitDocs[100].metadata);
98 | console.log(reducedDocs[100].metadata);
99 |
100 | /** STEP TWO: UPLOAD TO DATABASE */
101 |
102 | const client = new PineconeClient();
103 |
104 | await client.init({
105 | apiKey: process.env.PINECONE_API_KEY,
106 | environment: process.env.PINECONE_ENVIRONMENT,
107 | });
108 |
109 | const pineconeIndex = client.Index(process.env.PINECONE_INDEX);
110 |
111 | await PineconeStore.fromDocuments(reducedDocs, new OpenAIEmbeddings(), {
112 | pineconeIndex,
113 | });
114 |
115 | console.log("Successfully uploaded to DB");
116 | // Modify output as needed
117 | return res.status(200).json({
118 | result: `Uploaded to Pinecone! Before splitting: ${docs.length}, After splitting: ${splitDocs.length}`,
119 | });
120 | } else {
121 | res.status(405).json({ message: "Method not allowed" });
122 | }
123 | }
124 |
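// A quick way to see why the raw 'pdf' metadata field blows past Pinecone's
// 40960-byte-per-vector limit from the error above (a sketch; exact byte
// counts will vary by document):
const metadataBytes = (doc) =>
  Buffer.byteLength(JSON.stringify(doc.metadata), "utf8");
// e.g. compare metadataBytes(splitDocs[100]) against metadataBytes(reducedDocs[100])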
--------------------------------------------------------------------------------
/pages/api/solutions/resume-query-metadata-soln.js:
--------------------------------------------------------------------------------
1 | /**
2 | * This endpoint is used to load the resumes into the chain, then upload them to the Pinecone database.
3 | * Tutorial: https://js.langchain.com/docs/modules/indexes/document_loaders/examples/file_loaders/directory
4 | * Summarization: https://js.langchain.com/docs/modules/chains/other_chains/summarization
5 | * Dependencies: npm install pdf-parse
6 | */
7 |
8 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
9 | import { PineconeStore } from "langchain/vectorstores/pinecone";
10 | import { PineconeClient } from "@pinecone-database/pinecone";
11 | import { OpenAI } from "langchain/llms/openai";
12 | import { VectorDBQAChain } from "langchain/chains";
13 | import { PromptTemplate } from "langchain/prompts";
14 |
15 | /**
16 | *
17 | * WARNING: THIS IS THE SOLUTION! Please try coding before viewing this.
18 | *
19 | */
20 |
21 | export default async function handler(req, res) {
22 | try {
23 | // do stuff
24 | const { prompt } = req.body;
25 |
26 | /** Load vector database */
27 | const client = new PineconeClient();
28 | await client.init({
29 | apiKey: process.env.PINECONE_API_KEY,
30 | environment: process.env.PINECONE_ENVIRONMENT,
31 | });
32 |
33 | const pineconeIndex = client.Index(process.env.PINECONE_INDEX);
34 |
35 | const vectorStore = await PineconeStore.fromExistingIndex(
36 | new OpenAIEmbeddings(),
37 | { pineconeIndex }
38 | );
39 |
40 | // Create VectorDBQAChain
41 | const model = new OpenAI();
42 | const chain = VectorDBQAChain.fromLLM(model, vectorStore, {
43 | k: 1,
44 | returnSourceDocuments: true,
45 | });
46 |
47 | // Prompt Template
48 | const promptTemplate = new PromptTemplate({
49 | template: `Assume you are a Human Resources Director. According to the resumes, answer this question: {question}`,
50 | inputVariables: ["question"],
51 | });
52 |
53 | const formattedPrompt = await promptTemplate.format({
54 | question: prompt,
55 | });
56 |
57 | // console.log({ formattedPrompt });
58 |
59 | const response = await chain.call({
60 | query: formattedPrompt,
61 | });
62 |
63 | console.log({ response });
64 |
65 | return res.status(200).json({
66 | // String
67 | output: response.text,
68 | // [Document, Document]
69 | sourceDocuments: response.sourceDocuments,
70 | });
71 | } catch (err) {
72 | console.error(err);
73 | return res.status(500).json({ error: "Error" });
74 | }
75 | }
76 |
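77 | // Example (assumed) client-side usage -- a POST request with a `prompt` field,
78 | // matching what this handler destructures from req.body:
79 | //
80 | // const res = await fetch("/api/solutions/resume-query-metadata-soln", {
81 | //   method: "POST",
82 | //   headers: { "Content-Type": "application/json" },
83 | //   body: JSON.stringify({ prompt: "Who has experience in marketing?" }),
84 | // });
85 | // const { output, sourceDocuments } = await res.json();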
--------------------------------------------------------------------------------
/pages/api/solutions/resume-upload-soln.js:
--------------------------------------------------------------------------------
1 | // /pages/api/resume_upload.js
2 | // Import dependencies
3 |
4 | /**
5 | * This endpoint is used to load the resumes into the chain, then upload them to the Pinecone database.
6 | * Tutorial: https://js.langchain.com/docs/modules/indexes/document_loaders/examples/file_loaders/directory
7 | * Summarization: https://js.langchain.com/docs/modules/chains/other_chains/summarization
8 | * Dependencies: npm install pdf-parse
9 | */
10 |
11 | import { DirectoryLoader } from "langchain/document_loaders/fs/directory";
12 | import { PDFLoader } from "langchain/document_loaders/fs/pdf";
13 | import { CharacterTextSplitter } from "langchain/text_splitter";
14 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
15 | import { PineconeStore } from "langchain/vectorstores/pinecone";
16 | import { PineconeClient } from "@pinecone-database/pinecone";
17 | import { loadSummarizationChain } from "langchain/chains";
18 | import { OpenAI } from "langchain/llms/openai";
19 |
20 | /**
21 | *
22 | * WARNING: THIS IS THE SOLUTION! Please try coding before viewing this.
23 | *
24 | */
25 |
26 | export default async function handler(req, res) {
27 |   // Note: this endpoint takes no prompt; it just loads and uploads the resumes
28 | // console.log(process.env.PINECONE_API_KEY);
29 | // console.log(process.env.PINECONE_ENVIRONMENT);
30 | // console.log(process.env.PINECONE_INDEX);
31 | // Always use a try catch block to do asynchronous requests and catch any errors
32 | try {
33 | // Load the directory
34 | const loader = new DirectoryLoader(
35 | "/Users/shawnesquivel/Desktop/openai-javascript-course/data/resumes",
36 | {
37 |         ".pdf": (path) => new PDFLoader(path),
38 | }
39 | );
40 |
41 | const docs = await loader.load();
42 |     // Logs the number of loaded documents (3 resumes in this example)
43 |     // console.log(`Loaded ${docs.length}`);
44 |
45 | // Split the documents with their metadata
46 | const splitter = new CharacterTextSplitter({
47 | separator: " ",
48 | chunkSize: 200,
49 | chunkOverlap: 20,
50 | });
51 |
52 | //
53 | const splitDocs = await splitter.splitDocuments(docs);
54 |
55 | // console.log(`Split Docs: ${splitDocs.length}`);
56 |
57 | // console.log(docs[0]);
58 | // console.log(splitDocs[0]);
59 |
60 | // reduce the metadata and make it more searchable
61 | const reducedDocs = splitDocs.map((doc) => {
62 | // ["Users", "shawnesquivel", ... "resume_aubrey_graham.pdf"]
63 | const fileName = doc.metadata.source.split("/").pop();
64 | // ["resume", "aubrey", "graham.pdf"]
65 | const [_, firstName, lastName] = fileName.split("_");
66 |
67 | return {
68 | ...doc,
69 | metadata: {
70 | first_name: firstName,
71 | last_name: lastName.slice(0, -4),
72 | docType: "resume",
73 | },
74 | };
75 | });
76 |
77 | // console.log(reducedDocs[4]);
78 | let summaries = [];
79 | const model = new OpenAI({ temperature: 0 });
80 | const summarizeAllChain = loadSummarizationChain(model, {
81 | type: "map_reduce",
82 | });
83 |
84 | // raw documents
85 | const summarizeRes = await summarizeAllChain.call({
86 | input_documents: docs,
87 | });
88 | summaries.push({ summary: summarizeRes.text });
89 |
90 | /** Summarize each candidate */
91 | for (let doc of docs) {
92 | const summarizeOneChain = loadSummarizationChain(model, {
93 | type: "map_reduce",
94 | });
95 | const summarizeOneRes = await summarizeOneChain.call({
96 | input_documents: [doc],
97 | });
98 |
99 | console.log({ summarizeOneRes });
100 | summaries.push({ summary: summarizeOneRes.text });
101 | }
102 |
103 | /** Upload the reducedDocs */
104 | const client = new PineconeClient();
105 | await client.init({
106 | apiKey: process.env.PINECONE_API_KEY,
107 | environment: process.env.PINECONE_ENVIRONMENT,
108 | });
109 |
110 | const pineconeIndex = client.Index(process.env.PINECONE_INDEX);
111 |
112 | await PineconeStore.fromDocuments(reducedDocs, new OpenAIEmbeddings(), {
113 | pineconeIndex,
114 | });
115 |
116 | console.log("Uploaded to Pinecone");
117 |
118 | console.log({ summaries });
119 | // [{summary: 'gdajkljgadkl'}, {summary: 'gdjaklgkadl'}]
120 | const summaryStr = JSON.stringify(summaries, null, 2);
121 |
122 | return res.status(200).json({ output: summaryStr });
123 | } catch (err) {
124 | // If we have an error
125 |
126 | console.error(err);
127 | return res.status(500).json({ error: err });
128 | }
129 | }
130 |
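131 | // After the map above, each chunk's metadata looks roughly like:
132 | // { first_name: "aubrey", last_name: "graham", docType: "resume" }
133 | // (assuming files named resume_<first>_<last>.pdf), which makes it easy to
134 | // filter Pinecone queries by candidate later on.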
--------------------------------------------------------------------------------
/pages/api/solutions/streaming-soln.js:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 | import SSE from "express-sse";
3 |
4 | /**
5 | *
6 | * WARNING: THIS IS THE SOLUTION! Please try coding before viewing this.
7 | *
8 | */
9 |
10 | const sse = new SSE();
11 |
12 | export default function handler(req, res) {
13 | if (req.method === "POST") {
14 | const { input } = req.body;
15 |
16 | if (!input) {
17 | throw new Error("No input");
18 | }
19 | // Initialize model
20 | const chat = new OpenAI({
21 | streaming: true,
22 | callbacks: [
23 | {
24 | handleLLMNewToken(token) {
25 | sse.send(token, "newToken");
26 | },
27 | },
28 | ],
29 | });
30 |
31 | // create the prompt
32 | const prompt = `Create me a short rap about my name and city. Make it funny and punny. Name: ${input}`;
33 |
34 | console.log({ prompt });
35 |     // Call the model -- the callback above pushes each new token to the SSE stream
36 | chat.call(prompt).then(() => {
37 | sse.send(null, "end");
38 | });
39 |
40 | return res.status(200).json({ result: "Streaming complete" });
41 | } else if (req.method === "GET") {
42 | sse.init(req, res);
43 | } else {
44 | res.status(405).json({ message: "Method not allowed" });
45 | }
46 | }
47 |
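48 | // How a browser client might consume this stream (a sketch, not in this repo):
49 | //
50 | // const source = new EventSource("/api/solutions/streaming-soln");
51 | // source.addEventListener("newToken", (e) => console.log(e.data));
52 | // source.addEventListener("end", () => source.close());
53 | //
54 | // i.e. the GET request subscribes to the SSE stream, while the POST request
55 | // kicks off the model call that feeds tokens into it.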
--------------------------------------------------------------------------------
/pages/api/solutions/video-chat-soln.js:
--------------------------------------------------------------------------------
1 | // /pages/api/transcript.js
2 | import { YoutubeTranscript } from "youtube-transcript";
3 | import { ChatOpenAI } from "langchain/chat_models/openai";
4 | import { ConversationalRetrievalQAChain } from "langchain/chains";
5 | import { HNSWLib } from "langchain/vectorstores/hnswlib";
6 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
7 | import { CharacterTextSplitter } from "langchain/text_splitter";
8 |
9 | /**
10 | *
11 | * WARNING: THIS IS THE SOLUTION! Please try coding before viewing this.
12 | *
13 | */
14 |
15 | // First, we'll initialize the chain and the chat history so that they can be preserved on multiple calls to the API
16 | let chain;
17 | // Remember, the chat history is where we store each human/chatbot message.
18 | let chatHistory = [];
19 |
20 | // DO THIS SECOND
21 | const initializeChain = async (initialPrompt, transcript) => {
22 | try {
23 | // Initialize model with GPT-3.5
24 | const model = new ChatOpenAI({
25 | temperature: 0.8,
26 | modelName: "gpt-3.5-turbo",
27 | });
28 |     // Create a text splitter; we use a small chunk size and chunk overlap since we are working with short transcript sentences
29 | const splitter = new CharacterTextSplitter({
30 | separator: " ",
31 | chunkSize: 7,
32 | chunkOverlap: 3,
33 | });
34 |
35 | // Using the splitter, we create documents from a bigger document, in this case the YouTube Transcript
36 | const docs = await splitter.createDocuments([transcript]);
37 |
38 |     console.log("Loading data", docs[0]);
39 |
40 | // Upload chunks to database as documents
41 | // We'll be using HNSWLib for this one.
42 | // The nice thing about this one is that we don't need to create any accounts or get any API keys besides our OpenAI key to use this library
43 | // So I find that it's nice for doing some quick prototyping.
44 | // But the downside is that you don't get the nice dashboard like we had in Pinecone.
45 | const vectorStore = await HNSWLib.fromDocuments(
46 | [{ pageContent: transcript }],
47 | new OpenAIEmbeddings()
48 | );
49 |
50 | // Just to show you, we'll also save the vector store as a file in case you want to retrieve it later.
51 | // We'll copy our root directory and save it as a variable
52 | const directory = "/Users/shawnesquivel/GitHub/yt-script-generator/";
53 | await vectorStore.save(directory);
54 | // it will create some files for us, including a way for us to view the vector store documents which is helpful.
55 | // then you can access it like this:
56 | const loadedVectorStore = await HNSWLib.load(
57 | directory,
58 | new OpenAIEmbeddings()
59 | );
60 |
61 | // The ConversationalRetrievalQA chain builds on RetrievalQAChain to provide a chat history component.
62 |
63 | // To create one, you will need a retriever. In the below example, we will create one from a vectorstore, which can be created from embeddings.
64 |
65 | // Remember we can use the loadedVectorStore or the vectorStore, in case for example you want to scale this application up and use the same vector store to store multiple Youtube transcripts.
66 | chain = ConversationalRetrievalQAChain.fromLLM(
67 | model,
68 | vectorStore.asRetriever(),
69 | { verbose: true } // Add verbose option here
70 | );
71 |
72 | // It requires two inputs: a question and the chat history. It first combines the chat history and the question into a standalone question, then looks up relevant documents from the retriever, and then passes those documents and the question to a question answering chain to return a response.
73 | const response = await chain.call({
74 | question: initialPrompt,
75 | chat_history: chatHistory,
76 | });
77 |
78 | // Update history
79 | chatHistory.push({
80 | role: "assistant",
81 | content: response.text,
82 | });
83 |
84 | console.log({ chatHistory });
85 | return response;
86 | } catch (error) {
87 | console.error(error);
88 | }
89 | };
90 |
91 | export default async function handler(req, res) {
92 | if (req.method === "POST") {
93 | // DO THIS FIRST
94 | // First we'll destructure the prompt and firstMsg from the POST request body
95 |     const { prompt, firstMsg } = req.body;
96 |
97 |
98 | // Then if it's the first message, we want to initialize the chain, since it doesn't exist yet
99 | if (firstMsg) {
100 | console.log("Initializing chain");
101 |
102 | try {
103 | // So first of all, we want to give it our human message, which was to ask for a summary of the YouTube URL
104 | const initialPrompt = `Give me a summary of the transcript: ${prompt}`;
105 |
106 | chatHistory.push({
107 | role: "user",
108 | content: initialPrompt,
109 | });
110 |
111 | // Here, we'll use a generic YouTube Transcript API to get the transcript of a youtube video
112 |         // As you can see, the transcript fetcher takes the videoId/videoURL as the first argument to the function
113 | const transcriptResponse = await YoutubeTranscript.fetchTranscript(
114 | prompt
115 | );
116 |
117 | // and we'll just add some error handling in case the API fails
118 | if (!transcriptResponse) {
119 | return res.status(400).json({ error: "Failed to get transcript" });
120 | }
121 |
122 | // Now let's see what that transcriptResponse looks like
123 |
124 | console.log({ transcriptResponse });
125 |
126 | // We can see that it's a big array of lines. Let's squish it down into one string first to make it easier to use.
127 |
128 | // We initialize the transcript string
129 | let transcript = "";
130 |
131 |         // Then the forEach method calls each element in the array, e.g. line = element, and we can do something with that value
132 |
133 | // in this case, we'll add each line of text to the empty string variable to get a single string with the entire transcript
134 | transcriptResponse.forEach((line) => {
135 | transcript += line.text;
136 | });
137 |
138 | // Now, let's create a separate function called initialize chain
139 | // We'll pass in the first prompt and the context, in this case the transcript
140 | const response = await initializeChain(initialPrompt, transcript);
141 | console.log("Chain:", chain);
142 | console.log(response);
143 |
144 |         // And then we'll just get the response back and the chatHistory
145 | return res.status(200).json({ output: response, chatHistory });
146 | } catch (err) {
147 | console.error(err);
148 | return res
149 | .status(500)
150 | .json({ error: "An error occurred while fetching transcript" });
151 | }
152 |
153 | // DO THIS THIRD
154 | } else {
155 | // If it's not the first message, we can chat with the bot
156 | console.log("Received question");
157 | try {
158 | console.log("Asking:", prompt);
159 | console.log("Chain:", chain);
160 |
161 | // First we'll add the user message
162 | chatHistory.push({
163 | role: "user",
164 | content: prompt,
165 | });
166 | // Then we'll pass the entire chat history with all the previous messages back
167 | const response = await chain.call({
168 | question: prompt,
169 | chat_history: chatHistory,
170 | });
171 | // And we'll add the response back as well
172 | chatHistory.push({
173 | role: "assistant",
174 | content: response.text,
175 | });
176 |
177 | return res.status(200).json({ output: response, chatHistory });
178 | } catch (error) {
179 | // Generic error handling
180 | console.error(error);
181 | res
182 | .status(500)
183 | .json({ error: "An error occurred during the conversation." });
184 | }
185 | }
186 | }
187 | }
188 |
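189 | // Two notes on this solution:
190 | // 1. `chain` and `chatHistory` live at module scope, so they only persist
191 | //    between requests while this server instance stays warm -- fine for local
192 | //    dev, but not a durable store.
193 | // 2. The splitter's `docs` above are created for demonstration; the vector
194 | //    store actually embeds the whole transcript as a single document. You
195 | //    could pass `docs` to HNSWLib.fromDocuments(docs, new OpenAIEmbeddings())
196 | //    instead to index the individual chunks.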
--------------------------------------------------------------------------------
/pages/api/streaming.js:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 | import SSE from "express-sse";
3 |
4 | const sse = new SSE();
5 |
6 | export default function handler(req, res) {
7 | if (req.method === "POST") {
8 | const { input } = req.body;
9 |
10 | if (!input) {
11 | throw new Error("No input");
12 | }
13 | // Initialize model
14 |
15 | // create the prompt
16 |
17 | // call frontend to backend
18 |
19 | return res.status(200).json({ result: "OK" });
20 | } else if (req.method === "GET") {
21 | sse.init(req, res);
22 | } else {
23 | res.status(405).json({ message: "Method not allowed" });
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/pages/api/video-chat.js:
--------------------------------------------------------------------------------
1 | // /pages/api/transcript.js
2 | import { YoutubeTranscript } from "youtube-transcript";
3 | import { ChatOpenAI } from "langchain/chat_models/openai";
4 | import { ConversationalRetrievalQAChain } from "langchain/chains";
5 | import { HNSWLib } from "langchain/vectorstores/hnswlib";
6 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
7 | import { CharacterTextSplitter } from "langchain/text_splitter";
8 | import { OpenAI } from "langchain";
9 |
10 | // Global variables
11 |
12 | // DO THIS SECOND
13 | const initializeChain = async (initialPrompt, transcript) => {
14 | try {
15 | console.log({ chatHistory });
16 | return response;
17 | } catch (error) {
18 | console.error(error);
19 | }
20 | };
21 |
22 | export default async function handler(req, res) {
23 | if (req.method === "POST") {
24 | // DO THIS FIRST
25 |
26 | // Then if it's the first message, we want to initialize the chain, since it doesn't exist yet
27 | if (x) {
28 | try {
29 |         // And then we'll just get the response back and the chatHistory
30 | return res.status(200).json({ output: response, chatHistory });
31 | } catch (err) {
32 | console.error(err);
33 | return res
34 | .status(500)
35 | .json({ error: "An error occurred while fetching transcript" });
36 | }
37 |
38 |       // DO THIS THIRD
39 | } else {
40 | // If it's not the first message, we can chat with the bot
41 |
42 | try {
43 | return res.status(200).json({ output: response, chatHistory });
44 | } catch (error) {
45 | // Generic error handling
46 | console.error(error);
47 | res
48 | .status(500)
49 | .json({ error: "An error occurred during the conversation." });
50 | }
51 | }
52 | }
53 | }
54 |
--------------------------------------------------------------------------------
/playground/quickstart-soln.mjs:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 | import { ChatOpenAI } from "langchain/chat_models/openai";
3 | import { PromptTemplate } from "langchain/prompts";
4 | import { LLMChain } from "langchain/chains";
5 | import { initializeAgentExecutorWithOptions } from "langchain/agents";
6 | import { SerpAPI } from "langchain/tools";
7 | import { Calculator } from "langchain/tools/calculator";
8 | import { BufferMemory } from "langchain/memory";
9 | import { ConversationChain } from "langchain/chains";
10 | import { PlanAndExecuteAgentExecutor } from "langchain/experimental/plan_and_execute";
11 |
12 | /**
13 | *
14 | * WARNING: THIS IS THE SOLUTION! Please try coding before viewing this.
15 | *
16 | */
17 |
18 | // First, run this in your terminal:
19 |
20 | // export OPENAI_API_KEY=sk-12345
21 | // export SERPAPI_API_KEY=your-serpapi-key
22 | // Replace these placeholders with your own OpenAI and SerpAPI keys
23 |
24 | // https://platform.openai.com/account/api-keys
25 |
26 | /**
27 | *
28 | * Prompt Templates: Manage Prompts for LLMs
29 |
30 |
31 | When you make a call to ChatGPT, the LLM does not just see your message as-is.
32 |
33 | Basically your message is deconstructed and then fed into a template, so that the chatbot can better help you.
34 |
35 | They take your message and feed it into a larger prompt template, for example:
36 |
37 | const messageTemplate = `You've been speaking with a user, and they just said: "${userMessage}". How would you respond?`;
38 |
39 | Langchain has already pre-defined some templates to make this SUPER easy!
40 | *
41 | *
42 | */
43 |
44 | const template =
45 | "Please give me some ideas for content I should write about regarding {topic}? The content is for {socialplatform}. Translate to {language}.";
46 | const prompt = new PromptTemplate({
47 | template: template,
48 | inputVariables: ["topic", "socialplatform", "language"],
49 | });
50 |
51 | // This allows us to format the template into a string, which is finally passed to the LLM
52 | // const formattedTemplate = await prompt.format({
53 | // topic: "artificial intelligence",
54 | // socialplatform: "twitter",
55 | // language: "spanish",
56 | // });
57 | // console.log(formattedTemplate);
58 |
59 | /**
60 | *
61 | * To properly use the LLM, we generate chains.
62 | *
63 |  * Chains are "chains" of complex tasks, linked together. Hence the name LangChain - it chains large language model tasks!
64 | *
65 |  * The first "task" we need to chain together is combining a Prompt with a call to the OpenAI model
66 | *
67 |  * This is the simplest task since it's basically only two steps.
68 | *
69 | *
70 | */
71 |
72 | const model = new OpenAI({ temperature: 0.9 });
73 | const chain = new LLMChain({ llm: model, prompt: prompt });
74 |
75 | // Now that we've defined the chain, we can call the LLMChain, which does two steps:
76 |
77 | // First it properly formats the prompt according to the user input variables
78 |
79 | // Then it makes the call to Open AI's API!
80 | // const resChain = await chain.call({
81 | // topic: "artificial intelligence",
82 | // socialplatform: "twitter",
83 | // language: "english",
84 | // });
85 |
86 | // console.log({ resChain });
87 |
88 | /**
89 | *
90 | * The second big topic in Langchain is Agents.
91 | *
92 | * We'll cover this in detail in Module 5: AI Content Generator!
93 | *
94 | *
95 | * The biggest difference between a Chain and an Agent?
96 | *
97 | * A chain must be predefined (like configuring a robot)
98 | *
99 | * An agent is given a task and tools, then it figures out how to do the job.
100 | *
101 | *
102 | * E.g. if we want to do research on the internet, a chain will be set up like this:
103 | *
104 | * Chain: First, search the internet using the query. Then summarize it for me.
105 | *
106 | * Agent: First, we give it tools to search the internet. Then we ask it - "Who is Pedro Pascal?"
107 | *
108 | * Then the agent will check its toolbox, figure out how it can get the job done, and do the steps in order.
109 | *
110 |  * For example, let's ask ChatGPT what LangChain is.
111 | *
112 | *
113 | *
114 | */
115 | // 0 = deterministic, 1 = creative
116 | // https://platform.openai.com/docs/models/
117 | // Gpt3.5turbo = fast, 1/10th cost of davinci
118 | // davinci - default
119 | const agentModel = new OpenAI({
120 | temperature: 0,
121 | modelName: "text-davinci-003",
122 | });
123 |
124 | // serpTool.returnDirect = true;
125 |
126 | const tools = [
127 | new SerpAPI(process.env.SERPAPI_API_KEY, {
128 | location: "Dallas,Texas,United States",
129 | hl: "en",
130 | gl: "us",
131 | }),
132 | new Calculator(),
133 | ];
134 |
135 | // const executor = await initializeAgentExecutorWithOptions(tools, agentModel, {
136 | // agentType: "zero-shot-react-description",
137 | // verbose: true,
138 | // maxIterations: 5,
139 | // });
140 | console.log("Loaded agent.");
141 | const input = "What is Langchain?";
142 |
143 | console.log(`Executing with input "${input}"...`);
144 | // Awesome, so we can see it figured out that it needed to use a search engine.
145 | // const result = await executor.call({ input });
146 |
147 | // console.log(`Got output ${result.output}`);
148 |
149 | /**
150 | *
151 | * Plan and Execute Agents
152 | *
153 |  * Instead of the regular agents, which just evaluate their tools then act, Plan-and-Execute works a little differently.
154 | *
155 | *
156 | * This example shows how to use an agent that uses the Plan-and-Execute framework to answer a query. This framework works differently from the other currently supported agents (which are all classified as Action Agents) in that it uses a two step process:
157 |
158 | First, the agent uses an LLM to create a plan to answer the query with clear steps.
159 |
160 | Once it has a plan, it uses an embedded traditional Action Agent to solve each step.
161 |
162 | The idea is that the planning step keeps the LLM more "on track" by breaking up a larger task into simpler subtasks. However, this method requires more individual LLM queries and has higher latency compared to Action Agents.
163 |
164 | *
165 | *
166 | *
167 | */
168 |
169 | const agentTools = [new Calculator(), new SerpAPI()];
170 | // only works with Chat models
171 | const chatModel = new ChatOpenAI({
172 | temperature: 0,
173 | modelName: "gpt-3.5-turbo",
174 | verbose: true,
175 | });
176 | const executor = PlanAndExecuteAgentExecutor.fromLLMAndTools({
177 | llm: chatModel,
178 | tools: agentTools,
179 | });
180 |
181 | const result = await executor.call({
182 | input: `Who is the current president of the United States? What is their current age raised to the second power?`,
183 | });
184 |
185 | console.log({ result });
186 |
187 | // const llm = new OpenAI({});
188 | // const memory = new BufferMemory();
189 | // const conversationChain = new ConversationChain({ llm: llm, memory: memory });
190 | // const res1 = await conversationChain.call({
191 | // input: "Hey. The president of the US is currently Lebron James.",
192 | // });
193 | // console.log(res1);
194 |
195 | // const res2 = await conversationChain.call({
196 | //   input: "Who is the president of the US?",
197 | // });
198 | // console.log(res2);
199 |
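200 | // To run: cd playground && node quickstart-soln.mjs
201 | // (uncomment the commented-out sections above one at a time to step through the examples)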
--------------------------------------------------------------------------------
/playground/quickstart.mjs:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 | import { ChatOpenAI } from "langchain/chat_models/openai";
3 | import { PromptTemplate } from "langchain/prompts";
4 | import { LLMChain } from "langchain/chains";
5 | import { initializeAgentExecutorWithOptions } from "langchain/agents";
6 | import { SerpAPI } from "langchain/tools";
7 | import { Calculator } from "langchain/tools/calculator";
8 | import { BufferMemory } from "langchain/memory";
9 | import { ConversationChain } from "langchain/chains";
10 | import { PlanAndExecuteAgentExecutor } from "langchain/experimental/plan_and_execute";
11 | import { exec } from "child_process";
12 |
13 | // export OPENAI_API_KEY=<>
14 | // export SERPAPI_API_KEY=<>
15 | // Replace with your API keys!
16 |
17 | // to run, go to terminal and enter: cd playground
18 | // then enter: node quickstart.mjs
19 | console.log("Welcome to the LangChain Quickstart Module!");
20 |
--------------------------------------------------------------------------------
/postcss.config.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | plugins: {
3 | tailwindcss: {},
4 | autoprefixer: {},
5 | },
6 | }
7 |
--------------------------------------------------------------------------------
/public/assets/images/brain.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/public/assets/images/brain.png
--------------------------------------------------------------------------------
/public/assets/images/chatbot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/public/assets/images/chatbot.png
--------------------------------------------------------------------------------
/public/assets/images/green-square.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/public/assets/images/green-square.png
--------------------------------------------------------------------------------
/public/assets/images/pdf.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/public/assets/images/pdf.png
--------------------------------------------------------------------------------
/public/assets/images/robohr.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/public/assets/images/robohr.png
--------------------------------------------------------------------------------
/public/assets/images/stream.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/public/assets/images/stream.png
--------------------------------------------------------------------------------
/public/assets/images/tools.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/public/assets/images/tools.png
--------------------------------------------------------------------------------
/public/assets/images/wizard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/public/assets/images/wizard.png
--------------------------------------------------------------------------------
/public/assets/images/youtube.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shawnesquivel/openai-javascript-course/2cd839c8f990c6238fc14942228951b57be156ed/public/assets/images/youtube.png
--------------------------------------------------------------------------------
/public/hamburger.svg:
--------------------------------------------------------------------------------
1 |
5 |
--------------------------------------------------------------------------------
/public/next.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/public/vercel.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tailwind.config.js:
--------------------------------------------------------------------------------
1 | /** @type {import('tailwindcss').Config} */
2 | const { fontFamily } = require("tailwindcss/defaultTheme");
3 | module.exports = {
4 | content: [
5 | "./pages/**/*.{js,ts,jsx,tsx,mdx}",
6 | "./components/**/*.{js,ts,jsx,tsx,mdx}",
7 | "./app/**/*.{js,ts,jsx,tsx,mdx}",
8 | ],
9 | theme: {
10 | extend: {
11 | backgroundImage: {
12 | "gradient-radial": "radial-gradient(var(--tw-gradient-stops))",
13 | "gradient-conic":
14 | "conic-gradient(from 180deg at 50% 50%, var(--tw-gradient-stops))",
15 | },
16 | fontFamily: {
17 | heading: ["var(--font-press-start)", ...fontFamily.sans],
18 | },
19 | fontSize: {
20 | 10: "10px", // Add this line
21 | },
22 | },
23 | },
24 | plugins: [],
25 | };
26 |
--------------------------------------------------------------------------------
/tools/SerpAPI.js:
--------------------------------------------------------------------------------
1 | import { SerpAPI } from "langchain/tools";
2 |
3 | const SerpAPITool = () => {};
4 |
5 | export default SerpAPITool;
6 |
--------------------------------------------------------------------------------
/tools/WebBrowser.js:
--------------------------------------------------------------------------------
1 | import { WebBrowser } from "langchain/tools/webbrowser";
2 | import { ChatOpenAI } from "langchain/chat_models/openai";
3 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
4 |
5 | const WebBrowserTool = () => {
6 | // do stuff!
7 | };
8 |
9 | export default WebBrowserTool;
10 |
--------------------------------------------------------------------------------
/tools/solutions/SerpAPI-soln.js:
--------------------------------------------------------------------------------
1 | import { SerpAPI } from "langchain/tools";
2 |
3 | /**
4 | *
5 | * WARNING: THIS IS THE SOLUTION! Please try coding before viewing this.
6 | *
7 | */
8 |
9 | const SerpAPITool = () => {
10 | const serpAPI = new SerpAPI(process.env.SERPAPI_API_KEY, {
11 | baseUrl: "http://localhost:3000/agents",
12 | location: "Vancouver,British Columbia, Canada",
13 | hl: "en",
14 | gl: "us",
15 | });
16 | serpAPI.returnDirect = true;
17 |
18 | return serpAPI;
19 | };
20 |
21 | export default SerpAPITool;
22 |
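23 | // Note: `returnDirect = true` makes the agent return this tool's output to the
24 | // user as-is, instead of passing it back through the LLM for another step.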
--------------------------------------------------------------------------------
/tools/solutions/WebBrowser-soln.js:
--------------------------------------------------------------------------------
1 | import { WebBrowser } from "langchain/tools/webbrowser";
2 | import { ChatOpenAI } from "langchain/chat_models/openai";
3 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
4 |
5 | /**
6 | *
7 | * WARNING: THIS IS THE SOLUTION! Please try coding before viewing this.
8 | *
9 | */
10 | const WebBrowserTool = () => {
11 | const model = new ChatOpenAI({ temperature: 0 });
12 | const embeddings = new OpenAIEmbeddings({});
13 |
14 | const browser = new WebBrowser({ model, embeddings });
15 | browser.returnDirect = true;
16 |
17 | return browser;
18 | };
19 |
20 | export default WebBrowserTool;
21 |
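22 | // Example (assumed) agent input for this tool -- a comma-separated URL and task:
23 | //   "https://js.langchain.com,what is LangChain?"
24 | // The tool fetches the page, embeds its content, and answers the task from it.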
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "lib": [
4 | "dom",
5 | "dom.iterable",
6 | "esnext"
7 | ],
8 | "allowJs": true,
9 | "skipLibCheck": true,
10 | "strict": false,
11 | "forceConsistentCasingInFileNames": true,
12 | "noEmit": true,
13 | "incremental": true,
14 | "esModuleInterop": true,
15 | "module": "esnext",
16 | "moduleResolution": "node",
17 | "resolveJsonModule": true,
18 | "isolatedModules": true,
19 | "jsx": "preserve",
20 | "plugins": [
21 | {
22 | "name": "next"
23 | }
24 | ],
25 | "strictNullChecks": true,
26 | "baseUrl": ".",
27 | "paths": {
28 | "@/utils/*": ["utils/*"]
29 | }
30 | },
31 | "include": [
32 | "next-env.d.ts",
33 | ".next/types/**/*.ts",
34 | "**/*.ts",
35 | "**/*.tsx"
36 | ],
37 | "exclude": [
38 | "node_modules"
39 | ]
40 | }
41 |
--------------------------------------------------------------------------------
/utils/extractVideoId.js:
--------------------------------------------------------------------------------
1 | export default function extractVideoId(url) {
2 | // do stuff
3 | }
4 |
--------------------------------------------------------------------------------
/utils/getVideoMetaData.js:
--------------------------------------------------------------------------------
1 | import axios from "axios";
2 |
3 | export default async function getVideoMetaData(videoId) {
4 | // enable api key and setup next.config.js
5 | const url = `https://www.googleapis.com/youtube/v3/videos?id=${videoId}&key=${process.env.GOOGLE_API_KEY}&part=snippet,contentDetails,statistics,status`;
6 |
7 | try {
8 | // { data: {items: [metadata]}}
9 | // Clean up the response
10 | } catch (err) {
11 | console.error(`Failed to get metadata: ${err}`);
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/utils/solutions/extractVideoId-soln.js:
--------------------------------------------------------------------------------
1 | export default function extractVideoId(url) {
2 | const urlParams = new URLSearchParams(new URL(url).search);
3 | return urlParams.get("v");
4 | }
5 |
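6 | // e.g. extractVideoId("https://www.youtube.com/watch?v=dQw4w9WgXcQ") => "dQw4w9WgXcQ"
7 | // Note: this handles standard watch?v= URLs; short youtu.be links would need
8 | // extra handling.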
--------------------------------------------------------------------------------
/utils/solutions/getVideoMetadata-soln.js:
--------------------------------------------------------------------------------
1 | import axios from "axios";
2 |
3 | export default async function getVideoMetaData(videoId) {
4 | // First, we need to make sure we have our GOOGLE_API_KEY set up
5 | // https://console.cloud.google.com/apis/
6 | // Look up the YouTube Data API V3
7 | // Enable API key
8 | // Copy into .env as GOOGLE_API_KEY
9 | // Configure Next.Config.JS
10 | const url = `https://www.googleapis.com/youtube/v3/videos?id=${videoId}&key=${process.env.GOOGLE_API_KEY}&part=snippet,contentDetails,statistics,status`;
11 |
12 | try {
13 | // HTTP request { data: {items: [metadata ]}}
14 | const response = await axios.get(url);
15 | const data = response.data;
16 | const metadata = data.items[0];
17 |
18 | console.log("GetMetadata", { metadata });
19 |
20 | // Clean up the response
21 | const videoTitle = metadata.snippet.title;
22 | const videoDescription = metadata.snippet.description;
23 | const shortenedDescription = videoDescription.split(".")[0];
24 |
25 | const videoId = metadata.id;
26 | // Create a small metadata object to return
27 | const shortMetadata = {
28 | videoTitle,
29 | videoDescription: shortenedDescription,
30 | videoId,
31 | };
32 | return shortMetadata; // returns the first item, which should be the video if the id is valid
33 | } catch (error) {
34 | console.error(`Failed to fetch video metadata: ${error}`);
35 | }
36 | }
37 |
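38 | // Example return value (shape only):
39 | // { videoTitle: "...", videoDescription: "first sentence only", videoId: "..." }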
--------------------------------------------------------------------------------