├── .gitignore
├── LICENSE
├── README.md
├── package-lock.json
├── package.json
├── public
│   ├── favicon.ico
│   ├── index.html
│   ├── logo192.png
│   ├── logo512.png
│   ├── manifest.json
│   └── robots.txt
├── src
│   ├── App.css
│   ├── App.js
│   ├── App.test.js
│   ├── components
│   │   ├── ActionProvider.js
│   │   ├── Config.js
│   │   ├── LangchainProcessor.js
│   │   ├── MessageParser.js
│   │   └── langchain_options
│   │       ├── AWSRoute.js
│   │       ├── CloudflareWorkersRoute.js
│   │       ├── LLMChat.js
│   │       └── SimpleChain.js
│   ├── index.css
│   ├── index.js
│   ├── logo.svg
│   ├── reportWebVitals.js
│   └── setupTests.js
└── tailwind.config.js

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# dependencies
/node_modules
/.pnp
.pnp.js

# testing
/coverage

# production
/build

# misc
.DS_Store
.env
.env.local
.env.development.local
.env.test.local
.env.production.local

# logs
npm-debug.log*
yarn-debug.log*
yarn-error.log*

# IDEs and editors
.vscode/
.idea/
*.swp
*.swo
*.swn

# OS-specific
*.DS_Store
*.log
*.tmp
Thumbs.db

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2023 ida silfverskiold

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# React-Langchain-Chatbot

A basic ChatGPT clone using OpenAI GPT-3.5-Turbo or GPT-4 via Langchain. A good playground for Langchain AI chatbots.

Bootstrapped with [React-Chatbot-Kit](https://fredrikoseberg.github.io/react-chatbot-kit-docs/docs/getting-started) and [LangchainJS](https://js.langchain.com/docs/get_started/introduction).

## Setup and Run the Project

### Prerequisites

- Node.js v18.x and npm (if not installed, download them from the [Node.js official website](https://nodejs.org/))

### Steps

1. **Clone the Repository**:

```bash
git clone https://github.com/ilsilfverskiold/react-langchain-chatbot.git
cd react-langchain-chatbot
```

2. **Install Dependencies**:

```bash
npm install
```

3. **Set Up OpenAI API Key**:

   - Obtain your OpenAI API key.
   - Create a `.env` file in the root directory and add your OpenAI key (the sketch after these steps shows where the key is read):

```bash
REACT_APP_OPEN_AI_API_KEY=your_openai_api_key
```

4. **Start the Server**:

```bash
npm start
```
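The key is read in `src/components/LangchainProcessor.js`, where the OpenAI chat client is constructed. That constructor is also where you would point the app at GPT-4 instead of the default GPT-3.5-Turbo. A minimal sketch, assuming the rest of the file stays as is; `"gpt-4"` is only an example value, so use whichever chat model your OpenAI account has access to:

```javascript
import { ChatOpenAI } from "langchain/chat_models/openai";

// Pass modelName to pick the model; without it, Langchain falls back to
// gpt-3.5-turbo. "gpt-4" below is just an example.
const chat = new ChatOpenAI({
  modelName: "gpt-4",
  temperature: 0,
  openAIApiKey: process.env.REACT_APP_OPEN_AI_API_KEY,
});
```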
## Options

1. **ChatGPT Clone**: The script has already been set up in `src/components/LangchainProcessor.js`, so you can run it as is. If you want to change the prompt template, you can do so directly in this file.

```javascript
// The default prompt template is
const promptTemplate = `
You are an ironic and nihilistic chatbot so always answer like so. Don't answer in a "response: answer" format.
Question: {question}
`;
```

2. **A Simple Chain**: Go to `/langchain_options` and find `SimpleChain.js`, rename it to `LangchainProcessor.js`, and use it to replace the `LangchainProcessor.js` in the components folder. You can build on top of this yourself, but at the moment it only uses the question and does not allow for past answers (a sketch of one way to add history follows `SimpleChain.js` further down in this listing). Make sure you tweak the prompt template in there.

3. **Q/A with URL**: Here you will first need to set up your Workers route correctly. See [this repository](https://github.com/ilsilfverskiold/cloudflare-workers-langchain), which goes through it step by step. When you have a working endpoint, go to `/langchain_options` in this directory, find `CloudflareWorkersRoute.js`, rename it to `LangchainProcessor.js`, and use it to replace the `LangchainProcessor.js` in the components folder.
   - Make sure you set `REACT_APP_CLOUDFLARE_WORKERS_AUTH` in your `.env` file to the auth secret you set up with your Worker
   - Make sure you set the URL for the POST request to the endpoint you receive when you deploy your Worker
   - If you are experiencing CORS errors, make sure you allow your IP to access the endpoint (you set this up via your Worker)

4. **Q/A with Text file**: Here you will first need to set up your AWS application correctly. See [this repository](https://github.com/ilsilfverskiold/langchainjs-aws-service) or [this tutorial](https://medium.com/gitconnected/deploying-an-ai-powered-q-a-bot-on-aws-with-langchainjs-and-serverless-9361d0778fbd), either of which goes through it step by step. When you have a working endpoint, go to `/langchain_options` in this directory, find `AWSRoute.js`, rename it to `LangchainProcessor.js`, and use it to replace the `LangchainProcessor.js` in the components folder. Or just replace the code directly in the `LangchainProcessor.js` component.

   - Make sure you set all of your `process.env` keys in a `.env` file:

```bash
REACT_APP_OPEN_AI_API_KEY=
REACT_APP_AWS_POST_URL=https://xxxx.execute-api.eu-central-1.amazonaws.com/dev/question
REACT_APP_AWS_API_KEY=
REACT_APP_AWS_BUCKET_NAME=my-langchain-bucket
```

   - To set your system and prompt templates, look at the code in `AWSRoute.js`:

```javascript
// Define the request body
const requestBody = {
  ...
  promptTemplate: "Use the following pieces of context to answer the question at the end. \n {context}\n Question: {question}\nHelpful Answer:",
  systemTemplate: "I want you to act as a customer service bot called Socky the Happy bot that I am having a conversation with.\nYou are a bot that will provide funny answers to the customer. \n If you can't answer the question say I don't know."
};
```

   - If you are experiencing CORS errors, make sure you allow your IP to access the endpoint (look at your AWS Lambda scripts)
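Whichever option you pick, the file you drop in as `src/components/LangchainProcessor.js` only needs to keep the same contract: a default-exported async function that receives the new message plus the previous react-chatbot-kit messages and resolves to the reply string that `MessageParser` hands to the `ActionProvider`. A minimal stub for orientation (the reply text is just a placeholder):

```javascript
// Contract every option in /langchain_options follows:
// (newMessage: string, oldMessages: { type: string, message: string }[]) => Promise<string>
const LangchainProcessor = async (newMessage, oldMessages = []) => {
  // Replace this with a call to OpenAI, a chain, a Cloudflare Worker, an AWS endpoint, etc.
  return `You said "${newMessage}" (after ${oldMessages.length} earlier messages).`;
};

export default LangchainProcessor;
```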
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
{
  "name": "sandbox",
  "version": "0.1.0",
  "private": true,
  "dependencies": {
    "@testing-library/jest-dom": "^5.17.0",
    "@testing-library/react": "^13.4.0",
    "@testing-library/user-event": "^13.5.0",
    "langchain": "^0.0.131",
    "react": "^18.2.0",
    "react-chatbot-kit": "^2.1.2",
    "react-dom": "^18.2.0",
    "react-scripts": "5.0.1",
    "web-vitals": "^2.1.4"
  },
  "scripts": {
    "start": "react-scripts start",
    "build": "react-scripts build",
    "test": "react-scripts test",
    "eject": "react-scripts eject"
  },
  "eslintConfig": {
    "extends": [
      "react-app",
      "react-app/jest"
    ]
  },
  "browserslist": {
    "production": [
      ">0.2%",
      "not dead",
      "not op_mini all"
    ],
    "development": [
      "last 1 chrome version",
      "last 1 firefox version",
      "last 1 safari version"
    ]
  },
  "devDependencies": {
    "tailwindcss": "^3.3.3"
  }
}

--------------------------------------------------------------------------------
/public/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ilsilfverskiold/react-langchain-chatbot/0f8b290e5c182698863d06faf6eefa3cea622a10/public/favicon.ico

--------------------------------------------------------------------------------
/public/index.html:
--------------------------------------------------------------------------------
<!-- Markup not recoverable from this extraction. It appears to be the stock
     create-react-app public/index.html: page title "React App" plus the
     <div id="root"></div> mount node that src/index.js renders into. -->

--------------------------------------------------------------------------------
/public/logo192.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ilsilfverskiold/react-langchain-chatbot/0f8b290e5c182698863d06faf6eefa3cea622a10/public/logo192.png

--------------------------------------------------------------------------------
/public/logo512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ilsilfverskiold/react-langchain-chatbot/0f8b290e5c182698863d06faf6eefa3cea622a10/public/logo512.png

--------------------------------------------------------------------------------
/public/manifest.json:
--------------------------------------------------------------------------------
{
  "short_name": "React App",
  "name": "Create React App Sample",
  "icons": [
    {
      "src": "favicon.ico",
      "sizes": "64x64 32x32 24x24 16x16",
      "type": "image/x-icon"
    },
    {
      "src": "logo192.png",
      "type": "image/png",
      "sizes": "192x192"
    },
    {
      "src": "logo512.png",
      "type": "image/png",
      "sizes": "512x512"
    }
  ],
  "start_url": ".",
  "display": "standalone",
  "theme_color": "#000000",
  "background_color": "#ffffff"
}

--------------------------------------------------------------------------------
/public/robots.txt:
--------------------------------------------------------------------------------
# https://www.robotstxt.org/robotstxt.html
User-agent: *
Disallow:

--------------------------------------------------------------------------------
/src/App.css:
--------------------------------------------------------------------------------
.App {
  text-align: center;
}

.App-logo {
  height: 40vmin;
  pointer-events: none;
}

@media (prefers-reduced-motion: no-preference) {
  .App-logo {
    animation: App-logo-spin infinite 20s linear;
  }
}

.App-header {
  background-color: #282c34;
  min-height: 100vh;
  display: flex;
  flex-direction: column;
  align-items: center;
  justify-content: center;
  font-size: calc(10px + 2vmin);
  color: white;
}

.App-link {
  color: #61dafb;
}

@keyframes App-logo-spin {
  from {
    transform: rotate(0deg);
  }
  to {
    transform: rotate(360deg);
  }
}

--------------------------------------------------------------------------------
/src/App.js:
--------------------------------------------------------------------------------
import Chatbot from "react-chatbot-kit";
import "react-chatbot-kit/build/main.css";

import ActionProvider from "./components/ActionProvider";
import MessageParser from "./components/MessageParser";
import config from "./components/Config";

// we're using react-chatbot-kit to create the chatbot. See their docs here: https://fredrikoseberg.github.io/react-chatbot-kit-docs/docs
// we're using tailwindcss to style
// we're using OpenAI via langchain to process messages

function App() {
  return (
    <div className="App">
      <header className="App-header">
        {/* Add the Chatbot component to the header */}
        <Chatbot
          config={config}
          messageParser={MessageParser}
          actionProvider={ActionProvider}
        />
      </header>
    </div>
  );
}

export default App;

--------------------------------------------------------------------------------
/src/App.test.js:
--------------------------------------------------------------------------------
import { render, screen } from '@testing-library/react';
import App from './App';

// default create-react-app test: App no longer renders a "learn react" link,
// so expect this to fail until it is updated for the chatbot UI
test('renders learn react link', () => {
  render(<App />);
  const linkElement = screen.getByText(/learn react/i);
  expect(linkElement).toBeInTheDocument();
});

--------------------------------------------------------------------------------
/src/components/ActionProvider.js:
--------------------------------------------------------------------------------
class ActionProvider {
  constructor(createChatBotMessage, setStateFunc) {
    this.createChatBotMessage = createChatBotMessage;
    this.setState = setStateFunc;
  }

  sendBotResponse(message) {
    const botMessage = this.createChatBotMessage(message);
    this.updateChatbotState(botMessage);
  }

  updateChatbotState(message) {
    this.setState(prevState => ({
      ...prevState,
      messages: [...prevState.messages, message]
    }));
  }
}

export default ActionProvider;

--------------------------------------------------------------------------------
/src/components/Config.js:
--------------------------------------------------------------------------------
// Config starter code
import { createChatBotMessage } from "react-chatbot-kit";

const config = {
  // change this to the message you want to be sent to the user when they first open the chatbot
  initialMessages: [createChatBotMessage(`Hey there!`)],
  customStyles: {
    botMessageBox: {
      backgroundColor: '#376B7E',
    },
    chatButton: {
      backgroundColor: '#5ccc9d',
    },
  },
}

export default config
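A sketch of how this config could be customized a bit further — the bot name and welcome text are made-up examples, while `botName`, `initialMessages`, and `customStyles` are standard react-chatbot-kit config fields:

```javascript
import { createChatBotMessage } from "react-chatbot-kit";

const config = {
  // optional display name for the bot (a documented react-chatbot-kit field)
  botName: "LangchainBot",
  // first message(s) the user sees when the chat opens
  initialMessages: [createChatBotMessage("Hey there! Ask me anything.")],
  customStyles: {
    botMessageBox: { backgroundColor: "#376B7E" },
    chatButton: { backgroundColor: "#5ccc9d" },
  },
};

export default config;
```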
--------------------------------------------------------------------------------
/src/components/LangchainProcessor.js:
--------------------------------------------------------------------------------
import { ChatOpenAI } from "langchain/chat_models/openai";
import { HumanMessage, SystemMessage } from "langchain/schema";

// a plain async function (rather than a class) to keep it simple here

// this module is responsible for processing new messages from the user and getting a reply from OpenAI
// it uses a human/system messages array that is sent in continuously to OpenAI

const LangchainProcessor = async (newMessage, oldMessages) => {

  // The default prompt template is
  const promptTemplate = `
  You are an ironic and nihilistic chatbot so always answer like so. Don't answer in a "response: answer" format.
  Question: {question}
  `;

  const prompt = promptTemplate.replace("{question}", newMessage);

  const chat = new ChatOpenAI({
    temperature: 0,
    openAIApiKey: process.env.REACT_APP_OPEN_AI_API_KEY
  });

  try {
    // recreate the formatted messages array with the previous messages every time a new message comes in from the user
    const formattedMessages = oldMessages.map(msg => {
      if (msg.type === "bot") {
        return new SystemMessage(msg.message);
      } else {
        return new HumanMessage(msg.message);
      }
    });

    // Add the new human message to the list with the prompt template
    formattedMessages.push(new HumanMessage(prompt));

    // call OpenAI to get a reply
    const result = await chat.predictMessages(formattedMessages);

    // Extract the content from the AIMessage
    const botResponseContent = result.content;

    // return the response
    return botResponseContent;

  } catch (error) {
    console.error("Error processing message with OpenAI:", error);
    return "Sorry, I faced an error processing your message.";
  }
}

export default LangchainProcessor;

--------------------------------------------------------------------------------
/src/components/MessageParser.js:
--------------------------------------------------------------------------------
import LangchainProcessor from './LangchainProcessor';

class MessageParser {
  constructor(actionProvider, state, createChatBotMessage) {
    this.actionProvider = actionProvider;
    this.state = state;
    this.createChatBotMessage = createChatBotMessage;
  }

  async parse(incomingMessage) {
    // Use the processor to get a reply
    const reply = await LangchainProcessor(incomingMessage, this.state.messages);

    // Send the reply using the actionProvider
    this.actionProvider.sendBotResponse(reply);
  }
}

export default MessageParser;
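For orientation when writing your own processor: `this.state.messages` holds react-chatbot-kit's message objects, and the processors in this repo only read their `type` (`"bot"` or `"user"`) and `message` fields. A small sketch calling the processor directly with a hand-built history (the messages are made up, and the file is assumed to sit next to `LangchainProcessor.js`):

```javascript
import LangchainProcessor from "./LangchainProcessor";

// Only `type` and `message` are read; real react-chatbot-kit messages carry
// extra fields (id, loading, ...) that the processors ignore.
const oldMessages = [
  { type: "bot", message: "Hey there!" },
  { type: "user", message: "Tell me something nihilistic." },
  { type: "bot", message: "Nothing matters, but here we are." },
];

LangchainProcessor("So why answer at all?", oldMessages).then((reply) => {
  console.log(reply);
});
```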
--------------------------------------------------------------------------------
/src/components/langchain_options/AWSRoute.js:
--------------------------------------------------------------------------------
const LangchainProcessor = async (newMessage, oldMessages = []) => {

  // Check if newMessage contains "thank you" or "thanks"
  if (newMessage.toLowerCase().includes("thank you") || newMessage.toLowerCase().includes("thanks")) {
    oldMessages = [];
  }

  // Construct the history from oldMessages
  const historyArr = oldMessages.map((item) => `${item.type === "user" ? "human" : item.type}: ${item.message}`);

  // Check if a user message exists in oldMessages
  const hasPreviousUserMessage = oldMessages.some(
    (item) => item.type === "user"
  );

  // Determine the chatHistory based on previous user messages
  const chatHistory = hasPreviousUserMessage ? historyArr.join("\n") : "";

  // Define the request body
  const requestBody = {
    question: newMessage,
    chatHistory: chatHistory,
    bucketName: process.env.REACT_APP_AWS_BUCKET_NAME,
    promptTemplate: "Use the following pieces of context to answer the question at the end. \n {context}\n Question: {question}\nHelpful Answer:",
    systemTemplate: "I want you to act as a customer service bot called Socky the Happy bot that I am having a conversation with.\nYou are a bot that will provide funny answers to the customer. \n If you can't answer the question say I don't know."
  };

  try {
    // Send a POST request to the endpoint
    const response = await fetch(process.env.REACT_APP_AWS_POST_URL, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "x-api-key": process.env.REACT_APP_AWS_API_KEY,
      },
      body: JSON.stringify(requestBody),
    });

    if (!response.ok) {
      throw new Error("Failed to get a response from the server.");
    }

    const responseData = await response.text();
    const parsedData = JSON.parse(responseData);
    return parsedData.text.trim();
  } catch (error) {
    console.error("Error processing message via the AWS endpoint:", error);
    return "Sorry, I faced an error processing your message.";
  }
};

export default LangchainProcessor;

--------------------------------------------------------------------------------
/src/components/langchain_options/CloudflareWorkersRoute.js:
--------------------------------------------------------------------------------
// a plain async function (rather than a class) to keep it simple here

// this module is responsible for processing new messages from the user and getting a reply via a Cloudflare Worker route
// see this repo to create your worker: https://github.com/ilsilfverskiold/cloudflare-workers-langchain

// replace the URL below with your own endpoint and set your secret key in your .env file

const LangchainProcessor = async (newMessage, oldMessages = []) => {

  // Construct the history from oldMessages
  const historyArr = oldMessages.map(item => `${item.type}: ${item.message}`);
  historyArr.push(`user: ${newMessage}`);
  const history = historyArr.join('\n');

  // Check if a user message exists in oldMessages
  const hasPreviousUserMessage = oldMessages.some(item => item.type === 'user');

  // Define the request body
  const requestBody = {
    question: newMessage,
    history: hasPreviousUserMessage ? history : "",
  };

  try {
    // Send a POST request to the endpoint
    const response = await fetch("URL_HERE", {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "Authorization": process.env.REACT_APP_CLOUDFLARE_WORKERS_AUTH
      },
      body: JSON.stringify(requestBody)
    });

    if (!response.ok) {
      throw new Error("Failed to get a response from the server.");
    }

    const data = await response.text();
    return data.trim();
  } catch (error) {
    console.error("Error processing message via the Cloudflare Worker:", error);
    return "Sorry, I faced an error processing your message.";
  }
};

export default LangchainProcessor;

--------------------------------------------------------------------------------
/src/components/langchain_options/LLMChat.js:
--------------------------------------------------------------------------------
import { ChatOpenAI } from "langchain/chat_models/openai";
import { HumanMessage, SystemMessage } from "langchain/schema";

// a plain async function (rather than a class) to keep it simple here

// this module is responsible for processing new messages from the user and getting a reply from OpenAI
// it uses a human/system messages array that is sent in continuously to OpenAI

const LangchainProcessor = async (newMessage, oldMessages) => {

  // CHANGE THIS (!)
  const promptTemplate = `
  You are an ironic and nihilistic chatbot so always answer like so. Don't answer in a "response: answer" format.
  Question: {question}
  `;

  const prompt = promptTemplate.replace("{question}", newMessage);

  const chat = new ChatOpenAI({
    temperature: 0,
    openAIApiKey: process.env.REACT_APP_OPEN_AI_API_KEY
  });

  try {
    // recreate the formatted messages array with the previous messages every time a new message comes in from the user
    const formattedMessages = oldMessages.map(msg => {
      if (msg.type === "bot") {
        return new SystemMessage(msg.message);
      } else {
        return new HumanMessage(msg.message);
      }
    });

    // Add the new human message to the list with the prompt template
    formattedMessages.push(new HumanMessage(prompt));

    // call OpenAI to get a reply
    const result = await chat.predictMessages(formattedMessages);

    // Extract the content from the AIMessage
    const botResponseContent = result.content;

    // return the response
    return botResponseContent;

  } catch (error) {
    console.error("Error processing message with OpenAI:", error);
    return "Sorry, I faced an error processing your message.";
  }
}

export default LangchainProcessor;

--------------------------------------------------------------------------------
/src/components/langchain_options/SimpleChain.js:
--------------------------------------------------------------------------------
import { OpenAI } from "langchain/llms/openai";
import { PromptTemplate } from "langchain/prompts";
import { LLMChain } from "langchain/chains";

// uses a simple chain to process messages with langchain - rename the file LangchainProcessor.js to use this
// will only return an answer to the question and does not use oldMessages to generate a response

const LangchainProcessor = async (newMessage, oldMessages) => {

  const model = new OpenAI({
    temperature: 0,
    openAIApiKey: process.env.REACT_APP_OPEN_AI_API_KEY
  });

  // The default prompt template is
  const template = "You are an ironic and nihilistic chatbot so always answer like so. Don't answer in a 'response: answer' format. Question: {question}"
Question: {question}" 17 | 18 | try { 19 | const prompt = new PromptTemplate({ template, inputVariables: ["question"] }); 20 | const chain = new LLMChain({ llm: model, prompt }); 21 | const result = await chain.call({ question: newMessage }); 22 | const text = result.text.trim(); 23 | // return the response 24 | return text; 25 | 26 | } catch (error) { 27 | console.error("Error processing message with OpenAI:", error); 28 | return "Sorry, I faced an error processing your message."; 29 | } 30 | } 31 | 32 | export default LangchainProcessor; 33 | -------------------------------------------------------------------------------- /src/index.css: -------------------------------------------------------------------------------- 1 | @tailwind base; 2 | @tailwind components; 3 | @tailwind utilities; 4 | 5 | 6 | .react-chatbot-kit-chat-container { 7 | width: 100% !important; 8 | } 9 | 10 | .react-chatbot-kit-chat-bot-message { 11 | width: 98% !important; 12 | margin-left: 2% !important; 13 | } 14 | 15 | body { 16 | margin: 0; 17 | font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 18 | 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', 19 | sans-serif; 20 | -webkit-font-smoothing: antialiased; 21 | -moz-osx-font-smoothing: grayscale; 22 | } 23 | 24 | code { 25 | font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New', 26 | monospace; 27 | } 28 | -------------------------------------------------------------------------------- /src/index.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import ReactDOM from 'react-dom/client'; 3 | import './index.css'; 4 | import App from './App'; 5 | import reportWebVitals from './reportWebVitals'; 6 | 7 | const root = ReactDOM.createRoot(document.getElementById('root')); 8 | root.render( 9 | 10 | 11 | 12 | ); 13 | 14 | // If you want to start measuring performance in your app, pass a function 15 | // to log results (for example: reportWebVitals(console.log)) 16 | // or send to an analytics endpoint. Learn more: https://bit.ly/CRA-vitals 17 | reportWebVitals(); 18 | -------------------------------------------------------------------------------- /src/logo.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/reportWebVitals.js: -------------------------------------------------------------------------------- 1 | const reportWebVitals = onPerfEntry => { 2 | if (onPerfEntry && onPerfEntry instanceof Function) { 3 | import('web-vitals').then(({ getCLS, getFID, getFCP, getLCP, getTTFB }) => { 4 | getCLS(onPerfEntry); 5 | getFID(onPerfEntry); 6 | getFCP(onPerfEntry); 7 | getLCP(onPerfEntry); 8 | getTTFB(onPerfEntry); 9 | }); 10 | } 11 | }; 12 | 13 | export default reportWebVitals; 14 | -------------------------------------------------------------------------------- /src/setupTests.js: -------------------------------------------------------------------------------- 1 | // jest-dom adds custom jest matchers for asserting on DOM nodes. 
--------------------------------------------------------------------------------
/src/index.css:
--------------------------------------------------------------------------------
@tailwind base;
@tailwind components;
@tailwind utilities;


.react-chatbot-kit-chat-container {
  width: 100% !important;
}

.react-chatbot-kit-chat-bot-message {
  width: 98% !important;
  margin-left: 2% !important;
}

body {
  margin: 0;
  font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen',
    'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue',
    sans-serif;
  -webkit-font-smoothing: antialiased;
  -moz-osx-font-smoothing: grayscale;
}

code {
  font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New',
    monospace;
}

--------------------------------------------------------------------------------
/src/index.js:
--------------------------------------------------------------------------------
import React from 'react';
import ReactDOM from 'react-dom/client';
import './index.css';
import App from './App';
import reportWebVitals from './reportWebVitals';

const root = ReactDOM.createRoot(document.getElementById('root'));
root.render(
  <React.StrictMode>
    <App />
  </React.StrictMode>
);

// If you want to start measuring performance in your app, pass a function
// to log results (for example: reportWebVitals(console.log))
// or send to an analytics endpoint. Learn more: https://bit.ly/CRA-vitals
reportWebVitals();

--------------------------------------------------------------------------------
/src/logo.svg:
--------------------------------------------------------------------------------
<!-- SVG markup not recoverable from this extraction (presumably the stock create-react-app React logo). -->

--------------------------------------------------------------------------------
/src/reportWebVitals.js:
--------------------------------------------------------------------------------
const reportWebVitals = onPerfEntry => {
  if (onPerfEntry && onPerfEntry instanceof Function) {
    import('web-vitals').then(({ getCLS, getFID, getFCP, getLCP, getTTFB }) => {
      getCLS(onPerfEntry);
      getFID(onPerfEntry);
      getFCP(onPerfEntry);
      getLCP(onPerfEntry);
      getTTFB(onPerfEntry);
    });
  }
};

export default reportWebVitals;

--------------------------------------------------------------------------------
/src/setupTests.js:
--------------------------------------------------------------------------------
// jest-dom adds custom jest matchers for asserting on DOM nodes.
// allows you to do things like:
// expect(element).toHaveTextContent(/react/i)
// learn more: https://github.com/testing-library/jest-dom
import '@testing-library/jest-dom';

--------------------------------------------------------------------------------
/tailwind.config.js:
--------------------------------------------------------------------------------
/** @type {import('tailwindcss').Config} */
module.exports = {
  content: [
    "./src/**/*.{js,jsx,ts,tsx}",
  ],
  theme: {
    extend: {},
  },
  plugins: [],
}

--------------------------------------------------------------------------------