├── .devcontainer ├── Dockerfile ├── devcontainer.json └── postAttach.sh ├── .env ├── .env.test ├── .gitignore ├── .prettierignore ├── Dockerfile ├── LICENSE.txt ├── README.md ├── TODO.txt ├── babel.config.js ├── bin ├── deploy ├── server ├── server-backend ├── server-frontend ├── setup ├── setup-db └── test ├── db ├── Luxury_Products_Apparel_Data.csv ├── README.md ├── clean.js ├── create.js ├── embedding.js ├── lambdarag.db ├── node │ └── sqlite-vss-linux-arm64 │ │ ├── README.md │ │ ├── lib │ │ ├── .gitkeep │ │ ├── vector0.so │ │ └── vss0.so │ │ └── package.json ├── schema.sql └── test.js ├── index.html ├── jest.config.js ├── log └── .keep ├── package-lock.json ├── package.json ├── postcss.config.js ├── prettier.config.js ├── public ├── ai-knowledge-retrieval-dark.png ├── ai-knowledge-retrieval-light.png ├── android-chrome-192x192.png ├── android-chrome-512x512.png ├── apple-touch-icon.png ├── avatar-assistant.png ├── avatar-function.png ├── avatar-user.png ├── context-window-siccos-dark.png ├── context-window-siccos-light.png ├── favicon-16x16.png ├── favicon-32x32.png ├── favicon.ico ├── lambda-rag-hats-dark.png ├── lambda-rag-hats-light.png ├── lambda-rag-hats-wfun-dark.png ├── lambda-rag-hats-wfun-light.png ├── lambda-rag-name-dark.png ├── lambda-rag-name-light.png ├── lambda-rag-start-dark.png ├── lambda-rag-start-light.png ├── logo.png ├── rags-to-riches.png └── site.webmanifest ├── src-frontend ├── App.css ├── App.vue ├── components │ ├── Message.vue │ ├── Messages.vue │ ├── Prompt.vue │ ├── PromptMessage.vue │ └── Welcome.vue ├── main.css ├── main.js ├── stores │ └── messages.js └── utils │ ├── backend.js │ ├── functionCaller.js │ └── roleprompt.js ├── src ├── app.js ├── env.js ├── middleware │ ├── .keep │ └── logger.js ├── models │ └── products.js ├── routes │ ├── functions.js │ └── messages.js └── utils │ ├── db.js │ ├── functions.js │ ├── functions.json │ └── openai.js ├── tailwind.config.js ├── template.yaml ├── test ├── globalSetup.js ├── helper.js └── models │ └── env.test.js ├── tmp └── .keep ├── vite.config.js └── vitest.config.js /.devcontainer/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:18-bookworm 2 | # sqlite-vss 3 | RUN apt-get update -y && \ 4 | apt-get upgrade -y && \ 5 | apt-get install -y \ 6 | libgomp1 \ 7 | libatlas-base-dev \ 8 | liblapack-dev \ 9 | sqlite3 \ 10 | libsqlite3-dev \ 11 | libsqlite3-0 12 | # docker-in-docker 13 | # https://github.com/devcontainers/features/issues/573 14 | RUN apt-get install -y pipx && \ 15 | rm /usr/lib/python3.11/EXTERNALLY-MANAGED 16 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "lambda-rag", 3 | "dockerFile": "Dockerfile", 4 | "customizations": { 5 | "vscode": { 6 | "extensions": [ 7 | "GitHub.copilot", 8 | "GitHub.copilot-chat", 9 | "github.vscode-github-actions", 10 | "esbenp.prettier-vscode", 11 | "ms-vscode.vscode-typescript-next", 12 | "alexcvzz.vscode-sqlite", 13 | "bradlc.vscode-tailwindcss", 14 | "Vue.volar", 15 | "42Crunch.vscode-openapi" 16 | ], 17 | "settings": { 18 | "[html]": { 19 | "editor.formatOnSave": true, 20 | "editor.defaultFormatter": "vscode.html-language-features" 21 | }, 22 | "[vue]": { 23 | "editor.formatOnSave": true, 24 | "editor.defaultFormatter": "Vue.volar" 25 | } 26 | } 27 | } 28 | }, 29 | "containerEnv": { 30 | "NODE_ENV": "development" 31 | }, 32 | "features": { 33 
| "ghcr.io/devcontainers/features/common-utils:2": {}, 34 | "ghcr.io/devcontainers/features/docker-in-docker:2": {}, 35 | "ghcr.io/devcontainers/features/aws-cli:1": {}, 36 | "ghcr.io/customink/codespaces-features/sam-cli:1": {} 37 | }, 38 | "remoteUser": "vscode", 39 | "postAttachCommand": "./.devcontainer/postAttach.sh", 40 | "mounts": [ 41 | "source=${localEnv:HOME}/.aws,target=/home/vscode/.aws,type=bind,consistency=cached" 42 | ] 43 | } 44 | -------------------------------------------------------------------------------- /.devcontainer/postAttach.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | # This should not be needed but at the time of this writing the install is not working for me. 5 | if [ -f /home/vscode/dotfiles/install.sh ] && [ ! -f /home/vscode/.dotfilesinstalled ]; then 6 | /home/vscode/dotfiles/install.sh 7 | touch /home/vscode/.dotfilesinstalled 8 | fi 9 | -------------------------------------------------------------------------------- /.env: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY=secret 2 | -------------------------------------------------------------------------------- /.env.test: -------------------------------------------------------------------------------- 1 | STAGE_NAME=test 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /dist 2 | /node_modules 3 | /.aws-sam 4 | .env.* 5 | !.env.development 6 | !.env.test 7 | __diff_output__ 8 | log/*.log 9 | /db/embeddings-cache 10 | /db/lambdarag.db-journal 11 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/.prettierignore -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:18-bookworm 2 | COPY --from=ghcr.io/rails-lambda/crypteia-extension-debian:1.1.2 /opt /opt 3 | COPY --from=public.ecr.aws/awsguru/aws-lambda-adapter:0.7.0 /lambda-adapter /opt/extensions/lambda-adapter 4 | # sqlite-vss 5 | RUN apt-get update -y && \ 6 | apt-get upgrade -y && \ 7 | apt-get install -y \ 8 | libgomp1 \ 9 | libatlas-base-dev \ 10 | liblapack-dev \ 11 | sqlite3 \ 12 | libsqlite3-dev \ 13 | libsqlite3-0 14 | # Lambda Runtime 15 | RUN mkdir -p /var/task && \ 16 | chmod u+rwx /var/task && \ 17 | chown node /var/task 18 | WORKDIR /var/task 19 | ENV LAMBDA_TASK_ROOT=/var/task 20 | # Runtime Code 21 | COPY . . 
22 | USER node 23 | CMD ["node", "src/app.js"] 24 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2023 Ken Collins 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # Retrieval Augmented Generation Chat AI Demo 3 | 4 | ![LambdaRAG](./public/rags-to-riches.png) 5 | 6 | Please read the full "RAGs to Riches" blog series:
7 | https://dev.to/aws-heroes/rags-to-riches-part-1-generative-ai-retrieval-4pd7 8 | 9 | ## About 10 | 11 | This is an OpenAI-based RAG chat application that can help you learn about AI retrieval patterns. The technologies here are beginner-friendly and easy to deploy to AWS Lambda. As your needs grow, feel free to productionize this application with more robust components. What is a RAG? From [IBM Research](https://research.ibm.com/blog/retrieval-augmented-generation-RAG): 12 | 13 | > RAG is an AI framework for retrieving facts from an external knowledge base to ground large language models (LLMs) on the most accurate, up-to-date information and to give users insight into LLMs' generative process. 14 | 15 | ![Screenshot of the LambdaRAG Demo application.](./public/lambda-rag-start-dark.png#gh-dark-mode-only) 16 | ![Screenshot of the LambdaRAG Demo application.](./public/lambda-rag-start-light.png#gh-light-mode-only) 17 | 18 | ## Local Development 19 | 20 | You MUST have an OpenAI API key to run this application. You can get one using the [Where do I find my Secret API Key?](https://help.openai.com/en/articles/4936850-where-do-i-find-my-secret-api-key) guide. Once you have your OpenAI key, create a `.env.development.local` file at the root of this project with the following, replacing `sk...` with your key: 21 | 22 | ``` 23 | OPENAI_API_KEY=sk... 24 | ``` 25 | 26 | This project supports [Development Containers](https://containers.dev), which means you can use VS Code to [open this folder in a container](https://code.visualstudio.com/docs/devcontainers/containers) and your development environment will be created for you. Run the following commands in your integrated terminal, or on your local machine assuming you have Node installed. 27 | 28 | ```shell 29 | ./bin/setup 30 | ./bin/server 31 | ``` 32 | 33 | The server command starts both the front-end and back-end development servers. Use this URL to access your application: http://localhost:5173 34 | 35 | ## Technologies Used 36 | 37 | This demo application uses a split-stack architecture, meaning there is a distinct front-end and back-end. The front-end is a [💚 Vue.js](https://vuejs.org/) application with [🍍 Pinia](https://pinia.vuejs.org) for state and [⚡️ Vite](https://vitejs.dev/) for development. The front-end also uses [🌊 Tailwind CSS](https://tailwindcss.com/) along with [🌼 daisyUI](https://daisyui.com) for styling. The back-end is a [🟨 Node.js](https://nodejs.org/) application that uses [❎ Express](https://expressjs.com/) for the HTTP framework, and [🪶 SQLite3 VSS](https://github.com/asg017/sqlite-vss) along with [🏆 better-sqlite3](https://github.com/WiseLibs/better-sqlite3) for vector storage and search. 38 | 39 | Throughout this post we will explore various technologies in more detail and how they help us build a RAG application while learning the basics of AI-driven integrations and prompt engineering. This is such a fun space. I hope you enjoy it as much as I do! 40 | 41 | ⚠️ DISCLAIMER: I used ChatGPT to build most of this application. It has been several years since I did any heavy client-side JavaScript. I used this RAG application as an opportunity to learn Vue.js with AI's help. 42 | 43 | ## Working Backwards - Why Lambda? 44 | 45 | So let's start with the end in mind. Our [LambdaRAG Demo](https://github.com/metaskills/lambda-rag) runs locally to make it easy to develop and learn. At some point, though, you may want to ship it to production or share your work with others.
So why deploy to Lambda and what benefits does that deployment option offer? A few thoughts: 46 | 47 | 1. Lambda makes it easy to deploy [containerized applications](https://docs.aws.amazon.com/lambda/latest/dg/images-create.html). 48 | 2. Lambda's [Function URLs](https://docs.aws.amazon.com/lambda/latest/dg/lambda-urls.html) are managed API Gateway reverse proxies. 49 | 3. The [Lambda Web Adapter](https://github.com/awslabs/aws-lambda-web-adapter) makes streaming API responses simple. 50 | 4. Container tools like [Crypteia](https://github.com/rails-lambda/crypteia) make secure SSM-backed secrets easy. 51 | 5. Lambda containers allow images up to 10GB in size. Great for an embedded SQLite DB. 52 | 53 | Of all of these, I think [Response Streaming](https://aws.amazon.com/blogs/compute/introducing-aws-lambda-response-streaming/) is the most powerful. A relatively new feature for Lambda, it enables our RAG to stream text back to the web client just like ChatGPT. It also allows Lambda to break past the 6MB response payload and 30s timeout limits. These few lines in the project's `template.yaml`, along with the Lambda Web Adapter, make it all possible. 54 | 55 | ```yaml 56 | FunctionUrlConfig: 57 | AuthType: NONE 58 | InvokeMode: RESPONSE_STREAM 59 | ``` 60 | 61 | Before you run `./bin/deploy` for the first time, make sure you log into the AWS Console and navigate to [SSM Parameter Store](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-parameter-store.html) first. From there, create a SecureString parameter with the path `/lambda-rag/OPENAI_API_KEY` and paste in your OpenAI API key. 62 | 63 | ## OpenAI API Basics 64 | 65 | Our backend has a very basic [`src/utils/openai.js`](https://github.com/metaskills/lambda-rag/blob/main/src/utils/openai.js) module. This exports an OpenAI client as well as a helper function to create embeddings. We cover [Embeddings](https://platform.openai.com/docs/guides/embeddings) briefly in the [Basic Architecture](https://dev.to/aws-heroes/rags-to-riches-part-1-generative-ai-retrieval-4pd7) section of the first part of this series. This function simply turns a user's query into a vector embedding which is later queried against our SQLite database. There are numerous ways to create and query embeddings. For now we are going to keep it simple and use OpenAI's `text-embedding-ada-002` model, which outputs 1536-dimensional embeddings. 66 | 67 | ```javascript 68 | import { OpenAI } from "openai"; 69 | 70 | export const openai = new OpenAI({ 71 | apiKey: process.env.OPENAI_API_KEY, 72 | }); 73 | 74 | export const createEmbedding = async (query) => { 75 | const response = await openai.embeddings.create({ 76 | model: "text-embedding-ada-002", 77 | input: query, 78 | }); 79 | return JSON.stringify(response.data[0].embedding); 80 | }; 81 | ``` 82 | 83 | So how does OpenAI's API work to create a chat interface, and how does the [Context Window](https://dev.to/aws-heroes/rags-to-riches-part-1-generative-ai-retrieval-4pd7) discussed in part one come into play? Consider the following screenshot where I tell LambdaRAG my name and then ask if it remembers. 84 | 85 | Screenshot of the LambdaRAG Demo application. 86 | 87 | OpenAI's chat API is stateless, like most web applications. It keeps no session for the LLM. Every time you send a message you have to send all the previous messages (context) to the [Completions](https://platform.openai.com/docs/api-reference/completions) endpoint. This is why we use [🍍 Pinia](https://pinia.vuejs.org) for client-side state management. So from an API perspective, it would look something like this below.
So from an API perspective, it would look something like this below. 88 | 89 | ```javascript 90 | await openai.chat.completions.create({ 91 | model: "gpt-3.5-turbo-16k", 92 | messages: [ 93 | { role: "user", content: "Hello my name is Ken Collins." }, 94 | { role: "assistant", content: "Hello Ken Collins! How can I..." }, 95 | { role: "user", content: "Do you remember my name?" }, 96 | ] 97 | }); 98 | ``` 99 | 100 | Did you notice how the assistant responded not only with my name but also knew it was here to help us with Luxury Apparel? This is a technique called [Role Prompting](https://learnprompting.org/docs/basics/roles). We do this in the LambdaRAG Demo by prepending this role to the user's first message in the [`src-frontend/utils/roleprompt.js`](https://github.com/metaskills/lambda-rag/blob/main/src-frontend/utils/roleprompt.js) file. 101 | 102 | You may have noticed that the LambdaRAG Demo is written entirely in 💛 JavaScript vs. Python. As you learn more about building AI applications you may eventually have to learn Python as well as more advanced frameworks like [🦜️🔗 LangChain](https://js.langchain.com/docs/get_started/introduction/) or Hugging Face's [🤗 Transformers.js](https://huggingface.co/docs/transformers.js/index). All of which have JavaScript versions. I hope this trend of providing JavaScript clients will continue. It feels like a more accessible language. 103 | 104 | In the next section, we will cover how to create embeddings with your data and query for documents using SQLite's new VSS extension. 105 | 106 | ## Proprietary Data & Embeddings 107 | 108 | 💁‍♂️ The LambdaRAG Demo application contains a ready-to-use SQLite database with ~5,000 products from the [Luxury Apparel Dataset](https://www.kaggle.com/datasets/chitwanmanchanda/luxury-apparel-data) on Kaggle. It also has vector embeddings pre-seeded and ready to use! 109 | 110 | Before we dig into [sqlite-vss](https://github.com/asg017/sqlite-vss), I'd like to explain why I think this extension is so amazing. To date, I have found sqlite-vss the easiest and quickest way to explore vector embeddings. Many GenAI projects use [Supabase](https://supabase.com) which seems great but is difficult to run locally. The goal here is to learn! 111 | 112 | As your application grows, I highly recommend looking at [Amazon OpenSearch Serverless](https://aws.amazon.com/blogs/big-data/introducing-the-vector-engine-for-amazon-opensearch-serverless-now-in-preview/). It is a fully managed, highly scalable, and cost-effective service that supports vector similarity search. It even supports [pre-filtering with FAISS](https://opensearch.org/docs/latest/search-plugins/knn/filter-search-knn/). 113 | 114 | Let's look at [sqlite-vss](https://github.com/asg017/sqlite-vss) a bit closer. This article [A SQLite Extension for Vector Search](https://observablehq.com/@asg017/introducing-sqlite-vss) does an amazing job covering the creation of standard tables as well as virtual tables for embeddings and how to query them both. The LambdaRAG Demo follows all these patterns closely in our [`db/create.js`](https://github.com/metaskills/lambda-rag/blob/main/db/create.js) file. 
Our resulting schema is: 115 | 116 | ```sql 117 | CREATE TABLE products ( 118 | id INTEGER PRIMARY KEY, 119 | name TEXT, 120 | category TEXT, 121 | subCategory TEXT, 122 | description TEXT, 123 | embedding BLOB 124 | ); 125 | CREATE TABLE IF NOT EXISTS "vss_products_index"(rowid integer primary key autoincrement, idx); 126 | CREATE TABLE sqlite_sequence(name,seq); 127 | CREATE TABLE IF NOT EXISTS "vss_products_data"(rowid integer primary key autoincrement, _); 128 | CREATE VIRTUAL TABLE vss_products using vss0 ( 129 | embedding(1536) 130 | ); 131 | ``` 132 | 133 | If you want to re-create the SQLite database or build a custom dataset, you can do so by changing `db/create.js` and running `npm run db:create`. This will drop the existing database and re-create it with data from any CSV file(s), supporting schema, or process you are willing to code up. 134 | 135 | ```shell 136 | > npm run db:create 137 | > lambda-rag@1.0.0 db:create 138 | > rm -rf db/lambdarag.db && node db/create.js 139 | Using sqlite-vss version: v0.1.1 140 | Inserting product data... 141 | ██████████████████████████████████░░░░░░ 84% | ETA: 2s | 4242/5001 142 | ``` 143 | 144 | Afterward, you need to run the `npm run db:embeddings` script, which uses the OpenAI API to create embeddings for each product. This takes a few minutes to complete all the API calls. The task includes a local cache to make it faster to re-run. Lastly, there is an `npm run db:clean` script that calls a `VACUUM` on the DB to remove wasted space from the virtual tables. Again, all of this is only required if you want to re-create the database or build a custom dataset. There is a `./bin/setup-db` wrapper script that does all these steps for you. 145 | 146 | ## Retrieval with Function Calling 147 | 148 | OK, so we have a database of products and their matching vector embeddings to use for semantic search. How do we code up going from chat to retrieving items from the database? OpenAI has this amazing feature named [Function Calling](https://platform.openai.com/docs/guides/gpt/function-calling). In our demo, it allows the LLM to search for products and describe the results to you. 149 | 150 | Screenshot of the LambdaRAG Demo application. 151 | 152 | But how does it know? You simply describe an [array of functions](https://github.com/metaskills/lambda-rag/blob/main/src/utils/functions.json) that your application implements during a chat completion API call. OpenAI will then 1) automatically determine whether a function should be called, and 2) return the name of the function to call along with the needed parameters. Your request looks something like this. 153 | 154 | ```javascript 155 | await openai.chat.completions.create({ 156 | model: "gpt-3.5-turbo-16k", 157 | functions: '[{"search_products":{"parameters": {"query": "string"}}}]', 158 | messages: [ 159 | { role: "user", content: "I need a cool trucker hat." } 160 | ] 161 | }); 162 | ``` 163 | 164 | If a function has been selected, the response will include the name of the function and its parameters. Your responsibility is to check for this, then call your application's code matching the function and parameters. For LambdaRAG, this means querying the database and returning any matching rows. We do this in our [`src/models/products.js`](https://github.com/metaskills/lambda-rag/blob/main/src/models/products.js) file. 165 |
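Under the hood, that retrieval is a single query that asks the `vss_products` virtual table for the nearest embeddings and then selects the matching `products` rows. This is the same pattern used in `db/test.js` and `src/models/products.js`:

```sql
SELECT * FROM products
WHERE ROWID IN (
  SELECT ROWID FROM vss_products
  WHERE vss_search(embedding, vss_search_params(:embedding, :limit))
);
```

Here `:embedding` is the JSON vector produced by `createEmbedding` for the function call's `query` argument, and `:limit` caps the number of nearest neighbors returned.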
166 | For OpenAI to respond with the results, we send it another request that now has two additional messages included. The first is of type "function" and includes the name and arguments of the function you were asked to call. The second is of type "user" and includes the JSON data of the products returned from our retrieval process. OpenAI will now respond as if it had this knowledge all along! 167 | 168 | ```javascript 169 | await openai.chat.completions.create({ 170 | model: "gpt-3.5-turbo-16k", 171 | functions: '[{"search_products":{"parameters": {"query": "string"}}}]', 172 | messages: [ 173 | { role: "user", content: "I need a cool trucker hat." }, 174 | { role: "function", name: "search_products", content: '{"query":"trucker hats"}' }, 175 | { role: "user", content: '[{"id":3582,"name":"Mens Patagonia Logo Trucker Hat..."}]' }, 176 | ] 177 | }); 178 | ``` 179 | 180 | Since all messages are maintained in client-side state, you can see them using a neat debug technique. Open up the [`src-frontend/components/Message.vue`](https://github.com/metaskills/lambda-rag/blob/main/src-frontend/components/Message.vue) file and make the following change. 181 | 182 | ```diff 183 | 'border-b-base-300': true, 184 | 'bg-base-200': data.role === 'user', 185 | - 'hidden': data.hidden, 186 | + 'hidden': false, 187 | ``` 188 | 189 | You can now see all the messages' state in the UI. This is a great way to debug your application and see what is happening. 190 | 191 | Screenshot of the LambdaRAG Demo application. 192 | 193 | ## More To Explore 194 | 195 | I hope you found this quick overview of how OpenAI's chat completions can be augmented for knowledge retrieval helpful. There is so much more to explore and do. Here are some ideas to get you started: 196 | 197 | - All responses are streamed from the server. The `fetchResponse` action in the [`src-frontend/stores/messages.js`](https://github.com/metaskills/lambda-rag/blob/main/src-frontend/stores/messages.js) Pinia store does all the work here and manages client-side state. 198 | - That same file also converts the streaming responses' Markdown into HTML. This is how the demo can build tables just like ChatGPT does. 199 | - Sometimes the keywords passed to the `search_products` function can be sparse. Consider making an API call to extend the keywords of the query using the original message. You can use functions here too! 200 | - Consider adding more retrieval methods to the [`src/utils/functions.json`](https://github.com/metaskills/lambda-rag/blob/main/src/utils/functions.json) file. For example, a `find_style` by ID method that would directly query the database. 201 | 202 | ❤️ I hope you enjoyed these posts and find the LambdaRAG Demo application useful in learning how to use AI for knowledge retrieval. Feel free to ask questions and share your thoughts on this post. Thank you! 203 | -------------------------------------------------------------------------------- /TODO.txt: -------------------------------------------------------------------------------- 1 | 2 | # To Dos 3 | 4 | - Change PromptMessage to link to final blog post.
5 | -------------------------------------------------------------------------------- /babel.config.js: -------------------------------------------------------------------------------- 1 | export default {}; 2 | -------------------------------------------------------------------------------- /bin/deploy: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | # https://github.com/aws/aws-sam-cli/issues/2447 5 | AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) 6 | IMAGE_REPOSITORY="${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/lambda-rag" 7 | 8 | echo '== NPM Install (Dev) ==' 9 | npm install --no-audit --no-save 10 | 11 | echo '== Vite Build ==' 12 | NODE_ENV=production npm run v:build 13 | rm -rf public src-frontend 14 | mv dist public 15 | 16 | echo '== NPM Install (Prod) ==' 17 | rm -rf node_modules 18 | npm install --no-audit --no-save --omit=dev 19 | 20 | echo "== Cleanup Unused Files & Directories ==" 21 | rm -rf \ 22 | .git* \ 23 | log \ 24 | test \ 25 | tmp 26 | 27 | echo "== ECR Public Login ==" 28 | aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws 29 | 30 | echo "== SAM build ==" 31 | sam build 32 | 33 | echo "== SAM package ==" 34 | sam package \ 35 | --region "$AWS_REGION" \ 36 | --image-repository "$IMAGE_REPOSITORY" 37 | 38 | echo "== SAM deploy ==" 39 | sam deploy \ 40 | --region "$AWS_REGION" \ 41 | --stack-name "lambda-rag" \ 42 | --image-repository "$IMAGE_REPOSITORY" \ 43 | --capabilities "CAPABILITY_IAM" 44 | -------------------------------------------------------------------------------- /bin/server: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | ./bin/server-backend & 5 | ./bin/server-frontend & 6 | 7 | wait 8 | -------------------------------------------------------------------------------- /bin/server-backend: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | # Start backend express server. 5 | nodemon \ 6 | --watch src \ 7 | --ext 'js,yaml,json' \ 8 | --exec node src/app.js 9 | -------------------------------------------------------------------------------- /bin/server-frontend: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | # Start frontend vue dev server. 
5 | npm run v:dev 6 | -------------------------------------------------------------------------------- /bin/setup: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | echo '== Installing dependencies ==' 5 | npm install 6 | 7 | if [ "$(uname -m)" = "aarch64" ] && [ "$(uname -s)" = "Linux" ]; then 8 | echo '== Installing sqlite3 for Linux arm64 ==' 9 | cp -r ./db/node/sqlite-vss-linux-arm64 ./node_modules 10 | fi 11 | -------------------------------------------------------------------------------- /bin/setup-db: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | npm run db:create 5 | npm run db:embeddings 6 | npm run db:clean 7 | npm run db:schema 8 | -------------------------------------------------------------------------------- /bin/test: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | export NODE_ENV="test" 5 | export NODE_OPTIONS=--experimental-vm-modules 6 | 7 | npx jest test 8 | -------------------------------------------------------------------------------- /db/README.md: -------------------------------------------------------------------------------- 1 | # LambdaRAG Data 2 | 3 | Use this directory to run scripts that pull data into a local SQLite DB file. This DB file is used for both the development & production environments. In a more formal setup, this file could be generated via a CI/CD pipeline and stored in a container package for easy installation. 4 | 5 | ## Luxury Apparel Data 6 | 7 | The db/Luxury_Products_Apparel_Data.csv comes from Kaggle. 8 | 9 | https://www.kaggle.com/datasets/chitwanmanchanda/luxury-apparel-data 10 | 11 | ## The Data File 12 | 13 | The `db/lambdarag.db` SQLite3 DB file is committed to this repo. It is ~30MB in size. If you want to create this file yourself from scratch, you can run the following command after creating a `.env.development.local` file with your `OPENAI_API_KEY=` set. OpenAI's API will be used to create embeddings. 14 | 15 | ```bash 16 | ./bin/setup-db 17 | ``` 18 | 19 | This setup script will perform the following tasks: 20 | 21 | - `npm run db:create` - Create the `products` and `vss_products` tables using the Luxury data. 22 | - `npm run db:embeddings` - Create embeddings for each product using OpenAI's API. Caches results. 23 | - `npm run db:clean` - Clears the `embedding` column on `products` and vacuums the DB to reduce size. 24 | - `npm run db:schema` - Generates a schema file for the DB.
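After a rebuild, you can sanity check the vector search with the small interactive script in `db/test.js`, which is wired up as an npm script:

```bash
# Prompts for a query, embeds it with OpenAI's API, and prints the five nearest products.
npm run db:test
```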
25 | 26 | -------------------------------------------------------------------------------- /db/clean.js: -------------------------------------------------------------------------------- 1 | import { db } from "../src/utils/db.js"; 2 | 3 | db.exec("UPDATE products SET embedding = NULL"); 4 | db.exec("VACUUM"); 5 | -------------------------------------------------------------------------------- /db/create.js: -------------------------------------------------------------------------------- 1 | import { db } from "../src/utils/db.js"; 2 | import { readFileSync } from "fs"; 3 | import cliProgress from "cli-progress"; 4 | import Papa from "papaparse"; 5 | 6 | const vss_version = db.prepare("select vss_version()").pluck().get(); 7 | console.log(`Using sqlite-vss version: ${vss_version}\n`); 8 | 9 | /* ==== PRODUCTS ==== */ 10 | 11 | db.exec(` 12 | CREATE TABLE products ( 13 | id INTEGER PRIMARY KEY, 14 | name TEXT, 15 | category TEXT, 16 | subCategory TEXT, 17 | description TEXT, 18 | embedding BLOB 19 | ) 20 | `); 21 | 22 | db.exec(` 23 | CREATE VIRTUAL TABLE vss_products using vss0 ( 24 | embedding(1536) 25 | ) 26 | `); 27 | 28 | const luxuryFile = "./db/Luxury_Products_Apparel_Data.csv"; 29 | const luxuryData = readFileSync(luxuryFile, "utf-8"); 30 | 31 | const productsData = Papa.parse(luxuryData, { header: true }).data; 32 | const productsBar = new cliProgress.SingleBar( 33 | {}, 34 | cliProgress.Presets.shades_classic, 35 | ); 36 | const productsStmt = db.prepare(`INSERT INTO products ( 37 | id, 38 | name, 39 | category, 40 | subCategory, 41 | description 42 | ) VALUES (?, ?, ?, ?, ?)`); 43 | 44 | const insertProduct = async (p) => { 45 | let id = Number(p[""]); 46 | let productName = p["ProductName"]; 47 | let description = p["Description"]; 48 | if (!productName) { 49 | productsBar.increment(); 50 | return; 51 | } 52 | if (productName.startsWith('"') && productName.endsWith('"')) { 53 | productName = productName.slice(1, -1); 54 | } 55 | if (description.startsWith('"') && description.endsWith('"')) { 56 | description = description.slice(1, -1); 57 | } 58 | productsStmt.run( 59 | id, 60 | productName, 61 | p["Category"], 62 | p["SubCategory"], 63 | description, 64 | ); 65 | productsBar.increment(); 66 | }; 67 | 68 | console.log("Inserting product data..."); 69 | productsBar.start(productsData.length, 0); 70 | productsData.forEach((product) => { 71 | insertProduct(product); 72 | }); 73 | 74 | productsBar.stop(); 75 | db.close(); 76 | -------------------------------------------------------------------------------- /db/embedding.js: -------------------------------------------------------------------------------- 1 | import { mkdirSync } from "fs"; 2 | import "dotenv-flow/config"; 3 | import { db } from "../src/utils/db.js"; 4 | import { createEmbedding } from "../src/utils/openai.js"; 5 | import cliProgress from "cli-progress"; 6 | import PQueue from "p-queue"; 7 | import { caching } from "cache-manager"; 8 | import * as fsStore from "cache-manager-fs-hash"; 9 | 10 | mkdirSync("db/embeddings-cache", { recursive: true }); 11 | const fsStoreCache = fsStore.create({ 12 | path: "db/embeddings-cache", 13 | ttl: 31536000, 14 | subdirs: true, 15 | }); 16 | const embeddingsCache = await caching(fsStoreCache); 17 | 18 | /* ==== PRODUCTS ==== */ 19 | 20 | const productsQueue = new PQueue({ concurrency: 5, autoStart: false }); 21 | const productsBar = new cliProgress.SingleBar( 22 | {}, 23 | cliProgress.Presets.shades_classic, 24 | ); 25 | const productIds = db.prepare("SELECT id FROM 
products").pluck().all(); 26 | const productGetStmt = db.prepare("SELECT * FROM products WHERE id = ?"); 27 | const productUpdateStmt = db.prepare( 28 | "UPDATE products SET embedding = :embedding WHERE id = :id", 29 | ); 30 | 31 | const productEmbedding = async (productId) => { 32 | const p = productGetStmt.get(productId); 33 | const input = await `${p.name} ${p.category} ${p.description}`; 34 | const embedding = await embeddingsCache.wrap(input, () => 35 | createEmbedding(input), 36 | ); 37 | productUpdateStmt.run({ embedding: embedding, id: productId }); 38 | productsBar.increment(); 39 | }; 40 | 41 | productsQueue.add(() => { 42 | console.log("Inserting product embeddings..."); 43 | productsBar.start(productIds.length, 0); 44 | }); 45 | 46 | productsQueue.onIdle().then(() => { 47 | productsBar.stop(); 48 | console.log("Inserting into vss_products..."); 49 | db.exec(`DELETE FROM vss_products`); 50 | db.exec( 51 | `INSERT INTO vss_products(ROWID, embedding) SELECT ROWID, embedding FROM products`, 52 | ); 53 | }); 54 | 55 | productIds.forEach((productId) => { 56 | productsQueue.add(() => productEmbedding(productId)); 57 | }); 58 | 59 | productsQueue.start(); 60 | -------------------------------------------------------------------------------- /db/lambdarag.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/db/lambdarag.db -------------------------------------------------------------------------------- /db/node/sqlite-vss-linux-arm64/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # sqlite-vss-linux-arm64 4 | 5 | A `sqlite-vss` platform-specific package for `linux-arm64`. 6 | 7 | When `sqlite-vss` is installed and the host computer has a `linux` operating system with `arm64` architecture, then this package is downloaded with the pre-compiled SQLite extension bundled under `lib/vss0.so`. At runtime, the `sqlite-vss` package will resolve to this platform-specific package for use with [`better-sqlite3`](https://github.com/WiseLibs/better-sqlite3)' or [`node-sqlite3`](https://github.com/TryGhost/node-sqlite3). 8 | 9 | See the `sqlite-vss` package for more details. 
-------------------------------------------------------------------------------- /db/node/sqlite-vss-linux-arm64/lib/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/db/node/sqlite-vss-linux-arm64/lib/.gitkeep -------------------------------------------------------------------------------- /db/node/sqlite-vss-linux-arm64/lib/vector0.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/db/node/sqlite-vss-linux-arm64/lib/vector0.so -------------------------------------------------------------------------------- /db/node/sqlite-vss-linux-arm64/lib/vss0.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/db/node/sqlite-vss-linux-arm64/lib/vss0.so -------------------------------------------------------------------------------- /db/node/sqlite-vss-linux-arm64/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "//": "Autogenerated by the npm_generate_platform_packages.sh script, do not edit by hand", 3 | "name": "sqlite-vss-linux-arm64", 4 | "version": "0.1.1", 5 | "repository": { 6 | "type": "git", 7 | "url": "https://github.com/asg017/sqlite-vss.git", 8 | "directory": "npm/sqlite-vss-linux-arm64" 9 | }, 10 | "author": "Alex Garcia ", 11 | "os": [ 12 | "linux" 13 | ], 14 | "cpu": [ 15 | "arm64" 16 | ] 17 | } -------------------------------------------------------------------------------- /db/schema.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE products ( 2 | id INTEGER PRIMARY KEY, 3 | name TEXT, 4 | category TEXT, 5 | subCategory TEXT, 6 | description TEXT, 7 | embedding BLOB 8 | ); 9 | CREATE TABLE IF NOT EXISTS "vss_products_index"(rowid integer primary key autoincrement, idx); 10 | CREATE TABLE sqlite_sequence(name,seq); 11 | CREATE TABLE IF NOT EXISTS "vss_products_data"(rowid integer primary key autoincrement, _); 12 | CREATE VIRTUAL TABLE vss_products using vss0 ( 13 | embedding(1536) 14 | ); 15 | -------------------------------------------------------------------------------- /db/test.js: -------------------------------------------------------------------------------- 1 | import "dotenv-flow/config"; 2 | import { db } from "../src/utils/db.js"; 3 | import { createEmbedding } from "../src/utils/openai.js"; 4 | import inquirer from "inquirer"; 5 | 6 | const ask = await inquirer.prompt([ 7 | { 8 | name: "prompt", 9 | message: "Prompt >", 10 | }, 11 | ]); 12 | 13 | const embeddingQuery = db.prepare(` 14 | SELECT * FROM products WHERE ROWID IN ( 15 | SELECT ROWID FROM vss_products 16 | WHERE vss_search(embedding, vss_search_params(:embedding, 5)) 17 | ) 18 | `); 19 | 20 | const response = embeddingQuery.all({ 21 | embedding: await createEmbedding(ask.prompt), 22 | }); 23 | 24 | response.forEach((product) => { 25 | console.log(`ID: ${product.id}`); 26 | console.log(`Name: ${product.name}`); 27 | console.log(`Category: ${product.category}`); 28 | console.log(`SubCategory: ${product.subCategory}`); 29 | console.log(`Description: ${product.description}`); 30 | console.log(); 31 | }); 32 | -------------------------------------------------------------------------------- /index.html: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | LambdaRAG 13 | 14 | 15 | 16 |
17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /jest.config.js: -------------------------------------------------------------------------------- 1 | export default { 2 | testEnvironment: "node", 3 | transform: {}, 4 | testMatch: ["/test/**/*.test.js"], 5 | globalSetup: "./test/globalSetup.js", 6 | }; 7 | -------------------------------------------------------------------------------- /log/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/log/.keep -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "lambda-rag", 3 | "version": "1.0.0", 4 | "private": true, 5 | "type": "module", 6 | "scripts": { 7 | "v:dev": "npx vite", 8 | "v:build": "npx vite build", 9 | "v:serve": "npx vite preview", 10 | "test": "./bin/test", 11 | "db:create": "rm -rf db/lambdarag.db && node db/create.js", 12 | "db:embeddings": "node db/embedding.js", 13 | "db:clean": "node db/clean.js", 14 | "db:test": "node db/test.js", 15 | "db:schema": "sqlite3 db/lambdarag.db .schema > db/schema.sql" 16 | }, 17 | "dependencies": { 18 | "better-sqlite3": "^8.5.0", 19 | "cors": "^2.8.5", 20 | "dotenv-flow": "^3.2.0", 21 | "express": "^4.18.2", 22 | "marked": "^7.0.2", 23 | "openai": "^4.0.0-beta.8", 24 | "sqlite-vss": "^0.1.1" 25 | }, 26 | "devDependencies": { 27 | "@babel/core": "^7.22.9", 28 | "@vitejs/plugin-vue": "^4.2.3", 29 | "babel-jest": "^29.6.2", 30 | "cache-manager": "^5.1.7", 31 | "cache-manager-fs-hash": "^1.0.0", 32 | "cli-progress": "^3.12.0", 33 | "daisyui": "^3.5.0", 34 | "got": "^11.8.3", 35 | "inquirer": "^9.2.8", 36 | "jest": "^29.6.2", 37 | "jest-environment-node": "^29.6.2", 38 | "nodemon": "^3.0.1", 39 | "p-queue": "^7.3.4", 40 | "papaparse": "^5.4.1", 41 | "pinia": "^2.1.6", 42 | "prettier": "^3.0.0", 43 | "supertest": "^6.3.3", 44 | "tailwindcss": "^3.3.3", 45 | "typescript": "^5.1.6", 46 | "vite": "^4.4.8", 47 | "vitest": "^0.34.1", 48 | "vue": "^3.3.4" 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /postcss.config.js: -------------------------------------------------------------------------------- 1 | export default { 2 | plugins: { 3 | tailwindcss: {}, 4 | }, 5 | }; 6 | -------------------------------------------------------------------------------- /prettier.config.js: -------------------------------------------------------------------------------- 1 | export default {}; 2 | -------------------------------------------------------------------------------- /public/ai-knowledge-retrieval-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/public/ai-knowledge-retrieval-dark.png -------------------------------------------------------------------------------- /public/ai-knowledge-retrieval-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/public/ai-knowledge-retrieval-light.png -------------------------------------------------------------------------------- /public/android-chrome-192x192.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/public/android-chrome-192x192.png -------------------------------------------------------------------------------- /public/android-chrome-512x512.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/public/android-chrome-512x512.png -------------------------------------------------------------------------------- /public/apple-touch-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/public/apple-touch-icon.png -------------------------------------------------------------------------------- /public/avatar-assistant.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/public/avatar-assistant.png -------------------------------------------------------------------------------- /public/avatar-function.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/public/avatar-function.png -------------------------------------------------------------------------------- /public/avatar-user.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/public/avatar-user.png -------------------------------------------------------------------------------- /public/context-window-siccos-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/public/context-window-siccos-dark.png -------------------------------------------------------------------------------- /public/context-window-siccos-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/public/context-window-siccos-light.png -------------------------------------------------------------------------------- /public/favicon-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/public/favicon-16x16.png -------------------------------------------------------------------------------- /public/favicon-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/public/favicon-32x32.png -------------------------------------------------------------------------------- /public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/public/favicon.ico -------------------------------------------------------------------------------- /public/lambda-rag-hats-dark.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/public/lambda-rag-hats-dark.png -------------------------------------------------------------------------------- /public/lambda-rag-hats-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/public/lambda-rag-hats-light.png -------------------------------------------------------------------------------- /public/lambda-rag-hats-wfun-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/public/lambda-rag-hats-wfun-dark.png -------------------------------------------------------------------------------- /public/lambda-rag-hats-wfun-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/public/lambda-rag-hats-wfun-light.png -------------------------------------------------------------------------------- /public/lambda-rag-name-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/public/lambda-rag-name-dark.png -------------------------------------------------------------------------------- /public/lambda-rag-name-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/public/lambda-rag-name-light.png -------------------------------------------------------------------------------- /public/lambda-rag-start-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/public/lambda-rag-start-dark.png -------------------------------------------------------------------------------- /public/lambda-rag-start-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/public/lambda-rag-start-light.png -------------------------------------------------------------------------------- /public/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/public/logo.png -------------------------------------------------------------------------------- /public/rags-to-riches.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/public/rags-to-riches.png -------------------------------------------------------------------------------- /public/site.webmanifest: -------------------------------------------------------------------------------- 1 | 
{"name":"","short_name":"","icons":[{"src":"/android-chrome-192x192.png","sizes":"192x192","type":"image/png"},{"src":"/android-chrome-512x512.png","sizes":"512x512","type":"image/png"}],"theme_color":"#ffffff","background_color":"#ffffff","display":"standalone"} -------------------------------------------------------------------------------- /src-frontend/App.css: -------------------------------------------------------------------------------- 1 | .markdown { 2 | max-width: none; 3 | } 4 | 5 | .markdown h1, 6 | .markdown h2 { 7 | font-weight: 600; 8 | } 9 | 10 | .markdown h2 { 11 | margin-bottom: 1rem; 12 | margin-top: 2rem; 13 | } 14 | 15 | .markdown h3 { 16 | font-weight: 600; 17 | } 18 | 19 | .markdown h3, 20 | .markdown h4 { 21 | margin-bottom: 0.5rem; 22 | margin-top: 1rem; 23 | } 24 | 25 | .markdown h4 { 26 | font-weight: 400; 27 | } 28 | 29 | .markdown h5 { 30 | font-weight: 600; 31 | } 32 | 33 | .markdown p { 34 | margin-bottom: 1rem; 35 | } 36 | 37 | .markdown table { 38 | margin-bottom: 1rem; 39 | } 40 | 41 | .markdown *:last-child { 42 | margin-bottom: 0rem; 43 | } 44 | 45 | .markdown blockquote { 46 | --tw-border-opacity: 1; 47 | border-color: rgba(142, 142, 160, var(--tw-border-opacity)); 48 | border-left-width: 2px; 49 | line-height: 1.5rem; 50 | margin: 0; 51 | padding-bottom: 0.5rem; 52 | padding-left: 1rem; 53 | padding-top: 0.5rem; 54 | } 55 | 56 | .markdown blockquote > p { 57 | margin: 0; 58 | } 59 | 60 | .markdown blockquote > p:after, 61 | .markdown blockquote > p:before { 62 | display: none; 63 | } 64 | 65 | .markdown ol { 66 | counter-reset: list-number; 67 | display: flex; 68 | flex-direction: column; 69 | list-style-type: none; 70 | padding-left: 0; 71 | } 72 | 73 | .markdown ol > li { 74 | counter-increment: list-number; 75 | display: block; 76 | margin-bottom: 0; 77 | margin-top: 0; 78 | min-height: 28px; 79 | } 80 | 81 | .markdown ol > li:before { 82 | --tw-translate-x: -100%; 83 | --tw-text-opacity: 1; 84 | color: rgba(142, 142, 160, var(--tw-text-opacity)); 85 | content: counters(list-number, ".") "."; 86 | padding-right: 0.5rem; 87 | position: absolute; 88 | -webkit-transform: translate(var(--tw-translate-x), var(--tw-translate-y)) 89 | rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) 90 | scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y)); 91 | transform: translate(var(--tw-translate-x), var(--tw-translate-y)) 92 | rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) 93 | scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y)); 94 | } 95 | 96 | :is(.dark .markdown ol > li):before { 97 | --tw-text-opacity: 1; 98 | color: rgba(172, 172, 190, var(--tw-text-opacity)); 99 | } 100 | 101 | .markdown ul, 102 | .markdown ol { 103 | display: flex; 104 | flex-direction: column; 105 | padding-left: 2rem; 106 | margin-bottom: 1rem; 107 | } 108 | 109 | .markdown ul > li { 110 | display: block; 111 | margin: 0; 112 | min-height: 28px; 113 | } 114 | 115 | .markdown ul > li:before { 116 | --tw-translate-x: -100%; 117 | content: "•"; 118 | font-size: 0.875rem; 119 | line-height: 1.25rem; 120 | padding-right: 0.5rem; 121 | position: absolute; 122 | -webkit-transform: translate(var(--tw-translate-x), var(--tw-translate-y)) 123 | rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) 124 | scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y)); 125 | transform: translate(var(--tw-translate-x), var(--tw-translate-y)) 126 | rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) 127 | 
scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y)); 128 | } 129 | 130 | .markdown ol li > p, 131 | .markdown ol li > pre, 132 | .markdown ul li > p, 133 | .markdown ul li > pre { 134 | margin: 0; 135 | } 136 | 137 | .markdown ol ol, 138 | .markdown ol ul, 139 | .markdown ul ol, 140 | .markdown ul ul { 141 | margin-bottom: 0; 142 | margin-left: 1rem; 143 | margin-top: 0; 144 | } 145 | 146 | .markdown table { 147 | --tw-border-spacing-x: 0px; 148 | --tw-border-spacing-y: 0px; 149 | border-collapse: separate; 150 | border-spacing: var(--tw-border-spacing-x) var(--tw-border-spacing-y); 151 | width: 100%; 152 | } 153 | 154 | .markdown th { 155 | background-color: rgba(236, 236, 241, 0.2); 156 | border-bottom-width: 1px; 157 | border-left-width: 1px; 158 | border-top-width: 1px; 159 | padding: 0.25rem 0.75rem; 160 | } 161 | 162 | .markdown th:first-child { 163 | border-top-left-radius: 0.375rem; 164 | } 165 | 166 | .markdown th:last-child { 167 | border-right-width: 1px; 168 | border-top-right-radius: 0.375rem; 169 | } 170 | 171 | .markdown td { 172 | border-bottom-width: 1px; 173 | border-left-width: 1px; 174 | padding: 0.25rem 0.75rem; 175 | } 176 | 177 | .markdown td:last-child { 178 | border-right-width: 1px; 179 | } 180 | 181 | .markdown tbody tr:last-child td:first-child { 182 | border-bottom-left-radius: 0.375rem; 183 | } 184 | 185 | .markdown tbody tr:last-child td:last-child { 186 | border-bottom-right-radius: 0.375rem; 187 | } 188 | 189 | .markdown a { 190 | text-decoration-line: underline; 191 | text-underline-offset: 2px; 192 | } 193 | -------------------------------------------------------------------------------- /src-frontend/App.vue: -------------------------------------------------------------------------------- 1 | 5 | 6 | 19 | 20 | 23 | -------------------------------------------------------------------------------- /src-frontend/components/Message.vue: -------------------------------------------------------------------------------- 1 | 8 | 9 | 39 | 40 | 42 | -------------------------------------------------------------------------------- /src-frontend/components/Messages.vue: -------------------------------------------------------------------------------- 1 | 19 | 20 | 30 | 31 | 33 | -------------------------------------------------------------------------------- /src-frontend/components/Prompt.vue: -------------------------------------------------------------------------------- 1 | 37 | 38 | 83 | 84 | 98 | -------------------------------------------------------------------------------- /src-frontend/components/PromptMessage.vue: -------------------------------------------------------------------------------- 1 | 3 | 4 | 13 | 14 | 20 | -------------------------------------------------------------------------------- /src-frontend/components/Welcome.vue: -------------------------------------------------------------------------------- 1 | 2 | 3 | 17 | 18 | 20 | -------------------------------------------------------------------------------- /src-frontend/main.css: -------------------------------------------------------------------------------- 1 | @import "tailwindcss/base"; 2 | @import "tailwindcss/components"; 3 | @import "tailwindcss/utilities"; 4 | -------------------------------------------------------------------------------- /src-frontend/main.js: -------------------------------------------------------------------------------- 1 | import { createPinia } from "pinia"; 2 | import { createApp } from "vue"; 3 | import App from "@/App.vue"; 4 | import "@/main.css"; 5 | 6 | 
const pinia = createPinia(); 7 | const app = createApp(App); 8 | 9 | app.use(pinia).mount("#app"); 10 | -------------------------------------------------------------------------------- /src-frontend/stores/messages.js: -------------------------------------------------------------------------------- 1 | import { defineStore } from "pinia"; 2 | import { backendHost } from "@/utils/backend.js"; 3 | import { rolePrompt } from "@/utils/roleprompt.js"; 4 | import { functionCaller } from "@/utils/functionCaller.js"; 5 | import { marked } from "marked"; 6 | 7 | export const useMessagesStore = defineStore({ 8 | id: "messages", 9 | state: () => ({ 10 | messages: [], 11 | loading: false, 12 | loadingState: false, 13 | }), 14 | actions: { 15 | add(message) { 16 | if (this.isEmpty) { 17 | this.messages.push({ 18 | role: "user", 19 | content: rolePrompt(message.content), 20 | contentDisplay: message.content, 21 | }); 22 | } else { 23 | this.messages.push(message); 24 | } 25 | }, 26 | remove(message) { 27 | const index = this.messages.indexOf(message); 28 | this.messages.splice(index, 1); 29 | }, 30 | async fetchResponse() { 31 | this.loading = true; 32 | this.loadedStateChange(); 33 | const messages = this.messagesForAPI; 34 | this.add({ role: "assistant", content: "..." }); 35 | const response = await fetch(backendHost("/messages"), { 36 | method: "POST", 37 | headers: { 38 | "Content-Type": "application/json", 39 | }, 40 | body: JSON.stringify({ messages: messages }), 41 | }); 42 | const functionCall = 43 | response.headers.get("content-type") === "text/function-call"; 44 | if (functionCall) { 45 | this.add({ role: "function", content: "", name: "", hidden: true }); 46 | } 47 | const reader = response.body.getReader(); 48 | const message = functionCall 49 | ? this.lastMessageOfRole("function") 50 | : this.lastMessageOfRole("assistant"); 51 | let firstRead = true; 52 | while (true) { 53 | if (firstRead) { 54 | message.content = ""; 55 | } 56 | firstRead = false; 57 | const { done, value } = await reader.read(); 58 | if (done) { 59 | this.loading = false; 60 | this.loadedStateChange(); 61 | break; 62 | } 63 | const content = new TextDecoder().decode(value); 64 | message.content += content; 65 | message.contentDisplay = marked.parse(message.content); 66 | this.loadedStateChange(); 67 | } 68 | if (functionCall) { 69 | const functionData = JSON.parse(message.content); 70 | const functionArgs = JSON.parse(functionData.arguments); 71 | message.name = functionData.name; 72 | message.content = JSON.stringify(functionArgs); 73 | message.contentDisplay = `${message.name}: ${message.content}`; 74 | const embedding = await functionCaller(message); 75 | this.remove(this.lastMessageOfRole("assistant")); 76 | this.add({ 77 | role: "user", 78 | content: JSON.stringify(embedding), 79 | hidden: true, 80 | }); 81 | this.fetchResponse(); 82 | } 83 | }, 84 | loadedStateChange() { 85 | this.loadingState = !this.loadingState; 86 | }, 87 | lastMessageOfRole(role) { 88 | return Array.from(this.messages) 89 | .reverse() 90 | .find((message) => message.role === role); 91 | }, 92 | }, 93 | getters: { 94 | isEmpty() { 95 | return this.messages.length === 0; 96 | }, 97 | isLoading() { 98 | return this.loading; 99 | }, 100 | messagesForAPI() { 101 | return this.messages.map((m) => { 102 | const apiMessage = { role: m.role, content: m.content }; 103 | if (m.role === "function") { 104 | apiMessage.name = m.name; 105 | } 106 | return apiMessage; 107 | }); 108 | }, 109 | }, 110 | }); 111 | 
-------------------------------------------------------------------------------- /src-frontend/utils/backend.js: -------------------------------------------------------------------------------- 1 | export function backendHost(path) { 2 | if (process.env.NODE_ENV === "production") { 3 | return `${path}`; 4 | } else { 5 | return `http://localhost:8080${path}`; 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /src-frontend/utils/functionCaller.js: -------------------------------------------------------------------------------- 1 | import { backendHost } from "@/utils/backend.js"; 2 | 3 | export const functionCaller = async (message) => { 4 | const queryParams = JSON.parse(message.content); 5 | const queryString = new URLSearchParams(queryParams).toString(); 6 | const pathParams = `/functions/${message.name}?${queryString}`; 7 | const response = await fetch(backendHost(pathParams), { 8 | method: "GET", 9 | headers: { "Content-Type": "application/json" }, 10 | }); 11 | return response.json(); 12 | }; 13 | -------------------------------------------------------------------------------- /src-frontend/utils/roleprompt.js: -------------------------------------------------------------------------------- 1 | export const rolePrompt = (content) => { 2 | return ` 3 | Please Follow These Rules: 4 | 5 | - Act as a Luxury Apparel retrieval augmented generation (RAG) chat bot. 6 | - You have access to detailed Luxury Apparel data: Product Name, Product Category, Product Subcategory, and Product Description. 7 | - Your objective is to help demonstrate the use of Lambda using the Luxury Apparel data. 8 | - Use a voice that is welcoming yet business/sales oriented. Try to be concise. 9 | - Say 'I don't have data on that' when the question can't be answered. 
10 | 11 | USER QUESTION: 12 | 13 | ${content}`; 14 | }; 15 | -------------------------------------------------------------------------------- /src/app.js: -------------------------------------------------------------------------------- 1 | import env from "./env.js"; 2 | import url from "url"; 3 | import path from "path"; 4 | import express from "express"; 5 | import cors from "cors"; 6 | import requestLoggingMiddleware from "./middleware/logger.js"; 7 | import { messagesRouter } from "./routes/messages.js"; 8 | import { functionsRouter } from "./routes/functions.js"; 9 | 10 | const __filename = url.fileURLToPath(import.meta.url); 11 | const __dirname = path.dirname(__filename); 12 | 13 | const app = express(); 14 | app.use(express.json()); 15 | app.use(requestLoggingMiddleware); 16 | app.get("/is_it_up", (_req, res, _next) => { 17 | res.json({ code: 200, status: "ok" }); 18 | }); 19 | 20 | if (env.isDevelopment) { 21 | app.use( 22 | cors({ 23 | origin: "http://localhost:5173", 24 | methods: "GET,HEAD,PUT,PATCH,POST,DELETE", 25 | credentials: true, 26 | optionsSuccessStatus: 204, 27 | }), 28 | ); 29 | } 30 | 31 | app.use("/messages", messagesRouter); 32 | app.use("/functions", functionsRouter); 33 | 34 | app.use(express.static(path.join(__dirname, "../public"))); 35 | app.use(express.static("/tmp")); 36 | 37 | app.listen(8080, () => { 38 | console.log("Listening on port: 8080"); 39 | }); 40 | 41 | export default app; 42 | -------------------------------------------------------------------------------- /src/env.js: -------------------------------------------------------------------------------- 1 | import "dotenv-flow/config"; 2 | 3 | const STAGE = process.env.NODE_ENV || "development"; 4 | 5 | class Env { 6 | constructor() { 7 | this.stage = STAGE; 8 | } 9 | 10 | get isDevelopment() { 11 | return this.stage === "development"; 12 | } 13 | 14 | get isTest() { 15 | return this.stage === "test"; 16 | } 17 | 18 | get isProduction() { 19 | return this.stage === "prod"; 20 | } 21 | } 22 | 23 | export default new Env(); 24 | -------------------------------------------------------------------------------- /src/middleware/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/src/middleware/.keep -------------------------------------------------------------------------------- /src/middleware/logger.js: -------------------------------------------------------------------------------- 1 | export default (req, _res, next) => { 2 | console.log("INCOMING REQUEST", req.method, req.url); 3 | console.log("INCOMING REQUEST HEADERS", JSON.stringify(req.headers, null, 2)); 4 | if (req.body && Object.keys(req.body).length > 0) { 5 | console.log("INCOMING REQUEST BODY", JSON.stringify(req.body, null, 2)); 6 | } 7 | next(); 8 | }; 9 | -------------------------------------------------------------------------------- /src/models/products.js: -------------------------------------------------------------------------------- 1 | import { db } from "../utils/db.js"; 2 | import { createEmbedding } from "../utils/openai.js"; 3 | 4 | export const searchProducts = async (params) => { 5 | const embedding = await createEmbedding(params.query); 6 | const binds = { limit: params.limit || 5, embedding: embedding }; 7 | let sql = ` 8 | SELECT * FROM products 9 | WHERE ROWID IN ( 10 | SELECT ROWID FROM vss_products 11 | WHERE vss_search(embedding, vss_search_params(:embedding, :limit)) 12 | ) 13 | `; 14 | 
if (params.category) { 15 | binds.category = params.category; 16 | sql += "AND category = :category\n"; 17 | } 18 | sql += "LIMIT :limit"; 19 | return db.prepare(sql).all(binds); 20 | }; 21 | -------------------------------------------------------------------------------- /src/routes/functions.js: -------------------------------------------------------------------------------- 1 | import express from "express"; 2 | import { searchProducts } from "../models/products.js"; 3 | 4 | export const functionsRouter = express.Router(); 5 | 6 | functionsRouter.get("/:functionName", async (req, res) => { 7 | const functionName = req.params.functionName; 8 | switch (functionName) { 9 | case "search_products": 10 | res.json(await searchProducts(req.query)); 11 | break; 12 | default: 13 | res.status(404).send("Function not found"); 14 | } 15 | }); 16 | -------------------------------------------------------------------------------- /src/routes/messages.js: -------------------------------------------------------------------------------- 1 | import express from "express"; 2 | import { openai } from "../utils/openai.js"; 3 | import { functions } from "../utils/functions.js"; 4 | 5 | export const messagesRouter = express.Router(); 6 | 7 | messagesRouter.post("", async (req, res, next) => { 8 | let functionCall, firstPartStreamed; 9 | const stream = await openai.chat.completions.create({ 10 | model: "gpt-3.5-turbo-16k", 11 | messages: req.body.messages, 12 | stream: true, 13 | functions: functions, 14 | }); 15 | res.setHeader("Content-Type", "text/plain"); 16 | res.setHeader("Transfer-Encoding", "chunked"); 17 | for await (const part of stream) { 18 | if ( 19 | !firstPartStreamed && 20 | !functionCall && 21 | part.choices[0]?.delta.function_call 22 | ) { 23 | functionCall = part.choices[0]?.delta.function_call; 24 | res.setHeader("Content-Type", "text/function-call"); 25 | } else { 26 | if (functionCall) { 27 | functionCall.arguments += 28 | part.choices[0]?.delta.function_call?.arguments || ""; 29 | } else { 30 | res.write(part.choices[0]?.delta.content || ""); 31 | } 32 | } 33 | firstPartStreamed = true; 34 | } 35 | if (functionCall) { 36 | res.write(JSON.stringify(functionCall)); 37 | } 38 | res.end(); 39 | }); 40 | -------------------------------------------------------------------------------- /src/utils/db.js: -------------------------------------------------------------------------------- 1 | import Database from "better-sqlite3"; 2 | import * as sqlite_vss from "sqlite-vss"; 3 | 4 | import url from "url"; 5 | import path from "path"; 6 | const __filename = url.fileURLToPath(import.meta.url); 7 | const __dirname = path.dirname(__filename); 8 | 9 | const dbFile = path.join(__dirname, "../../db/lambdarag.db"); 10 | export const db = new Database(dbFile, { 11 | readonly: !!process.env.AWS_EXECUTION_ENV, 12 | }); 13 | sqlite_vss.load(db); 14 | -------------------------------------------------------------------------------- /src/utils/functions.js: -------------------------------------------------------------------------------- 1 | import url from "url"; 2 | import path from "path"; 3 | const __filename = url.fileURLToPath(import.meta.url); 4 | const __dirname = path.dirname(__filename); 5 | import { readFileSync } from "fs"; 6 | 7 | const functionsFile = path.join(__dirname, "./functions.json"); 8 | const functions = JSON.parse(readFileSync(functionsFile, "utf8")); 9 | 10 | export { functions }; 11 | -------------------------------------------------------------------------------- /src/utils/functions.json: 
-------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "search_products", 4 | "description": "Vector search all products. Uses a product's name, heading, description, details, primary category name, variation info, available sizes, and sizing info", 5 | "parameters": { 6 | "type": "object", 7 | "properties": { 8 | "query": { 9 | "type": "string", 10 | "description": "The vector query to search for" 11 | }, 12 | "limit": { 13 | "type": "integer", 14 | "description": "The number of products to return. Defaults to 5", 15 | "default": 5 16 | }, 17 | "category": { 18 | "type": "string", 19 | "description": "The root category name to focus search results on", 20 | "enum": [ 21 | "Accessories", 22 | "Activewear", 23 | "Jackets/Coats", 24 | "Jewelry", 25 | "Pants", 26 | "Shirts", 27 | "Shoes", 28 | "Suits", 29 | "Sweaters", 30 | "Underwear and Nightwear" 31 | ] 32 | } 33 | }, 34 | "required": ["query"] 35 | } 36 | } 37 | ] 38 | -------------------------------------------------------------------------------- /src/utils/openai.js: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "openai"; 2 | 3 | export const openai = new OpenAI({ 4 | apiKey: process.env.OPENAI_API_KEY, 5 | }); 6 | 7 | export const createEmbedding = async (query) => { 8 | const response = await openai.embeddings.create({ 9 | model: "text-embedding-ada-002", 10 | input: query, 11 | }); 12 | return JSON.stringify(response.data[0].embedding); 13 | }; 14 | -------------------------------------------------------------------------------- /tailwind.config.js: -------------------------------------------------------------------------------- 1 | /** @type {import('tailwindcss').Config} */ 2 | export default { 3 | content: ["./index.html", "./src-frontend/**/*.{vue,js}"], 4 | theme: { 5 | extend: {}, 6 | }, 7 | plugins: [require("daisyui")], 8 | daisyui: { 9 | themes: ["light", "dark"], 10 | }, 11 | }; 12 | -------------------------------------------------------------------------------- /template.yaml: -------------------------------------------------------------------------------- 1 | AWSTemplateFormatVersion: "2010-09-09" 2 | Transform: AWS::Serverless-2016-10-31 3 | Description: LambdaRAG - RAG Demo 4 | 5 | Globals: 6 | Function: 7 | Architectures: 8 | - arm64 9 | AutoPublishAlias: live 10 | DeploymentPreference: 11 | Type: AllAtOnce 12 | Environment: 13 | Variables: 14 | NODE_ENV: production 15 | AWS_LWA_PORT: 8080 16 | AWS_LWA_READINESS_CHECK_PATH: /is_it_up 17 | AWS_LWA_INVOKE_MODE: response_stream 18 | LD_PRELOAD: /opt/lib/libcrypteia.so 19 | SECRET: x-crypteia-ssm:/lambda-rag/OPENAI_API_KEY 20 | Timeout: 120 21 | 22 | Resources: 23 | Lambda: 24 | Type: AWS::Serverless::Function 25 | Metadata: 26 | Dockerfile: Dockerfile 27 | DockerContext: . 
28 | Properties: 29 | FunctionUrlConfig: 30 | AuthType: NONE 31 | InvokeMode: RESPONSE_STREAM 32 | MemorySize: 1792 33 | PackageType: Image 34 | Policies: 35 | - SSMParameterReadPolicy: 36 | ParameterName: lambda-rag/OPENAI_API_KEY 37 | 38 | Outputs: 39 | LambdaRAGLambdaArn: 40 | Description: Lambda Function Arn 41 | Value: !GetAtt Lambda.Arn 42 | LambdaRAGInvokeUrl: 43 | Description: Lambda Function URL 44 | Value: !GetAtt LambdaUrl.FunctionUrl 45 | -------------------------------------------------------------------------------- /test/globalSetup.js: -------------------------------------------------------------------------------- 1 | import env from "../src/env.js"; 2 | 3 | process.env.STAGE_NAME = "test"; 4 | process.env.NODE_ENV = "test"; 5 | 6 | export default function () {} 7 | -------------------------------------------------------------------------------- /test/helper.js: -------------------------------------------------------------------------------- 1 | export default { 2 | helpers: {}, 3 | }; 4 | -------------------------------------------------------------------------------- /test/models/env.test.js: -------------------------------------------------------------------------------- 1 | import env from "../../src/env"; 2 | 3 | let envStage; 4 | 5 | beforeEach(() => { 6 | envStage = env.stage; 7 | }); 8 | 9 | afterEach(() => { 10 | env.stage = envStage; 11 | }); 12 | 13 | test("isDevelopment", () => { 14 | env.stage = "development"; 15 | expect(env.isDevelopment).toBeTruthy(); 16 | }); 17 | 18 | test("isProduction", () => { 19 | env.stage = "prod"; 20 | expect(env.isProduction).toBeTruthy(); 21 | }); 22 | -------------------------------------------------------------------------------- /tmp/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metaskills/lambda-rag/e524756c58a646870aed0bf0af5229d7ac1fe657/tmp/.keep -------------------------------------------------------------------------------- /vite.config.js: -------------------------------------------------------------------------------- 1 | import { fileURLToPath, URL } from "node:url"; 2 | 3 | import { defineConfig } from "vite"; 4 | import vue from "@vitejs/plugin-vue"; 5 | 6 | export default defineConfig({ 7 | plugins: [vue()], 8 | resolve: { 9 | alias: { 10 | "@": fileURLToPath(new URL("./src-frontend", import.meta.url)), 11 | }, 12 | }, 13 | }); 14 | -------------------------------------------------------------------------------- /vitest.config.js: -------------------------------------------------------------------------------- 1 | import { fileURLToPath } from "node:url"; 2 | import { mergeConfig, defineConfig } from "vite"; 3 | import { configDefaults } from "vitest/config"; 4 | import viteConfig from "./vite.config"; 5 | 6 | export default mergeConfig( 7 | viteConfig, 8 | defineConfig({ 9 | test: { 10 | environment: "jsdom", 11 | exclude: [...configDefaults.exclude, "e2e/*"], 12 | root: fileURLToPath(new URL("./", import.meta.url)), 13 | transformMode: { 14 | web: [/\.[jt]sx$/], 15 | }, 16 | }, 17 | }), 18 | ); 19 | --------------------------------------------------------------------------------
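For completeness, a hedged smoke-test sketch (not a file in the repository): it calls the /functions/search_products route that functions.json advertises to the model, the same way src-frontend/utils/functionCaller.js does, assuming the Express backend from src/app.js is listening locally on port 8080. The sample query text is made up; the query, limit, and category parameters come from functions.json.

// Hypothetical smoke test (not part of the repository). Run as an ES module
// with the backend from src/app.js started on http://localhost:8080.
const params = new URLSearchParams({
  query: "lightweight summer jacket", // free-text vector query (required)
  limit: "3",                         // optional, the server defaults to 5
  category: "Jackets/Coats",          // optional, must match the enum in functions.json
});

const response = await fetch(
  `http://localhost:8080/functions/search_products?${params.toString()}`,
  { headers: { "Content-Type": "application/json" } },
);
// Rows from the products table whose embeddings are nearest the query embedding.
console.log(await response.json());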