Query my tweets

Try some sample questions like...

- What television shows does the author watch?
- Does the author like dogs?
- How does the author feel about web frameworks?

├── .gitignore
├── 1_import.py
├── 2_load_and_index.py
├── 3_query.py
├── README.md
├── docs
│   ├── 1_create.png
│   ├── 2_mongo.png
│   ├── 3_vcore.png
│   ├── 4_configure.png
│   ├── 5_networking.png
│   ├── 6_confirm.png
│   ├── 7_connection.png
│   ├── 8_env_vars.png
│   └── 9_ip_addresses.png
├── flask_app
│   ├── app.py
│   └── requirements.txt
├── next_app
│   ├── .gitignore
│   ├── README.md
│   ├── app
│   │   ├── favicon.ico
│   │   ├── globals.css
│   │   ├── layout.js
│   │   ├── page.js
│   │   └── page.module.css
│   ├── jsconfig.json
│   ├── next.config.js
│   ├── package-lock.json
│   ├── package.json
│   └── public
│       ├── next.svg
│       └── vercel.svg
├── poetry.lock
├── pyproject.toml
└── tinytweets.json

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
.venv
.env
tweets.json
fewertweets.json
__pycache__

--------------------------------------------------------------------------------
/1_import.py:
--------------------------------------------------------------------------------
## This script imports the tinytweets.json file into your Cosmos database.
## It will work for any JSON file containing a single array of objects.
## There's nothing specific to LlamaIndex going on here;
## you can get your data into Mongo any way you like.

json_file = 'tinytweets.json'

# Load environment variables from local .env file
from dotenv import load_dotenv
load_dotenv()

import os
import json
from pymongo.mongo_client import MongoClient

# Load the tweets from a local file
with open(json_file, 'r') as f:
    tweets = json.load(f)

# Create a new client and connect to the server
client = MongoClient(os.getenv('MONGODB_URI'))
db = client[os.getenv("MONGODB_DATABASE")]
collection = db[os.getenv("MONGODB_COLLECTION")]

# Insert the tweets into mongo
collection.insert_many(tweets)
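
# Optional sanity check (safe to delete): confirm the tweets actually landed
# in the collection before you move on to indexing.
print(f"Imported {collection.count_documents({})} documents")
print(collection.find_one({}, {"full_text": 1}))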

--------------------------------------------------------------------------------
/2_load_and_index.py:
--------------------------------------------------------------------------------
## This script loads data from a Mongo database into an index.
## It will convert all the documents in the database into vectors,
## which requires a call to OpenAI for each one, so it can take some time.
## Once the data is indexed, it will be stored as a new collection in MongoDB,
## and you can query it without having to re-index every time.
from dotenv import load_dotenv
load_dotenv()

# This will turn on really noisy logging if you want it, but it will slow things down
# import logging
# import sys
# logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))

import os
from llama_index.readers.mongo import SimpleMongoReader
from pymongo.mongo_client import MongoClient
from llama_index.vector_stores.azurecosmosmongo import AzureCosmosDBMongoDBVectorSearch
from llama_index.indices.vector_store.base import VectorStoreIndex
from llama_index.storage.storage_context import StorageContext

# Load objects from Mongo and convert them into LlamaIndex Document objects.
# LlamaIndex has a special class that does this for you:
# it pulls every object in a given collection.
query_dict = {}
reader = SimpleMongoReader(uri=os.getenv("MONGODB_URI"))
documents = reader.load_data(
    os.getenv("MONGODB_DATABASE"),
    os.getenv("MONGODB_COLLECTION"),  # the collection where the objects you loaded in 1_import.py got stored
    field_names=["full_text"],  # this is a list of the top-level fields in your objects that will be indexed;
                                # make sure your objects have a field called "full_text" or change this value
    query_dict=query_dict  # a mongo query dict that will filter your data if you don't want to index everything
)

# Create a new client and connect to the server
client = MongoClient(os.getenv("MONGODB_URI"))

# create Azure Cosmos DB as a vector store
store = AzureCosmosDBMongoDBVectorSearch(
    client,
    db_name=os.getenv('MONGODB_DATABASE'),
    collection_name=os.getenv('MONGODB_VECTORS'),  # this is where your embeddings will be stored
    index_name=os.getenv('MONGODB_VECTOR_INDEX')  # this is the name of the index you will need to create
)

# now create an index from all the Documents and store them in Azure Cosmos DB
storage_context = StorageContext.from_defaults(vector_store=store)
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context,
    show_progress=True,  # this will show you a progress bar as the embeddings are created
)

# You can't query your index yet: you first need to create a vector search index in MongoDB's UI.
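
# A sketch of creating that index from code instead of the UI. Assumptions to
# verify against your own setup: Azure Cosmos DB for MongoDB vCore's IVF vector
# index syntax, LlamaIndex's default "embedding" field name for stored vectors,
# and OpenAI's default 1536-dimension embeddings.
db = client[os.getenv("MONGODB_DATABASE")]
db.command({
    "createIndexes": os.getenv("MONGODB_VECTORS"),
    "indexes": [{
        "name": os.getenv("MONGODB_VECTOR_INDEX"),
        "key": {"embedding": "cosmosSearch"},
        "cosmosSearchOptions": {
            "kind": "vector-ivf",
            "numLists": 1,          # fine for ~1000 tweets; scale up with your data
            "similarity": "COS",    # cosine similarity
            "dimensions": 1536,
        },
    }],
})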

--------------------------------------------------------------------------------
/3_query.py:
--------------------------------------------------------------------------------
## This shows how to load your pre-indexed data from Mongo and query it.
## Note that you MUST manually create a vector search index before this will work,
## and you must pass in the name of that index when connecting to MongoDB below.
from dotenv import load_dotenv
load_dotenv()

# Turns on really noisy logging
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))

import os
from pymongo.mongo_client import MongoClient
from llama_index.vector_stores.azurecosmosmongo import AzureCosmosDBMongoDBVectorSearch
from llama_index.indices.vector_store.base import VectorStoreIndex

# Create a new client and connect to the server
client = MongoClient(os.getenv("MONGODB_URI"))

# connect to Azure Cosmos DB as a vector store
store = AzureCosmosDBMongoDBVectorSearch(
    client,
    db_name=os.getenv('MONGODB_DATABASE'),  # the database where you stored your embeddings
    collection_name=os.getenv('MONGODB_VECTORS'),  # where your embeddings were stored by 2_load_and_index.py
    index_name=os.getenv('MONGODB_VECTOR_INDEX')  # the name of the index you created after loading your data
)
index = VectorStoreIndex.from_vector_store(store)

# Query your data!
# Here we have customized the number of documents returned per query to 20, because tweets are really short.
query_engine = index.as_query_engine(similarity_top_k=20)
response = query_engine.query("What does the author think of web frameworks?")
print(response)

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# LlamaIndex retrieval augmented generation
## with Azure Cosmos DB, Flask and Next.js

See this demo [in action](https://mongodb-demo-frontend.onrender.com/)!

## What is this?

LlamaIndex is an open-source framework that lets you build AI applications powered by large language models (LLMs) like OpenAI's GPT-4. This application demonstrates how to do that, taking you from scratch to a fully deployed web application. We'll show you how to run everything in this repo and then start customizing it for your own needs.

In this example, we'll be using Azure Cosmos DB for MongoDB as both a data store and a vector store. Our back end will be a Python API powered by Flask, and our front end will be a JavaScript web app written in Next.js.

The app will load a series of tweets (taken from a Twitter archive export) and then index them to answer questions about the author and their opinions.

## What we'll be doing

The basic steps of this demo are:
* Get data from a JSON file into a Mongo database (this step is optional if you already have data in Mongo)
* Index the data using LlamaIndex. This converts your text into [vector embeddings](https://docs.llamaindex.ai/en/stable/understanding/indexing/indexing.html#what-is-an-embedding) by calling OpenAI under the hood (queries later use gpt-3.5-turbo), so you'll need an OpenAI API key, and it can take some time depending on how much data you have
* Store the embedded data back into MongoDB. LlamaIndex will do this for you automatically.
* Create a Vector Search Index in MongoDB. This is a manual step you'll need to perform in the MongoDB UI.
* Query the data! This will demonstrate that the data is now queryable. Then you'll want to build an app on top of it.
* Set up a Flask API in Python to answer questions about the data, hosted on [Render](https://render.com).
* Set up a Next.js front-end in JavaScript, also hosted on Render. This will accept user questions, pass them to the API, and display the results.

## Instructions

You can either use these instructions as a tutorial and rebuild the application from scratch, or clone the repo and use it as a template.

### Before you begin

We'll assume you have a current version of Python installed (3.11.6 or later), as well as Node.js for the front-end (version 20 or later) and git for source control.
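
If you want to double-check your local toolchain first, here's a tiny sketch; it assumes `node` is on your PATH:

```python
# Quick toolchain check: verify the Python and Node versions assumed below.
import subprocess
import sys

assert sys.version_info >= (3, 11), f"Python 3.11+ expected, got {sys.version}"
print(sys.version)
print(subprocess.check_output(["node", "--version"], text=True).strip())  # expect v20 or later
```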

### Get the code

First clone this repo:

```
git clone git@github.com:run-llama/azure-cosmos-db-demo.git
```

### Create an Azure Cosmos DB for MongoDB cluster

We'll be using the hosted database service, Azure Cosmos DB for MongoDB. You can sign up and get a small hosted cluster for free. Navigate to Azure Cosmos DB and select "Create":

![](docs/1_create.png)

The signup process will walk you through creating your cluster and making sure it's configured so you can access it. It's important to select "Azure Cosmos DB for MongoDB" as the API:

![](docs/2_mongo.png)

And because we want to do vector search, you'll need to select a vCore cluster:

![](docs/3_vcore.png)

When configuring your cluster, make sure to select the Free tier, and also **record the username and password you use**, since you'll need them later to connect:

![](docs/4_configure.png)

When configuring networking, for the purposes of the demo select the "Add 0.0.0.0 - 255.255.255.255" link. This will open up your database to every IP in the world, which is not secure, but it's convenient for a demo.

![](docs/5_networking.png)

Once your cluster is provisioned, select it and navigate to "connection strings", where you'll find the connection string we'll need; you'll need to combine it with the username and password you created and noted earlier.

![](docs/7_connection.png)

### Set up environment variables

Copy the connection string, add your username and password, and put it into a file called `.env` in the root of this repo. It should look something like this:

```
MONGODB_URI=mongodb+srv://xxxxxxxxx:yyyyyyyyyy@llamaindex-demo.mongocluster.cosmos.azure.com/?tls=true&authMechanism=SCRAM-SHA-256&retrywrites=false&maxIdleTimeMS=120000
```

You will also need to choose a name for your database and for the collection where we will store the tweets, and include them in `.env` too. They can be any strings, but this is what we used:

```
MONGODB_DATABASE=tiny_tweets_db
MONGODB_COLLECTION=tiny_tweets_collection
```

### Set up a Python virtual environment and install dependencies

To avoid colliding with other Python dependencies, it's a good idea to create a Python virtual environment to work in. There are lots of ways to do this, but the way we did it is to run this in the root of the repo:

```bash
python3 -m venv .venv
source .venv/bin/activate
```

Now we'll install all the dependencies we need in one go with pip:

```bash
pip install -r flask_app/requirements.txt
```

This installs the MongoDB drivers, LlamaIndex itself, and some utility libraries.

### Import tweets into MongoDB

You are now ready to import our ready-made data set into Mongo. This is the file `tinytweets.json`, a selection of approximately 1000 tweets from @seldo on Twitter in mid-2019. With your environment set up, you can do this by running

```
python 1_import.py
```

If you're curious, the code is below. If you don't want to use tweets, you can replace `json_file` with any other array of JSON objects, but you will need to modify some code later to make sure the correct field gets indexed. There is no LlamaIndex-specific code here; you can load your data into Mongo any way you want to.

```python
json_file = 'tinytweets.json'

# Load environment variables from local .env file
from dotenv import load_dotenv
load_dotenv()

import os
import json
from pymongo.mongo_client import MongoClient

# Load the tweets from a local file
with open(json_file, 'r') as f:
    tweets = json.load(f)

# Create a new client and connect to the server
client = MongoClient(os.getenv('MONGODB_URI'))
db = client[os.getenv("MONGODB_DATABASE")]
collection = db[os.getenv("MONGODB_COLLECTION")]

# Insert the tweets into mongo
collection.insert_many(tweets)
```
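
Before moving on, you can optionally confirm the import worked. A minimal sketch, assuming the same `.env` values as above:

```python
# Optional sanity check: count the imported tweets and look at one of them.
import os

from dotenv import load_dotenv
from pymongo.mongo_client import MongoClient

load_dotenv()
client = MongoClient(os.getenv("MONGODB_URI"))
collection = client[os.getenv("MONGODB_DATABASE")][os.getenv("MONGODB_COLLECTION")]

print(collection.count_documents({}))  # should be roughly 1000 for tinytweets.json
print(collection.find_one({}, {"full_text": 1}))  # spot-check a single tweet
```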

### Load and index your data

Now we're ready to index our data. To do this, LlamaIndex will pull your text out of Mongo, split it into chunks, and then send those chunks to OpenAI to be turned into [vector embeddings](https://docs.llamaindex.ai/en/stable/understanding/indexing/indexing.html#what-is-an-embedding). The embeddings will then be stored in a new collection in Mongo. This will take a while depending on how much text you have, but the good news is that once it's done you will be able to query quickly without needing to re-index.

We'll be using OpenAI to do the embedding, so now is when you need to [generate an OpenAI API key](https://platform.openai.com/account/api-keys) if you haven't already, and add it to your `.env` file like this:

```
OPENAI_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
```

You'll also need to pick a name for the new collection where the embeddings will be stored, and add it to `.env`, along with the name of a vector search index (we'll be creating this in the next step, after you've indexed your data):

```
MONGODB_VECTORS=tiny_tweets_vectors
MONGODB_VECTOR_INDEX=tiny_tweets_vector_index
```

If the data you're indexing is the tweets we gave you, you're ready to go:

```bash
python 2_load_and_index.py
```

You can view the full source code of this script, but let's highlight a few important parts:

```python
query_dict = {}
reader = SimpleMongoReader(uri=os.getenv("MONGODB_URI"))
documents = reader.load_data(
    os.getenv("MONGODB_DATABASE"),
    os.getenv("MONGODB_COLLECTION"),
    field_names=["full_text"],
    query_dict=query_dict
)
```

What you're doing here is creating a Reader which loads the data out of Mongo from the collection and database specified. It looks for text in a set of specific keys in each object; in this case we've given it just one key, "full_text". The final parameter is a mongo [query document](https://www.mongodb.com/docs/manual/tutorial/query-documents/), a JSON object you can use to filter your objects down to a subset. We're leaving it empty because we want all the tweets in the collection.

```python
# Create a new client and connect to the server
client = MongoClient(os.getenv("MONGODB_URI"))

# create Azure Cosmos DB as a vector store
store = AzureCosmosDBMongoDBVectorSearch(
    client,
    db_name=os.getenv('MONGODB_DATABASE'),
    collection_name=os.getenv('MONGODB_VECTORS'),
    index_name=os.getenv('MONGODB_VECTOR_INDEX')
)
```

Now you're creating a vector search client for Mongo. In addition to a MongoDB client object, you again tell it what database everything is in. This time you give it the name of the collection where you'll store the vector embeddings, and the name of the vector search index that will index those embeddings.

This process can take a while, so when we kick it off we set the `show_progress` parameter to `True`, which prints a convenient little progress bar:

```python
storage_context = StorageContext.from_defaults(vector_store=store)
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context,
    show_progress=True
)
```

Once that's complete, your embeddings are stored, but there's one manual step left before you can query them: creating the vector search index itself.
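
You can create the index in the MongoDB UI as mentioned above, or script it with pymongo. Here's a rough sketch (the same one appended to `2_load_and_index.py`); it assumes Azure Cosmos DB for MongoDB vCore's IVF vector index syntax, that LlamaIndex stored the vectors in a field named `embedding`, and OpenAI's default 1536-dimension embeddings, so verify each of those against your own setup:

```python
import os

from dotenv import load_dotenv
from pymongo.mongo_client import MongoClient

load_dotenv()
client = MongoClient(os.getenv("MONGODB_URI"))
db = client[os.getenv("MONGODB_DATABASE")]

# Create an IVF vector index over the embeddings collection.
db.command({
    "createIndexes": os.getenv("MONGODB_VECTORS"),
    "indexes": [{
        "name": os.getenv("MONGODB_VECTOR_INDEX"),
        "key": {"embedding": "cosmosSearch"},  # assumed embedding field name
        "cosmosSearchOptions": {
            "kind": "vector-ivf",
            "numLists": 1,          # fine for ~1000 tweets; scale up with your data
            "similarity": "COS",    # cosine similarity
            "dimensions": 1536,     # assumed: OpenAI's default embedding size
        },
    }],
})
```

With the index in place, you're ready to query your data!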

### Run a test query

You can do this by running

```bash
python 3_query.py
```

This sets up a connection to Azure Cosmos DB just like `2_load_and_index.py` did, then creates a [query engine](https://docs.llamaindex.ai/en/stable/understanding/querying/querying.html#getting-started) and runs a query against it:

```python
query_engine = index.as_query_engine(similarity_top_k=20)
response = query_engine.query("What does the author think of web frameworks?")
print(response)
```

If all is well, you should get a nuanced opinion about web frameworks.

### Set up a new repo

Now we have a way to quickly query our data using an LLM. But we want an app! To do that, we're going to set up a Python Flask API as a back-end and a JavaScript Next.js app as a front-end. We're going to deploy both of them to [Render](https://render.com), and to do that we need them to be in a GitHub repo. So let's do that:

1. Create a new public GitHub repository
2. Clone it to your local machine
3. Copy all the files from this repo to the new repo (make sure you don't include the `.git` folder)
4. Commit and push the files to GitHub

For the rest of this tutorial we're going to assume you're working in the folder you just created, attached to a repo you control.

### Run the Flask API

The details of creating a Flask app are out of scope for this tutorial, but you can find one already set up for you in `flask_app` in the repo. It sets up a Mongo client just like we did in `3_query.py`, and it has one real route, `process_form`, which accepts a `query` parameter:

```python
@app.route('/process_form', methods=['POST'])
@cross_origin()
def process_form():
    query = request.form.get('query')
    if query is not None:
        # here we have customized the number of documents returned per query to 20, because tweets are really short
        query_engine = index.as_query_engine(similarity_top_k=20)
        response = query_engine.query(query)
        return jsonify({"response": str(response)})
    else:
        return jsonify({"error": "query field is missing"}), 400
```

(The `@cross_origin()` decorator is necessary to allow the front-end to make requests to the API.)

You can run it locally from inside the `flask_app` directory by running

```bash
flask run
```

And you can check it's running by going to [http://127.0.0.1:5000](http://127.0.0.1:5000) in your browser. You should get a "Hello, world!" response.
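
If you want to exercise the real endpoint and not just the index page, here's a quick sketch using the `requests` library (assuming the dev server above is still running on its default port):

```python
import requests

# POST a question to the local Flask API, the same way the front-end will.
resp = requests.post(
    "http://127.0.0.1:5000/process_form",
    data={"query": "Does the author like dogs?"},
)
resp.raise_for_status()
print(resp.json()["response"])
```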
Select "build and deploy from a github repository" then select the repo you created above. 260 | 2. Give the service a unique name 261 | 3. Set the root directory to `flask_app` 262 | 4. Set the runtime to Python 3 263 | 5. Select the Free tier 264 | 6. Set `gunicorn app:app --timeout 120` as your start command (the responses can take a while to generate) 265 | 7. **Important: set `PYTHON_VERSION`**. Scroll down to "environment variables" and set the `key` to `PYTHON_VERSION` and the value to `3.11.6` (or whatever version you're using locally) 266 | 267 | ### Add your `.env` environment variables to Render 268 | 269 | In the same way that you set `PYTHON_VERSION` you should now set all the other environment variables from your `.env` file in your Render environment. Your code needs to know where to connect to Mongo, etc.. So it should eventually look like this: 270 | 271 |  272 | 273 | With all this done, your API should now be up and running and able to connect to MongoDB. Time to build a frontend! 274 | 275 | ### Run the Next.js app 276 | 277 | Just like the Flask app, we've already done the heavy lifting for you and you can find the app in `next_app`. To get it going locally, run these in the root of the Next app: 278 | 279 | ```bash 280 | npm install 281 | npm run dev 282 | ``` 283 | 284 | This will give you a local server on [http://127.0.0.1:3000](http://127.0.0.1:3000). If you're already running the Flask API, you should be able to run queries! 285 | 286 | ### Deploy the Next.js app to Render 287 | 288 | Just like the Flask app, we're going to deploy the Next.js app to Render on the free plan. The steps are very similar: 289 | 290 | 1. Select "build and deploy from a github repository" then select the repo you created above. 291 | 2. Give the service a unique name 292 | 3. Set the root directory to `next_app` 293 | 4. Set the runtime to Node.js 294 | 5. Select the Free tier 295 | 296 | ### Set environment variables for Next.js 297 | 298 | Just as with Python, you're going to need to set an environment variable called `NODE_VERSION` to `20` and rebuild your first deploy. 299 | 300 | You're also going to need to tell it where to find the Flask API. To do this, create an environment variable called `NEXT_PUBLIC_API_HOST` and set it to the hostname on Render of your Flask API (in our case, that was `https://mongodb-demo-zpxu.onrender.com/`). 301 | 302 | You don't need to set any of the other environment variables, only your Flask API needs to know how to connect to Mongo. 303 | 304 | Redeploy your Next.js application. 305 | 306 | ### Celebrate! 307 | 308 | If all is well, you should now have a demo app just like ours! You can begin customizing it to your use-case. 

--------------------------------------------------------------------------------
/docs/1_create.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/run-llama/azure-cosmos-db-demo/6e6d35c5745ffd591b4117e84c092a26ab97bda7/docs/1_create.png

--------------------------------------------------------------------------------
/docs/2_mongo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/run-llama/azure-cosmos-db-demo/6e6d35c5745ffd591b4117e84c092a26ab97bda7/docs/2_mongo.png

--------------------------------------------------------------------------------
/docs/3_vcore.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/run-llama/azure-cosmos-db-demo/6e6d35c5745ffd591b4117e84c092a26ab97bda7/docs/3_vcore.png

--------------------------------------------------------------------------------
/docs/4_configure.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/run-llama/azure-cosmos-db-demo/6e6d35c5745ffd591b4117e84c092a26ab97bda7/docs/4_configure.png

--------------------------------------------------------------------------------
/docs/5_networking.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/run-llama/azure-cosmos-db-demo/6e6d35c5745ffd591b4117e84c092a26ab97bda7/docs/5_networking.png

--------------------------------------------------------------------------------
/docs/6_confirm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/run-llama/azure-cosmos-db-demo/6e6d35c5745ffd591b4117e84c092a26ab97bda7/docs/6_confirm.png

--------------------------------------------------------------------------------
/docs/7_connection.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/run-llama/azure-cosmos-db-demo/6e6d35c5745ffd591b4117e84c092a26ab97bda7/docs/7_connection.png

--------------------------------------------------------------------------------
/docs/8_env_vars.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/run-llama/azure-cosmos-db-demo/6e6d35c5745ffd591b4117e84c092a26ab97bda7/docs/8_env_vars.png

--------------------------------------------------------------------------------
/docs/9_ip_addresses.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/run-llama/azure-cosmos-db-demo/6e6d35c5745ffd591b4117e84c092a26ab97bda7/docs/9_ip_addresses.png

--------------------------------------------------------------------------------
/flask_app/app.py:
--------------------------------------------------------------------------------
from dotenv import load_dotenv
load_dotenv()

from flask import Flask, request, jsonify
from flask_cors import CORS, cross_origin
import os
from pymongo.mongo_client import MongoClient
from llama_index.vector_stores.azurecosmosmongo import AzureCosmosDBMongoDBVectorSearch
from llama_index.indices.vector_store.base import VectorStoreIndex

import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))

# Create a new client and connect to the server
client = MongoClient(os.getenv("MONGODB_URI"))

# connect to Azure Cosmos DB as a vector store
store = AzureCosmosDBMongoDBVectorSearch(
    client,
    db_name=os.getenv('MONGODB_DATABASE'),  # the database where you stored your embeddings
    collection_name=os.getenv('MONGODB_VECTORS'),  # where your embeddings were stored by 2_load_and_index.py
    index_name=os.getenv('MONGODB_VECTOR_INDEX')  # the name of the index you created after loading your data
)
index = VectorStoreIndex.from_vector_store(store)

app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'

# This is just so you can easily tell the app is running
@app.route('/')
def hello_world():
    return 'Hello, World!'

@app.route('/process_form', methods=['POST'])
@cross_origin()
def process_form():
    query = request.form.get('query')
    if query is not None:
        # Query your data!
        # Here we have customized the number of documents returned per query to 20, because tweets are really short.
        query_engine = index.as_query_engine(similarity_top_k=20)
        response = query_engine.query(query)
        return jsonify({"response": str(response)})
    else:
        return jsonify({"error": "query field is missing"}), 400

--------------------------------------------------------------------------------
/flask_app/requirements.txt:
--------------------------------------------------------------------------------
aiohttp==3.8.6
aiosignal==1.3.1
aiostream==0.5.2
annotated-types==0.6.0
anyio==3.7.1
async-timeout==4.0.3
attrs==23.1.0
certifi==2023.7.22
charset-normalizer==3.3.1
click==8.1.7
dataclasses-json==0.5.14
Deprecated==1.2.14
dnspython==2.4.2
Flask==2.2.5
Flask-Cors==4.0.0
frozenlist==1.4.0
fsspec==2023.10.0
greenlet==3.0.1
gunicorn==21.2.0
idna==3.4
itsdangerous==2.1.2
Jinja2==3.1.2
joblib==1.3.2
jsonpatch==1.33
jsonpointer==2.4
langchain==0.0.325
langsmith==0.0.53
llama-index==0.8.55
MarkupSafe==2.1.3
marshmallow==3.20.1
multidict==6.0.4
mypy-extensions==1.0.0
nest-asyncio==1.5.8
nltk==3.8.1
numpy==1.26.1
openai==0.28.1
packaging==23.2
pandas==2.1.2
pydantic==2.4.2
pydantic_core==2.10.1
pymongo==4.5.0
python-dateutil==2.8.2
python-dotenv==1.0.0
pytz==2023.3.post1
PyYAML==6.0.1
regex==2023.10.3
requests==2.31.0
six==1.16.0
sniffio==1.3.0
SQLAlchemy==2.0.22
tenacity==8.2.3
tiktoken==0.5.1
tqdm==4.66.1
typing-inspect==0.9.0
typing_extensions==4.8.0
tzdata==2023.3
urllib3==1.26.18
Werkzeug==2.2.3
wrapt==1.15.0
yarl==1.9.2

--------------------------------------------------------------------------------
/next_app/.gitignore:
--------------------------------------------------------------------------------
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.

# dependencies
/node_modules
/.pnp
.pnp.js
.yarn/install-state.gz

# testing
/coverage

# next.js
/.next/
/out/

# production
/build

# misc
.DS_Store
*.pem

# debug
npm-debug.log*
yarn-debug.log*
yarn-error.log*

# local env files
.env*.local

# vercel
.vercel

# typescript
*.tsbuildinfo
next-env.d.ts

--------------------------------------------------------------------------------
/next_app/README.md:
--------------------------------------------------------------------------------
This is a [Next.js](https://nextjs.org/) project bootstrapped with [`create-next-app`](https://github.com/vercel/next.js/tree/canary/packages/create-next-app).

## Getting Started

First, run the development server:

```bash
npm run dev
# or
yarn dev
# or
pnpm dev
# or
bun dev
```

Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.

You can start editing the page by modifying `app/page.js`. The page auto-updates as you edit the file.

This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-optimization) to automatically optimize and load Inter, a custom Google Font.

## Learn More

To learn more about Next.js, take a look at the following resources:

- [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API.
- [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial.

You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js/) - your feedback and contributions are welcome!

## Deploy on Vercel

The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js.

Check out our [Next.js deployment documentation](https://nextjs.org/docs/deployment) for more details.

--------------------------------------------------------------------------------
/next_app/app/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/run-llama/azure-cosmos-db-demo/6e6d35c5745ffd591b4117e84c092a26ab97bda7/next_app/app/favicon.ico

--------------------------------------------------------------------------------
/next_app/app/globals.css:
--------------------------------------------------------------------------------
:root {
  --max-width: 1100px;
  --border-radius: 12px;
  --font-mono: ui-monospace, Menlo, Monaco, 'Cascadia Mono', 'Segoe UI Mono',
    'Roboto Mono', 'Oxygen Mono', 'Ubuntu Monospace', 'Source Code Pro',
    'Fira Mono', 'Droid Sans Mono', 'Courier New', monospace;

  --foreground-rgb: 0, 0, 0;
  --background-start-rgb: 214, 219, 220;
  --background-end-rgb: 255, 255, 255;

  --primary-glow: conic-gradient(
    from 180deg at 50% 50%,
    #16abff33 0deg,
    #0885ff33 55deg,
    #54d6ff33 120deg,
    #0071ff33 160deg,
    transparent 360deg
  );
  --secondary-glow: radial-gradient(
    rgba(255, 255, 255, 1),
    rgba(255, 255, 255, 0)
  );

  --tile-start-rgb: 239, 245, 249;
  --tile-end-rgb: 228, 232, 233;
  --tile-border: conic-gradient(
    #00000080,
    #00000040,
    #00000030,
    #00000020,
    #00000010,
    #00000010,
    #00000080
  );

  --callout-rgb: 238, 240, 241;
  --callout-border-rgb: 172, 175, 176;
  --card-rgb: 180, 185, 188;
  --card-border-rgb: 131, 134, 135;
}

@media (prefers-color-scheme: dark) {
  :root {
    --foreground-rgb: 255, 255, 255;
    --background-start-rgb: 0, 0, 0;
    --background-end-rgb: 0, 0, 0;

    --primary-glow: radial-gradient(rgba(1, 65, 255, 0.4), rgba(1, 65, 255, 0));
    --secondary-glow: linear-gradient(
      to bottom right,
      rgba(1, 65, 255, 0),
      rgba(1, 65, 255, 0),
      rgba(1, 65, 255, 0.3)
    );

    --tile-start-rgb: 2, 13, 46;
    --tile-end-rgb: 2, 5, 19;
    --tile-border: conic-gradient(
      #ffffff80,
      #ffffff40,
      #ffffff30,
      #ffffff20,
      #ffffff10,
      #ffffff10,
      #ffffff80
    );

    --callout-rgb: 20, 20, 20;
    --callout-border-rgb: 108, 108, 108;
    --card-rgb: 100, 100, 100;
    --card-border-rgb: 200, 200, 200;
  }
}

* {
  box-sizing: border-box;
  padding: 0;
  margin: 0;
}

html,
body {
  max-width: 100vw;
  overflow-x: hidden;
}

body {
  color: rgb(var(--foreground-rgb));
  background: linear-gradient(
      to bottom,
      transparent,
      rgb(var(--background-end-rgb))
    )
    rgb(var(--background-start-rgb));
}

a {
  color: inherit;
  text-decoration: none;
}

@media (prefers-color-scheme: dark) {
  html {
    color-scheme: dark;
  }
}

--------------------------------------------------------------------------------
/next_app/app/layout.js:
--------------------------------------------------------------------------------
import { Inter } from 'next/font/google'
import './globals.css'

const inter = Inter({ subsets: ['latin'] })

export const metadata = {
  title: 'MongoDB LlamaIndex demo',
  description: '',
}

export default function RootLayout({ children }) {
  return (
    <html lang="en">
      <body className={inter.className}>
        {children}
      </body>
    </html>
  )
}

--------------------------------------------------------------------------------
/next_app/app/page.js:
--------------------------------------------------------------------------------
'use client'

import styles from './page.module.css'
import React, { useState } from 'react';

export default function Home() {
  const [query, setQuery] = useState('');
  const [responseText, setResponseText] = useState('');

  const handleSubmit = async (event) => {
    event.preventDefault();
    setResponseText('Thinking...')

    const formData = new FormData();
    formData.append('query', query);

    const response = await fetch(process.env.NEXT_PUBLIC_API_HOST + '/process_form', {
      method: 'POST',
      body: formData,
    });

    if (response.ok) {
      const responseData = await response.json();
      console.log(responseData);
      setResponseText(responseData.response)
    } else {
      console.error('Failed to submit:', response.statusText);
    }
  };

  return (