├── .gitignore ├── LICENSE ├── README.md ├── app ├── .chainlit │ └── config.toml ├── app_chroma.py ├── app_chroma_openai.py ├── app_faiss.py ├── app_pinecone.py └── chainlit.md ├── data └── state_of_the_union.txt ├── example.env ├── ingest ├── ingest_chroma.py ├── ingest_chroma_openai.py ├── ingest_faiss.py └── ingest_pinecone.py └── requirements.txt /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
160 | #.idea/ 161 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Sudarshan Koirala 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Llama2-different-vectorstores 2 | Simple Chainlit app to have interaction with your documents using different vectorstores. 
3 | 4 | ### Chat with your documents 🚀 5 | - [LLama2 from Huggingface Website](https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/blob/main/llama-2-7b-chat.ggmlv3.q8_0.bin) as Large Language model 6 | - [LangChain](https://python.langchain.com/docs/get_started/introduction.html) as a Framework for LLM 7 | - [Chainlit](https://docs.chainlit.io/overview) for deploying. 8 | - [GGML](https://github.com/ggerganov/ggml) to run in commodity hardware (cpu) 9 | - [CTransformers](https://github.com/marella/ctransformers) to load the model. 10 | - [Embedding model from Huggingface Website](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2) 11 | 12 | ## System Requirements 13 | 14 | You must have Python 3.11 or later installed. Earlier versions of python may not compile. 15 | 16 | --- 17 | 18 | ## Steps to Replicate 19 | 20 | 1. Fork this repository and create a codespace in GitHub as I showed you in the youtube video OR Clone it locally. 21 | ``` 22 | git clone https://github.com/sudarshan-koirala/llama2-different-vectorstores.git 23 | cd llama2-different-vectorstores 24 | ``` 25 | 26 | 2. Rename example.env to .env with `cp example.env .env`and input the HuggingfaceHub API token as follows. Get HuggingfaceHub API key from this [URL](https://huggingface.co/settings/tokens). You need to create an account in Huggingface webiste if you haven't already. 27 | ``` 28 | HUGGINGFACEHUB_API_TOKEN=your_huggingface_api_token 29 | ``` 30 | 31 | Get the Pinecone API key and Env variable name from this [URL](https://platform.openai.com/account/api-keys). You need to create an account if you haven't already. 32 | ``` 33 | PINECONE_ENV=***** 34 | PINECONE_API_KEY=***** 35 | ``` 36 | 37 | OPTIONAL: If you want to test with openai models. Get the OpenAI API key from this [URL](https://platform.openai.com/account/api-keys). You need to create an account if you haven't already. 38 | ``` 39 | OPENAI_API_KEY=your_openai_api_key 40 | ``` 41 | 42 | 3. 
Create a virtualenv and activate it 43 | ``` 44 | python3 -m venv .venv && source .venv/bin/activate 45 | ``` 46 | 47 | If you have python 3.11, then the above command is fine. But, if you have python version less than 3.11. Using conda is easier. First make sure that you have conda installed. Then run the following command. 48 | ``` 49 | conda create -n .venv python=3.11 -y && source activate .venv 50 | ``` 51 | 52 | 4. Run the following command in the terminal to install necessary python packages: 53 | ``` 54 | pip install -r requirements.txt 55 | ``` 56 | 57 | 5. Create a model folder in the root directory and download the model inside the folder. 58 | ``` 59 | mkdir model && cd model 60 | wget https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/resolve/main/llama-2-7b-chat.ggmlv3.q8_0.bin 61 | ``` 62 | 63 | 64 | 5. Go to `ingest` folder and run the following command in your terminal to create the embeddings and store it locally: 65 | ``` 66 | python3 run ingest_chroma.py 67 | ``` 68 | 69 | 6. Go inside `app` folder and run the following command in your terminal to run the app UI: 70 | ``` 71 | chainlit run app_chroma.py --no-cache -w 72 | ``` 73 | 74 | **Repeat step 5 and 6 for different vectorstores.** Happy learning 😎 75 | 76 | --- 77 | ## Disclaimer 78 | This is test project and is presented in my youtube video to learn new stuffs using the openly available resources (models, libraries, framework,etc). It is not meant to be used in production as it's not production ready. You can modify the code and use for your usecases ✌️ 79 | -------------------------------------------------------------------------------- /app/.chainlit/config.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | # If true (default), the app will be available to anonymous users. 3 | # If false, users will need to authenticate and be part of the project to use the app. 
4 | public = true 5 | 6 | # The project ID (found on https://cloud.chainlit.io). 7 | # The project ID is required when public is set to false or when using the cloud database. 8 | #id = "" 9 | 10 | # Uncomment if you want to persist the chats. 11 | # local will create a database in your .chainlit directory (requires node.js installed). 12 | # cloud will use the Chainlit cloud database. 13 | # custom will load use your custom client. 14 | # database = "local" 15 | 16 | # Whether to enable telemetry (default: true). No personal data is collected. 17 | enable_telemetry = true 18 | 19 | # List of environment variables to be provided by each user to use the app. 20 | user_env = [] 21 | 22 | # Duration (in seconds) during which the session is saved when the connection is lost 23 | session_timeout = 3600 24 | 25 | [UI] 26 | # Name of the app and chatbot. 27 | name = "Chatbot" 28 | 29 | # Description of the app and chatbot. This is used for HTML tags. 30 | # description = "" 31 | 32 | # The default value for the expand messages settings. 33 | default_expand_messages = false 34 | 35 | # Hide the chain of thought details from the user in the UI. 36 | hide_cot = false 37 | 38 | # Link to your github repo. This will add a github button in the UI's header. 39 | # github = "" 40 | 41 | # Override default MUI light theme. (Check theme.ts) 42 | [UI.theme.light] 43 | #background = "#FAFAFA" 44 | #paper = "#FFFFFF" 45 | 46 | [UI.theme.light.primary] 47 | #main = "#F80061" 48 | #dark = "#980039" 49 | #light = "#FFE7EB" 50 | 51 | # Override default MUI dark theme. 
(Check theme.ts) 52 | [UI.theme.dark] 53 | #background = "#FAFAFA" 54 | #paper = "#FFFFFF" 55 | 56 | [UI.theme.dark.primary] 57 | #main = "#F80061" 58 | #dark = "#980039" 59 | #light = "#FFE7EB" 60 | 61 | 62 | [meta] 63 | generated_by = "0.6.1" 64 | -------------------------------------------------------------------------------- /app/app_chroma.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import chainlit as cl 4 | from dotenv import load_dotenv 5 | from langchain.chains import RetrievalQA 6 | from langchain.embeddings import HuggingFaceEmbeddings 7 | from langchain.llms import CTransformers 8 | from langchain.prompts import PromptTemplate 9 | from langchain.vectorstores import Chroma 10 | 11 | # Load environment variables from .env file 12 | load_dotenv() 13 | 14 | HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN") 15 | 16 | DB_CHROMA_PATH = "./../vectorstore/db_chroma" 17 | MODEL_PATH = "./../model/llama-2-7b-chat.ggmlv3.q8_0.bin" 18 | 19 | prompt_template = """Use the following pieces of context to answer the users question. 20 | If you don't know the answer, just say that you don't know, don't try to make up an answer. 21 | ALWAYS return a "SOURCES" part in your answer. 22 | The "SOURCES" part should be a reference to the source of the document from which you got your answer. 23 | Example of your response should be as follows: 24 | 25 | Context: {context} 26 | Question: {question} 27 | 28 | Only return the helpful answer below and nothing else. 
29 | Helpful answer: 30 | """ 31 | 32 | 33 | def set_custom_prompt(): 34 | """ 35 | Prompt template for QA retrieval for each vectorstore 36 | """ 37 | prompt = PromptTemplate( 38 | template=prompt_template, input_variables=["context", "question"] 39 | ) 40 | return prompt 41 | 42 | 43 | def create_retrieval_qa_chain(llm, prompt, db): 44 | """ 45 | Creates a Retrieval Question-Answering (QA) chain using a given language model, prompt, and database. 46 | 47 | This function initializes a RetrievalQA object with a specific chain type and configurations, 48 | and returns this QA chain. The retriever is set up to return the top 3 results (k=3). 49 | 50 | Args: 51 | llm (any): The language model to be used in the RetrievalQA. 52 | prompt (str): The prompt to be used in the chain type. 53 | db (any): The database to be used as the retriever. 54 | 55 | Returns: 56 | RetrievalQA: The initialized QA chain. 57 | """ 58 | qa_chain = RetrievalQA.from_chain_type( 59 | llm=llm, 60 | chain_type="stuff", 61 | retriever=db.as_retriever(search_kwargs={"k": 3}), 62 | return_source_documents=True, 63 | chain_type_kwargs={"prompt": prompt}, 64 | ) 65 | return qa_chain 66 | 67 | 68 | def load_model( 69 | model_path=MODEL_PATH, 70 | model_type="llama", 71 | max_new_tokens=512, 72 | temperature=0.7, 73 | ): 74 | """ 75 | Load a locally downloaded model. 76 | 77 | Parameters: 78 | model_path (str): The path to the model to be loaded. 79 | model_type (str): The type of the model. 80 | max_new_tokens (int): The maximum number of new tokens for the model. 81 | temperature (float): The temperature parameter for the model. 82 | 83 | Returns: 84 | CTransformers: The loaded model. 85 | 86 | Raises: 87 | FileNotFoundError: If the model file does not exist. 88 | SomeOtherException: If the model file is corrupt. 
89 | """ 90 | if not os.path.exists(model_path): 91 | raise FileNotFoundError(f"No model file found at {model_path}") 92 | 93 | # Additional error handling could be added here for corrupt files, etc. 94 | 95 | llm = CTransformers( 96 | model=model_path, 97 | model_type=model_type, 98 | max_new_tokens=max_new_tokens, # type: ignore 99 | temperature=temperature, # type: ignore 100 | ) 101 | 102 | return llm 103 | 104 | 105 | def create_retrieval_qa_bot( 106 | model_name="sentence-transformers/all-MiniLM-L6-v2", 107 | persist_dir=DB_CHROMA_PATH, 108 | device="cpu", 109 | ): 110 | """ 111 | This function creates a retrieval-based question-answering bot. 112 | 113 | Parameters: 114 | model_name (str): The name of the model to be used for embeddings. 115 | persist_dir (str): The directory to persist the database. 116 | device (str): The device to run the model on (e.g., 'cpu', 'cuda'). 117 | 118 | Returns: 119 | RetrievalQA: The retrieval-based question-answering bot. 120 | 121 | Raises: 122 | FileNotFoundError: If the persist directory does not exist. 123 | SomeOtherException: If there is an issue with loading the embeddings or the model. 
124 | """ 125 | 126 | if not os.path.exists(persist_dir): 127 | raise FileNotFoundError(f"No directory found at {persist_dir}") 128 | 129 | try: 130 | embeddings = HuggingFaceEmbeddings( 131 | model_name=model_name, 132 | model_kwargs={"device": device}, 133 | ) 134 | except Exception as e: 135 | raise Exception( 136 | f"Failed to load embeddings with model name {model_name}: {str(e)}" 137 | ) 138 | 139 | db = Chroma(persist_directory=persist_dir, embedding_function=embeddings) 140 | 141 | try: 142 | llm = load_model() # Assuming this function exists and works as expected 143 | except Exception as e: 144 | raise Exception(f"Failed to load model: {str(e)}") 145 | 146 | qa_prompt = ( 147 | set_custom_prompt() 148 | ) # Assuming this function exists and works as expected 149 | 150 | try: 151 | qa = create_retrieval_qa_chain( 152 | llm=llm, prompt=qa_prompt, db=db 153 | ) # Assuming this function exists and works as expected 154 | except Exception as e: 155 | raise Exception(f"Failed to create retrieval QA chain: {str(e)}") 156 | 157 | return qa 158 | 159 | 160 | def retrieve_bot_answer(query): 161 | """ 162 | Retrieves the answer to a given query using a QA bot. 163 | 164 | This function creates an instance of a QA bot, passes the query to it, 165 | and returns the bot's response. 166 | 167 | Args: 168 | query (str): The question to be answered by the QA bot. 169 | 170 | Returns: 171 | dict: The QA bot's response, typically a dictionary with response details. 172 | """ 173 | qa_bot_instance = create_retrieval_qa_bot() 174 | bot_response = qa_bot_instance({"query": query}) 175 | return bot_response 176 | 177 | 178 | @cl.on_chat_start 179 | async def initialize_bot(): 180 | """ 181 | Initializes the bot when a new chat starts. 182 | 183 | This asynchronous function creates a new instance of the retrieval QA bot, 184 | sends a welcome message, and stores the bot instance in the user's session. 
185 | """ 186 | qa_chain = create_retrieval_qa_bot() 187 | welcome_message = cl.Message(content="Starting the bot...") 188 | await welcome_message.send() 189 | welcome_message.content = ( 190 | "Hi, Welcome to Chat With Documents using Llama2 and LangChain." 191 | ) 192 | await welcome_message.update() 193 | 194 | cl.user_session.set("chain", qa_chain) 195 | 196 | 197 | @cl.on_message 198 | async def process_chat_message(message): 199 | """ 200 | Processes incoming chat messages. 201 | 202 | This asynchronous function retrieves the QA bot instance from the user's session, 203 | sets up a callback handler for the bot's response, and executes the bot's 204 | call method with the given message and callback. The bot's answer and source 205 | documents are then extracted from the response. 206 | """ 207 | qa_chain = cl.user_session.get("chain") 208 | callback_handler = cl.AsyncLangchainCallbackHandler( 209 | stream_final_answer=True, answer_prefix_tokens=["FINAL", "ANSWER"] 210 | ) 211 | callback_handler.answer_reached = True 212 | response = await qa_chain.acall(message, callbacks=[callback_handler]) 213 | bot_answer = response["result"] 214 | source_documents = response["source_documents"] 215 | 216 | if source_documents: 217 | bot_answer += f"\nSources:" + str(source_documents) 218 | else: 219 | bot_answer += "\nNo sources found" 220 | 221 | await cl.Message(content=bot_answer).send() 222 | -------------------------------------------------------------------------------- /app/app_chroma_openai.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import chainlit as cl 4 | from langchain.chains import RetrievalQA 5 | from langchain.chat_models import ChatOpenAI 6 | from langchain.embeddings import HuggingFaceEmbeddings, OpenAIEmbeddings 7 | from langchain.llms import CTransformers 8 | from langchain.prompts import PromptTemplate 9 | from langchain.vectorstores import Chroma 10 | 11 | DB_CHROMA_PATH = 
"./../vectorstore/db_chroma_openai" 12 | 13 | from dotenv import load_dotenv 14 | 15 | # Load environment variables from .env file 16 | load_dotenv() 17 | 18 | OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") 19 | 20 | 21 | prompt_template = """Use the following pieces of context to answer the users question. 22 | If you don't know the answer, just say that you don't know, don't try to make up an answer. 23 | ALWAYS return a "SOURCES" part in your answer. 24 | The "SOURCES" part should be a reference to the source of the document from which you got your answer. 25 | Example of your response should be as follows: 26 | 27 | Context: {context} 28 | Question: {question} 29 | 30 | Only return the helpful answer below and nothing else. 31 | Helpful answer: 32 | """ 33 | 34 | 35 | def set_custom_prompt(): 36 | """ 37 | Prompt template for QA retrieval for each vectorstore 38 | """ 39 | prompt = PromptTemplate( 40 | template=prompt_template, input_variables=["context", "question"] 41 | ) 42 | return prompt 43 | 44 | 45 | def create_retrieval_qa_chain(llm, prompt, db): 46 | """ 47 | Creates a Retrieval Question-Answering (QA) chain using a given language model, prompt, and database. 48 | 49 | This function initializes a RetrievalQA object with a specific chain type and configurations, 50 | and returns this QA chain. The retriever is set up to return the top 3 results (k=3). 51 | 52 | Args: 53 | llm (any): The language model to be used in the RetrievalQA. 54 | prompt (str): The prompt to be used in the chain type. 55 | db (any): The database to be used as the retriever. 56 | 57 | Returns: 58 | RetrievalQA: The initialized QA chain. 
59 | """ 60 | qa_chain = RetrievalQA.from_chain_type( 61 | llm=llm, 62 | chain_type="stuff", 63 | retriever=db.as_retriever(search_kwargs={"k": 3}), 64 | return_source_documents=True, 65 | chain_type_kwargs={"prompt": prompt}, 66 | ) 67 | return qa_chain 68 | 69 | 70 | # initialize openai chat model 71 | llm = ChatOpenAI(model="gpt-3.5-turbo") 72 | 73 | 74 | def create_retrieval_qa_bot( 75 | model_name="text-embedding-ada-002", persist_dir=DB_CHROMA_PATH 76 | ): 77 | """ 78 | This function creates a retrieval-based question-answering bot. 79 | 80 | Parameters: 81 | model (str): The name of the model to be used for embeddings. 82 | persist_dir (str): The directory to persist the database. 83 | 84 | Returns: 85 | RetrievalQA: The retrieval-based question-answering bot. 86 | 87 | """ 88 | 89 | if not os.path.exists(persist_dir): 90 | raise FileNotFoundError(f"No directory found at {persist_dir}") 91 | 92 | try: 93 | embeddings = OpenAIEmbeddings(model=model_name) # type: ignore 94 | except Exception as e: 95 | raise Exception( 96 | f"Failed to load embeddings with model name {model_name}: {str(e)}" 97 | ) 98 | 99 | db = Chroma(persist_directory=persist_dir, embedding_function=embeddings) 100 | 101 | qa_prompt = ( 102 | set_custom_prompt() 103 | ) # Assuming this function exists and works as expected 104 | 105 | try: 106 | qa = create_retrieval_qa_chain( 107 | llm=llm, prompt=qa_prompt, db=db 108 | ) # Assuming this function exists and works as expected 109 | except Exception as e: 110 | raise Exception(f"Failed to create retrieval QA chain: {str(e)}") 111 | 112 | return qa 113 | 114 | 115 | def retrieve_bot_answer(query): 116 | """ 117 | Retrieves the answer to a given query using a QA bot. 118 | 119 | This function creates an instance of a QA bot, passes the query to it, 120 | and returns the bot's response. 121 | 122 | Args: 123 | query (str): The question to be answered by the QA bot. 
124 | 125 | Returns: 126 | dict: The QA bot's response, typically a dictionary with response details. 127 | """ 128 | qa_bot_instance = create_retrieval_qa_bot() 129 | bot_response = qa_bot_instance({"query": query}) 130 | return bot_response 131 | 132 | 133 | @cl.on_chat_start 134 | async def initialize_bot(): 135 | """ 136 | Initializes the bot when a new chat starts. 137 | 138 | This asynchronous function creates a new instance of the retrieval QA bot, 139 | sends a welcome message, and stores the bot instance in the user's session. 140 | """ 141 | qa_chain = create_retrieval_qa_bot() 142 | welcome_message = cl.Message(content="Starting the bot...") 143 | await welcome_message.send() 144 | welcome_message.content = ( 145 | "Hi, Welcome to Chat With Documents using Llama2 and LangChain." 146 | ) 147 | await welcome_message.update() 148 | 149 | cl.user_session.set("chain", qa_chain) 150 | 151 | 152 | @cl.on_message 153 | async def process_chat_message(message): 154 | """ 155 | Processes incoming chat messages. 156 | 157 | This asynchronous function retrieves the QA bot instance from the user's session, 158 | sets up a callback handler for the bot's response, and executes the bot's 159 | call method with the given message and callback. The bot's answer and source 160 | documents are then extracted from the response. 
161 | """ 162 | qa_chain = cl.user_session.get("chain") 163 | callback_handler = cl.AsyncLangchainCallbackHandler( 164 | stream_final_answer=True, answer_prefix_tokens=["FINAL", "ANSWER"] 165 | ) 166 | callback_handler.answer_reached = True 167 | response = await qa_chain.acall(message, callbacks=[callback_handler]) 168 | bot_answer = response["result"] 169 | source_documents = response["source_documents"] 170 | 171 | if source_documents: 172 | bot_answer += f"\nSources:" + str(source_documents) 173 | else: 174 | bot_answer += "\nNo sources found" 175 | 176 | await cl.Message(content=bot_answer).send() 177 | -------------------------------------------------------------------------------- /app/app_faiss.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import chainlit as cl 4 | from dotenv import load_dotenv 5 | from langchain.chains import RetrievalQA 6 | from langchain.embeddings import HuggingFaceEmbeddings 7 | from langchain.llms import CTransformers 8 | from langchain.prompts import PromptTemplate 9 | from langchain.vectorstores import FAISS 10 | 11 | # Load environment variables from .env file 12 | load_dotenv() 13 | 14 | HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN") 15 | 16 | DB_FAISS_PATH = "./../vectorstore/db_faiss" 17 | MODEL_PATH = "./../model/llama-2-7b-chat.ggmlv3.q8_0.bin" 18 | 19 | 20 | prompt_template = """Use the following pieces of context to answer the users question. 21 | If you don't know the answer, just say that you don't know, don't try to make up an answer. 22 | ALWAYS return a "SOURCES" part in your answer. 23 | The "SOURCES" part should be a reference to the source of the document from which you got your answer. 24 | Example of your response should be as follows: 25 | 26 | Context: {context} 27 | Question: {question} 28 | 29 | Only return the helpful answer below and nothing else. 
30 | Helpful answer: 31 | """ 32 | 33 | 34 | def set_custom_prompt(): 35 | """ 36 | Prompt template for QA retrieval for each vectorstore 37 | """ 38 | prompt = PromptTemplate( 39 | template=prompt_template, input_variables=["context", "question"] 40 | ) 41 | return prompt 42 | 43 | 44 | def create_retrieval_qa_chain(llm, prompt, db): 45 | """ 46 | Creates a Retrieval Question-Answering (QA) chain using a given language model, prompt, and database. 47 | 48 | This function initializes a RetrievalQA object with a specific chain type and configurations, 49 | and returns this QA chain. The retriever is set up to return the top 3 results (k=3). 50 | 51 | Args: 52 | llm (any): The language model to be used in the RetrievalQA. 53 | prompt (str): The prompt to be used in the chain type. 54 | db (any): The database to be used as the retriever. 55 | 56 | Returns: 57 | RetrievalQA: The initialized QA chain. 58 | """ 59 | qa_chain = RetrievalQA.from_chain_type( 60 | llm=llm, 61 | chain_type="stuff", 62 | retriever=db.as_retriever(search_kwargs={"k": 3}), 63 | return_source_documents=True, 64 | chain_type_kwargs={"prompt": prompt}, 65 | ) 66 | return qa_chain 67 | 68 | 69 | def load_model( 70 | model_path=MODEL_PATH, 71 | model_type="llama", 72 | max_new_tokens=512, 73 | temperature=0.7, 74 | ): 75 | """ 76 | Load a locally downloaded model. 77 | 78 | Parameters: 79 | model_path (str): The path to the model to be loaded. 80 | model_type (str): The type of the model. 81 | max_new_tokens (int): The maximum number of new tokens for the model. 82 | temperature (float): The temperature parameter for the model. 83 | 84 | Returns: 85 | CTransformers: The loaded model. 86 | 87 | Raises: 88 | FileNotFoundError: If the model file does not exist. 89 | SomeOtherException: If the model file is corrupt. 
90 | """ 91 | if not os.path.exists(model_path): 92 | raise FileNotFoundError(f"No model file found at {model_path}") 93 | 94 | # Additional error handling could be added here for corrupt files, etc. 95 | 96 | llm = CTransformers( 97 | model=model_path, 98 | model_type=model_type, 99 | max_new_tokens=max_new_tokens, # type: ignore 100 | temperature=temperature, # type: ignore 101 | ) 102 | 103 | return llm 104 | 105 | 106 | def create_retrieval_qa_bot( 107 | model_name="sentence-transformers/all-MiniLM-L6-v2", 108 | persist_dir=DB_FAISS_PATH, 109 | device="cpu", 110 | ): 111 | """ 112 | This function creates a retrieval-based question-answering bot. 113 | 114 | Parameters: 115 | model_name (str): The name of the model to be used for embeddings. 116 | persist_dir (str): The directory to persist the database. 117 | device (str): The device to run the model on (e.g., 'cpu', 'cuda'). 118 | 119 | Returns: 120 | RetrievalQA: The retrieval-based question-answering bot. 121 | 122 | Raises: 123 | FileNotFoundError: If the persist directory does not exist. 124 | SomeOtherException: If there is an issue with loading the embeddings or the model. 
125 | """ 126 | 127 | if not os.path.exists(persist_dir): 128 | raise FileNotFoundError(f"No directory found at {persist_dir}") 129 | 130 | try: 131 | embeddings = HuggingFaceEmbeddings( 132 | model_name=model_name, 133 | model_kwargs={"device": device}, 134 | ) 135 | except Exception as e: 136 | raise Exception( 137 | f"Failed to load embeddings with model name {model_name}: {str(e)}" 138 | ) 139 | 140 | db = FAISS.load_local(folder_path=DB_FAISS_PATH, embeddings=embeddings) 141 | 142 | try: 143 | llm = load_model() # Assuming this function exists and works as expected 144 | except Exception as e: 145 | raise Exception(f"Failed to load model: {str(e)}") 146 | 147 | qa_prompt = ( 148 | set_custom_prompt() 149 | ) # Assuming this function exists and works as expected 150 | 151 | try: 152 | qa = create_retrieval_qa_chain( 153 | llm=llm, prompt=qa_prompt, db=db 154 | ) # Assuming this function exists and works as expected 155 | except Exception as e: 156 | raise Exception(f"Failed to create retrieval QA chain: {str(e)}") 157 | 158 | return qa 159 | 160 | 161 | def retrieve_bot_answer(query): 162 | """ 163 | Retrieves the answer to a given query using a QA bot. 164 | 165 | This function creates an instance of a QA bot, passes the query to it, 166 | and returns the bot's response. 167 | 168 | Args: 169 | query (str): The question to be answered by the QA bot. 170 | 171 | Returns: 172 | dict: The QA bot's response, typically a dictionary with response details. 173 | """ 174 | qa_bot_instance = create_retrieval_qa_bot() 175 | bot_response = qa_bot_instance({"query": query}) 176 | return bot_response 177 | 178 | 179 | @cl.on_chat_start 180 | async def initialize_bot(): 181 | """ 182 | Initializes the bot when a new chat starts. 183 | 184 | This asynchronous function creates a new instance of the retrieval QA bot, 185 | sends a welcome message, and stores the bot instance in the user's session. 
186 | """ 187 | qa_chain = create_retrieval_qa_bot() 188 | welcome_message = cl.Message(content="Starting the bot...") 189 | await welcome_message.send() 190 | welcome_message.content = ( 191 | "Hi, Welcome to Chat With Documents using Llama2 and LangChain." 192 | ) 193 | await welcome_message.update() 194 | 195 | cl.user_session.set("chain", qa_chain) 196 | 197 | 198 | @cl.on_message 199 | async def process_chat_message(message): 200 | """ 201 | Processes incoming chat messages. 202 | 203 | This asynchronous function retrieves the QA bot instance from the user's session, 204 | sets up a callback handler for the bot's response, and executes the bot's 205 | call method with the given message and callback. The bot's answer and source 206 | documents are then extracted from the response. 207 | """ 208 | qa_chain = cl.user_session.get("chain") 209 | callback_handler = cl.AsyncLangchainCallbackHandler( 210 | stream_final_answer=True, answer_prefix_tokens=["FINAL", "ANSWER"] 211 | ) 212 | callback_handler.answer_reached = True 213 | response = await qa_chain.acall(message, callbacks=[callback_handler]) 214 | bot_answer = response["result"] 215 | source_documents = response["source_documents"] 216 | 217 | if source_documents: 218 | bot_answer += f"\nSources:" + str(source_documents) 219 | else: 220 | bot_answer += "\nNo sources found" 221 | 222 | await cl.Message(content=bot_answer).send() 223 | -------------------------------------------------------------------------------- /app/app_pinecone.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import chainlit as cl 4 | import pinecone 5 | from dotenv import load_dotenv 6 | from langchain.chains import RetrievalQA 7 | from langchain.embeddings import HuggingFaceEmbeddings 8 | from langchain.llms import CTransformers 9 | from langchain.prompts import PromptTemplate 10 | from langchain.vectorstores import Pinecone 11 | 12 | # Load environment variables from .env file 13 
| load_dotenv() 14 | 15 | PINECONE_API_KEY = os.getenv("PINECONE_API_KEY") 16 | PINECONE_ENV = os.getenv("PINECONE_ENV") 17 | 18 | # initialize pinecone 19 | pinecone.init( 20 | api_key=PINECONE_API_KEY, # type: ignore 21 | environment=PINECONE_ENV, # type: ignore 22 | ) 23 | 24 | MODEL_PATH = "./../model/llama-2-7b-chat.ggmlv3.q8_0.bin" 25 | 26 | index = pinecone.Index("llama2-langchain") 27 | 28 | prompt_template = """Use the following pieces of context to answer the users question. 29 | If you don't know the answer, just say that you don't know, don't try to make up an answer. 30 | ALWAYS return a "SOURCES" part in your answer. 31 | The "SOURCES" part should be a reference to the source of the document from which you got your answer. 32 | Example of your response should be as follows: 33 | 34 | Context: {context} 35 | Question: {question} 36 | 37 | Only return the helpful answer below and nothing else. 38 | Helpful answer: 39 | """ 40 | 41 | 42 | def set_custom_prompt(): 43 | """ 44 | Prompt template for QA retrieval for each vectorstore 45 | """ 46 | prompt = PromptTemplate( 47 | template=prompt_template, input_variables=["context", "question"] 48 | ) 49 | return prompt 50 | 51 | 52 | def create_retrieval_qa_chain(llm, prompt, db): 53 | """ 54 | Creates a Retrieval Question-Answering (QA) chain using a given language model, prompt, and database. 55 | 56 | This function initializes a RetrievalQA object with a specific chain type and configurations, 57 | and returns this QA chain. The retriever is set up to return the top 3 results (k=3). 58 | 59 | Args: 60 | llm (any): The language model to be used in the RetrievalQA. 61 | prompt (str): The prompt to be used in the chain type. 62 | db (any): The database to be used as the retriever. 63 | 64 | Returns: 65 | RetrievalQA: The initialized QA chain. 
66 | """ 67 | qa_chain = RetrievalQA.from_chain_type( 68 | llm=llm, 69 | chain_type="stuff", 70 | retriever=db.as_retriever(search_kwargs={"k": 3}), 71 | return_source_documents=True, 72 | chain_type_kwargs={"prompt": prompt}, 73 | ) 74 | return qa_chain 75 | 76 | 77 | def load_model( 78 | model_path=MODEL_PATH, 79 | model_type="llama", 80 | max_new_tokens=512, 81 | temperature=0.7, 82 | ): 83 | """ 84 | Load a locally downloaded model. 85 | 86 | Parameters: 87 | model_path (str): The path to the model to be loaded. 88 | model_type (str): The type of the model. 89 | max_new_tokens (int): The maximum number of new tokens for the model. 90 | temperature (float): The temperature parameter for the model. 91 | 92 | Returns: 93 | CTransformers: The loaded model. 94 | 95 | Raises: 96 | FileNotFoundError: If the model file does not exist. 97 | SomeOtherException: If the model file is corrupt. 98 | """ 99 | if not os.path.exists(model_path): 100 | raise FileNotFoundError(f"No model file found at {model_path}") 101 | 102 | # Additional error handling could be added here for corrupt files, etc. 103 | 104 | llm = CTransformers( 105 | model=model_path, 106 | model_type=model_type, 107 | max_new_tokens=max_new_tokens, # type: ignore 108 | temperature=temperature, # type: ignore 109 | ) 110 | 111 | return llm 112 | 113 | 114 | def create_retrieval_qa_bot( 115 | model_name="sentence-transformers/all-MiniLM-L6-v2", 116 | index_name="llama2-langchain", 117 | device="cpu", 118 | ): 119 | """ 120 | This function creates a retrieval-based question-answering bot. 121 | 122 | Parameters: 123 | model_name (str): The name of the model to be used for embeddings. 124 | persist_dir (str): The directory to persist the database. 125 | device (str): The device to run the model on (e.g., 'cpu', 'cuda'). 126 | 127 | Returns: 128 | RetrievalQA: The retrieval-based question-answering bot. 129 | 130 | Raises: 131 | FileNotFoundError: If the persist directory does not exist. 
132 | SomeOtherException: If there is an issue with loading the embeddings or the model. 133 | """ 134 | 135 | try: 136 | embeddings = HuggingFaceEmbeddings( 137 | model_name=model_name, 138 | model_kwargs={"device": device}, 139 | ) 140 | except Exception as e: 141 | raise Exception( 142 | f"Failed to load embeddings with model name {model_name}: {str(e)}" 143 | ) 144 | 145 | db = Pinecone.from_existing_index(index_name=index_name, embedding=embeddings) 146 | 147 | try: 148 | llm = load_model() # Assuming this function exists and works as expected 149 | except Exception as e: 150 | raise Exception(f"Failed to load model: {str(e)}") 151 | 152 | qa_prompt = ( 153 | set_custom_prompt() 154 | ) # Assuming this function exists and works as expected 155 | 156 | try: 157 | qa = create_retrieval_qa_chain( 158 | llm=llm, prompt=qa_prompt, db=db 159 | ) # Assuming this function exists and works as expected 160 | except Exception as e: 161 | raise Exception(f"Failed to create retrieval QA chain: {str(e)}") 162 | 163 | return qa 164 | 165 | 166 | def retrieve_bot_answer(query): 167 | """ 168 | Retrieves the answer to a given query using a QA bot. 169 | 170 | This function creates an instance of a QA bot, passes the query to it, 171 | and returns the bot's response. 172 | 173 | Args: 174 | query (str): The question to be answered by the QA bot. 175 | 176 | Returns: 177 | dict: The QA bot's response, typically a dictionary with response details. 178 | """ 179 | qa_bot_instance = create_retrieval_qa_bot() 180 | bot_response = qa_bot_instance({"query": query}) 181 | return bot_response 182 | 183 | 184 | @cl.on_chat_start 185 | async def initialize_bot(): 186 | """ 187 | Initializes the bot when a new chat starts. 188 | 189 | This asynchronous function creates a new instance of the retrieval QA bot, 190 | sends a welcome message, and stores the bot instance in the user's session. 
191 | """ 192 | qa_chain = create_retrieval_qa_bot() 193 | welcome_message = cl.Message(content="Starting the bot...") 194 | await welcome_message.send() 195 | welcome_message.content = ( 196 | "Hi, Welcome to Chat With Documents using Llama2 and LangChain." 197 | ) 198 | await welcome_message.update() 199 | 200 | cl.user_session.set("chain", qa_chain) 201 | 202 | 203 | @cl.on_message 204 | async def process_chat_message(message): 205 | """ 206 | Processes incoming chat messages. 207 | 208 | This asynchronous function retrieves the QA bot instance from the user's session, 209 | sets up a callback handler for the bot's response, and executes the bot's 210 | call method with the given message and callback. The bot's answer and source 211 | documents are then extracted from the response. 212 | """ 213 | qa_chain = cl.user_session.get("chain") 214 | callback_handler = cl.AsyncLangchainCallbackHandler( 215 | stream_final_answer=True, answer_prefix_tokens=["FINAL", "ANSWER"] 216 | ) 217 | callback_handler.answer_reached = True 218 | response = await qa_chain.acall(message, callbacks=[callback_handler]) 219 | bot_answer = response["result"] 220 | source_documents = response["source_documents"] 221 | 222 | if source_documents: 223 | bot_answer += f"\nSources:" + str(source_documents) 224 | else: 225 | bot_answer += "\nNo sources found" 226 | 227 | await cl.Message(content=bot_answer).send() 228 | -------------------------------------------------------------------------------- /app/chainlit.md: -------------------------------------------------------------------------------- 1 | # Welcome to Chainlit! 🚀🤖 2 | 3 | Hi, This is a simple Chainlit app to have chat with your documents. 
4 | 5 | ## Useful Links 🔗 6 | 7 | **Youtube Playlist** 8 | Get started with [Chainlit Playlist](https://youtube.com/playlist?list=PLz-qytj7eIWWNnbCRxflmRbYI02jZeG0k) 🎥 9 | Get started with [LangChain Playlist](https://youtube.com/playlist?list=PLz-qytj7eIWVd1a5SsQ1dzOjVDHdgC1Ck) 🎥 10 | 11 | Happy coding! 💻😊 12 | 13 | ## If you want to support 14 | - [Buy me a coffee](https://ko-fi.com/datasciencebasics) && [Patreon](https://www.patreon.com/datasciencebasics) 15 | 16 | -------------------------------------------------------------------------------- /data/state_of_the_union.txt: -------------------------------------------------------------------------------- 1 | Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. 2 | 3 | Last year COVID-19 kept us apart. This year we are finally together again. 4 | 5 | Tonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. 6 | 7 | With a duty to one another to the American people to the Constitution. 8 | 9 | And with an unwavering resolve that freedom will always triumph over tyranny. 10 | 11 | Six days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. 12 | 13 | He thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. 14 | 15 | He met the Ukrainian people. 16 | 17 | From President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. 18 | 19 | Groups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland. 
20 | 21 | In this struggle as President Zelenskyy said in his speech to the European Parliament “Light will win over darkness.” The Ukrainian Ambassador to the United States is here tonight. 22 | 23 | Let each of us here tonight in this Chamber send an unmistakable signal to Ukraine and to the world. 24 | 25 | Please rise if you are able and show that, Yes, we the United States of America stand with the Ukrainian people. 26 | 27 | Throughout our history we’ve learned this lesson when dictators do not pay a price for their aggression they cause more chaos. 28 | 29 | They keep moving. 30 | 31 | And the costs and the threats to America and the world keep rising. 32 | 33 | That’s why the NATO Alliance was created to secure peace and stability in Europe after World War 2. 34 | 35 | The United States is a member along with 29 other nations. 36 | 37 | It matters. American diplomacy matters. American resolve matters. 38 | 39 | Putin’s latest attack on Ukraine was premeditated and unprovoked. 40 | 41 | He rejected repeated efforts at diplomacy. 42 | 43 | He thought the West and NATO wouldn’t respond. And he thought he could divide us at home. Putin was wrong. We were ready. Here is what we did. 44 | 45 | We prepared extensively and carefully. 46 | 47 | We spent months building a coalition of other freedom-loving nations from Europe and the Americas to Asia and Africa to confront Putin. 48 | 49 | I spent countless hours unifying our European allies. We shared with the world in advance what we knew Putin was planning and precisely how he would try to falsely justify his aggression. 50 | 51 | We countered Russia’s lies with truth. 52 | 53 | And now that he has acted the free world is holding him accountable. 54 | 55 | Along with twenty-seven members of the European Union including France, Germany, Italy, as well as countries like the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and many others, even Switzerland. 
56 | 57 | We are inflicting pain on Russia and supporting the people of Ukraine. Putin is now isolated from the world more than ever. 58 | 59 | Together with our allies –we are right now enforcing powerful economic sanctions. 60 | 61 | We are cutting off Russia’s largest banks from the international financial system. 62 | 63 | Preventing Russia’s central bank from defending the Russian Ruble making Putin’s $630 Billion “war fund” worthless. 64 | 65 | We are choking off Russia’s access to technology that will sap its economic strength and weaken its military for years to come. 66 | 67 | Tonight I say to the Russian oligarchs and corrupt leaders who have bilked billions of dollars off this violent regime no more. 68 | 69 | The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs. 70 | 71 | We are joining with our European allies to find and seize your yachts your luxury apartments your private jets. We are coming for your ill-begotten gains. 72 | 73 | And tonight I am announcing that we will join our allies in closing off American air space to all Russian flights – further isolating Russia – and adding an additional squeeze –on their economy. The Ruble has lost 30% of its value. 74 | 75 | The Russian stock market has lost 40% of its value and trading remains suspended. Russia’s economy is reeling and Putin alone is to blame. 76 | 77 | Together with our allies we are providing support to the Ukrainians in their fight for freedom. Military assistance. Economic assistance. Humanitarian assistance. 78 | 79 | We are giving more than $1 Billion in direct assistance to Ukraine. 80 | 81 | And we will continue to aid the Ukrainian people as they defend their country and to help ease their suffering. 82 | 83 | Let me be clear, our forces are not engaged and will not engage in conflict with Russian forces in Ukraine. 
84 | 85 | Our forces are not going to Europe to fight in Ukraine, but to defend our NATO Allies – in the event that Putin decides to keep moving west. 86 | 87 | For that purpose we’ve mobilized American ground forces, air squadrons, and ship deployments to protect NATO countries including Poland, Romania, Latvia, Lithuania, and Estonia. 88 | 89 | As I have made crystal clear the United States and our Allies will defend every inch of territory of NATO countries with the full force of our collective power. 90 | 91 | And we remain clear-eyed. The Ukrainians are fighting back with pure courage. But the next few days weeks, months, will be hard on them. 92 | 93 | Putin has unleashed violence and chaos. But while he may make gains on the battlefield – he will pay a continuing high price over the long run. 94 | 95 | And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. 96 | 97 | To all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. 98 | 99 | And I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. 100 | 101 | Tonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. 102 | 103 | America will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. 104 | 105 | These steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. 106 | 107 | But I want you to know that we are going to be okay. 
108 | 109 | When the history of this era is written Putin’s war on Ukraine will have left Russia weaker and the rest of the world stronger. 110 | 111 | While it shouldn’t have taken something so terrible for people around the world to see what’s at stake now everyone sees it clearly. 112 | 113 | We see the unity among leaders of nations and a more unified Europe a more unified West. And we see unity among the people who are gathering in cities in large crowds around the world even in Russia to demonstrate their support for Ukraine. 114 | 115 | In the battle between democracy and autocracy, democracies are rising to the moment, and the world is clearly choosing the side of peace and security. 116 | 117 | This is a real test. It’s going to take time. So let us continue to draw inspiration from the iron will of the Ukrainian people. 118 | 119 | To our fellow Ukrainian Americans who forge a deep bond that connects our two nations we stand with you. 120 | 121 | Putin may circle Kyiv with tanks, but he will never gain the hearts and souls of the Ukrainian people. 122 | 123 | He will never extinguish their love of freedom. He will never weaken the resolve of the free world. 124 | 125 | We meet tonight in an America that has lived through two of the hardest years this nation has ever faced. 126 | 127 | The pandemic has been punishing. 128 | 129 | And so many families are living paycheck to paycheck, struggling to keep up with the rising cost of food, gas, housing, and so much more. 130 | 131 | I understand. 132 | 133 | I remember when my Dad had to leave our home in Scranton, Pennsylvania to find work. I grew up in a family where if the price of food went up, you felt it. 134 | 135 | That’s why one of the first things I did as President was fight to pass the American Rescue Plan. 136 | 137 | Because people were hurting. We needed to act, and we did. 138 | 139 | Few pieces of legislation have done more in a critical moment in our history to lift us out of crisis. 
140 | 141 | It fueled our efforts to vaccinate the nation and combat COVID-19. It delivered immediate economic relief for tens of millions of Americans. 142 | 143 | Helped put food on their table, keep a roof over their heads, and cut the cost of health insurance. 144 | 145 | And as my Dad used to say, it gave people a little breathing room. 146 | 147 | And unlike the $2 Trillion tax cut passed in the previous administration that benefitted the top 1% of Americans, the American Rescue Plan helped working people—and left no one behind. 148 | 149 | And it worked. It created jobs. Lots of jobs. 150 | 151 | In fact—our economy created over 6.5 Million new jobs just last year, more jobs created in one year 152 | than ever before in the history of America. 153 | 154 | Our economy grew at a rate of 5.7% last year, the strongest growth in nearly 40 years, the first step in bringing fundamental change to an economy that hasn’t worked for the working people of this nation for too long. 155 | 156 | For the past 40 years we were told that if we gave tax breaks to those at the very top, the benefits would trickle down to everyone else. 157 | 158 | But that trickle-down theory led to weaker economic growth, lower wages, bigger deficits, and the widest gap between those at the top and everyone else in nearly a century. 159 | 160 | Vice President Harris and I ran for office with a new economic vision for America. 161 | 162 | Invest in America. Educate Americans. Grow the workforce. Build the economy from the bottom up 163 | and the middle out, not from the top down. 164 | 165 | Because we know that when the middle class grows, the poor have a ladder up and the wealthy do very well. 166 | 167 | America used to have the best roads, bridges, and airports on Earth. 168 | 169 | Now our infrastructure is ranked 13th in the world. 170 | 171 | We won’t be able to compete for the jobs of the 21st Century if we don’t fix that. 
172 | 173 | That’s why it was so important to pass the Bipartisan Infrastructure Law—the most sweeping investment to rebuild America in history. 174 | 175 | This was a bipartisan effort, and I want to thank the members of both parties who worked to make it happen. 176 | 177 | We’re done talking about infrastructure weeks. 178 | 179 | We’re going to have an infrastructure decade. 180 | 181 | It is going to transform America and put us on a path to win the economic competition of the 21st Century that we face with the rest of the world—particularly with China. 182 | 183 | As I’ve told Xi Jinping, it is never a good bet to bet against the American people. 184 | 185 | We’ll create good jobs for millions of Americans, modernizing roads, airports, ports, and waterways all across America. 186 | 187 | And we’ll do it all to withstand the devastating effects of the climate crisis and promote environmental justice. 188 | 189 | We’ll build a national network of 500,000 electric vehicle charging stations, begin to replace poisonous lead pipes—so every child—and every American—has clean water to drink at home and at school, provide affordable high-speed internet for every American—urban, suburban, rural, and tribal communities. 190 | 191 | 4,000 projects have already been announced. 192 | 193 | And tonight, I’m announcing that this year we will start fixing over 65,000 miles of highway and 1,500 bridges in disrepair. 194 | 195 | When we use taxpayer dollars to rebuild America – we are going to Buy American: buy American products to support American jobs. 196 | 197 | The federal government spends about $600 Billion a year to keep the country safe and secure. 198 | 199 | There’s been a law on the books for almost a century 200 | to make sure taxpayers’ dollars support American jobs and businesses. 201 | 202 | Every Administration says they’ll do it, but we are actually doing it. 
203 | 204 | We will buy American to make sure everything from the deck of an aircraft carrier to the steel on highway guardrails are made in America. 205 | 206 | But to compete for the best jobs of the future, we also need to level the playing field with China and other competitors. 207 | 208 | That’s why it is so important to pass the Bipartisan Innovation Act sitting in Congress that will make record investments in emerging technologies and American manufacturing. 209 | 210 | Let me give you one example of why it’s so important to pass it. 211 | 212 | If you travel 20 miles east of Columbus, Ohio, you’ll find 1,000 empty acres of land. 213 | 214 | It won’t look like much, but if you stop and look closely, you’ll see a “Field of dreams,” the ground on which America’s future will be built. 215 | 216 | This is where Intel, the American company that helped build Silicon Valley, is going to build its $20 billion semiconductor “mega site”. 217 | 218 | Up to eight state-of-the-art factories in one place. 10,000 new good-paying jobs. 219 | 220 | Some of the most sophisticated manufacturing in the world to make computer chips the size of a fingertip that power the world and our everyday lives. 221 | 222 | Smartphones. The Internet. Technology we have yet to invent. 223 | 224 | But that’s just the beginning. 225 | 226 | Intel’s CEO, Pat Gelsinger, who is here tonight, told me they are ready to increase their investment from 227 | $20 billion to $100 billion. 228 | 229 | That would be one of the biggest investments in manufacturing in American history. 230 | 231 | And all they’re waiting for is for you to pass this bill. 232 | 233 | So let’s not wait any longer. Send it to my desk. I’ll sign it. 234 | 235 | And we will really take off. 236 | 237 | And Intel is not alone. 238 | 239 | There’s something happening in America. 240 | 241 | Just look around and you’ll see an amazing story. 
242 | 243 | The rebirth of the pride that comes from stamping products “Made In America.” The revitalization of American manufacturing. 244 | 245 | Companies are choosing to build new factories here, when just a few years ago, they would have built them overseas. 246 | 247 | That’s what is happening. Ford is investing $11 billion to build electric vehicles, creating 11,000 jobs across the country. 248 | 249 | GM is making the largest investment in its history—$7 billion to build electric vehicles, creating 4,000 jobs in Michigan. 250 | 251 | All told, we created 369,000 new manufacturing jobs in America just last year. 252 | 253 | Powered by people I’ve met like JoJo Burgess, from generations of union steelworkers from Pittsburgh, who’s here with us tonight. 254 | 255 | As Ohio Senator Sherrod Brown says, “It’s time to bury the label “Rust Belt.” 256 | 257 | It’s time. 258 | 259 | But with all the bright spots in our economy, record job growth and higher wages, too many families are struggling to keep up with the bills. 260 | 261 | Inflation is robbing them of the gains they might otherwise feel. 262 | 263 | I get it. That’s why my top priority is getting prices under control. 264 | 265 | Look, our economy roared back faster than most predicted, but the pandemic meant that businesses had a hard time hiring enough workers to keep up production in their factories. 266 | 267 | The pandemic also disrupted global supply chains. 268 | 269 | When factories close, it takes longer to make goods and get them from the warehouse to the store, and prices go up. 270 | 271 | Look at cars. 272 | 273 | Last year, there weren’t enough semiconductors to make all the cars that people wanted to buy. 274 | 275 | And guess what, prices of automobiles went up. 276 | 277 | So—we have a choice. 278 | 279 | One way to fight inflation is to drive down wages and make Americans poorer. 280 | 281 | I have a better plan to fight inflation. 282 | 283 | Lower your costs, not your wages. 
284 | 285 | Make more cars and semiconductors in America. 286 | 287 | More infrastructure and innovation in America. 288 | 289 | More goods moving faster and cheaper in America. 290 | 291 | More jobs where you can earn a good living in America. 292 | 293 | And instead of relying on foreign supply chains, let’s make it in America. 294 | 295 | Economists call it “increasing the productive capacity of our economy.” 296 | 297 | I call it building a better America. 298 | 299 | My plan to fight inflation will lower your costs and lower the deficit. 300 | 301 | 17 Nobel laureates in economics say my plan will ease long-term inflationary pressures. Top business leaders and most Americans support my plan. And here’s the plan: 302 | 303 | First – cut the cost of prescription drugs. Just look at insulin. One in ten Americans has diabetes. In Virginia, I met a 13-year-old boy named Joshua Davis. 304 | 305 | He and his Dad both have Type 1 diabetes, which means they need insulin every day. Insulin costs about $10 a vial to make. 306 | 307 | But drug companies charge families like Joshua and his Dad up to 30 times more. I spoke with Joshua’s mom. 308 | 309 | Imagine what it’s like to look at your child who needs insulin and have no idea how you’re going to pay for it. 310 | 311 | What it does to your dignity, your ability to look your child in the eye, to be the parent you expect to be. 312 | 313 | Joshua is here with us tonight. Yesterday was his birthday. Happy birthday, buddy. 314 | 315 | For Joshua, and for the 200,000 other young people with Type 1 diabetes, let’s cap the cost of insulin at $35 a month so everyone can afford it. 316 | 317 | Drug companies will still do very well. And while we’re at it let Medicare negotiate lower prices for prescription drugs, like the VA already does. 318 | 319 | Look, the American Rescue Plan is helping millions of families on Affordable Care Act plans save $2,400 a year on their health care premiums. 
Let’s close the coverage gap and make those savings permanent. 320 | 321 | Second – cut energy costs for families an average of $500 a year by combatting climate change. 322 | 323 | Let’s provide investments and tax credits to weatherize your homes and businesses to be energy efficient and you get a tax credit; double America’s clean energy production in solar, wind, and so much more; lower the price of electric vehicles, saving you another $80 a month because you’ll never have to pay at the gas pump again. 324 | 325 | Third – cut the cost of child care. Many families pay up to $14,000 a year for child care per child. 326 | 327 | Middle-class and working families shouldn’t have to pay more than 7% of their income for care of young children. 328 | 329 | My plan will cut the cost in half for most families and help parents, including millions of women, who left the workforce during the pandemic because they couldn’t afford child care, to be able to get back to work. 330 | 331 | My plan doesn’t stop there. It also includes home and long-term care. More affordable housing. And Pre-K for every 3- and 4-year-old. 332 | 333 | All of these will lower costs. 334 | 335 | And under my plan, nobody earning less than $400,000 a year will pay an additional penny in new taxes. Nobody. 336 | 337 | The one thing all Americans agree on is that the tax system is not fair. We have to fix it. 338 | 339 | I’m not looking to punish anyone. But let’s make sure corporations and the wealthiest Americans start paying their fair share. 340 | 341 | Just last year, 55 Fortune 500 corporations earned $40 billion in profits and paid zero dollars in federal income tax. 342 | 343 | That’s simply not fair. That’s why I’ve proposed a 15% minimum tax rate for corporations. 344 | 345 | We got more than 130 countries to agree on a global minimum tax rate so companies can’t get out of paying their taxes at home by shipping jobs and factories overseas. 
346 | 347 | That’s why I’ve proposed closing loopholes so the very wealthy don’t pay a lower tax rate than a teacher or a firefighter. 348 | 349 | So that’s my plan. It will grow the economy and lower costs for families. 350 | 351 | So what are we waiting for? Let’s get this done. And while you’re at it, confirm my nominees to the Federal Reserve, which plays a critical role in fighting inflation. 352 | 353 | My plan will not only lower costs to give families a fair shot, it will lower the deficit. 354 | 355 | The previous Administration not only ballooned the deficit with tax cuts for the very wealthy and corporations, it undermined the watchdogs whose job was to keep pandemic relief funds from being wasted. 356 | 357 | But in my administration, the watchdogs have been welcomed back. 358 | 359 | We’re going after the criminals who stole billions in relief money meant for small businesses and millions of Americans. 360 | 361 | And tonight, I’m announcing that the Justice Department will name a chief prosecutor for pandemic fraud. 362 | 363 | By the end of this year, the deficit will be down to less than half what it was before I took office. 364 | 365 | The only president ever to cut the deficit by more than one trillion dollars in a single year. 366 | 367 | Lowering your costs also means demanding more competition. 368 | 369 | I’m a capitalist, but capitalism without competition isn’t capitalism. 370 | 371 | It’s exploitation—and it drives up prices. 372 | 373 | When corporations don’t have to compete, their profits go up, your prices go up, and small businesses and family farmers and ranchers go under. 374 | 375 | We see it happening with ocean carriers moving goods in and out of America. 376 | 377 | During the pandemic, these foreign-owned companies raised prices by as much as 1,000% and made record profits. 378 | 379 | Tonight, I’m announcing a crackdown on these companies overcharging American businesses and consumers. 
380 | 381 | And as Wall Street firms take over more nursing homes, quality in those homes has gone down and costs have gone up. 382 | 383 | That ends on my watch. 384 | 385 | Medicare is going to set higher standards for nursing homes and make sure your loved ones get the care they deserve and expect. 386 | 387 | We’ll also cut costs and keep the economy going strong by giving workers a fair shot, provide more training and apprenticeships, hire them based on their skills not degrees. 388 | 389 | Let’s pass the Paycheck Fairness Act and paid leave. 390 | 391 | Raise the minimum wage to $15 an hour and extend the Child Tax Credit, so no one has to raise a family in poverty. 392 | 393 | Let’s increase Pell Grants and increase our historic support of HBCUs, and invest in what Jill—our First Lady who teaches full-time—calls America’s best-kept secret: community colleges. 394 | 395 | And let’s pass the PRO Act when a majority of workers want to form a union—they shouldn’t be stopped. 396 | 397 | When we invest in our workers, when we build the economy from the bottom up and the middle out together, we can do something we haven’t done in a long time: build a better America. 398 | 399 | For more than two years, COVID-19 has impacted every decision in our lives and the life of the nation. 400 | 401 | And I know you’re tired, frustrated, and exhausted. 402 | 403 | But I also know this. 404 | 405 | Because of the progress we’ve made, because of your resilience and the tools we have, tonight I can say 406 | we are moving forward safely, back to more normal routines. 407 | 408 | We’ve reached a new moment in the fight against COVID-19, with severe cases down to a level not seen since last July. 409 | 410 | Just a few days ago, the Centers for Disease Control and Prevention—the CDC—issued new mask guidelines. 411 | 412 | Under these new guidelines, most Americans in most of the country can now be mask free. 
413 | 414 | And based on the projections, more of the country will reach that point across the next couple of weeks. 415 | 416 | Thanks to the progress we have made this past year, COVID-19 need no longer control our lives. 417 | 418 | I know some are talking about “living with COVID-19”. Tonight – I say that we will never just accept living with COVID-19. 419 | 420 | We will continue to combat the virus as we do other diseases. And because this is a virus that mutates and spreads, we will stay on guard. 421 | 422 | Here are four common sense steps as we move forward safely. 423 | 424 | First, stay protected with vaccines and treatments. We know how incredibly effective vaccines are. If you’re vaccinated and boosted you have the highest degree of protection. 425 | 426 | We will never give up on vaccinating more Americans. Now, I know parents with kids under 5 are eager to see a vaccine authorized for their children. 427 | 428 | The scientists are working hard to get that done and we’ll be ready with plenty of vaccines when they do. 429 | 430 | We’re also ready with anti-viral treatments. If you get COVID-19, the Pfizer pill reduces your chances of ending up in the hospital by 90%. 431 | 432 | We’ve ordered more of these pills than anyone in the world. And Pfizer is working overtime to get us 1 Million pills this month and more than double that next month. 433 | 434 | And we’re launching the “Test to Treat” initiative so people can get tested at a pharmacy, and if they’re positive, receive antiviral pills on the spot at no cost. 435 | 436 | If you’re immunocompromised or have some other vulnerability, we have treatments and free high-quality masks. 437 | 438 | We’re leaving no one behind or ignoring anyone’s needs as we move forward. 439 | 440 | And on testing, we have made hundreds of millions of tests available for you to order for free. 
441 | 442 | Even if you already ordered free tests tonight, I am announcing that you can order more from covidtests.gov starting next week. 443 | 444 | Second – we must prepare for new variants. Over the past year, we’ve gotten much better at detecting new variants. 445 | 446 | If necessary, we’ll be able to deploy new vaccines within 100 days instead of many more months or years. 447 | 448 | And, if Congress provides the funds we need, we’ll have new stockpiles of tests, masks, and pills ready if needed. 449 | 450 | I cannot promise a new variant won’t come. But I can promise you we’ll do everything within our power to be ready if it does. 451 | 452 | Third – we can end the shutdown of schools and businesses. We have the tools we need. 453 | 454 | It’s time for Americans to get back to work and fill our great downtowns again. People working from home can feel safe to begin to return to the office. 455 | 456 | We’re doing that here in the federal government. The vast majority of federal workers will once again work in person. 457 | 458 | Our schools are open. Let’s keep it that way. Our kids need to be in school. 459 | 460 | And with 75% of adult Americans fully vaccinated and hospitalizations down by 77%, most Americans can remove their masks, return to work, stay in the classroom, and move forward safely. 461 | 462 | We achieved this because we provided free vaccines, treatments, tests, and masks. 463 | 464 | Of course, continuing this costs money. 465 | 466 | I will soon send Congress a request. 467 | 468 | The vast majority of Americans have used these tools and may want to again, so I expect Congress to pass it quickly. 469 | 470 | Fourth, we will continue vaccinating the world. 471 | 472 | We’ve sent 475 Million vaccine doses to 112 countries, more than any other nation. 473 | 474 | And we won’t stop. 475 | 476 | We have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. 477 | 478 | Let’s use this moment to reset. 
Let’s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease. 479 | 480 | Let’s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans. 481 | 482 | We can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. 483 | 484 | I recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. 485 | 486 | They were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. 487 | 488 | Officer Mora was 27 years old. 489 | 490 | Officer Rivera was 22. 491 | 492 | Both Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. 493 | 494 | I spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. 495 | 496 | I’ve worked on these issues a long time. 497 | 498 | I know what works: Investing in crime prevention and community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety. 499 | 500 | So let’s not abandon our streets. Or choose between safety and equal justice. 501 | 502 | Let’s come together to protect our communities, restore trust, and hold law enforcement accountable. 503 | 504 | That’s why the Justice Department required body cameras, banned chokeholds, and restricted no-knock warrants for its officers. 505 | 506 | That’s why the American Rescue Plan provided $350 Billion that cities, states, and counties can use to hire more police and invest in proven strategies like community violence interruption—trusted messengers breaking the cycle of violence and trauma and giving young people hope. 507 | 508 | We should all agree: The answer is not to Defund the police. 
The answer is to FUND the police with the resources and training they need to protect our communities. 509 | 510 | I ask Democrats and Republicans alike: Pass my budget and keep our neighborhoods safe. 511 | 512 | And I will keep doing everything in my power to crack down on gun trafficking and ghost guns you can buy online and make at home—they have no serial numbers and can’t be traced. 513 | 514 | And I ask Congress to pass proven measures to reduce gun violence. Pass universal background checks. Why should anyone on a terrorist list be able to purchase a weapon? 515 | 516 | Ban assault weapons and high-capacity magazines. 517 | 518 | Repeal the liability shield that makes gun manufacturers the only industry in America that can’t be sued. 519 | 520 | These laws don’t infringe on the Second Amendment. They save lives. 521 | 522 | The most fundamental right in America is the right to vote – and to have it counted. And it’s under assault. 523 | 524 | In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. 525 | 526 | We cannot let this happen. 527 | 528 | Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. 529 | 530 | Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. 531 | 532 | One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. 533 | 534 | And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. 
535 | 536 | A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. 537 | 538 | And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. 539 | 540 | We can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. 541 | 542 | We’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. 543 | 544 | We’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. 545 | 546 | We’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders. 547 | 548 | We can do all this while keeping lit the torch of liberty that has led generations of immigrants to this land—my forefathers and so many of yours. 549 | 550 | Provide a pathway to citizenship for Dreamers, those on temporary status, farm workers, and essential workers. 551 | 552 | Revise our laws so businesses have the workers they need and families don’t wait decades to reunite. 553 | 554 | It’s not only the right thing to do—it’s the economically smart thing to do. 555 | 556 | That’s why immigration reform is supported by everyone from labor unions to religious leaders to the U.S. Chamber of Commerce. 557 | 558 | Let’s get it done once and for all. 559 | 560 | Advancing liberty and justice also requires protecting the rights of women. 561 | 562 | The constitutional right affirmed in Roe v. Wade—standing precedent for half a century—is under attack as never before. 563 | 564 | If we want to go forward—not backward—we must protect access to health care. Preserve a woman’s right to choose. 
And let’s continue to advance maternal health care in America. 565 | 566 | And for our LGBTQ+ Americans, let’s finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong. 567 | 568 | As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. 569 | 570 | While it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice. 571 | 572 | And soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. 573 | 574 | So tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together. 575 | 576 | First, beat the opioid epidemic. 577 | 578 | There is so much we can do. Increase funding for prevention, treatment, harm reduction, and recovery. 579 | 580 | Get rid of outdated rules that stop doctors from prescribing treatments. And stop the flow of illicit drugs by working with state and local law enforcement to go after traffickers. 581 | 582 | If you’re suffering from addiction, know you are not alone. I believe in recovery, and I celebrate the 23 million Americans in recovery. 583 | 584 | Second, let’s take on mental health. Especially among our children, whose lives and education have been turned upside down. 585 | 586 | The American Rescue Plan gave schools money to hire teachers and help students make up for lost learning. 587 | 588 | I urge every parent to make sure your school does just that. And we can all play a part—sign up to be a tutor or a mentor. 589 | 590 | Children were also struggling before the pandemic. 
Bullying, violence, trauma, and the harms of social media. 591 | 592 | As Frances Haugen, who is here with us tonight, has shown, we must hold social media platforms accountable for the national experiment they’re conducting on our children for profit. 593 | 594 | It’s time to strengthen privacy protections, ban targeted advertising to children, demand tech companies stop collecting personal data on our children. 595 | 596 | And let’s get all Americans the mental health services they need. More people they can turn to for help, and full parity between physical and mental health care. 597 | 598 | Third, support our veterans. 599 | 600 | Veterans are the best of us. 601 | 602 | I’ve always believed that we have a sacred obligation to equip all those we send to war and care for them and their families when they come home. 603 | 604 | My administration is providing assistance with job training and housing, and now helping lower-income veterans get VA care debt-free. 605 | 606 | Our troops in Iraq and Afghanistan faced many dangers. 607 | 608 | One was stationed at bases and breathing in toxic smoke from “burn pits” that incinerated wastes of war—medical and hazard material, jet fuel, and more. 609 | 610 | When they came home, many of the world’s fittest and best trained warriors were never the same. 611 | 612 | Headaches. Numbness. Dizziness. 613 | 614 | A cancer that would put them in a flag-draped coffin. 615 | 616 | I know. 617 | 618 | One of those soldiers was my son Major Beau Biden. 619 | 620 | We don’t know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. 621 | 622 | But I’m committed to finding out everything we can. 623 | 624 | Committed to military families like Danielle Robinson from Ohio. 625 | 626 | The widow of Sergeant First Class Heath Robinson. 627 | 628 | He was born a soldier. Army National Guard. Combat medic in Kosovo and Iraq. 
629 | 630 | Stationed near Baghdad, just yards from burn pits the size of football fields. 631 | 632 | Heath’s widow Danielle is here with us tonight. They loved going to Ohio State football games. He loved building Legos with their daughter. 633 | 634 | But cancer from prolonged exposure to burn pits ravaged Heath’s lungs and body. 635 | 636 | Danielle says Heath was a fighter to the very end. 637 | 638 | He didn’t know how to stop fighting, and neither did she. 639 | 640 | Through her pain she found purpose to demand we do better. 641 | 642 | Tonight, Danielle—we are. 643 | 644 | The VA is pioneering new ways of linking toxic exposures to diseases, already helping more veterans get benefits. 645 | 646 | And tonight, I’m announcing we’re expanding eligibility to veterans suffering from nine respiratory cancers. 647 | 648 | I’m also calling on Congress: pass a law to make sure veterans devastated by toxic exposures in Iraq and Afghanistan finally get the benefits and comprehensive health care they deserve. 649 | 650 | And fourth, let’s end cancer as we know it. 651 | 652 | This is personal to me and Jill, to Kamala, and to so many of you. 653 | 654 | Cancer is the #2 cause of death in America–second only to heart disease. 655 | 656 | Last month, I announced our plan to supercharge 657 | the Cancer Moonshot that President Obama asked me to lead six years ago. 658 | 659 | Our goal is to cut the cancer death rate by at least 50% over the next 25 years, turn more cancers from death sentences into treatable diseases. 660 | 661 | More support for patients and families. 662 | 663 | To get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. 664 | 665 | It’s based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more. 666 | 667 | ARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer’s, diabetes, and more. 668 | 669 | A unity agenda for the nation. 670 | 671 | We can do this. 
672 | 673 | My fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. 674 | 675 | In this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. 676 | 677 | We have fought for freedom, expanded liberty, defeated totalitarianism and terror. 678 | 679 | And built the strongest, freest, and most prosperous nation the world has ever known. 680 | 681 | Now is the hour. 682 | 683 | Our moment of responsibility. 684 | 685 | Our test of resolve and conscience, of history itself. 686 | 687 | It is in this moment that our character is formed. Our purpose is found. Our future is forged. 688 | 689 | Well I know this nation. 690 | 691 | We will meet the test. 692 | 693 | To protect freedom and liberty, to expand fairness and opportunity. 694 | 695 | We will save democracy. 696 | 697 | As hard as these times have been, I am more optimistic about America today than I have been my whole life. 698 | 699 | Because I see the future that is within our grasp. 700 | 701 | Because I know there is simply nothing beyond our capacity. 702 | 703 | We are the only nation on Earth that has always turned every crisis we have faced into an opportunity. 704 | 705 | The only nation that can be defined by a single word: possibilities. 706 | 707 | So on this night, in our 245th year as a nation, I have come to report on the State of the Union. 708 | 709 | And my report is this: the State of the Union is strong—because you, the American people, are strong. 710 | 711 | We are stronger today than we were a year ago. 712 | 713 | And we will be stronger a year from now than we are today. 714 | 715 | Now is our moment to meet and overcome the challenges of our time. 716 | 717 | And we will, as one people. 718 | 719 | One America. 720 | 721 | The United States of America. 722 | 723 | May God bless you all. May God protect our troops. 
-------------------------------------------------------------------------------- /example.env: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY=sk-**** 2 | HUGGINGFACEHUB_API_TOKEN=hf_** 3 | PINECONE_ENV=***** 4 | PINECONE_API_KEY=***** -------------------------------------------------------------------------------- /ingest/ingest_chroma.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from chromadb.config import Settings 4 | from dotenv import load_dotenv 5 | from langchain.document_loaders import ( 6 | DirectoryLoader, 7 | PyPDFLoader, 8 | TextLoader, 9 | UnstructuredMarkdownLoader, 10 | ) 11 | from langchain.embeddings import HuggingFaceEmbeddings 12 | from langchain.text_splitter import RecursiveCharacterTextSplitter 13 | from langchain.vectorstores import Chroma 14 | 15 | # Load environment variables from .env file 16 | load_dotenv() 17 | 18 | HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN") 19 | 20 | DB_CHROMA_PATH = "./../vectorstore/db_chroma" 21 | DATA_DIR = "./../data" 22 | 23 | 24 | # Create vector database 25 | def create_vector_database(): 26 | """ 27 | Creates a vector database using document loaders and embeddings. 28 | 29 | This function loads data from PDF, markdown and text files in the 'data/' directory, 30 | splits the loaded documents into chunks, transforms them into embeddings using HuggingFace, 31 | and finally persists the embeddings into a Chroma vector database. 
32 | 33 | """ 34 | # Initialize loaders for different file types 35 | """ pdf_loader = DirectoryLoader(DATA_DIR, glob="**/*.pdf", loader_cls=PyPDFLoader) 36 | markdown_loader = DirectoryLoader( 37 | DATA_DIR, glob="**/*.md", loader_cls=UnstructuredMarkdownLoader 38 | ) 39 | text_loader = DirectoryLoader(DATA_DIR, glob="**/*.txt", loader_cls=TextLoader) 40 | 41 | all_loaders = [pdf_loader, markdown_loader, text_loader] 42 | 43 | # Load documents from all loaders 44 | loaded_documents = [] 45 | for loader in all_loaders: 46 | loaded_documents.extend(loader.load()) """ 47 | 48 | text_loader = DirectoryLoader(DATA_DIR, glob="**/*.txt", loader_cls=TextLoader) 49 | loaded_documents = text_loader.load() 50 | 51 | # len(loaded_documents) 52 | # loaded_documents[0] 53 | 54 | # Split loaded documents into chunks 55 | text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100) 56 | chunked_documents = text_splitter.split_documents(loaded_documents) 57 | 58 | # len(chunked_documents) 59 | 60 | # Initialize HuggingFace embeddings 61 | huggingface_embeddings = HuggingFaceEmbeddings( 62 | model_name="sentence-transformers/all-MiniLM-L6-v2", 63 | model_kwargs={"device": "cpu"}, 64 | ) 65 | 66 | # Create and persist a Chroma vector database from the chunked documents 67 | vector_database = Chroma.from_documents( 68 | documents=chunked_documents, 69 | embedding=huggingface_embeddings, 70 | persist_directory=DB_CHROMA_PATH, 71 | ) 72 | 73 | vector_database.persist() 74 | 75 | 76 | if __name__ == "__main__": 77 | create_vector_database() 78 | -------------------------------------------------------------------------------- /ingest/ingest_chroma_openai.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from langchain.document_loaders import ( 4 | DirectoryLoader, 5 | PyPDFLoader, 6 | TextLoader, 7 | UnstructuredMarkdownLoader, 8 | ) 9 | from langchain.embeddings import HuggingFaceEmbeddings, OpenAIEmbeddings 
10 | from langchain.text_splitter import RecursiveCharacterTextSplitter 11 | from langchain.vectorstores import Chroma 12 | 13 | DB_CHROMA_PATH = "./../vectorstore/db_chroma_openai" 14 | DATA_DIR = "./../data" 15 | 16 | from dotenv import load_dotenv 17 | 18 | # Load environment variables from .env file 19 | load_dotenv() 20 | 21 | OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") 22 | 23 | 24 | # Create vector database 25 | def create_vector_database(): 26 | """ 27 | Creates a vector database using document loaders and embeddings. 28 | 29 | This function loads data from PDF, markdown and text files in the 'data/' directory, 30 | splits the loaded documents into chunks, transforms them into embeddings using OpenAI, 31 | and finally persists the embeddings into a Chroma vector database. 32 | 33 | """ 34 | # Initialize loaders for different file types 35 | """ pdf_loader = DirectoryLoader(DATA_DIR, glob="**/*.pdf", loader_cls=PyPDFLoader) 36 | markdown_loader = DirectoryLoader( 37 | DATA_DIR, glob="**/*.md", loader_cls=UnstructuredMarkdownLoader 38 | ) 39 | text_loader = DirectoryLoader(DATA_DIR, glob="**/*.txt", loader_cls=TextLoader) 40 | 41 | all_loaders = [pdf_loader, markdown_loader, text_loader] 42 | 43 | # Load documents from all loaders 44 | loaded_documents = [] 45 | for loader in all_loaders: 46 | loaded_documents.extend(loader.load()) """ 47 | 48 | text_loader = DirectoryLoader(DATA_DIR, glob="**/*.txt", loader_cls=TextLoader) 49 | loaded_documents = text_loader.load() 50 | 51 | # Split loaded documents into chunks 52 | text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100) 53 | chunked_documents = text_splitter.split_documents(loaded_documents) 54 | 55 | # Initialize HuggingFace embeddings 56 | """ huggingface_embeddings = HuggingFaceEmbeddings( 57 | model_name="sentence-transformers/all-MiniLM-L6-v2", 58 | model_kwargs={"device": "cpu"}, 59 | ) """ 60 | 61 | # Initialize OpenAI embeddings 62 | openai_embeddings = 
OpenAIEmbeddings() 63 | 64 | # Create and persist a Chroma vector database from the chunked documents 65 | vector_database = Chroma.from_documents( 66 | documents=chunked_documents, 67 | embedding=openai_embeddings, 68 | persist_directory=DB_CHROMA_PATH, 69 | ) 70 | 71 | vector_database.persist() 72 | 73 | 74 | if __name__ == "__main__": 75 | create_vector_database() 76 | -------------------------------------------------------------------------------- /ingest/ingest_faiss.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from dotenv import load_dotenv 4 | from langchain.document_loaders import ( 5 | DirectoryLoader, 6 | PyPDFLoader, 7 | TextLoader, 8 | UnstructuredMarkdownLoader, 9 | ) 10 | from langchain.embeddings import HuggingFaceEmbeddings 11 | from langchain.text_splitter import RecursiveCharacterTextSplitter 12 | from langchain.vectorstores import FAISS 13 | 14 | # Load environment variables from .env file 15 | load_dotenv() 16 | 17 | HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN") 18 | 19 | DB_FAISS_PATH = "./../vectorstore/db_faiss" 20 | DATA_DIR = "./../data" 21 | 22 | 23 | # Create vector database 24 | def create_vector_database(): 25 | """ 26 | Creates a vector database using document loaders and embeddings. 27 | 28 | This function loads data from PDF, markdown and text files in the 'data/' directory, 29 | splits the loaded documents into chunks, transforms them into embeddings using HuggingFace, 30 | and finally persists the embeddings into a FAISS vector database. 
31 | 32 | """ 33 | # Initialize loaders for different file types 34 | """ pdf_loader = DirectoryLoader(DATA_DIR, glob="**/*.pdf", loader_cls=PyPDFLoader) 35 | markdown_loader = DirectoryLoader( 36 | DATA_DIR, glob="**/*.md", loader_cls=UnstructuredMarkdownLoader 37 | ) 38 | text_loader = DirectoryLoader(DATA_DIR, glob="**/*.txt", loader_cls=TextLoader) 39 | 40 | all_loaders = [pdf_loader, markdown_loader, text_loader] 41 | 42 | # Load documents from all loaders 43 | loaded_documents = [] 44 | for loader in all_loaders: 45 | loaded_documents.extend(loader.load()) """ 46 | 47 | text_loader = DirectoryLoader(DATA_DIR, glob="**/*.txt", loader_cls=TextLoader) 48 | loaded_documents = text_loader.load() 49 | 50 | # Split loaded documents into chunks 51 | text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100) 52 | chunked_documents = text_splitter.split_documents(loaded_documents) 53 | 54 | # Initialize HuggingFace embeddings 55 | huggingface_embeddings = HuggingFaceEmbeddings( 56 | model_name="sentence-transformers/all-MiniLM-L6-v2", 57 | model_kwargs={"device": "cpu"}, 58 | ) 59 | 60 | # Create and persist a Chroma vector database from the chunked documents 61 | vector_database = FAISS.from_documents( 62 | documents=chunked_documents, 63 | embedding=huggingface_embeddings, 64 | ) 65 | 66 | vector_database.save_local(DB_FAISS_PATH) 67 | 68 | 69 | if __name__ == "__main__": 70 | create_vector_database() 71 | -------------------------------------------------------------------------------- /ingest/ingest_pinecone.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import pinecone 4 | from dotenv import load_dotenv 5 | from langchain.document_loaders import ( 6 | DirectoryLoader, 7 | PyPDFLoader, 8 | TextLoader, 9 | UnstructuredMarkdownLoader, 10 | ) 11 | from langchain.embeddings import HuggingFaceEmbeddings 12 | from langchain.text_splitter import RecursiveCharacterTextSplitter 13 | from 
langchain.vectorstores import Pinecone 14 | 15 | # Load environment variables from .env file 16 | load_dotenv() 17 | 18 | PINECONE_API_KEY = os.getenv("PINECONE_API_KEY") 19 | PINECONE_ENV = os.getenv("PINECONE_ENV") 20 | 21 | DATA_DIR = "./../data" 22 | 23 | 24 | # Create vector database 25 | def create_vector_database(): 26 | """ 27 | Creates a vector database using document loaders and embeddings. 28 | 29 | This function loads data from PDF, markdown and text files in the 'data/' directory, 30 | splits the loaded documents into chunks, transforms them into embeddings using HuggingFace, 31 | and finally persists the embeddings into a Pinecone vector index. 32 | 33 | """ 34 | 35 | # Initialize loaders for different file types 36 | """ pdf_loader = DirectoryLoader(DATA_DIR, glob="**/*.pdf", loader_cls=PyPDFLoader) 37 | markdown_loader = DirectoryLoader( 38 | DATA_DIR, glob="**/*.md", loader_cls=UnstructuredMarkdownLoader 39 | ) 40 | text_loader = DirectoryLoader(DATA_DIR, glob="**/*.txt", loader_cls=TextLoader) 41 | 42 | all_loaders = [pdf_loader, markdown_loader, text_loader] 43 | 44 | # Load documents from all loaders 45 | loaded_documents = [] 46 | for loader in all_loaders: 47 | loaded_documents.extend(loader.load()) """ 48 | 49 | text_loader = DirectoryLoader(DATA_DIR, glob="**/*.txt", loader_cls=TextLoader) 50 | loaded_documents = text_loader.load() 51 | 52 | # Split loaded documents into chunks 53 | text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100) 54 | chunked_documents = text_splitter.split_documents(loaded_documents) 55 | 56 | # Initialize HuggingFace embeddings 57 | huggingface_embeddings = HuggingFaceEmbeddings( 58 | model_name="sentence-transformers/all-MiniLM-L6-v2", 59 | model_kwargs={"device": "cpu"}, 60 | ) 61 | 62 | # initialize pinecone 63 | pinecone.init( 64 | api_key=PINECONE_API_KEY, # type: ignore 65 | environment=PINECONE_ENV, # type: ignore 66 | ) 67 | 68 | # giving index a name 69 | index_name = 
"llama2-langchain" 70 | 71 | # delete index if same name already exists 72 | if index_name in pinecone.list_indexes(): 73 | pinecone.delete_index(index_name) 74 | 75 | # create index 76 | pinecone.create_index(name=index_name, dimension=384, metric="cosine") 77 | 78 | # Create and store a pinecone vector database from the chunked documents 79 | vector_database = Pinecone.from_documents( 80 | documents=chunked_documents, 81 | embedding=huggingface_embeddings, 82 | index_name=index_name, 83 | ) 84 | 85 | 86 | if __name__ == "__main__": 87 | create_vector_database() 88 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | langchain 2 | chainlit 3 | python-dotenv 4 | chromadb 5 | faiss-cpu 6 | torch 7 | transformers 8 | sentence_transformers 9 | unstructured 10 | pypdf 11 | ctransformers 12 | Markdown 13 | pinecone-client 14 | tiktoken 15 | openai --------------------------------------------------------------------------------