├── requirements.txt
├── README.md
└── app.py

/requirements.txt:
--------------------------------------------------------------------------------
streamlit
google-generativeai
python-dotenv
langchain
langchain-community
PyPDF2
chromadb
faiss-cpu
langchain_google_genai
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# GoogleGemini-PDF
Chat with multiple PDF docs using Google's Gemini generative AI.

In this tutorial, we explore how to chat with multiple PDF files using Gemini Pro, Google's generative model, accessed through the Google Generative AI API (which offers a free tier). The app extracts the text from your uploaded PDFs, embeds it into a local vector store, and answers natural-language questions grounded in that content: you can ask questions, compare information across documents, and more. It handles large and complex PDF files with good speed and accuracy.

## Required packages:
https://github.com/Nagh-DE/GoogleGemini-PDF/blob/main/requirements.txt

## Main Python file
(used to create the front end, embed the files, and retrieve the responses):
https://github.com/Nagh-DE/GoogleGemini-PDF/blob/main/app.py
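## Quickstart
To run the app locally: `pip install -r requirements.txt`, put your key in a `.env` file as `GOOGLE_API_KEY=...` (app.py loads it via `python-dotenv`), then start the UI with `streamlit run app.py`.

Once PDFs have been processed, the embeddings live in a local `faiss_index/` folder, so you can also query them outside Streamlit. A minimal sketch (assuming the index has already been built by the app and the API key is in your `.env`; the sample question and `k=4` are illustrative only):

```python
from dotenv import load_dotenv
from langchain_community.vectorstores import FAISS
from langchain_google_genai import GoogleGenerativeAIEmbeddings

load_dotenv()  # makes GOOGLE_API_KEY from .env visible to the Google clients

# the same embedding model used to build the index must be used to query it
embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")

# recent LangChain versions require opting in, since the index is a local pickle
db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)

# fetch the 4 chunks most similar to a question and preview them
for doc in db.similarity_search("What is the main topic of the documents?", k=4):
    print(doc.page_content[:200], "\n---")
```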
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
import os

import streamlit as st
from PyPDF2 import PdfReader  # library to read PDF files
from langchain.text_splitter import RecursiveCharacterTextSplitter  # to split the extracted text into chunks

from langchain_google_genai import GoogleGenerativeAIEmbeddings  # to embed the text
import google.generativeai as genai

from langchain_community.vectorstores import FAISS  # vector store for the embeddings
from langchain_google_genai import ChatGoogleGenerativeAI  # Gemini chat model
from langchain.chains.question_answering import load_qa_chain  # to combine the prompt with retrieved documents
from langchain.prompts import PromptTemplate  # to create prompt templates
from dotenv import load_dotenv

load_dotenv()

genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))

def get_pdf_text(pdf_docs):
    text = ""
    # iterate over all uploaded PDF files
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        # iterate over all pages in a PDF; extract_text() returns None for image-only pages
        for page in pdf_reader.pages:
            text += page.extract_text() or ""
    return text

def get_text_chunks(text):
    # split the text into overlapping chunks so each one fits comfortably into a prompt
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
    chunks = text_splitter.split_text(text)
    return chunks

def get_vector_store(text_chunks):
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")  # Google embeddings
    vector_store = FAISS.from_texts(text_chunks, embeddings)  # embed the split text of the PDF docs
    vector_store.save_local("faiss_index")  # save the index locally

def get_conversation_chain():
    # define the prompt
    prompt_template = """
    Answer the question as detailed as possible from the provided context. Make sure to provide all the details.
    If the answer is not in the provided context, just say "answer is not available in the context"; don't provide a wrong answer.

    Context:
    {context}

    Question:
    {question}

    Answer:
    """

    model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)  # low temperature keeps answers grounded in the context

    prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])

    # "stuff" chain: all retrieved documents are stuffed into a single prompt
    chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)

    return chain

def user_input(user_question):
    # user_question is the input question
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    # load the local FAISS index; recent LangChain versions require opting in to
    # deserialization because the index is stored as a local pickle
    new_db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)

    # retrieve the chunks most similar to the input question
    docs = new_db.similarity_search(user_question)

    chain = get_conversation_chain()

    response = chain(
        {"input_documents": docs, "question": user_question},
        return_only_outputs=True)

    print(response)
    st.write("Reply: ", response["output_text"])

def main():
    st.set_page_config(page_title="Chat PDF")
    st.header("Chat with PDF using Gemini")

    user_question = st.text_input("Ask a Question:")

    if user_question:
        user_input(user_question)

    with st.sidebar:
        st.title("Menu:")
        pdf_docs = st.file_uploader("Upload your PDF Files and Click on the Submit & Process Button", accept_multiple_files=True)
        if st.button("Submit & Process"):
            if not pdf_docs:
                st.warning("Please upload at least one PDF first.")
            else:
                with st.spinner("Processing..."):
                    raw_text = get_pdf_text(pdf_docs)
                    text_chunks = get_text_chunks(raw_text)
                    get_vector_store(text_chunks)
                    st.success("Done")

if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------