├── .gitignore
├── App_schematic.png
├── Semantic_Research_Engine.png
├── requirements.txt
├── chainlit.md
├── rag_test.py
├── index.html
├── .chainlit
├── config.toml
└── translations
│ ├── en-US.json
│ └── pt-BR.json
├── search_engine.py
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 | .env
2 | /vision transformers
3 | .DS_Store
4 | __pycache__/
--------------------------------------------------------------------------------
/App_schematic.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tahreemrasul/semantic_research_engine/HEAD/App_schematic.png
--------------------------------------------------------------------------------
/Semantic_Research_Engine.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tahreemrasul/semantic_research_engine/HEAD/Semantic_Research_Engine.png
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | sentence-transformers==2.2.2
2 | torch==2.1.1
3 | python-dotenv==1.0.0
4 | chainlit
5 | langchain
6 | langchain-openai
7 | langchain-community
8 | chromadb
9 | literalai
10 | nbconvert
11 | arxiv
12 | pymupdf
--------------------------------------------------------------------------------
/chainlit.md:
--------------------------------------------------------------------------------
1 | # Welcome to Chainlit! 🚀🤖
2 |
3 | Hi there, Developer! 👋 We're excited to have you on board. Chainlit is a powerful tool designed to help you prototype, debug and share applications built on top of LLMs.
4 |
5 | ## Useful Links 🔗
6 |
7 | - **Documentation:** Get started with our comprehensive [Chainlit Documentation](https://docs.chainlit.io) 📚
8 | - **Discord Community:** Join our friendly [Chainlit Discord](https://discord.gg/k73SQ3FyUh) to ask questions, share your projects, and connect with other developers! 💬
9 |
10 | We can't wait to see what you create with Chainlit! Happy coding! 💻😊
11 |
12 | ## Welcome screen
13 |
14 | To modify the welcome screen, edit the `chainlit.md` file at the root of your project. If you do not want a welcome screen, just leave this file empty.
15 |
--------------------------------------------------------------------------------
/rag_test.py:
--------------------------------------------------------------------------------
1 | from langchain_community.document_loaders import ArxivLoader
2 | from langchain.text_splitter import RecursiveCharacterTextSplitter
3 | from langchain_community.vectorstores import Chroma
4 | from langchain_community.embeddings import HuggingFaceEmbeddings
5 | from langchain.chains import RetrievalQA
6 | from langchain_openai import ChatOpenAI
7 | from dotenv import load_dotenv
8 |
9 | load_dotenv()
10 |
11 |
def rag(query, question):
    """Run a one-shot RAG pipeline over the top arXiv hit for *query*.

    Fetches the most relevant arXiv paper, chunks it, embeds the chunks into
    an in-memory Chroma store, and answers *question* with a RetrievalQA
    ("stuff") chain backed by GPT-3.5.

    Args:
        query: Search string used to locate the arXiv paper.
        question: Natural-language question answered from the paper text.

    Returns:
        The RetrievalQA result dict (the answer is under the "result" key).

    Raises:
        ValueError: If the arXiv search returns no documents.
    """
    arxiv_docs = ArxivLoader(query=query, load_max_docs=1).load()
    if not arxiv_docs:
        # Without this guard the metadata access below raises IndexError.
        raise ValueError(f"No arXiv results found for query: {query!r}")

    print(arxiv_docs[0].metadata['Title'])

    # The splitter is loop-invariant; build it once instead of per document.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    # Flatten chunks from every loaded document. The original indexed only
    # the first document's chunks, which silently dropped the rest whenever
    # load_max_docs > 1.
    pdf_data = []
    for doc in arxiv_docs:
        pdf_data.extend(text_splitter.create_documents([doc.page_content]))

    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-l6-v2")
    db = Chroma.from_documents(pdf_data, embeddings)

    llm = ChatOpenAI(model='gpt-3.5-turbo',
                     temperature=0)

    qa = RetrievalQA.from_chain_type(llm=llm,
                                     chain_type="stuff",
                                     retriever=db.as_retriever())
    # Chain.__call__ is deprecated in recent LangChain releases; invoke()
    # is the supported entry point and returns the same result dict.
    result = qa.invoke({"query": question})
    return result
34 |
35 |
if __name__ == "__main__":
    # Demo entry point. Guarded so importing this module does not trigger
    # network calls to arXiv, HuggingFace model downloads, or the OpenAI API.
    query = "lightweight transformer for language tasks"
    question = "how many and which benchmark datasets and tasks were compared for light weight transformer?"
    output = rag(query, question)
    print(output)
40 |
--------------------------------------------------------------------------------
/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Semantic Search Engine
5 |
40 |
41 |
42 |
43 | Semantic Search Application with Copilot
44 |
55 |
56 |
57 |
58 |
86 |
87 |
--------------------------------------------------------------------------------
/.chainlit/config.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | # Whether to enable telemetry (default: true). No personal data is collected.
3 | enable_telemetry = true
4 |
5 |
6 | # List of environment variables to be provided by each user to use the app.
7 | user_env = []
8 |
9 | # Duration (in seconds) during which the session is saved when the connection is lost
10 | session_timeout = 3600
11 |
12 | # Enable third parties caching (e.g LangChain cache)
13 | cache = false
14 |
15 | # Authorized origins
16 | allow_origins = ["*"]
17 |
18 | # Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
19 | # follow_symlink = false
20 |
21 | [features]
22 | # Show the prompt playground
23 | prompt_playground = true
24 |
25 | # Process and display HTML in messages. This can be a security risk (see https://stackoverflow.com/questions/19603097/why-is-it-dangerous-to-render-user-generated-html-or-javascript)
26 | unsafe_allow_html = false
27 |
28 | # Process and display mathematical expressions. This can clash with "$" characters in messages.
29 | latex = false
30 |
31 | # Automatically tag threads with the current chat profile (if a chat profile is used)
32 | auto_tag_thread = true
33 |
34 | # Authorize users to upload files with messages
35 | [features.multi_modal]
36 | enabled = true
37 | accept = ["*/*"]
38 | max_files = 20
39 | max_size_mb = 500
40 |
41 | # Allows user to use speech to text
42 | [features.speech_to_text]
43 | enabled = false
44 | # See all languages here https://github.com/JamesBrill/react-speech-recognition/blob/HEAD/docs/API.md#language-string
45 | # language = "en-US"
46 |
47 | [UI]
48 | # Name of the app and chatbot.
49 | name = "Chatbot"
50 |
51 | # Show the readme while the thread is empty.
52 | show_readme_as_default = false
53 |
54 | # Description of the app and chatbot. This is used for HTML tags.
55 | # description = ""
56 |
57 | # Large size content are by default collapsed for a cleaner ui
58 | default_collapse_content = true
59 |
60 | # The default value for the expand messages settings.
61 | default_expand_messages = false
62 |
63 | # Hide the chain of thought details from the user in the UI.
64 | hide_cot = false
65 |
66 | # Link to your github repo. This will add a github button in the UI's header.
67 | # github = ""
68 |
69 | # Specify a CSS file that can be used to customize the user interface.
70 | # The CSS file can be served from the public directory or via an external link.
71 | # custom_css = "/public/test.css"
72 |
73 | # Specify a Javascript file that can be used to customize the user interface.
74 | # The Javascript file can be served from the public directory.
75 | # custom_js = "/public/test.js"
76 |
77 | # Specify a custom font url.
78 | # custom_font = "https://fonts.googleapis.com/css2?family=Inter:wght@400;500;700&display=swap"
79 |
80 | # Specify a custom build directory for the frontend.
81 | # This can be used to customize the frontend code.
82 | # Be careful: If this is a relative path, it should not start with a slash.
83 | # custom_build = "./public/build"
84 |
85 | # Override default MUI light theme. (Check theme.ts)
86 | [UI.theme]
87 | #font_family = "Inter, sans-serif"
88 | [UI.theme.light]
89 | #background = "#FAFAFA"
90 | #paper = "#FFFFFF"
91 |
92 | [UI.theme.light.primary]
93 | #main = "#F80061"
94 | #dark = "#980039"
95 | #light = "#FFE7EB"
96 |
97 | # Override default MUI dark theme. (Check theme.ts)
98 | [UI.theme.dark]
99 | #background = "#FAFAFA"
100 | #paper = "#FFFFFF"
101 |
102 | [UI.theme.dark.primary]
103 | #main = "#F80061"
104 | #dark = "#980039"
105 | #light = "#FFE7EB"
106 |
107 |
108 | [meta]
109 | generated_by = "1.0.506"
110 |
--------------------------------------------------------------------------------
/search_engine.py:
--------------------------------------------------------------------------------
1 | import chainlit as cl
2 | from langchain_community.document_loaders import ArxivLoader
3 | from langchain_community.vectorstores import Chroma
4 | from langchain_community.embeddings import HuggingFaceEmbeddings
5 | from langchain.text_splitter import RecursiveCharacterTextSplitter
6 | from langchain.chains import RetrievalQA
7 | from langchain_openai import ChatOpenAI
8 | from literalai import LiteralClient
9 | from dotenv import load_dotenv
10 |
11 | load_dotenv()
12 |
# Literal AI client; reads LITERAL_API_KEY from the environment
# (populated by load_dotenv() above).
client = LiteralClient()

# This will fetch the champion version, you can also pass a specific version
prompt = client.api.get_prompt(name="test_prompt")
prompt = prompt.to_langchain_chat_prompt_template()
# Pin the template's variable names to the ones used downstream —
# presumably "context" for retrieved documents and "question" for the user
# query, as consumed by the RetrievalQA "stuff" chain; verify against the
# prompt stored in Literal AI.
prompt.input_variables = ["context", "question"]
19 |
20 |
@cl.on_chat_start
async def retrieve_docs():
    """Copilot session bootstrap.

    Asks the user for a topic, pulls the best-matching arXiv paper, surfaces
    it in the Copilot popup, embeds its chunks into a Chroma store, and
    stashes the store and the RetrievalQA chain in the user session for the
    @cl.on_message handler.
    """
    if cl.context.session.client_type == "copilot":
        llm = ChatOpenAI(model='gpt-3.5-turbo',
                         temperature=0)

        # QUERY PORTION
        query = None

        # Wait for the user to ask an Arxiv question. AskUserMessage returns
        # None when the 15s timeout elapses, so keep re-asking until answered.
        while query is None:
            query = await cl.AskUserMessage(
                content="Please enter a topic to begin!", timeout=15).send()
        arxiv_query = query['output']

        # ARXIV DOCS PORTION
        arxiv_docs = ArxivLoader(query=arxiv_query, load_max_docs=1).load()
        if not arxiv_docs:
            # Guard: pdf_data[0] below raises IndexError when the arXiv
            # search comes back empty; tell the user instead of crashing.
            await cl.Message(content=f"Sorry, no arXiv results were found for "
                                     f"`{arxiv_query}`. Please start a new chat "
                                     f"and try a different topic.").send()
            return

        # Prepare arXiv results for display
        arxiv_papers = [
            f"Published: {doc.metadata['Published']} \n Title: {doc.metadata['Title']} \n Authors: {doc.metadata['Authors']} \n Summary: {doc.metadata['Summary'][:50]}... \n---\n"
            for doc in arxiv_docs]

        # Trigger popup for arXiv results
        fn_arxiv = cl.CopilotFunction(name="showArxivResults", args={"results": "\n".join(arxiv_papers)})
        await fn_arxiv.acall()

        await cl.Message(content=f"We found some useful results online for `{arxiv_query}` "
                                 f"Displaying them in a popup!").send()

        await cl.Message(content=f"Downloading and chunking articles for `{arxiv_query}` "
                                 f"This operation can take a while!").send()

        # DB PORTION
        # The splitter is loop-invariant; build it once rather than per document.
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
        pdf_data = []
        for doc in arxiv_docs:
            texts = text_splitter.create_documents([doc.page_content])
            pdf_data.append(texts)

        embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-l6-v2")
        db = Chroma.from_documents(pdf_data[0], embeddings)

        # CHAIN PORTION
        chain = RetrievalQA.from_chain_type(llm=llm,
                                            chain_type="stuff",
                                            retriever=db.as_retriever(),
                                            chain_type_kwargs={
                                                "verbose": True,
                                                # Literal AI-managed prompt fetched at module load.
                                                "prompt": prompt
                                            }
                                            )

        # Let the user know that the system is ready
        await cl.Message(content=f"Database creation for `{arxiv_query}` complete. You can now ask questions!").send()

        cl.user_session.set("db", db)
        cl.user_session.set("chain", chain)
78 |
79 |
@cl.on_message
async def retrieve_docs(message: cl.Message):
    """Answer a user question from the paper indexed at chat start.

    NOTE(review): this coroutine shares its name with the @cl.on_chat_start
    handler above. Chainlit registers each by reference at decoration time,
    so both still run, but a distinct name (e.g. answer_question) would be
    clearer.
    """
    if cl.context.session.client_type == "copilot":
        question = message.content
        chain = cl.user_session.get("chain")
        # Create a new instance of the callback handler for each invocation
        cb = client.langchain_callback()
        # RetrievalQA consumes only the "query" key and performs its own
        # retrieval. The original also passed a retriever object under a
        # "context" key, which the chain ignored — dropped here along with
        # the now-unused "db" session lookup.
        database_results = await chain.acall({"query": question},
                                             callbacks=[cb])
        results = [f"Question: {question} \n Answer: {database_results['result']}"]
        # Trigger popup for database results
        fn_db = cl.CopilotFunction(name="showDatabaseResults", args={"results": "\n".join(results)})
        await fn_db.acall()
        await cl.Message(content=f"We found some useful results from our database for your question: `{question}` "
                                 f"Displaying them in a popup!").send()
97 |
98 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Observable Semantic Research Paper Engine with Chainlit Copilot, Literal and LangChain
2 |
3 | This project demonstrates how to create an observable research paper engine using the arXiv API to retrieve the most similar papers to a user query. The retrieved papers are embedded into a Chroma vector database, based on Retrieval Augmented Generation (RAG). The user can then ask questions from the retrieved papers. The application embeds a Chainlit based Copilot inside the webpage, allowing for a more interactive and friendly user experience. To track performance and observe the application's behavior, the application is integrated with Literal AI, an observability framework.
4 |
5 | ## Copilot
Software Copilots are a new kind of assistant embedded in your app/product. They are designed to help users get the most out of your app by providing contextual guidance and taking actions on their behalf. Here is an overview of the application architecture:
7 | 
8 |
9 | ## Key Features
10 |
11 | - Retrieve relevant papers based on user query using the LangChain wrapper for `arXiv` API
12 | - Embed retrieved papers in a Chroma database to initiate a RAG pipeline
13 | - Create optimized prompts for the RAG pipeline using Literal
14 | - Develop a Chainlit application for the above
- Create a simple web interface for the application
16 | - Embed the Chainlit Copilot inside the web app for a more interactive experience
17 | - Integrate observability features to track app performance and generations using Literal
18 |
19 | ## Tech Stack
20 |
21 | This project leverages the following technologies:
22 |
23 | - [Chainlit](https://github.com/Chainlit/chainlit): Used for deploying a frontend application for the chatbot, and embedding the copilot.
24 | - [Literal AI](https://docs.getliteral.ai/get-started/overview): For creating, optimizing and testing prompts for the RAG pipeline, and for integrating observability features in the app.
25 | - [LangChain](https://github.com/langchain-ai/langchain): For retrieving arXiv queries, and managing the app's language understanding and generation.
- [OpenAI](https://openai.com/): Provides the GPT-3.5 model used for language understanding and answer generation.
27 | - [Chroma](https://github.com/chroma-core/chroma): For creating the vector store to be used in retrieval.
28 |
29 | 
30 |
31 | ## Prerequisites
32 |
33 | - Python 3.8 or later
34 | - An OpenAI API key
35 | - A Literal AI API Key
36 |
37 | ## Clone the Repository
38 | Clone this repo using the following commands:
39 | ```bash
40 | git clone git@github.com:tahreemrasul/semantic_research_engine.git
41 | cd ./semantic_research_engine
42 | ```
43 |
44 | ## Environment Setup
45 |
46 | ### Conda Environment
47 |
48 | To set up your development environment, you'll need to install Conda. Once Conda is installed, you can create and activate a new environment with the following commands:
49 |
50 | ```bash
51 | conda create --name semantic_research_engine python=3.10
52 | conda activate semantic_research_engine
53 | ```
54 | ## Dependencies Installation
55 | After activating the Conda environment, install the project dependencies by running:
56 |
57 | ```bash
58 | pip install -r requirements.txt
59 | ```
60 |
61 | ## Project Structure
62 |
63 | - `rag_test.py`: Test script to demonstrate building blocks of the pipeline used in the RAG portion of the application.
64 | - `search_engine.py`: Main script to run the semantic research paper engine with a Chainlit frontend application.
65 | - `index.html`: The primary HTML file serving as the user interface for the semantic research paper search engine, embedding the Copilot for an interactive experience.
66 |
67 | ## Usage
68 |
69 | ### `.env` File
70 | - Create a `.env` file in the root directory of the project.
71 | - Add your OpenAI & Literal AI API keys to the .env file:
72 | ```bash
73 | OPENAI_API_KEY='Your-OpenAI-API-Key-Here'
74 | LITERAL_API_KEY='Your-LiteralAI-API-Key-Here'
75 | ```
76 |
77 | ### Running the Chatbot with Chainlit Frontend
78 | The application can be run by first deploying the Chainlit web app. To do this, run:
79 | ```bash
80 | chainlit run search_engine.py -w
81 | ```
This command will start a local web server at [http://localhost:8000](http://localhost:8000). It is important to do this first before hosting the web application.
83 |
Once your Chainlit server is up and running, you can serve the web app in a separate terminal using:
85 | ```bash
86 | npx http-server
87 | ```
Remember the HTML file has to be served by a server; opening it directly in your browser won’t work. The above command ensures this is done correctly using `npx` (bundled with Node.js). The web application should be live at [http://localhost:8080](http://localhost:8080).
89 |
90 | ## Contributing
91 |
92 | Contributions to the Semantic Research Engine App are welcome! Please feel free to submit pull requests or open issues to suggest improvements or add new features.
93 |
94 | ## License
95 |
96 | [MIT](https://www.mit.edu/~amini/LICENSE.md)
97 |
--------------------------------------------------------------------------------
/.chainlit/translations/en-US.json:
--------------------------------------------------------------------------------
1 | {
2 | "components": {
3 | "atoms": {
4 | "buttons": {
5 | "userButton": {
6 | "menu": {
7 | "settings": "Settings",
8 | "settingsKey": "S",
9 | "APIKeys": "API Keys",
10 | "logout": "Logout"
11 | }
12 | }
13 | }
14 | },
15 | "molecules": {
16 | "newChatButton": {
17 | "newChat": "New Chat"
18 | },
19 | "tasklist": {
20 | "TaskList": {
21 | "title": "\ud83d\uddd2\ufe0f Task List",
22 | "loading": "Loading...",
23 | "error": "An error occured"
24 | }
25 | },
26 | "attachments": {
27 | "cancelUpload": "Cancel upload",
28 | "removeAttachment": "Remove attachment"
29 | },
30 | "newChatDialog": {
31 | "createNewChat": "Create new chat?",
32 | "clearChat": "This will clear the current messages and start a new chat.",
33 | "cancel": "Cancel",
34 | "confirm": "Confirm"
35 | },
36 | "settingsModal": {
37 | "expandMessages": "Expand Messages",
38 | "hideChainOfThought": "Hide Chain of Thought",
39 | "darkMode": "Dark Mode"
40 | }
41 | },
42 | "organisms": {
43 | "chat": {
44 | "history": {
45 | "index": {
46 | "lastInputs": "Last Inputs",
47 | "noInputs": "Such empty...",
48 | "loading": "Loading..."
49 | }
50 | },
51 | "inputBox": {
52 | "input": {
53 | "placeholder": "Type your message here..."
54 | },
55 | "speechButton": {
56 | "start": "Start recording",
57 | "stop": "Stop recording"
58 | },
59 | "SubmitButton": {
60 | "sendMessage": "Send message",
61 | "stopTask": "Stop Task"
62 | },
63 | "UploadButton": {
64 | "attachFiles": "Attach files"
65 | },
66 | "waterMark": {
67 | "text": "Built with"
68 | }
69 | },
70 | "Messages": {
71 | "index": {
72 | "running": "Running",
73 | "executedSuccessfully": "executed successfully",
74 | "failed": "failed",
75 | "feedbackUpdated": "Feedback updated",
76 | "updating": "Updating"
77 | }
78 | },
79 | "dropScreen": {
80 | "dropYourFilesHere": "Drop your files here"
81 | },
82 | "index": {
83 | "failedToUpload": "Failed to upload",
84 | "cancelledUploadOf": "Cancelled upload of",
85 | "couldNotReachServer": "Could not reach the server",
86 | "continuingChat": "Continuing previous chat"
87 | },
88 | "settings": {
89 | "settingsPanel": "Settings panel",
90 | "reset": "Reset",
91 | "cancel": "Cancel",
92 | "confirm": "Confirm"
93 | }
94 | },
95 | "threadHistory": {
96 | "sidebar": {
97 | "filters": {
98 | "FeedbackSelect": {
99 | "feedbackAll": "Feedback: All",
100 | "feedbackPositive": "Feedback: Positive",
101 | "feedbackNegative": "Feedback: Negative"
102 | },
103 | "SearchBar": {
104 | "search": "Search"
105 | }
106 | },
107 | "DeleteThreadButton": {
"confirmMessage": "This will delete the thread as well as its messages and elements.",
109 | "cancel": "Cancel",
110 | "confirm": "Confirm",
111 | "deletingChat": "Deleting chat",
112 | "chatDeleted": "Chat deleted"
113 | },
114 | "index": {
115 | "pastChats": "Past Chats"
116 | },
117 | "ThreadList": {
118 | "empty": "Empty..."
119 | },
120 | "TriggerButton": {
121 | "closeSidebar": "Close sidebar",
122 | "openSidebar": "Open sidebar"
123 | }
124 | },
125 | "Thread": {
126 | "backToChat": "Go back to chat",
127 | "chatCreatedOn": "This chat was created on"
128 | }
129 | },
130 | "header": {
131 | "chat": "Chat",
132 | "readme": "Readme"
133 | }
134 | }
135 | },
136 | "hooks": {
137 | "useLLMProviders": {
138 | "failedToFetchProviders": "Failed to fetch providers:"
139 | }
140 | },
141 | "pages": {
142 | "Design": {},
143 | "Env": {
144 | "savedSuccessfully": "Saved successfully",
145 | "requiredApiKeys": "Required API Keys",
146 | "requiredApiKeysInfo": "To use this app, the following API keys are required. The keys are stored on your device's local storage."
147 | },
148 | "Page": {
149 | "notPartOfProject": "You are not part of this project."
150 | },
151 | "ResumeButton": {
152 | "resumeChat": "Resume Chat"
153 | }
154 | }
155 | }
--------------------------------------------------------------------------------
/.chainlit/translations/pt-BR.json:
--------------------------------------------------------------------------------
1 | {
2 | "components": {
3 | "atoms": {
4 | "buttons": {
5 | "userButton": {
6 | "menu": {
7 | "settings": "Configura\u00e7\u00f5es",
8 | "settingsKey": "S",
9 | "APIKeys": "Chaves de API",
10 | "logout": "Sair"
11 | }
12 | }
13 | }
14 | },
15 | "molecules": {
16 | "newChatButton": {
17 | "newChat": "Nova Conversa"
18 | },
19 | "tasklist": {
20 | "TaskList": {
21 | "title": "\ud83d\uddd2\ufe0f Lista de Tarefas",
22 | "loading": "Carregando...",
23 | "error": "Ocorreu um erro"
24 | }
25 | },
26 | "attachments": {
27 | "cancelUpload": "Cancelar envio",
28 | "removeAttachment": "Remover anexo"
29 | },
30 | "newChatDialog": {
31 | "createNewChat": "Criar novo chat?",
32 | "clearChat": "Isso limpar\u00e1 as mensagens atuais e iniciar\u00e1 uma nova conversa.",
33 | "cancel": "Cancelar",
34 | "confirm": "Confirmar"
35 | },
36 | "settingsModal": {
37 | "expandMessages": "Expandir Mensagens",
38 | "hideChainOfThought": "Esconder Sequ\u00eancia de Pensamento",
39 | "darkMode": "Modo Escuro"
40 | }
41 | },
42 | "organisms": {
43 | "chat": {
44 | "history": {
45 | "index": {
46 | "lastInputs": "\u00daltimas Entradas",
47 | "noInputs": "Vazio...",
48 | "loading": "Carregando..."
49 | }
50 | },
51 | "inputBox": {
52 | "input": {
53 | "placeholder": "Digite sua mensagem aqui..."
54 | },
55 | "speechButton": {
56 | "start": "Iniciar grava\u00e7\u00e3o",
57 | "stop": "Parar grava\u00e7\u00e3o"
58 | },
59 | "SubmitButton": {
60 | "sendMessage": "Enviar mensagem",
61 | "stopTask": "Parar Tarefa"
62 | },
63 | "UploadButton": {
64 | "attachFiles": "Anexar arquivos"
65 | },
66 | "waterMark": {
67 | "text": "Constru\u00eddo com"
68 | }
69 | },
70 | "Messages": {
71 | "index": {
72 | "running": "Executando",
73 | "executedSuccessfully": "executado com sucesso",
74 | "failed": "falhou",
75 | "feedbackUpdated": "Feedback atualizado",
76 | "updating": "Atualizando"
77 | }
78 | },
79 | "dropScreen": {
80 | "dropYourFilesHere": "Solte seus arquivos aqui"
81 | },
82 | "index": {
83 | "failedToUpload": "Falha ao enviar",
84 | "cancelledUploadOf": "Envio cancelado de",
85 | "couldNotReachServer": "N\u00e3o foi poss\u00edvel conectar ao servidor",
86 | "continuingChat": "Continuando o chat anterior"
87 | },
88 | "settings": {
89 | "settingsPanel": "Painel de Configura\u00e7\u00f5es",
90 | "reset": "Redefinir",
91 | "cancel": "Cancelar",
92 | "confirm": "Confirmar"
93 | }
94 | },
95 | "threadHistory": {
96 | "sidebar": {
97 | "filters": {
98 | "FeedbackSelect": {
99 | "feedbackAll": "Feedback: Todos",
100 | "feedbackPositive": "Feedback: Positivo",
101 | "feedbackNegative": "Feedback: Negativo"
102 | },
103 | "SearchBar": {
104 | "search": "Buscar"
105 | }
106 | },
107 | "DeleteThreadButton": {
108 | "confirmMessage": "Isso deletar\u00e1 a conversa, assim como suas mensagens e elementos.",
109 | "cancel": "Cancelar",
110 | "confirm": "Confirmar",
111 | "deletingChat": "Deletando conversa",
112 | "chatDeleted": "Conversa deletada"
113 | },
114 | "index": {
115 | "pastChats": "Conversas Anteriores"
116 | },
117 | "ThreadList": {
118 | "empty": "Vazio..."
119 | },
120 | "TriggerButton": {
121 | "closeSidebar": "Fechar barra lateral",
122 | "openSidebar": "Abrir barra lateral"
123 | }
124 | },
125 | "Thread": {
126 | "backToChat": "Voltar para a conversa",
127 | "chatCreatedOn": "Esta conversa foi criada em"
128 | }
129 | },
130 | "header": {
131 | "chat": "Conversa",
132 | "readme": "Leia-me"
133 | }
134 | },
135 | "hooks": {
136 | "useLLMProviders": {
137 | "failedToFetchProviders": "Falha ao buscar provedores:"
138 | }
139 | },
140 | "pages": {
141 | "Design": {},
142 | "Env": {
143 | "savedSuccessfully": "Salvo com sucesso",
144 | "requiredApiKeys": "Chaves de API necess\u00e1rias",
145 | "requiredApiKeysInfo": "Para usar este aplicativo, as seguintes chaves de API s\u00e3o necess\u00e1rias. As chaves s\u00e3o armazenadas localmente em seu dispositivo."
146 | },
147 | "Page": {
148 | "notPartOfProject": "Voc\u00ea n\u00e3o faz parte deste projeto."
149 | },
150 | "ResumeButton": {
151 | "resumeChat": "Continuar Conversa"
152 | }
153 | }
154 | }
155 | }
--------------------------------------------------------------------------------