├── .gitignore
├── README.md
├── assets
│   └── img
│       └── demo-cover.png
├── backend
│   ├── .gitignore
│   ├── README.md
│   ├── app
│   │   ├── __init__.py
│   │   ├── api
│   │   │   ├── __init__.py
│   │   │   └── routers
│   │   │       ├── __init__.py
│   │   │       ├── chat.py
│   │   │       └── index.py
│   │   ├── conn_llm.py
│   │   ├── context.py
│   │   ├── engine
│   │   │   ├── __init__.py
│   │   │   ├── agents.py
│   │   │   ├── constants.py
│   │   │   ├── generate.py
│   │   │   ├── index.py
│   │   │   ├── loader.py
│   │   │   ├── query_tools.py
│   │   │   └── utils.py
│   │   ├── public
│   │   │   ├── 404.html
│   │   │   ├── favicon.ico
│   │   │   ├── index.html
│   │   │   ├── index.txt
│   │   │   └── llama.png
│   │   ├── static
│   │   │   └── _next
│   │   │       └── static
│   │   │           ├── a0F5iAmesR9XNB9QY5ZMN
│   │   │           │   ├── _buildManifest.js
│   │   │           │   └── _ssgManifest.js
│   │   │           ├── chunks
│   │   │           │   ├── 69-2dd335a42df48c81.js
│   │   │           │   ├── 77-6cbd386b7cb57e62.js
│   │   │           │   ├── app
│   │   │           │   │   ├── _not-found-aeb74ca2b03c8470.js
│   │   │           │   │   ├── layout-386c8c158172a3ec.js
│   │   │           │   │   └── page-887ea91a8c950aff.js
│   │   │           │   ├── fd9d1056-16b896b82b5ac953.js
│   │   │           │   ├── framework-b370f160bb96059c.js
│   │   │           │   ├── main-app-1444d65fd19930be.js
│   │   │           │   ├── main-f1b04c206ba955e4.js
│   │   │           │   ├── pages
│   │   │           │   │   ├── _app-d21e88acd55d90f1.js
│   │   │           │   │   └── _error-d6107f1aac0c574c.js
│   │   │           │   ├── polyfills-c67a75d1b6f99dc8.js
│   │   │           │   └── webpack-d074f59e6fb5743e.js
│   │   │           ├── css
│   │   │           │   └── 3ffa7081b01d7ba0.css
│   │   │           └── media
│   │   │               ├── 05a31a2ca4975f99-s.woff2
│   │   │               ├── 513657b02c5c193f-s.woff2
│   │   │               ├── 51ed15f9841b9f9d-s.woff2
│   │   │               ├── c9a5bc6a7c948fb0-s.p.woff2
│   │   │               ├── d6b16ce4a6175f26-s.woff2
│   │   │               ├── ec159349637c90ad-s.woff2
│   │   │               └── fd4db3eb5472fc27-s.woff2
│   │   └── storage.py
│   ├── config.json
│   ├── main.py
│   ├── refresh_index.py
│   └── tests
│       └── __init__.py
├── dist
│   ├── app-0.1.0-py3-none-any.whl
│   ├── app-0.1.0.tar.gz
│   ├── docker-entrypoint.sh
│   └── requirements.txt
├── dockerfile
├── dockerfile-amd64
├── frontend
│   ├── .gitignore
│   ├── README.md
│   ├── app
│   │   ├── components
│   │   │   ├── chat-section.tsx
│   │   │   ├── header.tsx
│   │   │   ├── transform.ts
│   │   │   └── ui
│   │   │       ├── README.md
│   │   │       ├── button.tsx
│   │   │       ├── chat
│   │   │       │   ├── chat-actions.tsx
│   │   │       │   ├── chat-avatar.tsx
│   │   │       │   ├── chat-input.tsx
│   │   │       │   ├── chat-message.tsx
│   │   │       │   ├── chat-messages.tsx
│   │   │       │   ├── chat.interface.ts
│   │   │       │   ├── codeblock.tsx
│   │   │       │   ├── index.ts
│   │   │       │   ├── markdown.tsx
│   │   │       │   └── use-copy-to-clipboard.tsx
│   │   │       ├── file-uploader.tsx
│   │   │       ├── input.tsx
│   │   │       ├── lib
│   │   │       │   └── utils.ts
│   │   │       └── upload-image-preview.tsx
│   │   ├── favicon.ico
│   │   ├── globals.css
│   │   ├── layout.tsx
│   │   └── page.tsx
│   ├── next.config.js
│   ├── package-lock.json
│   ├── package.json
│   ├── postcss.config.js
│   ├── public
│   │   └── llama.png
│   ├── tailwind.config.ts
│   └── tsconfig.json
├── poetry.lock
└── pyproject.toml

/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | storage
3 | .env
4 | chromadb
5 | cache_folder
6 | .venv
7 | .vscode
8 | 
9 | 
10 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
11 | 
12 | # dependencies
13 | /node_modules
14 | /.pnp
15 | .pnp.js
16 | 
17 | # testing
18 | /coverage
19 | 
20 | # next.js
21 | /.next/
22 | /out/
23 | 
24 | # production
25 | /build
26 | 
27 | # misc
28 | .DS_Store
29 | *.pem
30 | 
31 | # debug
32 | npm-debug.log*
33 | yarn-debug.log*
34 | yarn-error.log*
35 | 
36 | # local env files
37 | .env*.local
38 | 
39 | # vercel
40 | .vercel
41 | 
42 | # typescript
43 | *.tsbuildinfo
44 | next-env.d.ts
45 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | This is a [LlamaIndex](https://www.llamaindex.ai/) project using [FastAPI](https://fastapi.tiangolo.com/) bootstrapped with [`create-llama`](https://github.com/run-llama/LlamaIndexTS/tree/main/packages/create-llama).
2 | 
3 | **Demo**
4 | 
5 | 
6 | 
7 | ## Getting Started
8 | ### Quickstart
9 | #### Option 1) Clone the project and set it up yourself
10 | ```bash
11 | git clone https://github.com/thisishugow/create-llama-ollama.git
12 | cd create-llama-ollama
13 | poetry install
14 | ```
15 | Download and install `Ollama` ([guide](https://ollama.com)).
16 | ```bash
17 | # After Ollama is installed:
18 | ollama pull llama3
19 | ollama serve &
20 | ```
21 | 
22 | Start the app:
23 | ```bash
24 | # run from the repo root (dependencies were installed in the clone step above)
25 | poetry run python ./backend/main.py -c ./backend/config.json  # supply your own configuration here
26 | ```
27 | Then visit [http://localhost:8080](http://localhost:8080) with your browser to see the result.
28 | 
29 | #### Option 2) Docker container
30 | 
31 | ```bash
32 | # for Windows WSL2/Linux
33 | docker run --name my-offline-llama -p 8080:8080 thisisyuwang/create-llama-ollama:latest-linux-amd64
34 | 
35 | # for Mac arm64
36 | docker run --name my-offline-llama -p 8080:8080 thisisyuwang/create-llama-ollama:latest-arm64
37 | ```
38 | > ⚠️ WARNING: The first run may take several minutes while the LLM is downloaded.
39 | 
40 | Then visit [http://localhost:8080](http://localhost:8080) with your browser to see the result.
41 | 
42 | 
43 | 
44 | ### For Development
45 | First, start the backend as described in the [backend README](./backend/README.md).
46 | 
47 | Second, run the development server of the frontend as described in the [frontend README](./frontend/README.md).
48 | 
49 | Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
50 | 
51 | To work on the backend directly: first, set up the environment:
52 | 
53 | ```
54 | poetry install
55 | poetry shell
56 | ```
57 | 
58 | This project replaces the default OpenAI LLM with Ollama plus a local HuggingFace embedding model (see `app/conn_llm.py`), so no `OPENAI_API_KEY` is needed. If you customize `app/context.py` to use OpenAI instead, specify an `OPENAI_API_KEY` in an `.env` file in this directory.
59 | 
60 | Example `.env` file:
61 | 
62 | ```
63 | OPENAI_API_KEY=
64 | ```
65 | 
66 | Second, generate the embeddings of the documents in the `./data` directory (if this folder exists - otherwise, skip this step):
67 | 
68 | ```
69 | python app/engine/generate.py
70 | ```
71 | 
72 | Third, run the development server:
73 | 
74 | ```
75 | python main.py
76 | ```
77 | 
78 | Then call the API endpoint `/api/chat` to see the result:
79 | 
80 | ```
81 | curl --location 'localhost:8000/api/chat' \
82 | --header 'Content-Type: application/json' \
83 | --data '{ "messages": [{ "role": "user", "content": "Hello" }] }'
84 | ```
85 | 
86 | You can start editing the API by modifying `app/api/routers/chat.py`. The endpoint auto-updates as you save the file.
87 | 
88 | Open [http://localhost:8000/docs](http://localhost:8000/docs) with your browser to see the Swagger UI of the API.
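For reference, here is the same call from Python. This is a minimal sketch, assuming the `requests` package is installed and the development server is on its default port 8000; the endpoint streams the answer back as plain text:

```python
# Minimal streaming client for POST /api/chat (sketch; assumes `requests`).
import requests

payload = {"messages": [{"role": "user", "content": "Hello"}]}
with requests.post(
    "http://localhost:8000/api/chat",
    json=payload,
    stream=True,  # the backend replies with a text/plain StreamingResponse
) as resp:
    resp.raise_for_status()
    for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
        print(chunk, end="", flush=True)
```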
89 | 
90 | The API allows CORS for all origins to simplify development. You can change this behavior by setting the `ENVIRONMENT` environment variable to `prod`:
91 | 
92 | ```
93 | ENVIRONMENT=prod uvicorn main:app
94 | ```
95 | 
96 | ## Learn More
97 | 
98 | To learn more about LlamaIndex, take a look at the following resources:
99 | 
100 | - [LlamaIndex Documentation](https://docs.llamaindex.ai) - learn about LlamaIndex.
101 | - [LlamaIndexTS Documentation](https://ts.llamaindex.ai) - learn about LlamaIndex (TypeScript features).
102 | 
103 | You can check out [the LlamaIndex GitHub repository](https://github.com/run-llama/llama_index) - your feedback and contributions are welcome!
104 | 
--------------------------------------------------------------------------------
/assets/img/demo-cover.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thisishugow/create-llama-ollama/b45eee69b27ace1dcfe32609586f3d47f29faf0a/assets/img/demo-cover.png
--------------------------------------------------------------------------------
/backend/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | storage
3 | .env
4 | 
--------------------------------------------------------------------------------
/backend/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thisishugow/create-llama-ollama/b45eee69b27ace1dcfe32609586f3d47f29faf0a/backend/README.md
--------------------------------------------------------------------------------
/backend/app/__init__.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | 
4 | from llama_index.core import Settings
5 | from app.conn_llm import get_sys_embedding, get_sys_llm
6 | from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP
7 | from app.engine.utils import read_json_config
8 | 
9 | 
10 | 
11 | def get_args():
12 |     parser = argparse.ArgumentParser()
13 |     parser.add_argument(
14 |         "-c",
15 |         "--config",
16 |         help="Config path",
17 |         type=str,
18 |         required=True,
19 |     )
20 |     args = parser.parse_args()
21 |     return args
22 | 
23 | 
24 | if "CREATE_LLAMA_APP_CONF" not in os.environ:
25 |     args = get_args()
26 |     os.environ['CREATE_LLAMA_APP_CONF'] = args.config
27 | 
28 | # the env var is guaranteed to be set at this point, so always read the config
29 | conf = read_json_config()
30 | 
31 | Settings.llm = get_sys_llm()
32 | Settings.embed_model = get_sys_embedding()
33 | Settings.chunk_size = conf.get('chunk_size', CHUNK_SIZE)
34 | Settings.chunk_overlap = conf.get('chunk_overlap', CHUNK_OVERLAP)
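The bootstrap above resolves its settings from the JSON file passed via `-c`/`--config` or the `CREATE_LLAMA_APP_CONF` environment variable. Gathering the `conf.get()` keys used across `app/` (here plus `conn_llm.py`, `engine/generate.py`, `engine/loader.py`, and `engine/utils.py`) suggests a config of roughly this shape. A sketch only: the key names come from the code, the values are illustrative:

```python
# Sketch: write a config.json covering the keys read via read_json_config().
# Values below are examples, not project defaults beyond those visible in code.
import json

config = {
    "llm": "llama3",  # Ollama model name (conn_llm.get_sys_llm)
    "embedding": "sentence-transformers/all-mpnet-base-v2",  # HF model (conn_llm)
    "cache_folder": "cache_folder",  # HuggingFaceEmbedding cache directory
    "chunk_size": 1024,  # falls back to engine.constants.CHUNK_SIZE
    "chunk_overlap": 20,  # falls back to engine.constants.CHUNK_OVERLAP
    "data_dir": "data",  # falls back to engine.constants.DATA_DIR
    # "pgvector_schema" / "pgvector_table" apply only to the PGVector path.
}

with open("config.json", "w") as f:
    json.dump(config, f, indent=2)
```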
--------------------------------------------------------------------------------
/backend/app/api/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thisishugow/create-llama-ollama/b45eee69b27ace1dcfe32609586f3d47f29faf0a/backend/app/api/__init__.py
--------------------------------------------------------------------------------
/backend/app/api/routers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thisishugow/create-llama-ollama/b45eee69b27ace1dcfe32609586f3d47f29faf0a/backend/app/api/routers/__init__.py
--------------------------------------------------------------------------------
/backend/app/api/routers/chat.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 | 
3 | from fastapi.responses import StreamingResponse
4 | from llama_index.core.chat_engine.types import BaseChatEngine
5 | 
6 | from app.engine import get_chat_engine
7 | 
8 | from fastapi import APIRouter, Depends, HTTPException, Request, status
9 | from llama_index.core.llms import ChatMessage
10 | from llama_index.core.types import MessageRole
11 | from pydantic import BaseModel
12 | 
13 | chat_router = r = APIRouter()
14 | 
15 | 
16 | class _Message(BaseModel):
17 |     role: MessageRole
18 |     content: str
19 | 
20 | 
21 | class _ChatData(BaseModel):
22 |     messages: List[_Message]
23 | 
24 | 
25 | @r.post("")
26 | async def chat(
27 |     request: Request,
28 |     data: _ChatData,
29 |     chat_engine: BaseChatEngine = Depends(get_chat_engine),
30 | ):
31 |     # check preconditions and get last message
32 |     if len(data.messages) == 0:
33 |         raise HTTPException(
34 |             status_code=status.HTTP_400_BAD_REQUEST,
35 |             detail="No messages provided",
36 |         )
37 |     last_message = data.messages.pop()
38 |     if last_message.role != MessageRole.USER:
39 |         raise HTTPException(
40 |             status_code=status.HTTP_400_BAD_REQUEST,
41 |             detail="Last message must be from user",
42 |         )
43 |     # convert messages coming from the request to type ChatMessage
44 |     messages = [
45 |         ChatMessage(
46 |             role=m.role,
47 |             content=m.content,
48 |         )
49 |         for m in data.messages
50 |     ]
51 |     # query chat engine
52 |     # query_engine = get_compound_chat_engine()
53 |     response = await chat_engine.astream_chat(last_message.content, messages)
54 | 
55 |     # stream response
56 |     async def event_generator():
57 |         async for token in response.async_response_gen():
58 |             # If client closes connection, stop sending events
59 |             if await request.is_disconnected():
60 |                 break
61 |             yield token
62 | 
63 |     return StreamingResponse(event_generator(), media_type="text/plain")
64 | 
--------------------------------------------------------------------------------
/backend/app/api/routers/index.py:
--------------------------------------------------------------------------------
1 | import app
2 | 
3 | from fastapi import APIRouter, Request
4 | from fastapi.responses import HTMLResponse, FileResponse
5 | from fastapi.staticfiles import StaticFiles
6 | from fastapi.templating import Jinja2Templates
7 | import os
8 | 
9 | LIB_ROOT: os.PathLike = os.path.dirname(os.path.realpath(app.__file__))
10 | router = APIRouter()
11 | templates = Jinja2Templates(directory=os.path.join(LIB_ROOT, 'public'))
12 | # serve the exported Next.js build that ships inside the package
13 | router.mount("/_next", StaticFiles(directory=os.path.join(LIB_ROOT, 'static/_next/')))
14 | router.mount("/static", StaticFiles(directory=os.path.join(LIB_ROOT, 'static')))
15 | 
16 | 
17 | @router.get("/", response_class=HTMLResponse)
18 | async def homepage(request: Request):
19 |     return templates.TemplateResponse("index.html", context={"request": request})
20 | 
21 | 
22 | @router.get("/llama.png")
23 | async def get_llama_png(request: Request):
24 |     logo_path = os.path.join(LIB_ROOT, 'public', "llama.png")
25 |     return FileResponse(path=logo_path, media_type="image/png")
26 | 
27 | 
28 | @router.get("/favicon.ico")
29 | async def get_favicon(request: Request):
30 |     favicon_path = os.path.join(LIB_ROOT, 'public', "favicon.ico")
31 |     return FileResponse(path=favicon_path, media_type="image/vnd.microsoft.icon")
--------------------------------------------------------------------------------
/backend/app/conn_llm.py:
--------------------------------------------------------------------------------
1 | import os
2 | from logging import getLogger
3 | from llama_index.llms.ollama import Ollama
4 | from transformers import AutoTokenizer
5 | from llama_index.embeddings.huggingface import HuggingFaceEmbedding
6 | from llama_index.llms.langchain import LangChainLLM
7 | from llama_index.core.chat_engine.types import BaseChatEngine
8 | from langchain.chat_models.ollama import ChatOllama
9 | 
10 | from app.engine.utils import read_json_config
11 | 
12 | 
13 | 
14 | 
15 | conf: dict = read_json_config()
16 | log = getLogger('uvicorn')
17 | EMBEDDING_MODEL: str = "sentence-transformers/all-mpnet-base-v2"
18 | HF_TOKEN: str = os.environ.get('HF_TOKEN', '')
19 | hf_token_to_log: str = ''
20 | if HF_TOKEN:
21 |     hf_token_to_log = f"{HF_TOKEN[0:10]}*********"  # never log the full token
22 | 
23 | 
24 | def get_sys_tokenizer():
25 |     conf: dict = read_json_config()
26 |     log.debug(f'You are using HF_TOKEN: {hf_token_to_log}')
27 |     tokenizer = AutoTokenizer.from_pretrained(
28 |         conf.get('embedding', EMBEDDING_MODEL),
29 |         token=HF_TOKEN or None,  # from_pretrained() accepts the token directly
30 |     )
31 |     return tokenizer
32 | 
33 | 
34 | def get_sys_llm() -> LangChainLLM:
35 |     conf: dict = read_json_config()
36 |     # sys_llm: Ollama = Ollama(model=conf.get('llm', "mistral"))
37 |     sys_llm = LangChainLLM(ChatOllama(model=conf.get('llm', "llama3")))
38 |     return sys_llm
39 | 
40 | 
41 | def get_sys_embedding() -> HuggingFaceEmbedding:
42 |     conf: dict = read_json_config()
43 |     tokenizer = get_sys_tokenizer()
44 |     sys_embedding: HuggingFaceEmbedding = HuggingFaceEmbedding(
45 |         model_name=conf.get('embedding', EMBEDDING_MODEL),
46 |         tokenizer=tokenizer,
47 |         cache_folder=conf.get("cache_folder", "cache_folder"),
48 |         max_length=512,
49 |         # device="mps",
50 |     )
51 |     return sys_embedding
52 | 
--------------------------------------------------------------------------------
/backend/app/context.py:
--------------------------------------------------------------------------------
1 | import os
2 | 
3 | from llama_index.llms.openai import OpenAI
4 | from llama_index.core import Settings, ServiceContext
5 | 
6 | 
7 | def create_base_context():
8 |     # model = os.getenv("MODEL", "gpt-3.5-turbo")
9 |     # return ServiceContext.from_defaults(
10 |     #     llm=OpenAI(model=model),
11 |     # )
12 |     return ServiceContext.from_defaults(llm=Settings.llm, embed_model=Settings.embed_model)
--------------------------------------------------------------------------------
/backend/app/engine/__init__.py:
--------------------------------------------------------------------------------
1 | from llama_index.core.chat_engine.types import BaseChatEngine
2 | from llama_index.core.chat_engine import CondensePlusContextChatEngine
3 | from llama_index.core.agent import ReActAgent
4 | from app.engine.index import get_index
5 | from app.engine.query_tools import get_tools
6 | 
7 | 
8 | def get_chat_engine() -> BaseChatEngine:
9 |     chat_engine = get_index().as_chat_engine(
10 |         similarity_top_k=3,
11 |         chat_mode="condense_plus_context",
12 |     )
13 |     return chat_engine
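`get_chat_engine()` above imports `ReActAgent` and `get_tools` without using them, and `chat.py` keeps a commented-out `get_compound_chat_engine()` call, which hints at an agent-based variant. A sketch of that wiring, assuming llama_index's `ReActAgent.from_tools` API rather than anything this project actually ships:

```python
# Hypothetical agent-style alternative to get_chat_engine() (not project code).
from llama_index.core.agent import ReActAgent
from llama_index.core.chat_engine.types import BaseChatEngine

from app.engine.query_tools import get_tools


def get_agent_chat_engine() -> BaseChatEngine:
    # ReActAgent exposes the chat-engine interface chat.py relies on
    # (chat/astream_chat), so it could be swapped in via Depends().
    return ReActAgent.from_tools(get_tools(), verbose=True)
```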
--------------------------------------------------------------------------------
/backend/app/engine/agents.py:
--------------------------------------------------------------------------------
1 | from llama_index.core.selectors import LLMSingleSelector
2 | from llama_index.core.query_engine import RouterQueryEngine
3 | from llama_index.core.agent import ReActAgent
4 | from app.engine.index import get_index
5 | from app.engine.query_tools import get_tools
6 | 
7 | 
8 | def get_compound_chat_engine():
9 |     chat_engine = RouterQueryEngine(
10 |         selector=LLMSingleSelector.from_defaults(),
11 |         query_engine_tools=get_tools(),
12 |     )
13 |     return chat_engine
14 | 
--------------------------------------------------------------------------------
/backend/app/engine/constants.py:
--------------------------------------------------------------------------------
1 | DATA_DIR = "data"  # directory containing the documents to index
2 | CHUNK_SIZE = 1024
3 | CHUNK_OVERLAP = 20
4 | PGVECTOR_SCHEMA = "public"
5 | PGVECTOR_TABLE = "llamaindex_embedding"
--------------------------------------------------------------------------------
/backend/app/engine/generate.py:
--------------------------------------------------------------------------------
1 | from typing import Literal
2 | from dotenv import load_dotenv
3 | 
4 | load_dotenv()
5 | import logging
6 | 
7 | from app.engine.utils import init_pg_vector_store_from_env, read_json_config
8 | from app.engine.loader import get_documents
9 | from app.storage import chroma_vector_store
10 | from app.conn_llm import get_sys_embedding, get_sys_llm
11 | from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP
12 | 
13 | from llama_index.core import (
14 |     VectorStoreIndex,
15 |     StorageContext,
16 |     Settings,
17 | )
18 | 
19 | logging.basicConfig(level=logging.INFO)
20 | logger = logging.getLogger()
21 | 
22 | 
23 | def generate_datasource(from_: Literal["postgres", "chroma"] = 'chroma'):
24 |     conf = read_json_config()
25 |     Settings.llm = get_sys_llm()
26 |     Settings.embed_model = get_sys_embedding()
27 |     Settings.chunk_size = conf.get('chunk_size', CHUNK_SIZE)
28 |     Settings.chunk_overlap = conf.get('chunk_overlap', CHUNK_OVERLAP)
29 |     logger.info("Creating new index")
30 |     # load the documents and create the index
31 |     documents = get_documents()
32 | 
33 |     if from_ == 'postgres':
34 |         logger.info("Connecting to index from PGVector...")
35 |         store = init_pg_vector_store_from_env()
36 |     elif from_ == 'chroma':
37 |         logger.info("Connecting to index from ChromaDB...")
38 |         store = chroma_vector_store
39 |     else:
40 |         raise ValueError(f"arg from_ must be in ['postgres', 'chroma']. Found='{from_}'")
41 | 
42 |     storage_context = StorageContext.from_defaults(vector_store=store)
43 |     VectorStoreIndex.from_documents(
44 |         documents,
45 |         storage_context=storage_context,
46 |         show_progress=True,  # this will show you a progress bar as the embeddings are created
47 |     )
48 |     logger.info("Successfully created embeddings in the vector store")
49 | 
50 | 
51 | if __name__ == "__main__":
52 |     generate_datasource()
53 | 
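Combined with `index.py` below, a quick end-to-end smoke test could look like the following. A sketch under several assumptions: Ollama is running, documents exist in `./data`, `config.json` is valid, and the environment variable is set before `app` is imported (since `app/__init__.py` reads it at import time):

```python
# Sketch: embed ./data into the vector store, then run one query against it.
import os

os.environ.setdefault("CREATE_LLAMA_APP_CONF", "config.json")  # before importing app

from app.engine.generate import generate_datasource
from app.engine.index import get_index

generate_datasource()  # defaults to the ChromaDB store
answer = get_index().as_query_engine().query("What do these documents cover?")
print(answer)
```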
Found='{from_}'") 44 | 45 | storage_context = StorageContext.from_defaults(vector_store=store) 46 | VectorStoreIndex.from_documents( 47 | documents, 48 | storage_context=storage_context, 49 | show_progress=True, # this will show you a progress bar as the embeddings are created 50 | ) 51 | logger.info( 52 | f"Successfully created embeddings in the vector store" 53 | ) 54 | 55 | 56 | if __name__ == "__main__": 57 | generate_datasource() 58 | -------------------------------------------------------------------------------- /backend/app/engine/index.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Literal 3 | from llama_index.core import ( 4 | VectorStoreIndex, 5 | ) 6 | from app.engine.utils import init_pg_vector_store_from_env 7 | from app.storage import chroma_vector_store 8 | 9 | def get_index(from_:Literal["postgres", "chroma"]="chroma"): 10 | logger = logging.getLogger("uvicorn") 11 | if from_ == 'postgres': 12 | logger.info("Connecting to index from PGVector...") 13 | store = init_pg_vector_store_from_env() 14 | elif from_ == 'chroma': 15 | logger.info("Connecting to index from ChromaDB...") 16 | store = chroma_vector_store 17 | else: 18 | ValueError(f"arg from_ must be in ['postgres', 'chroma']. Found='{from_}'") 19 | index = VectorStoreIndex.from_vector_store(store) 20 | logger.info("Finished connecting to index from Vector Store.") 21 | return index 22 | -------------------------------------------------------------------------------- /backend/app/engine/loader.py: -------------------------------------------------------------------------------- 1 | import os 2 | from app.engine.constants import DATA_DIR 3 | from app.engine.utils import read_json_config 4 | from llama_index.core import VectorStoreIndex, download_loader 5 | from llama_index.core import SimpleDirectoryReader 6 | conf = read_json_config() 7 | def get_documents(): 8 | return SimpleDirectoryReader(conf.get('data_dir', DATA_DIR)).load_data() 9 | -------------------------------------------------------------------------------- /backend/app/engine/query_tools.py: -------------------------------------------------------------------------------- 1 | from llama_index.core.tools import QueryEngineTool, ToolMetadata 2 | from app.engine.index import get_index 3 | 4 | 5 | def get_tools()->list[QueryEngineTool]: 6 | query_engine = get_index().as_query_engine() 7 | tools: list[QueryEngineTool] = [ 8 | QueryEngineTool( 9 | query_engine=query_engine, 10 | metadata=ToolMetadata( 11 | name='local-knowledge-base-retriever', 12 | description=( 13 | "It is useful when the given question cannot be answered in current chat context." 14 | "This retriever will query the related information from the local vector store and response." 15 | ), 16 | ) 17 | ), 18 | QueryEngineTool( 19 | query_engine=query_engine, 20 | metadata=ToolMetadata( 21 | name='my-good-friend', 22 | description=( 23 | "When the question cannot be found in the context, " 24 | "then just make a causal talk with user, just like his/her best friend." 
25 |                 ),
26 |             )
27 |         ),
28 |     ]
29 |     return tools
--------------------------------------------------------------------------------
/backend/app/engine/utils.py:
--------------------------------------------------------------------------------
1 | import os, json
2 | from llama_index.vector_stores.postgres import PGVectorStore
3 | from urllib.parse import urlparse
4 | from app.engine.constants import PGVECTOR_SCHEMA, PGVECTOR_TABLE
5 | 
6 | 
7 | def init_pg_vector_store_from_env():
8 |     original_conn_string = os.environ.get("PG_CONNECTION_STRING")
9 |     if original_conn_string is None or original_conn_string == "":
10 |         raise ValueError("PG_CONNECTION_STRING environment variable is not set.")
11 | 
12 |     # The PGVectorStore requires two connection strings, one for psycopg2 and one for asyncpg
13 |     # Update the configured scheme with the psycopg2 and asyncpg schemes
14 |     original_scheme = urlparse(original_conn_string).scheme + "://"
15 |     conn_string = original_conn_string.replace(
16 |         original_scheme, "postgresql+psycopg2://"
17 |     )
18 |     async_conn_string = original_conn_string.replace(
19 |         original_scheme, "postgresql+asyncpg://"
20 |     )
21 |     conf = read_json_config()
22 |     return PGVectorStore(
23 |         connection_string=conn_string,
24 |         async_connection_string=async_conn_string,
25 |         schema_name=conf.get('pgvector_schema', PGVECTOR_SCHEMA),
26 |         table_name=conf.get('pgvector_table', PGVECTOR_TABLE),
27 |     )
28 | 
29 | 
30 | def read_json_config(env: str = 'CREATE_LLAMA_APP_CONF') -> dict:
31 |     fp: os.PathLike = os.environ.get(env, None)
32 |     if fp is None:
33 |         return {}
34 |     with open(fp, 'r') as f:
35 |         res: dict = json.load(f)
36 |     return res
--------------------------------------------------------------------------------
/backend/app/public/404.html:
--------------------------------------------------------------------------------
1 | [minified Next.js export; markup lost in extraction. Visible text: "404: This page could not be found." / "Create Llama App"]
--------------------------------------------------------------------------------
/backend/app/public/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thisishugow/create-llama-ollama/b45eee69b27ace1dcfe32609586f3d47f29faf0a/backend/app/public/favicon.ico
--------------------------------------------------------------------------------
/backend/app/public/index.html:
--------------------------------------------------------------------------------
1 | [minified Next.js export; markup lost in extraction. Visible text: "Create Llama App" / "Get started by editing app/page.tsx"]
-------------------------------------------------------------------------------- /backend/app/public/index.txt: -------------------------------------------------------------------------------- 1 | 2:I[81749,["77","static/chunks/77-6cbd386b7cb57e62.js","931","static/chunks/app/page-887ea91a8c950aff.js"],"Image"] 2 | 3:I[7293,["77","static/chunks/77-6cbd386b7cb57e62.js","931","static/chunks/app/page-887ea91a8c950aff.js"],""] 3 | 4:I[5613,[],""] 4 | 5:I[31778,[],""] 5 | 0:["a0F5iAmesR9XNB9QY5ZMN",[[["",{"children":["__PAGE__",{}]},"$undefined","$undefined",true],["",{"children":["__PAGE__",{},["$L1",["$","main",null,{"className":"flex min-h-screen flex-col items-center gap-10 p-24 background-gradient","children":[["$","div",null,{"className":"z-10 max-w-5xl w-full items-center justify-between font-mono text-sm lg:flex","children":[["$","p",null,{"className":"fixed left-0 top-0 flex w-full justify-center border-b border-gray-300 bg-gradient-to-b from-zinc-200 pb-6 pt-8 backdrop-blur-2xl dark:border-neutral-800 dark:bg-zinc-800/30 dark:from-inherit lg:static lg:w-auto lg:rounded-xl lg:border lg:bg-gray-200 lg:p-4 lg:dark:bg-zinc-800/30","children":["Get started by editing ",["$","code",null,{"className":"font-mono font-bold","children":"app/page.tsx"}]]}],["$","div",null,{"className":"fixed bottom-0 left-0 flex h-48 w-full items-end justify-center bg-gradient-to-t from-white via-white dark:from-black dark:via-black lg:static lg:h-auto lg:w-auto lg:bg-none","children":["$","a",null,{"href":"https://www.llamaindex.ai/","className":"flex items-center justify-center font-nunito text-lg font-bold gap-2","children":[["$","span",null,{"children":"Built by LlamaIndex"}],["$","$L2",null,{"className":"rounded-xl","src":"/llama.png","alt":"Llama Logo","width":40,"height":40,"priority":true}]]}]}]]}],["$","$L3",null,{}]]}],null]]},[null,["$","html",null,{"lang":"en","children":["$","body",null,{"className":"__className_aaf875","children":["$","$L4",null,{"parallelRouterKey":"children","segmentPath":["children"],"loading":"$undefined","loadingStyles":"$undefined","loadingScripts":"$undefined","hasLoading":false,"error":"$undefined","errorStyles":"$undefined","errorScripts":"$undefined","template":["$","$L5",null,{}],"templateStyles":"$undefined","templateScripts":"$undefined","notFound":[["$","title",null,{"children":"404: This page could not be found."}],["$","div",null,{"style":{"fontFamily":"system-ui,\"Segoe UI\",Roboto,Helvetica,Arial,sans-serif,\"Apple Color Emoji\",\"Segoe UI Emoji\"","height":"100vh","textAlign":"center","display":"flex","flexDirection":"column","alignItems":"center","justifyContent":"center"},"children":["$","div",null,{"children":[["$","style",null,{"dangerouslySetInnerHTML":{"__html":"body{color:#000;background:#fff;margin:0}.next-error-h1{border-right:1px solid rgba(0,0,0,.3)}@media (prefers-color-scheme:dark){body{color:#fff;background:#000}.next-error-h1{border-right:1px solid rgba(255,255,255,.3)}}"}}],["$","h1",null,{"className":"next-error-h1","style":{"display":"inline-block","margin":"0 20px 0 0","padding":"0 23px 0 0","fontSize":24,"fontWeight":500,"verticalAlign":"top","lineHeight":"49px"},"children":"404"}],["$","div",null,{"style":{"display":"inline-block"},"children":["$","h2",null,{"style":{"fontSize":14,"fontWeight":400,"lineHeight":"49px","margin":0},"children":"This page could not be 
found."}]}]]}]}]],"notFoundStyles":[],"styles":null}]}]}],null]],[[["$","link","0",{"rel":"stylesheet","href":"/_next/static/css/3ffa7081b01d7ba0.css","precedence":"next","crossOrigin":""}]],"$L6"]]]] 6 | 6:[["$","meta","0",{"name":"viewport","content":"width=device-width, initial-scale=1"}],["$","meta","1",{"charSet":"utf-8"}],["$","title","2",{"children":"Create Llama App"}],["$","meta","3",{"name":"description","content":"Generated by create-llama"}],["$","link","4",{"rel":"icon","href":"/favicon.ico","type":"image/x-icon","sizes":"16x16"}],["$","meta","5",{"name":"next-size-adjust"}]] 7 | 1:null 8 | -------------------------------------------------------------------------------- /backend/app/public/llama.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thisishugow/create-llama-ollama/b45eee69b27ace1dcfe32609586f3d47f29faf0a/backend/app/public/llama.png -------------------------------------------------------------------------------- /backend/app/static/_next/static/a0F5iAmesR9XNB9QY5ZMN/_buildManifest.js: -------------------------------------------------------------------------------- 1 | self.__BUILD_MANIFEST={__rewrites:{afterFiles:[],beforeFiles:[],fallback:[]},"/_error":["static/chunks/pages/_error-d6107f1aac0c574c.js"],sortedPages:["/_app","/_error"]},self.__BUILD_MANIFEST_CB&&self.__BUILD_MANIFEST_CB(); -------------------------------------------------------------------------------- /backend/app/static/_next/static/a0F5iAmesR9XNB9QY5ZMN/_ssgManifest.js: -------------------------------------------------------------------------------- 1 | self.__SSG_MANIFEST=new Set([]);self.__SSG_MANIFEST_CB&&self.__SSG_MANIFEST_CB() -------------------------------------------------------------------------------- /backend/app/static/_next/static/chunks/app/_not-found-aeb74ca2b03c8470.js: -------------------------------------------------------------------------------- 1 | (self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[165],{83155:function(e,t,n){(window.__NEXT_P=window.__NEXT_P||[]).push(["/_not-found",function(){return n(84032)}])},84032:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return i}}),n(86921);let o=n(3827);n(64090);let r={error:{fontFamily:'system-ui,"Segoe UI",Roboto,Helvetica,Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji"',height:"100vh",textAlign:"center",display:"flex",flexDirection:"column",alignItems:"center",justifyContent:"center"},desc:{display:"inline-block"},h1:{display:"inline-block",margin:"0 20px 0 0",padding:"0 23px 0 0",fontSize:24,fontWeight:500,verticalAlign:"top",lineHeight:"49px"},h2:{fontSize:14,fontWeight:400,lineHeight:"49px",margin:0}};function i(){return(0,o.jsxs)(o.Fragment,{children:[(0,o.jsx)("title",{children:"404: This page could not be found."}),(0,o.jsx)("div",{style:r.error,children:(0,o.jsxs)("div",{children:[(0,o.jsx)("style",{dangerouslySetInnerHTML:{__html:"body{color:#000;background:#fff;margin:0}.next-error-h1{border-right:1px solid rgba(0,0,0,.3)}@media (prefers-color-scheme:dark){body{color:#fff;background:#000}.next-error-h1{border-right:1px solid rgba(255,255,255,.3)}}"}}),(0,o.jsx)("h1",{className:"next-error-h1",style:r.h1,children:"404"}),(0,o.jsx)("div",{style:r.desc,children:(0,o.jsx)("h2",{style:r.h2,children:"This page could not be found."})})]})})]})}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 
0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)}},function(e){e.O(0,[971,69,744],function(){return e(e.s=83155)}),_N_E=e.O()}]); -------------------------------------------------------------------------------- /backend/app/static/_next/static/chunks/app/layout-386c8c158172a3ec.js: -------------------------------------------------------------------------------- 1 | (self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[185],{84757:function(n,e,t){Promise.resolve().then(t.t.bind(t,86087,23)),Promise.resolve().then(t.t.bind(t,52445,23))},52445:function(){},86087:function(n){n.exports={style:{fontFamily:"'__Inter_aaf875', '__Inter_Fallback_aaf875'",fontStyle:"normal"},className:"__className_aaf875"}}},function(n){n.O(0,[971,69,744],function(){return n(n.s=84757)}),_N_E=n.O()}]); -------------------------------------------------------------------------------- /backend/app/static/_next/static/chunks/app/page-887ea91a8c950aff.js: -------------------------------------------------------------------------------- 1 | (self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[931],{46482:function(e,s,t){Promise.resolve().then(t.bind(t,7293)),Promise.resolve().then(t.t.bind(t,81749,23))},7293:function(e,s,t){"use strict";t.r(s),t.d(s,{default:function(){return V}});var l=t(3827),a=t(15237),n=t(64090);let r=e=>!!e&&"object"==typeof e&&0!==Object.keys(e).length,i=(e,s)=>(s&&e.forEach((e,t)=>{let l=s[t];r(l)&&(e.data=l)}),e);var o=t(76371),c=t(57742),d=t(34734),u=t(51367);function m(){for(var e=arguments.length,s=Array(e),t=0;t{let{className:t,variant:a,size:n,asChild:r=!1,...i}=e,c=r?o.g7:"button";return(0,l.jsx)(c,{className:m(h({variant:a,size:n,className:t})),ref:s,...i})});p.displayName="Button";var f=t(18994),x=t(80223);function g(e){var s;let{config:t,onFileUpload:a,onFileError:r}=e,[i,o]=(0,n.useState)(!1),c=(null==t?void 0:t.inputId)||"fileInput",d=(null==t?void 0:t.fileSizeLimit)||52428800,u=null==t?void 0:t.allowedExtensions,p=null!==(s=null==t?void 0:t.checkExtension)&&void 0!==s?s:e=>u&&!u.includes(e)?"Invalid file type. Please select a file with one of these formats: ".concat(u.join(",")):null,g=e=>e.size>d,v=()=>{document.getElementById(c).value=""},b=async e=>{var s;let t=null===(s=e.target.files)||void 0===s?void 0:s[0];t&&(o(!0),await j(t),v(),o(!1))},j=async e=>{let s=r||window.alert,t=p(e.name.split(".").pop()||"");return t?s(t):g(e)?s("File size exceeded. 
Limit is ".concat(d/1024/1024," MB")):void await a(e)};return(0,l.jsxs)("div",{className:"self-stretch",children:[(0,l.jsx)("input",{type:"file",id:c,style:{display:"none"},onChange:b,accept:null==u?void 0:u.join(","),disabled:(null==t?void 0:t.disabled)||i}),(0,l.jsx)("label",{htmlFor:c,className:m(h({variant:"secondary",size:"icon"}),"cursor-pointer",i&&"opacity-50"),children:i?(0,l.jsx)(f.Z,{className:"h-4 w-4 animate-spin"}):(0,l.jsx)(x.Z,{className:"-rotate-45 w-4 h-4"})})]})}let v=n.forwardRef((e,s)=>{let{className:t,type:a,...n}=e;return(0,l.jsx)("input",{type:a,className:m("flex h-10 w-full rounded-md border border-input bg-background px-3 py-2 text-sm ring-offset-background file:border-0 file:bg-transparent file:text-sm file:font-medium placeholder:text-muted-foreground focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:cursor-not-allowed disabled:opacity-50",t),ref:s,...n})});v.displayName="Input";var b=t(71483),j=t(20703);function w(e){let{url:s,onRemove:t}=e;return(0,l.jsxs)("div",{className:"relative w-20 h-20 group",children:[(0,l.jsx)(j.default,{src:s,alt:"Uploaded image",fill:!0,className:"object-cover w-full h-full rounded-xl hover:brightness-75"}),(0,l.jsx)("div",{className:m("absolute -top-2 -right-2 w-6 h-6 z-10 bg-gray-500 text-white rounded-full hidden group-hover:block"),children:(0,l.jsx)(b.Z,{className:"w-6 h-6 bg-gray-500 text-white rounded-full",onClick:t})})]})}function y(e){let[s,t]=(0,n.useState)(null),a=async e=>{t(await new Promise((s,t)=>{let l=new FileReader;l.readAsDataURL(e),l.onload=()=>s(l.result),l.onerror=e=>t(e)}))},r=async s=>{var t,l;try{if(e.multiModal&&s.type.startsWith("image/"))return await a(s);null===(t=e.onFileUpload)||void 0===t||t.call(e,s)}catch(s){console.log("檔案上傳錯誤:".concat(s)),null===(l=e.onFileError)||void 0===l||l.call(e,s.message)}};return(0,l.jsxs)("form",{onSubmit:l=>{if(s){e.handleSubmit(l,{data:{imageUrl:s}}),t(null);return}e.handleSubmit(l)},className:"rounded-xl bg-white p-4 shadow-xl space-y-4",children:[s&&(0,l.jsx)(w,{url:s,onRemove:()=>t(null)}),(0,l.jsxs)("div",{className:"flex w-full items-start justify-between gap-4 ",children:[(0,l.jsx)(v,{autoFocus:!0,name:"message",placeholder:"Type a message",className:"flex-1",value:e.input,onChange:e.handleInputChange}),e.multiModal&&(0,l.jsx)(g,{onFileUpload:r,onFileError:e.onFileError}),(0,l.jsx)(p,{type:"submit",disabled:e.isLoading,children:"Send message"})]})]})}var N=t(10081),k=t(40834);function C(e){return(0,l.jsxs)("div",{className:"space-x-4",children:[e.showStop&&(0,l.jsxs)(p,{variant:"outline",size:"sm",onClick:e.stop,children:[(0,l.jsx)(N.Z,{className:"mr-2 h-4 w-4"}),"Stop generating"]}),e.showReload&&(0,l.jsxs)(p,{variant:"outline",size:"sm",onClick:e.reload,children:[(0,l.jsx)(k.Z,{className:"mr-2 h-4 w-4"}),"Regenerate"]})]})}var L=t(80037),z=t(23416),R=t(52835);function S(e){let{role:s}=e;return"user"===s?(0,l.jsx)("div",{className:"flex h-8 w-8 shrink-0 select-none items-center justify-center rounded-md border bg-background shadow",children:(0,l.jsx)(R.Z,{className:"h-4 w-4"})}):(0,l.jsx)("div",{className:"flex h-8 w-8 shrink-0 select-none items-center justify-center rounded-md border bg-black text-white shadow",children:(0,l.jsx)(j.default,{className:"rounded-md",src:"/llama.png",alt:"Llama Logo",width:24,height:24,priority:!0})})}var E=t(49200),Z=t(24130),T=t(28105),F=t(44715),U=t(193),P=t(75688);function _(e){let{timeout:s=2e3}=e,[t,l]=n.useState(!1);return{isCopied:t,copyToClipboard:e=>{var 
t;(null===(t=navigator.clipboard)||void 0===t?void 0:t.writeText)&&e&&navigator.clipboard.writeText(e).then(()=>{l(!0),setTimeout(()=>{l(!1)},s)})}}}let I=U.Z,M={javascript:".js",python:".py",java:".java",c:".c",cpp:".cpp","c++":".cpp","c#":".cs",ruby:".rb",php:".php",swift:".swift","objective-c":".m",kotlin:".kt",typescript:".ts",go:".go",perl:".pl",rust:".rs",scala:".scala",haskell:".hs",lua:".lua",shell:".sh",sql:".sql",html:".html",css:".css"},B=function(e){let s=arguments.length>1&&void 0!==arguments[1]&&arguments[1],t="ABCDEFGHJKLMNPQRSTUVWXY3456789",l="";for(let s=0;s{let{language:s,value:t}=e,{isCopied:a,copyToClipboard:n}=_({timeout:2e3});return(0,l.jsxs)("div",{className:"codeblock relative w-full bg-zinc-950 font-sans",children:[(0,l.jsxs)("div",{className:"flex w-full items-center justify-between bg-zinc-800 px-6 py-2 pr-4 text-zinc-100",children:[(0,l.jsx)("span",{className:"text-xs lowercase",children:s}),(0,l.jsxs)("div",{className:"flex items-center space-x-1",children:[(0,l.jsxs)(p,{variant:"ghost",onClick:()=>{let e=M[s]||".file",l="file-".concat(B(3,!0)).concat(e),a=window.prompt("Enter file name",l);if(!a)return;let n=new Blob([t],{type:"text/plain"}),r=URL.createObjectURL(n),i=document.createElement("a");i.download=a,i.href=r,i.style.display="none",document.body.appendChild(i),i.click(),document.body.removeChild(i),URL.revokeObjectURL(r)},size:"icon",children:[(0,l.jsx)(F.Z,{}),(0,l.jsx)("span",{className:"sr-only",children:"Download"})]}),(0,l.jsxs)(p,{variant:"ghost",size:"icon",onClick:()=>{a||n(t)},children:[a?(0,l.jsx)(L.Z,{className:"h-4 w-4"}):(0,l.jsx)(z.Z,{className:"h-4 w-4"}),(0,l.jsx)("span",{className:"sr-only",children:"Copy code"})]})]})]}),(0,l.jsx)(I,{language:s,style:P.RY,PreTag:"div",showLineNumbers:!0,customStyle:{width:"100%",background:"transparent",padding:"1.5rem 1rem",borderRadius:"0.5rem"},codeTagProps:{style:{fontSize:"0.9rem",fontFamily:"var(--font-mono)"}},children:t})]})});A.displayName="CodeBlock";let D=(0,n.memo)(E.D,(e,s)=>e.children===s.children&&e.className===s.className);function O(e){let{content:s}=e;return(0,l.jsx)(D,{className:"prose dark:prose-invert prose-p:leading-relaxed prose-pre:p-0 break-words",remarkPlugins:[Z.Z,T.Z],components:{p(e){let{children:s}=e;return(0,l.jsx)("p",{className:"mb-2 last:mb-0",children:s})},code(e){let{node:s,inline:t,className:a,children:n,...r}=e;if(n.length){if("▍"==n[0])return(0,l.jsx)("span",{className:"mt-1 animate-pulse cursor-default",children:"▍"});n[0]=n[0].replace("`▍`","▍")}let i=/language-(\w+)/.exec(a||"");return t?(0,l.jsx)("code",{className:a,...r,children:n}):(0,l.jsx)(A,{language:i&&i[1]||"",value:String(n).replace(/\n$/,""),...r},Math.random())}},children:s})}function H(e){let{messageData:s}=e,{image_url:t,type:a}=s;return"image_url"===a?(0,l.jsx)("div",{className:"rounded-md max-w-[200px] shadow-md",children:(0,l.jsx)(j.default,{src:t.url,width:0,height:0,sizes:"100vw",style:{width:"100%",height:"auto"},alt:""})}):null}function W(e){let{isCopied:s,copyToClipboard:t}=_({timeout:2e3});return(0,l.jsxs)("div",{className:"flex items-start gap-4 pr-5 pt-5",children:[(0,l.jsx)(S,{role:e.role}),(0,l.jsxs)("div",{className:"group flex flex-1 justify-between gap-2",children:[(0,l.jsxs)("div",{className:"flex-1 space-y-4",children:[e.data&&(0,l.jsx)(H,{messageData:e.data}),(0,l.jsx)(O,{content:e.content})]}),(0,l.jsx)(p,{onClick:()=>t(e.content),size:"icon",variant:"ghost",className:"h-8 w-8 opacity-0 group-hover:opacity-100",children:s?(0,l.jsx)(L.Z,{className:"h-4 
w-4"}):(0,l.jsx)(z.Z,{className:"h-4 w-4"})})]})]})}function q(e){let s=(0,n.useRef)(null),t=e.messages.length,a=e.messages[t-1],r=()=>{s.current&&(s.current.scrollTop=s.current.scrollHeight)},i=t>0&&(null==a?void 0:a.role)!=="user",o=e.reload&&!e.isLoading&&i,c=e.stop&&e.isLoading,d=e.isLoading&&!i;return(0,n.useEffect)(()=>{r()},[t,a]),(0,l.jsxs)("div",{className:"w-full rounded-xl bg-white p-4 shadow-xl pb-0",children:[(0,l.jsxs)("div",{className:"flex h-[50vh] flex-col gap-5 divide-y overflow-y-auto pb-4",ref:s,children:[e.messages.map(e=>(0,l.jsx)(W,{...e},e.id)),d&&(0,l.jsx)("div",{className:"flex justify-center items-center pt-10",children:(0,l.jsx)(f.Z,{className:"h-4 w-4 animate-spin"})})]}),(0,l.jsx)("div",{className:"flex justify-end py-4",children:(0,l.jsx)(C,{reload:e.reload,stop:e.stop,showReload:o,showStop:c})})]})}var J=t(32215);function V(){let{messages:e,input:s,isLoading:t,handleSubmit:r,handleInputChange:o,reload:c,stop:d,data:u}=(0,a.RJ)({api:J.env.NEXT_PUBLIC_CHAT_API,headers:{"Content-Type":"application/json"}}),m=(0,n.useMemo)(()=>i(e,u),[e,u]);return(0,l.jsxs)("div",{className:"space-y-4 max-w-5xl w-full",children:[(0,l.jsx)(q,{messages:m,isLoading:t,reload:c,stop:d}),(0,l.jsx)(y,{input:s,handleSubmit:r,handleInputChange:o,isLoading:t,multiModal:!1})]})}}},function(e){e.O(0,[77,971,69,744],function(){return e(e.s=46482)}),_N_E=e.O()}]); -------------------------------------------------------------------------------- /backend/app/static/_next/static/chunks/main-app-1444d65fd19930be.js: -------------------------------------------------------------------------------- 1 | (self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[744],{67923:function(e,n,t){Promise.resolve().then(t.t.bind(t,47690,23)),Promise.resolve().then(t.t.bind(t,48955,23)),Promise.resolve().then(t.t.bind(t,5613,23)),Promise.resolve().then(t.t.bind(t,11902,23)),Promise.resolve().then(t.t.bind(t,31778,23)),Promise.resolve().then(t.t.bind(t,77831,23))}},function(e){var n=function(n){return e(e.s=n)};e.O(0,[971,69],function(){return n(35317),n(67923)}),_N_E=e.O()}]); -------------------------------------------------------------------------------- /backend/app/static/_next/static/chunks/pages/_app-d21e88acd55d90f1.js: -------------------------------------------------------------------------------- 1 | (self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[888],{41597:function(n,_,u){(window.__NEXT_P=window.__NEXT_P||[]).push(["/_app",function(){return u(57174)}])}},function(n){var _=function(_){return n(n.s=_)};n.O(0,[774,179],function(){return _(41597),_(94546)}),_N_E=n.O()}]); -------------------------------------------------------------------------------- /backend/app/static/_next/static/chunks/pages/_error-d6107f1aac0c574c.js: -------------------------------------------------------------------------------- 1 | (self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[820],{81981:function(n,_,u){(window.__NEXT_P=window.__NEXT_P||[]).push(["/_error",function(){return u(5103)}])}},function(n){n.O(0,[888,774,179],function(){return n(n.s=81981)}),_N_E=n.O()}]); -------------------------------------------------------------------------------- /backend/app/static/_next/static/chunks/polyfills-c67a75d1b6f99dc8.js: -------------------------------------------------------------------------------- 1 | !function(){var t="undefined"!=typeof globalThis?globalThis:"undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{};function e(t){var e={exports:{}};return 
t(e,e.exports),e.exports}var r=function(t){return t&&t.Math==Math&&t},n=r("object"==typeof globalThis&&globalThis)||r("object"==typeof window&&window)||r("object"==typeof self&&self)||r("object"==typeof t&&t)||Function("return this")(),o=function(t){try{return!!t()}catch(t){return!0}},i=!o(function(){return 7!=Object.defineProperty({},1,{get:function(){return 7}})[1]}),a={}.propertyIsEnumerable,u=Object.getOwnPropertyDescriptor,s=u&&!a.call({1:2},1)?function(t){var e=u(this,t);return!!e&&e.enumerable}:a,c={f:s},f=function(t,e){return{enumerable:!(1&t),configurable:!(2&t),writable:!(4&t),value:e}},l={}.toString,h=function(t){return l.call(t).slice(8,-1)},p="".split,d=o(function(){return!Object("z").propertyIsEnumerable(0)})?function(t){return"String"==h(t)?p.call(t,""):Object(t)}:Object,v=function(t){if(null==t)throw TypeError("Can't call method on "+t);return t},g=function(t){return d(v(t))},y=function(t){return"object"==typeof t?null!==t:"function"==typeof t},m=function(t,e){if(!y(t))return t;var r,n;if(e&&"function"==typeof(r=t.toString)&&!y(n=r.call(t)))return n;if("function"==typeof(r=t.valueOf)&&!y(n=r.call(t)))return n;if(!e&&"function"==typeof(r=t.toString)&&!y(n=r.call(t)))return n;throw TypeError("Can't convert object to primitive value")},b={}.hasOwnProperty,w=function(t,e){return b.call(t,e)},S=n.document,E=y(S)&&y(S.createElement),x=function(t){return E?S.createElement(t):{}},A=!i&&!o(function(){return 7!=Object.defineProperty(x("div"),"a",{get:function(){return 7}}).a}),O=Object.getOwnPropertyDescriptor,R={f:i?O:function(t,e){if(t=g(t),e=m(e,!0),A)try{return O(t,e)}catch(t){}if(w(t,e))return f(!c.f.call(t,e),t[e])}},j=function(t){if(!y(t))throw TypeError(String(t)+" is not an object");return t},P=Object.defineProperty,I={f:i?P:function(t,e,r){if(j(t),e=m(e,!0),j(r),A)try{return P(t,e,r)}catch(t){}if("get"in r||"set"in r)throw TypeError("Accessors not supported");return"value"in r&&(t[e]=r.value),t}},T=i?function(t,e,r){return I.f(t,e,f(1,r))}:function(t,e,r){return t[e]=r,t},k=function(t,e){try{T(n,t,e)}catch(r){n[t]=e}return e},L="__core-js_shared__",U=n[L]||k(L,{}),M=Function.toString;"function"!=typeof U.inspectSource&&(U.inspectSource=function(t){return M.call(t)});var _,N,C,F=U.inspectSource,B=n.WeakMap,D="function"==typeof B&&/native code/.test(F(B)),q=!1,z=e(function(t){(t.exports=function(t,e){return U[t]||(U[t]=void 0!==e?e:{})})("versions",[]).push({version:"3.6.5",mode:"global",copyright:"© 2020 Denis Pushkarev (zloirock.ru)"})}),W=0,K=Math.random(),G=function(t){return"Symbol("+String(void 0===t?"":t)+")_"+(++W+K).toString(36)},$=z("keys"),V=function(t){return $[t]||($[t]=G(t))},H={};if(D){var X=new(0,n.WeakMap),Y=X.get,J=X.has,Q=X.set;_=function(t,e){return Q.call(X,t,e),e},N=function(t){return Y.call(X,t)||{}},C=function(t){return J.call(X,t)}}else{var Z=V("state");H[Z]=!0,_=function(t,e){return T(t,Z,e),e},N=function(t){return w(t,Z)?t[Z]:{}},C=function(t){return w(t,Z)}}var tt,et={set:_,get:N,has:C,enforce:function(t){return C(t)?N(t):_(t,{})},getterFor:function(t){return function(e){var r;if(!y(e)||(r=N(e)).type!==t)throw TypeError("Incompatible receiver, "+t+" required");return r}}},rt=e(function(t){var e=et.get,r=et.enforce,o=String(String).split("String");(t.exports=function(t,e,i,a){var u=!!a&&!!a.unsafe,s=!!a&&!!a.enumerable,c=!!a&&!!a.noTargetGet;"function"==typeof i&&("string"!=typeof e||w(i,"name")||T(i,"name",e),r(i).source=o.join("string"==typeof e?e:"")),t!==n?(u?!c&&t[e]&&(s=!0):delete 
t[e],s?t[e]=i:T(t,e,i)):s?t[e]=i:k(e,i)})(Function.prototype,"toString",function(){return"function"==typeof this&&e(this).source||F(this)})}),nt=n,ot=function(t){return"function"==typeof t?t:void 0},it=function(t,e){return arguments.length<2?ot(nt[t])||ot(n[t]):nt[t]&&nt[t][e]||n[t]&&n[t][e]},at=Math.ceil,ut=Math.floor,st=function(t){return isNaN(t=+t)?0:(t>0?ut:at)(t)},ct=Math.min,ft=function(t){return t>0?ct(st(t),9007199254740991):0},lt=Math.max,ht=Math.min,pt=function(t,e){var r=st(t);return r<0?lt(r+e,0):ht(r,e)},dt=function(t){return function(e,r,n){var o,i=g(e),a=ft(i.length),u=pt(n,a);if(t&&r!=r){for(;a>u;)if((o=i[u++])!=o)return!0}else for(;a>u;u++)if((t||u in i)&&i[u]===r)return t||u||0;return!t&&-1}},vt={includes:dt(!0),indexOf:dt(!1)},gt=vt.indexOf,yt=function(t,e){var r,n=g(t),o=0,i=[];for(r in n)!w(H,r)&&w(n,r)&&i.push(r);for(;e.length>o;)w(n,r=e[o++])&&(~gt(i,r)||i.push(r));return i},mt=["constructor","hasOwnProperty","isPrototypeOf","propertyIsEnumerable","toLocaleString","toString","valueOf"],bt=mt.concat("length","prototype"),wt={f:Object.getOwnPropertyNames||function(t){return yt(t,bt)}},St={f:Object.getOwnPropertySymbols},Et=it("Reflect","ownKeys")||function(t){var e=wt.f(j(t)),r=St.f;return r?e.concat(r(t)):e},xt=function(t,e){for(var r=Et(e),n=I.f,o=R.f,i=0;i2?arguments[2]:void 0,u=Mt((void 0===a?n:pt(a,n))-i,n-o),s=1;for(i0;)i in r?r[o]=r[i]:delete r[o],o+=s,i+=s;return r},Nt=!!Object.getOwnPropertySymbols&&!o(function(){return!String(Symbol())}),Ct=Nt&&!Symbol.sham&&"symbol"==typeof Symbol.iterator,Ft=z("wks"),Bt=n.Symbol,Dt=Ct?Bt:Bt&&Bt.withoutSetter||G,qt=function(t){return w(Ft,t)||(Ft[t]=Nt&&w(Bt,t)?Bt[t]:Dt("Symbol."+t)),Ft[t]},zt=Object.keys||function(t){return yt(t,mt)},Wt=i?Object.defineProperties:function(t,e){j(t);for(var r,n=zt(e),o=n.length,i=0;o>i;)I.f(t,r=n[i++],e[r]);return t},Kt=it("document","documentElement"),Gt=V("IE_PROTO"),$t=function(){},Vt=function(t){return"