├── backend
├── app
│ ├── __init__.py
│ ├── api
│ │ ├── __init__.py
│ │ └── routers
│ │ │ ├── __init__.py
│ │ │ └── chat.py
│ ├── engine
│ │ ├── __init__.py
│ │ ├── constants.py
│ │ ├── loader.py
│ │ ├── context.py
│ │ ├── generate.py
│ │ └── index.py
│ └── context.py
├── tests
│ └── __init__.py
├── .gitignore
├── .DS_Store
├── data
│ └── 1952-Camargo-Drive
│ │ ├── 20. PropertyDetails_1952_Camargo_Dr_San_Jose.pdf
│ │ └── 4. Home Inspection Report - Homeguard 6.17.22.pdf
├── pyproject.toml
├── main.py
└── README.md
├── frontend
├── .env
├── app
│ ├── favicon.ico
│ ├── components
│ │ ├── ui
│ │ │ └── chat
│ │ │ │ ├── index.ts
│ │ │ │ ├── chat-item.tsx
│ │ │ │ ├── chat-avatar.tsx
│ │ │ │ ├── chat-messages.tsx
│ │ │ │ └── chat-input.tsx
│ │ ├── transform.ts
│ │ ├── chat-section.tsx
│ │ └── header.tsx
│ ├── page.tsx
│ ├── layout.tsx
│ └── globals.css
├── public
│ └── llama.png
├── postcss.config.js
├── .gitignore
├── next.config.js
├── tsconfig.json
├── package.json
├── README.md
└── tailwind.config.ts
├── .DS_Store
├── .idea
└── .idea.homeai.dir
│ └── .idea
│ ├── vcs.xml
│ ├── indexLayout.xml
│ └── .gitignore
└── README.md
/backend/app/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/backend/app/api/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/backend/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/backend/app/engine/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/backend/app/api/routers/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/backend/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | storage
3 | .env
4 |
--------------------------------------------------------------------------------
/frontend/.env:
--------------------------------------------------------------------------------
1 | MODEL=gpt-4
2 | NEXT_PUBLIC_MODEL=gpt-4
3 |
--------------------------------------------------------------------------------
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/2sunflower33/homeai/HEAD/.DS_Store
--------------------------------------------------------------------------------
/backend/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/2sunflower33/homeai/HEAD/backend/.DS_Store
--------------------------------------------------------------------------------
/frontend/app/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/2sunflower33/homeai/HEAD/frontend/app/favicon.ico
--------------------------------------------------------------------------------
/frontend/public/llama.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/2sunflower33/homeai/HEAD/frontend/public/llama.png
--------------------------------------------------------------------------------
/frontend/postcss.config.js:
--------------------------------------------------------------------------------
// Standard PostCSS pipeline for a Tailwind project: Tailwind first, then autoprefixer.
1 | module.exports = {
2 | plugins: {
3 | tailwindcss: {},
4 | autoprefixer: {},
5 | },
6 | };
7 |
--------------------------------------------------------------------------------
/backend/app/engine/constants.py:
--------------------------------------------------------------------------------
1 | STORAGE_DIR = "storage"  # directory to cache the generated index
2 | DATA_DIR = "data"  # directory containing the documents to index
3 | CHUNK_SIZE = 1024  # maximum size of each text chunk produced when splitting documents
4 | CHUNK_OVERLAP = 20  # overlap between adjacent chunks so context isn't lost at boundaries
5 |
--------------------------------------------------------------------------------
/backend/data/1952-Camargo-Drive/20. PropertyDetails_1952_Camargo_Dr_San_Jose.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/2sunflower33/homeai/HEAD/backend/data/1952-Camargo-Drive/20. PropertyDetails_1952_Camargo_Dr_San_Jose.pdf
--------------------------------------------------------------------------------
/backend/data/1952-Camargo-Drive/4. Home Inspection Report - Homeguard 6.17.22.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/2sunflower33/homeai/HEAD/backend/data/1952-Camargo-Drive/4. Home Inspection Report - Homeguard 6.17.22.pdf
--------------------------------------------------------------------------------
/.idea/.idea.homeai.dir/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/.idea.homeai.dir/.idea/indexLayout.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/frontend/app/components/ui/chat/index.ts:
--------------------------------------------------------------------------------
1 | import ChatInput from "./chat-input";
2 | import ChatMessages from "./chat-messages";
3 |
4 | export type { ChatInputProps } from "./chat-input";
5 | export type { Message } from "./chat-messages";
6 | export { ChatInput, ChatMessages };
7 |
--------------------------------------------------------------------------------
/backend/app/engine/loader.py:
--------------------------------------------------------------------------------
from app.engine.constants import DATA_DIR
from llama_index import SimpleDirectoryReader


def get_documents():
    """Load every document found in DATA_DIR.

    SimpleDirectoryReader picks a reader per file extension
    (PDF, docx, plain text, ...).

    Returns:
        list: the parsed llama_index Document objects.
    """
    # Removed unused imports (os, VectorStoreIndex, download_loader).
    return SimpleDirectoryReader(DATA_DIR).load_data()
9 |
--------------------------------------------------------------------------------
/backend/app/context.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from llama_index import ServiceContext
4 | from llama_index.llms import OpenAI
5 |
6 |
def create_base_context():
    """Build the app-wide default ServiceContext.

    The LLM is chosen via the MODEL environment variable, falling back
    to "gpt-3.5-turbo" when it is unset.
    """
    model_name = os.getenv("MODEL", "gpt-3.5-turbo")
    llm = OpenAI(model=model_name)
    return ServiceContext.from_defaults(llm=llm)
12 |
--------------------------------------------------------------------------------
/frontend/app/page.tsx:
--------------------------------------------------------------------------------
1 | import Header from "@/app/components/header";
2 | import ChatSection from "./components/chat-section";
3 |
// Top-level page component: composes Header and ChatSection.
// NOTE(review): the JSX markup was stripped by the extraction that produced
// this dump, so only the function skeleton survives below.
4 | export default function Home() {
5 | return (
6 |
7 |
8 |
9 |
10 | );
11 | }
12 |
--------------------------------------------------------------------------------
/.idea/.idea.homeai.dir/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Rider ignored files
5 | /modules.xml
6 | /projectSettingsUpdater.xml
7 | /.idea.homeai.iml
8 | /contentModel.xml
9 | # Editor-based HTTP Client requests
10 | /httpRequests/
11 | # Datasource local storage ignored files
12 | /dataSources/
13 | /dataSources.local.xml
14 |
--------------------------------------------------------------------------------
/frontend/app/components/ui/chat/chat-item.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | import ChatAvatar from "./chat-avatar";
4 | import { Message } from "./chat-messages";
5 |
// Renders a single chat message: the sender's avatar followed by the
// message text. NOTE(review): the JSX tags were stripped by the extraction
// that produced this dump; only {message.content} survives below.
6 | export default function ChatItem(message: Message) {
7 | return (
8 |
9 |
10 |
{message.content}
11 |
12 | );
13 | }
14 |
--------------------------------------------------------------------------------
/backend/app/engine/context.py:
--------------------------------------------------------------------------------
1 | from llama_index import ServiceContext
2 |
3 | from app.context import create_base_context
4 | from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP
5 |
6 |
def create_service_context():
    """Derive the engine's ServiceContext from the app-wide base context.

    Keeps the base LLM and embedding model but applies the engine's own
    chunking configuration (CHUNK_SIZE / CHUNK_OVERLAP).
    """
    base_context = create_base_context()
    overrides = {
        "llm": base_context.llm,
        "embed_model": base_context.embed_model,
        "chunk_size": CHUNK_SIZE,
        "chunk_overlap": CHUNK_OVERLAP,
    }
    return ServiceContext.from_defaults(**overrides)
15 |
--------------------------------------------------------------------------------
/backend/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "app"
3 | version = "0.1.0"
4 | description = ""
5 | authors = ["Marcus Schiesser "]
6 | readme = "README.md"
7 |
8 | [tool.poetry.dependencies]
9 | python = "^3.11,<3.12"
10 | fastapi = "^0.104.1"
11 | uvicorn = { extras = ["standard"], version = "^0.23.2" }
12 | llama-index = "^0.9.19"
13 | pypdf = "^3.17.0"
14 | python-dotenv = "^1.0.0"
15 | docx2txt = "^0.8"
16 |
17 | [build-system]
18 | requires = ["poetry-core"]
19 | build-backend = "poetry.core.masonry.api"
20 |
--------------------------------------------------------------------------------
/frontend/.gitignore:
--------------------------------------------------------------------------------
1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
2 |
3 | # dependencies
4 | /node_modules
5 | /.pnp
6 | .pnp.js
7 |
8 | # testing
9 | /coverage
10 |
11 | # next.js
12 | /.next/
13 | /out/
14 |
15 | # production
16 | /build
17 |
18 | # misc
19 | .DS_Store
20 | *.pem
21 |
22 | # debug
23 | npm-debug.log*
24 | yarn-debug.log*
25 | yarn-error.log*
26 |
27 | # local env files
28 | .env*.local
29 |
30 | # vercel
31 | .vercel
32 |
33 | # typescript
34 | *.tsbuildinfo
35 | next-env.d.ts
36 |
--------------------------------------------------------------------------------
/frontend/app/layout.tsx:
--------------------------------------------------------------------------------
1 | import type { Metadata } from "next";
2 | import { Inter } from "next/font/google";
3 | import "./globals.css";
4 |
5 | const inter = Inter({ subsets: ["latin"] });
6 |
7 | export const metadata: Metadata = {
8 | title: "Create Llama App",
9 | description: "Generated by create-llama",
10 | };
11 |
// Root layout wrapping every page; applies the Inter font and global CSS.
// NOTE(review): the <html>/<body> tags were stripped by the extraction that
// produced this dump; only {children} survives in the returned markup.
12 | export default function RootLayout({
13 | children,
14 | }: {
15 | children: React.ReactNode;
16 | }) {
17 | return (
18 |
19 | {children}
20 |
21 | );
22 | }
23 |
--------------------------------------------------------------------------------
/frontend/next.config.js:
--------------------------------------------------------------------------------
1 | /** @type {import('next').NextConfig} */
2 | const nextConfig = {
3 | output: "export",
4 | images: { unoptimized: true },
5 | webpack: (config) => {
6 | // See https://webpack.js.org/configuration/resolve/#resolvealias
7 | config.resolve.alias = {
8 | ...config.resolve.alias,
9 | sharp$: false,
10 | "onnxruntime-node$": false,
11 | };
12 | return config;
13 | },
14 | experimental: {
15 | outputFileTracingIncludes: {
16 | "/*": ["./cache/**/*"],
17 | },
18 | },
19 | };
20 |
21 | module.exports = nextConfig;
22 |
--------------------------------------------------------------------------------
/frontend/app/components/transform.ts:
--------------------------------------------------------------------------------
1 | import { JSONValue, Message } from "ai";
2 |
3 | export const isValidMessageData = (rawData: JSONValue | undefined) => {
4 | if (!rawData || typeof rawData !== "object") return false;
5 | if (Object.keys(rawData).length === 0) return false;
6 | return true;
7 | };
8 |
9 | export const insertDataIntoMessages = (
10 | messages: Message[],
11 | data: JSONValue[] | undefined,
12 | ) => {
13 | if (!data) return messages;
14 | messages.forEach((message, i) => {
15 | const rawData = data[i];
16 | if (isValidMessageData(rawData)) message.data = rawData;
17 | });
18 | return messages;
19 | };
20 |
--------------------------------------------------------------------------------
/frontend/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "target": "es5",
4 | "lib": ["dom", "dom.iterable", "esnext"],
5 | "allowJs": true,
6 | "skipLibCheck": true,
7 | "strict": true,
8 | "noEmit": true,
9 | "esModuleInterop": true,
10 | "module": "esnext",
11 | "moduleResolution": "bundler",
12 | "resolveJsonModule": true,
13 | "isolatedModules": true,
14 | "jsx": "preserve",
15 | "incremental": true,
16 | "plugins": [
17 | {
18 | "name": "next",
19 | },
20 | ],
21 | "paths": {
22 | "@/*": ["./*"],
23 | },
24 | "forceConsistentCasingInFileNames": true,
25 | },
26 | "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"],
27 | "exclude": ["node_modules"],
28 | }
29 |
--------------------------------------------------------------------------------
/frontend/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "homeai",
3 | "version": "0.1.0",
4 | "scripts": {
5 | "dev": "cross-env NEXT_PUBLIC_CHAT_API=http://localhost:8000/api/chat next dev",
6 | "build": "next build",
7 | "start": "next start",
8 | "lint": "next lint",
9 | "generate": "node app/api/chat/engine/generate.mjs"
10 | },
11 | "dependencies": {
12 | "ai": "^2.2.27",
13 | "dotenv": "^16.3.1",
14 | "llamaindex": "0.1.7",
15 | "next": "^14.0.3",
16 | "react": "^18.2.0",
17 | "react-dom": "^18.2.0",
18 | "supports-color": "^9.4.0"
19 | },
20 | "devDependencies": {
21 | "@types/node": "^20.10.3",
22 | "@types/react": "^18.2.42",
23 | "@types/react-dom": "^18.2.17",
24 | "autoprefixer": "^10.4.16",
25 | "postcss": "^8.4.32",
26 | "tailwindcss": "^3.3.6",
27 | "typescript": "^5.3.2",
28 | "cross-env": "^7.0.3"
29 | }
30 | }
31 |
--------------------------------------------------------------------------------
/backend/main.py:
--------------------------------------------------------------------------------
1 | from dotenv import load_dotenv
2 |
3 | load_dotenv()  # must run before the app imports below so they can read .env values
4 |
5 | import logging
6 | import os
7 | import uvicorn
8 | from app.api.routers.chat import chat_router
9 | from fastapi import FastAPI
10 | from fastapi.middleware.cors import CORSMiddleware
11 |
12 | app = FastAPI()
13 |
14 | environment = os.getenv("ENVIRONMENT", "dev") # defaults to "dev" if not set
15 |
16 |
17 | if environment == "dev":
18 | logger = logging.getLogger("uvicorn")
19 | logger.warning("Running in development mode - allowing CORS for all origins")
20 | app.add_middleware(
21 | CORSMiddleware,
22 | allow_origins=["*"],
23 | allow_credentials=True,
24 | allow_methods=["*"],
25 | allow_headers=["*"],
26 | )
27 |
28 | app.include_router(chat_router, prefix="/api/chat")  # mount the chat endpoint under /api/chat
29 |
30 |
31 | if __name__ == "__main__":
32 | uvicorn.run(app="main:app", host="0.0.0.0", reload=True)  # dev server with auto-reload on uvicorn's default port (8000)
33 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | This is a [LlamaIndex](https://www.llamaindex.ai/) project bootstrapped with [`create-llama`](https://github.com/run-llama/LlamaIndexTS/tree/main/packages/create-llama).
2 |
3 | ## Getting Started
4 |
5 | First, start up the backend as described in the [backend README](./backend/README.md).
6 |
7 | Second, run the development server of the frontend as described in the [frontend README](./frontend/README.md).
8 |
9 | Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
10 |
11 | ## Learn More
12 |
13 | To learn more about LlamaIndex, take a look at the following resources:
14 |
15 | - [LlamaIndex Documentation](https://docs.llamaindex.ai) - learn about LlamaIndex (Python features).
16 | - [LlamaIndexTS Documentation](https://ts.llamaindex.ai) - learn about LlamaIndex (Typescript features).
17 |
18 | You can check out [the LlamaIndexTS GitHub repository](https://github.com/run-llama/LlamaIndexTS) - your feedback and contributions are welcome!
19 |
--------------------------------------------------------------------------------
/backend/app/engine/generate.py:
--------------------------------------------------------------------------------
import logging

from dotenv import load_dotenv

from app.engine.constants import STORAGE_DIR
from app.engine.context import create_service_context
from app.engine.loader import get_documents

# Load .env (OPENAI_API_KEY, ...) before any llama_index usage needs it.
load_dotenv()

from llama_index import VectorStoreIndex

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()


def generate_datasource(service_context):
    """Build a vector index over the documents and persist it.

    Args:
        service_context: llama_index ServiceContext providing the LLM,
            embedding model, and chunking configuration.

    The finished index is written to STORAGE_DIR so the API server can
    load it later without re-embedding the documents.
    """
    logger.info("Creating new index")
    # Load the documents and create the index.
    documents = get_documents()
    index = VectorStoreIndex.from_documents(documents, service_context=service_context)
    # Store it for later reuse (lazy %-formatting keeps logging cheap).
    index.storage_context.persist(STORAGE_DIR)
    logger.info("Finished creating new index. Stored in %s", STORAGE_DIR)


if __name__ == "__main__":
    # Removed unused imports (SimpleDirectoryReader, DATA_DIR).
    generate_datasource(create_service_context())
33 |
--------------------------------------------------------------------------------
/frontend/README.md:
--------------------------------------------------------------------------------
1 | This is a [LlamaIndex](https://www.llamaindex.ai/) project using [Next.js](https://nextjs.org/) bootstrapped with [`create-llama`](https://github.com/run-llama/LlamaIndexTS/tree/main/packages/create-llama).
2 |
3 | ## Getting Started
4 |
5 | First, install the dependencies:
6 |
7 | ```
8 | npm install
9 | ```
10 |
11 | Second, run the development server:
12 |
13 | ```
14 | npm run dev
15 | ```
16 |
17 | Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
18 |
19 | You can start editing the page by modifying `app/page.tsx`. The page auto-updates as you edit the file.
20 |
21 | This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-optimization) to automatically optimize and load Inter, a custom Google Font.
22 |
23 | ## Learn More
24 |
25 | To learn more about LlamaIndex, take a look at the following resources:
26 |
27 | - [LlamaIndex Documentation](https://docs.llamaindex.ai) - learn about LlamaIndex (Python features).
28 | - [LlamaIndexTS Documentation](https://ts.llamaindex.ai) - learn about LlamaIndex (Typescript features).
29 |
30 | You can check out [the LlamaIndexTS GitHub repository](https://github.com/run-llama/LlamaIndexTS) - your feedback and contributions are welcome!
31 |
--------------------------------------------------------------------------------
/frontend/app/components/ui/chat/chat-avatar.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | import Image from "next/image";
4 | import { Message } from "./chat-messages";
5 |
// Renders an avatar for a chat message, branching on message.role: one
// markup tree for "user", another for every other role. NOTE(review): the
// JSX was stripped by the extraction that produced this dump — presumably
// the non-user branch showed public/llama.png, but that cannot be confirmed here.
6 | export default function ChatAvatar(message: Message) {
7 | if (message.role === "user") {
8 | return (
9 |
19 | );
20 | }
21 |
22 | return (
23 |
24 |
32 |
33 | );
34 | }
35 |
--------------------------------------------------------------------------------
/frontend/app/components/ui/chat/chat-messages.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | import { useEffect, useRef } from "react";
4 | import ChatItem from "./chat-item";
5 |
// Shape of one chat message as consumed by the chat UI components.
6 | export interface Message {
7 | id: string;
8 | content: string;
9 | role: string;
10 | }
11 |
// Scrollable message list that auto-scrolls to the newest message.
// NOTE(review): the surrounding JSX containers were stripped by the
// extraction that produced this dump; only the map over messages survives.
12 | export default function ChatMessages({
13 | messages,
14 | isLoading,
15 | reload,
16 | stop,
17 | }: {
18 | messages: Message[];
19 | isLoading?: boolean;
20 | stop?: () => void;
21 | reload?: () => void;
22 | }) {
23 | const scrollableChatContainerRef = useRef(null);
24 |
// Pin the container's scroll position to its bottom edge.
25 | const scrollToBottom = () => {
26 | if (scrollableChatContainerRef.current) {
27 | scrollableChatContainerRef.current.scrollTop =
28 | scrollableChatContainerRef.current.scrollHeight;
29 | }
30 | };
31 |
// Re-scroll whenever a message is appended.
32 | useEffect(() => {
33 | scrollToBottom();
34 | }, [messages.length]);
35 |
36 | return (
37 |
38 |
42 | {messages.map((m: Message) => (
43 |
44 | ))}
45 |
46 |
47 | );
48 | }
49 |
--------------------------------------------------------------------------------
/frontend/app/components/chat-section.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | import { useChat } from "ai/react";
4 | import { useMemo } from "react";
5 | import { insertDataIntoMessages } from "./transform";
6 | import { ChatInput, ChatMessages } from "./ui/chat";
7 |
// Chat page section: wires the vercel/ai useChat hook to the backend chat
// endpoint (NEXT_PUBLIC_CHAT_API) and renders the message list plus input.
// NOTE(review): the returned JSX was stripped by the extraction that
// produced this dump.
8 | export default function ChatSection() {
9 | const {
10 | messages,
11 | input,
12 | isLoading,
13 | handleSubmit,
14 | handleInputChange,
15 | reload,
16 | stop,
17 | data,
18 | } = useChat({
19 | api: process.env.NEXT_PUBLIC_CHAT_API,
20 | headers: {
21 | "Content-Type": "application/json", // using JSON because of vercel/ai 2.2.26
22 | },
23 | });
24 |
// Merge streamed data payloads into their messages; memoized per update.
25 | const transformedMessages = useMemo(() => {
26 | return insertDataIntoMessages(messages, data);
27 | }, [messages, data]);
28 |
29 | return (
30 |
31 |
37 |
44 |
45 | );
46 | }
47 |
--------------------------------------------------------------------------------
/frontend/app/components/header.tsx:
--------------------------------------------------------------------------------
1 | import Image from "next/image";
2 |
// Page header with the "Get started by editing app/page.tsx" hint.
// NOTE(review): the JSX tags were stripped by the extraction that produced
// this dump; only the text nodes survive below.
3 | export default function Header() {
4 | return (
5 |
6 |
7 | Get started by editing
8 | app/page.tsx
9 |
10 |
26 |
27 | );
28 | }
29 |
--------------------------------------------------------------------------------
/frontend/app/components/ui/chat/chat-input.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | export interface ChatInputProps {
4 | /** The current value of the input */
5 | input?: string;
6 | /** An input/textarea-ready onChange handler to control the value of the input */
7 | handleInputChange?: (
8 | e:
9 | | React.ChangeEvent
10 | | React.ChangeEvent,
11 | ) => void;
12 | /** Form submission handler to automatically reset input and append a user message */
13 | handleSubmit: (e: React.FormEvent) => void;
14 | isLoading: boolean;
15 | multiModal?: boolean;
16 | }
17 |
// Form with the message input field and submit button; all state is
// controlled by the parent via ChatInputProps. NOTE(review): the form JSX
// was stripped by the extraction that produced this dump.
18 | export default function ChatInput(props: ChatInputProps) {
19 | return (
20 | <>
21 |
41 | >
42 | );
43 | }
44 |
--------------------------------------------------------------------------------
/backend/README.md:
--------------------------------------------------------------------------------
1 | This is a [LlamaIndex](https://www.llamaindex.ai/) project using [FastAPI](https://fastapi.tiangolo.com/) bootstrapped with [`create-llama`](https://github.com/run-llama/LlamaIndexTS/tree/main/packages/create-llama).
2 |
3 | ## Getting Started
4 |
5 | First, setup the environment:
6 |
7 | ```
8 | poetry install
9 | poetry shell
10 | ```
11 |
12 | By default, we use the OpenAI LLM (though you can customize it; see `app/context.py`). As a result, you need to specify an `OPENAI_API_KEY` in a `.env` file in this directory.
13 |
14 | Example `.env` file:
15 |
16 | ```
17 | OPENAI_API_KEY=
18 | ```
19 |
20 | Second, generate the embeddings of the documents in the `./data` directory (if this folder exists - otherwise, skip this step):
21 |
22 | ```
23 | python app/engine/generate.py
24 | ```
25 |
26 | Third, run the development server:
27 |
28 | ```
29 | python main.py
30 | ```
31 |
32 | Then call the API endpoint `/api/chat` to see the result:
33 |
34 | ```
35 | curl --location 'localhost:8000/api/chat' \
36 | --header 'Content-Type: application/json' \
37 | --data '{ "messages": [{ "role": "user", "content": "Hello" }] }'
38 | ```
39 |
40 | You can start editing the API by modifying `app/api/routers/chat.py`. The endpoint auto-updates as you save the file.
41 |
42 | Open [http://localhost:8000/docs](http://localhost:8000/docs) with your browser to see the Swagger UI of the API.
43 |
44 | The API allows CORS for all origins to simplify development. You can change this behavior by setting the `ENVIRONMENT` environment variable to `prod`:
45 |
46 | ```
47 | ENVIRONMENT=prod uvicorn main:app
48 | ```
49 |
50 | ## Learn More
51 |
52 | To learn more about LlamaIndex, take a look at the following resources:
53 |
54 | - [LlamaIndex Documentation](https://docs.llamaindex.ai) - learn about LlamaIndex.
55 |
56 | You can check out [the LlamaIndex GitHub repository](https://github.com/run-llama/llama_index) - your feedback and contributions are welcome!
57 |
--------------------------------------------------------------------------------
/backend/app/engine/index.py:
--------------------------------------------------------------------------------
"""Builds the document index at import time and exposes get_chat_engine().

Cleanups vs. the previous version: the duplicated/unused import blocks are
gone, the hard-coded absolute path to one developer's home directory is
replaced with the project-relative DATA_DIR, and the service context that
was created but never used is now actually passed to the index.
"""
import os

from llama_index import (
    SimpleDirectoryReader,
    StorageContext,
    VectorStoreIndex,
)
from llama_index.vector_stores import AstraDBVectorStore
from llama_parse import LlamaParse  # pip install llama-parse

from app.engine.constants import DATA_DIR
from app.engine.context import create_service_context

# LLM, embedding model, and chunking configuration shared by the app.
service_context = create_service_context()

# LlamaParse converts PDFs to markdown, which chunks better than raw text.
parser = LlamaParse(
    api_key=os.getenv("LLAMA_API_KEY"),  # can also be set in your env as LLAMA_CLOUD_API_KEY
    result_type="markdown",  # "markdown" and "text" are available
)
file_extractor = {".pdf": parser}

# Vector store backed by Astra DB; credentials come from the environment.
astra_db_store = AstraDBVectorStore(
    token=os.getenv("ASTRA_DB_APPLICATION_TOKEN"),
    api_endpoint=os.getenv("ASTRA_DB_API_ENDPOINT"),
    collection_name="test",
    embedding_dimension=1536,  # must match the embedding model's output size
)

# Index every document under the project-relative data directory
# (recursive, so property subfolders such as 1952-Camargo-Drive are included).
documents = SimpleDirectoryReader(
    DATA_DIR,
    recursive=True,
    file_extractor=file_extractor,
).load_data()

storage_context = StorageContext.from_defaults(vector_store=astra_db_store)
index = VectorStoreIndex.from_documents(
    documents,
    storage_context=storage_context,
    service_context=service_context,  # previously created but never used
)


def get_chat_engine():
    """Return a chat engine that retrieves from the Astra-backed index."""
    return index.as_chat_engine(similarity_top_k=5, chat_mode="condense_plus_context")
--------------------------------------------------------------------------------
/frontend/app/globals.css:
--------------------------------------------------------------------------------
1 | @tailwind base;
2 | @tailwind components;
3 | @tailwind utilities;
4 |
5 | @layer base {
6 | :root {
7 | --background: 0 0% 100%;
8 | --foreground: 222.2 47.4% 11.2%;
9 |
10 | --muted: 210 40% 96.1%;
11 | --muted-foreground: 215.4 16.3% 46.9%;
12 |
13 | --popover: 0 0% 100%;
14 | --popover-foreground: 222.2 47.4% 11.2%;
15 |
16 | --border: 214.3 31.8% 91.4%;
17 | --input: 214.3 31.8% 91.4%;
18 |
19 | --card: 0 0% 100%;
20 | --card-foreground: 222.2 47.4% 11.2%;
21 |
22 | --primary: 222.2 47.4% 11.2%;
23 | --primary-foreground: 210 40% 98%;
24 |
25 | --secondary: 210 40% 96.1%;
26 | --secondary-foreground: 222.2 47.4% 11.2%;
27 |
28 | --accent: 210 40% 96.1%;
29 | --accent-foreground: 222.2 47.4% 11.2%;
30 |
31 | --destructive: 0 100% 50%;
32 | --destructive-foreground: 210 40% 98%;
33 |
34 | --ring: 215 20.2% 65.1%;
35 |
36 | --radius: 0.5rem;
37 | }
38 |
39 | .dark {
40 | --background: 224 71% 4%;
41 | --foreground: 213 31% 91%;
42 |
43 | --muted: 223 47% 11%;
44 | --muted-foreground: 215.4 16.3% 56.9%;
45 |
46 | --accent: 216 34% 17%;
47 | --accent-foreground: 210 40% 98%;
48 |
49 | --popover: 224 71% 4%;
50 | --popover-foreground: 215 20.2% 65.1%;
51 |
52 | --border: 216 34% 17%;
53 | --input: 216 34% 17%;
54 |
55 | --card: 224 71% 4%;
56 | --card-foreground: 213 31% 91%;
57 |
58 | --primary: 210 40% 98%;
59 | --primary-foreground: 222.2 47.4% 1.2%;
60 |
61 | --secondary: 222.2 47.4% 11.2%;
62 | --secondary-foreground: 210 40% 98%;
63 |
64 | --destructive: 0 63% 31%;
65 | --destructive-foreground: 210 40% 98%;
66 |
67 | --ring: 216 34% 17%;
68 |
69 | --radius: 0.5rem;
70 | }
71 | }
72 |
73 | @layer base {
74 | * {
75 | @apply border-border;
76 | }
77 | body {
78 | @apply bg-background text-foreground;
79 | font-feature-settings:
80 | "rlig" 1,
81 | "calt" 1;
82 | }
83 | .background-gradient {
84 | background-color: #fff;
85 | background-image: radial-gradient(
86 | at 21% 11%,
87 | rgba(186, 186, 233, 0.53) 0,
88 | transparent 50%
89 | ),
90 | radial-gradient(at 85% 0, hsla(46, 57%, 78%, 0.52) 0, transparent 50%),
91 | radial-gradient(at 91% 36%, rgba(194, 213, 255, 0.68) 0, transparent 50%),
92 | radial-gradient(at 8% 40%, rgba(251, 218, 239, 0.46) 0, transparent 50%);
93 | }
94 | }
95 |
--------------------------------------------------------------------------------
/frontend/tailwind.config.ts:
--------------------------------------------------------------------------------
1 | import type { Config } from "tailwindcss";
2 | import { fontFamily } from "tailwindcss/defaultTheme";
3 |
4 | const config: Config = {
5 | darkMode: ["class"],
6 | content: ["app/**/*.{ts,tsx}", "components/**/*.{ts,tsx}"],
7 | theme: {
8 | container: {
9 | center: true,
10 | padding: "2rem",
11 | screens: {
12 | "2xl": "1400px",
13 | },
14 | },
15 | extend: {
16 | colors: {
17 | border: "hsl(var(--border))",
18 | input: "hsl(var(--input))",
19 | ring: "hsl(var(--ring))",
20 | background: "hsl(var(--background))",
21 | foreground: "hsl(var(--foreground))",
22 | primary: {
23 | DEFAULT: "hsl(var(--primary))",
24 | foreground: "hsl(var(--primary-foreground))",
25 | },
26 | secondary: {
27 | DEFAULT: "hsl(var(--secondary))",
28 | foreground: "hsl(var(--secondary-foreground))",
29 | },
30 | destructive: {
31 | DEFAULT: "hsl(var(--destructive) / )",
32 | foreground: "hsl(var(--destructive-foreground) / )",
33 | },
34 | muted: {
35 | DEFAULT: "hsl(var(--muted))",
36 | foreground: "hsl(var(--muted-foreground))",
37 | },
38 | accent: {
39 | DEFAULT: "hsl(var(--accent))",
40 | foreground: "hsl(var(--accent-foreground))",
41 | },
42 | popover: {
43 | DEFAULT: "hsl(var(--popover))",
44 | foreground: "hsl(var(--popover-foreground))",
45 | },
46 | card: {
47 | DEFAULT: "hsl(var(--card))",
48 | foreground: "hsl(var(--card-foreground))",
49 | },
50 | },
51 | borderRadius: {
52 | xl: `calc(var(--radius) + 4px)`,
53 | lg: `var(--radius)`,
54 | md: `calc(var(--radius) - 2px)`,
55 | sm: "calc(var(--radius) - 4px)",
56 | },
57 | fontFamily: {
58 | sans: ["var(--font-sans)", ...fontFamily.sans],
59 | },
60 | keyframes: {
61 | "accordion-down": {
62 | from: { height: "0" },
63 | to: { height: "var(--radix-accordion-content-height)" },
64 | },
65 | "accordion-up": {
66 | from: { height: "var(--radix-accordion-content-height)" },
67 | to: { height: "0" },
68 | },
69 | },
70 | animation: {
71 | "accordion-down": "accordion-down 0.2s ease-out",
72 | "accordion-up": "accordion-up 0.2s ease-out",
73 | },
74 | },
75 | },
76 | plugins: [],
77 | };
78 | export default config;
79 |
--------------------------------------------------------------------------------
/backend/app/api/routers/chat.py:
--------------------------------------------------------------------------------
1 | # import nest_asyncio
2 | from typing import List
3 | import json
4 | from typing import Dict
5 | from builtins import Exception
6 | from builtins import ValueError
7 | import logging
8 |
9 | from fastapi import APIRouter, Depends, HTTPException, status
10 | from llama_index.chat_engine.types import BaseChatEngine
11 | from llama_index.llms.base import ChatMessage
12 | from llama_index.llms.types import MessageRole
13 | from pydantic import BaseModel
14 | from app.engine.index import get_chat_engine
15 |
# Router for the chat endpoints; `r` is a short module-local alias used by
# the route decorators below, while `chat_router` is the name imported by
# the application.
chat_router = r = APIRouter()
17 |
class _Message(BaseModel):
    """A single chat message in the request payload."""

    # Sender role (user/assistant/...), as defined by llama_index.
    role: MessageRole
    content: str
21 |
class _ChatData(BaseModel):
    """Request body for the chat endpoint: the full message history."""

    messages: List[_Message]
24 |
class _Upgrade(BaseModel):
    """One upgrade/renovation performed on the property."""

    year_of_upgrade: int
    what_was_done: str
    does_it_have_permit: bool
29 |
class _PropertyDetail(BaseModel):
    """Structured property facts extracted from listing documents.

    Field names mirror the JSON schema embedded in the extraction prompt
    in ``chat`` below, so the model output can be parsed directly.
    """

    house_address: str
    property_tax: float
    # Sizes kept as free-form strings (e.g. units vary across documents).
    house_size: str
    lot_size: str
    bedroom_numbers: int
    bathroom_numbers: int
    upgrades: List[_Upgrade]
38 |
39 |
class _Result(BaseModel):
    """Response body: structured property details plus free-text concerns."""

    property_detail: _PropertyDetail
    major_concerns: str
43 |
44 | @r.post("")
45 | async def chat(
46 | data: _ChatData,
47 | chat_engine: BaseChatEngine = Depends(get_chat_engine),
48 | ) -> _Result:
49 | # check preconditions and get last message
50 | if len(data.messages) == 0:
51 | raise HTTPException(
52 | status_code=status.HTTP_400_BAD_REQUEST,
53 | detail="No messages provided",
54 | )
55 | lastMessage = data.messages.pop()
56 | if lastMessage.role != MessageRole.USER:
57 | raise HTTPException(
58 | status_code=status.HTTP_400_BAD_REQUEST,
59 | detail="Last message must be from user",
60 | )
61 | query_str = lastMessage.content;
62 | # latestMessage.content = "what's the answer of 2+2"
63 | # convert messages coming from the request to type ChatMessage
64 | # messages = [
65 | # ChatMessage(
66 | # role=m.role,
67 | # content=m.content,
68 | # )
69 | # for m in data.messages
70 | # ]
71 | messages = [
72 | ChatMessage(
73 | role=m.role,
74 | content="",
75 | )
76 | for m in data.messages
77 | ]
78 | house_detail_query = "Extract the following information for property" + query_str + """
79 | What’s its address?
80 | What’s the property tax?
81 | What’s the house size?
82 | What’s the lot size?
83 | How many bedrooms?
84 | How many bathrooms?
85 | If there are updates, What are the updates have done? For each upgrade, what’s the year of upgrade? What was done? Does it have the permit?
86 |
87 | If you are unsure about the answer, you can skip it.
88 |
89 | Provide ONLY a code block using markdown of the JSON response and no additional comments , text, prose, descriptions, or acknowledgements. Schema:
90 |
91 | {
92 | "house_address": "string",
93 | "property_tax": float,
94 | "house_size": "string",
95 | "lot_size": "string",
96 | "bedroom_numbers": integer,
97 | "bathroom_numbers": integer,
98 | "upgrades": [
99 | {
100 | "year_of_upgrade": integer,
101 | "what_was_done": "string",
102 | "does_it_have_permit": boolean
103 | },
104 | {
105 | "year_of_upgrade": integer,
106 | "what_was_done": "string",
107 | "does_it_have_permit": boolean
108 | },
109 | {
110 | "year_of_upgrade": integer,
111 | "what_was_done": "string",
112 | "does_it_have_permit": boolean
113 | }
114 | ]
115 | }
116 | """
117 | # query chat engine
118 | # response = await chat_engine.achat(house_detail_query, messages)
119 | # response = await chat_engine.achat(lastMessage.content, messages)
120 | # response = await chat_engine.aquery(lastMessage.content)
121 | # nest_asyncio.apply()
122 | # response = chat_engine.query(lastMessage.content)
123 |
124 | response = await chat_engine.achat(house_detail_query, messages)
125 | # response = await chat_engine.achat(lastMessage.content, messages)
126 |
127 | logger = logging.getLogger("uvicorn")
128 |
129 | def strToDetailedInfo(input_str: str) -> _PropertyDetail:
130 | try:
131 | logger.info(f"$$$ input string: {input_str}")
132 | # Parse the JSON string into a Python dictionary
133 | data_dict: Dict = json.loads(input_str)
134 | logger.info(f"$$$ data_dict: {data_dict}")
135 |
136 | # Convert the dictionary into a _DetailedInfo instance using Pydantic
137 | property_detail = _PropertyDetail(**data_dict)
138 |
139 | return property_detail
140 | except Exception as e:
141 | # Handle any parsing or validation errors
142 | raise ValueError(f"Failed to convert string to DetailedInfo: {e}")
143 |
144 | # Convert the response to a DetailedInfo instance
145 | property_detail = strToDetailedInfo(response.response[7:-4])
146 |
147 | return _Result(
148 | property_detail = property_detail,
149 | major_concerns = "This is major concern"
150 | )
--------------------------------------------------------------------------------