├── README.md
├── backend
│   ├── .env.example
│   ├── .gitignore
│   ├── README.md
│   ├── app
│   │   ├── __init__.py
│   │   ├── api
│   │   │   ├── __init__.py
│   │   │   └── routers
│   │   │       ├── __init__.py
│   │   │       └── chat.py
│   │   ├── scheme
│   │   │   └── diagram.py
│   │   └── settings.py
│   ├── main.py
│   ├── poetry.lock
│   ├── pyproject.toml
│   └── tests
│       └── __init__.py
└── frontend
    ├── .env.example
    ├── .gitignore
    ├── README.md
    ├── app
    │   ├── components
    │   │   ├── diagram-section.tsx
    │   │   ├── diagram-vis.tsx
    │   │   ├── topic-input.tsx
    │   │   └── ui
    │   │       └── lib
    │   │           └── utils.ts
    │   ├── favicon.ico
    │   ├── globals.css
    │   ├── layout.tsx
    │   ├── observability
    │   │   └── index.ts
    │   └── page.tsx
    ├── bun.lockb
    ├── next.config.js
    ├── next.config.json
    ├── next.config.mjs
    ├── package.json
    ├── postcss.config.js
    ├── public
    │   └── llama.png
    ├── tailwind.config.ts
    ├── tsconfig.json
    └── webpack.config.mjs

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
In this tutorial, we'll see how to use LlamaIndex Pydantic Program mode and partial object parsing to send intermediate Pydantic objects, built from still-incomplete JSON, to the frontend for an intuitive user experience.

Full video tutorial, under 2.5 minutes, below 👇 🔥🔥

[![AI Diagram Generator](https://img.youtube.com/vi/Z0l2WaFYQ88/maxresdefault.jpg)](https://www.youtube.com/watch?v=Z0l2WaFYQ88)

We use LlamaIndex Pydantic Program mode for structured output generation, and partial object parsing to send intermediate objects, validated against Pydantic models, to the frontend.

Click the thumbnail above for the detailed video tutorial.
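
## How it works

The backend asks the LLM for a structured object and forwards every partially-parsed version of it to the frontend as the JSON streams in. Here is a minimal sketch of that idea (the app's actual code lives in `backend/app/api/routers/chat.py`; this standalone example uses a made-up `Album` schema and assumes `llama-index-program-openai` is installed and `OPENAI_API_KEY` is set):

```python
from pydantic import BaseModel
from llama_index.program.openai import OpenAIPydanticProgram


class Song(BaseModel):
    title: str = ""
    length_seconds: int = 0


class Album(BaseModel):
    # Defaults let a partially-parsed Album validate before all fields arrive.
    name: str = ""
    songs: list[Song] = []


program = OpenAIPydanticProgram.from_defaults(
    output_cls=Album,
    prompt_template_str="Generate an example album inspired by {topic}.",
)

# Each yielded object is an Album validated from the JSON received so far,
# so a client can re-render on every iteration instead of waiting for the
# full response.
for partial_album in program.stream_partial_objects(topic="rainy days"):
    print(partial_album.model_dump())
```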

## Getting Started

First, clone the repo:

```bash
git clone https://github.com/rsrohan99/ai-diagram-generator.git
cd ai-diagram-generator
```

## Start the Backend

`cd` into the `backend` directory:

```bash
cd backend
```

### First create `.env` from `.env.example`

```bash
cp .env.example .env
```

### Set the OpenAI API key in `.env`

```bash
OPENAI_API_KEY=****
```

### Install the dependencies

```bash
poetry install
```

### Start the backend server

```bash
poetry run python main.py
```

## Start the Frontend

`cd` into the `frontend` directory:

```bash
cd frontend
```

### First create `.env` from `.env.example`

```bash
cp .env.example .env
```

### Install the dependencies

```bash
npm i
```

### Start the frontend server

```bash
npm run dev
```
--------------------------------------------------------------------------------
/backend/.env.example:
--------------------------------------------------------------------------------
# The name of the LLM model to use.
MODEL=gpt-3.5-turbo-0125

# The OpenAI API key to use.
OPENAI_API_KEY=""

# The address to start the backend app.
APP_HOST=0.0.0.0

# The port to start the backend app.
APP_PORT=8000

# Name of the embedding model to use.
EMBEDDING_MODEL=text-embedding-3-small

# Dimension of the embedding model to use.
# EMBEDDING_DIM=

# Temperature for sampling from the model.
# LLM_TEMPERATURE=

# Maximum number of tokens to generate.
# LLM_MAX_TOKENS=

# The number of similar embeddings to return when retrieving documents.
TOP_K=3

# Custom system prompt.
# Example:
# SYSTEM_PROMPT="
# We have provided context information below.
# ---------------------
# {context_str}
# ---------------------
# Given this information, please answer the question: {query_str}
# "
# SYSTEM_PROMPT=
--------------------------------------------------------------------------------
/backend/.gitignore:
--------------------------------------------------------------------------------
__pycache__
storage
.env
--------------------------------------------------------------------------------
/backend/README.md:
--------------------------------------------------------------------------------
This is a [LlamaIndex](https://www.llamaindex.ai/) project using [FastAPI](https://fastapi.tiangolo.com/) bootstrapped with [`create-llama`](https://github.com/run-llama/LlamaIndexTS/tree/main/packages/create-llama).

## Getting Started

First, set up the environment with Poetry:

> **_Note:_** This step is not needed if you are using the dev-container.

```
poetry install
poetry shell
```

By default, we use the OpenAI LLM (though you can customize it, see `app/settings.py`). As a result, you need to specify an `OPENAI_API_KEY` in a `.env` file in this directory.

Example `.env` file:

```
OPENAI_API_KEY=<openai_api_key>
```

Second, generate the embeddings of the documents in the `./data` directory (if this folder exists - otherwise, skip this step):

```
python app/engine/generate.py
```

Third, run the development server:

```
python main.py
```

Then call the API endpoint `/api/chat` to see the result:

```
curl --location 'localhost:8000/api/chat' \
--header 'Content-Type: application/json' \
--data '{ "prompt": "Quantum computing" }'
```
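
The endpoint streams its result using the Vercel AI SDK's experimental stream-data line protocol (note the `X-Experimental-Stream-Data` header set in `app/api/routers/chat.py`): each line is prefixed with `2:` and carries a JSON array holding the latest partial `Diagram`. The exact payload depends on the topic, but the output looks roughly like:

```
2:[{"nodes":[{"id":1,"label":"Quantum computing","shape":"dot"}],"edges":[]}]
2:[{"nodes":[{"id":1,"label":"Quantum computing","shape":"dot"},{"id":2,"label":"Qubit","shape":"box"}],"edges":[{"id":"...","from":1,"to":2,"label":"uses"}]}]
```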

You can start editing the API by modifying `app/api/routers/chat.py`. The endpoint auto-updates as you save the file.

Open [http://localhost:8000/docs](http://localhost:8000/docs) in your browser to see the Swagger UI of the API.

The API allows CORS for all origins to simplify development. You can change this behavior by setting the `ENVIRONMENT` environment variable to `prod`:

```
ENVIRONMENT=prod python main.py
```

## Learn More

To learn more about LlamaIndex, take a look at the following resources:

- [LlamaIndex Documentation](https://docs.llamaindex.ai) - learn about LlamaIndex.

You can check out [the LlamaIndex GitHub repository](https://github.com/run-llama/llama_index) - your feedback and contributions are welcome!
--------------------------------------------------------------------------------
/backend/app/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rsrohan99/ai-diagram-generator/cf711e3baa39e94db4b7ce1d14a9179e395ac089/backend/app/__init__.py
--------------------------------------------------------------------------------
/backend/app/api/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rsrohan99/ai-diagram-generator/cf711e3baa39e94db4b7ce1d14a9179e395ac089/backend/app/api/__init__.py
--------------------------------------------------------------------------------
/backend/app/api/routers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rsrohan99/ai-diagram-generator/cf711e3baa39e94db4b7ce1d14a9179e395ac089/backend/app/api/routers/__init__.py
--------------------------------------------------------------------------------
/backend/app/api/routers/chat.py:
--------------------------------------------------------------------------------
import json

from pydantic import BaseModel
from fastapi.responses import StreamingResponse
from fastapi import APIRouter, Request
from llama_index.program.openai import OpenAIPydanticProgram

from app.scheme.diagram import Diagram

chat_router = r = APIRouter()


class RequestData(BaseModel):
    prompt: str


prompt_template_str = """
Explain the given topic using a very detailed and comprehensive tree-like diagram. You must use lots of nodes spanning multiple levels to properly explain the topic in detail. Make the tree very deep in hierarchy, with as much detail as possible. Use proper labels for the nodes and edges. Use the most appropriate shape for each node to make the diagram as visually appealing as possible.
topic: {topic}
"""


@r.post("")
async def chat(request: Request, data: RequestData):
    topic = data.prompt
    diagram_program = OpenAIPydanticProgram.from_defaults(
        output_cls=Diagram,
        prompt_template_str=prompt_template_str,
        verbose=True,
    )

    # Stream each partially-parsed Diagram as a Vercel AI SDK stream-data
    # line ("2:<json array>") the moment it becomes available.
    async def event_generator():
        for partial_diagram in diagram_program.stream_partial_objects(topic=topic):
            if await request.is_disconnected():
                break
            yield f"2:{json.dumps([partial_diagram.model_dump(by_alias=True)])}\n"

    return StreamingResponse(
        event_generator(),
        media_type="text/event-stream",
        headers={"X-Experimental-Stream-Data": "true"},
    )
red, blue etc.") 10 | shape: Literal['dot','ellipse','box','hexagon','diamond'] = Field( 11 | "dot", 12 | description="Best shape based on the node type" 13 | ) 14 | 15 | 16 | class Edge(BaseModel): 17 | """An Edge of the Diagram""" 18 | id: str = Field(default_factory=lambda: str(uuid4())) 19 | source: int = Field( 20 | ..., 21 | description="Int id of the source node", 22 | serialization_alias="from" 23 | ) 24 | target: int = Field( 25 | ..., 26 | description="Int id of the target node", 27 | serialization_alias="to" 28 | ) 29 | label: str 30 | 31 | 32 | class Diagram(BaseModel): 33 | """A very comprehensive and detailed diagram to fully understand the given topic""" 34 | 35 | nodes: list[Node] = Field(..., default_factory=list) 36 | edges: list[Edge] = Field(..., default_factory=list) -------------------------------------------------------------------------------- /backend/app/settings.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import Dict 3 | from llama_index.core.settings import Settings 4 | from llama_index.llms.openai import OpenAI 5 | from llama_index.embeddings.openai import OpenAIEmbedding 6 | 7 | 8 | def llm_config_from_env() -> Dict: 9 | from llama_index.core.constants import DEFAULT_TEMPERATURE 10 | 11 | model = os.getenv("MODEL") 12 | temperature = os.getenv("LLM_TEMPERATURE", DEFAULT_TEMPERATURE) 13 | max_tokens = os.getenv("LLM_MAX_TOKENS") 14 | 15 | config = { 16 | "model": model, 17 | "temperature": float(temperature), 18 | "max_tokens": int(max_tokens) if max_tokens is not None else None, 19 | } 20 | return config 21 | 22 | 23 | def embedding_config_from_env() -> Dict: 24 | model = os.getenv("EMBEDDING_MODEL") 25 | dimension = os.getenv("EMBEDDING_DIM") 26 | 27 | config = { 28 | "model": model, 29 | "dimension": int(dimension) if dimension is not None else None, 30 | } 31 | return config 32 | 33 | 34 | def init_settings(): 35 | llm_configs = llm_config_from_env() 36 | embedding_configs = embedding_config_from_env() 37 | 38 | Settings.llm = OpenAI(**llm_configs) 39 | Settings.embed_model = OpenAIEmbedding(**embedding_configs) 40 | Settings.chunk_size = int(os.getenv("CHUNK_SIZE", "1024")) 41 | Settings.chunk_overlap = int(os.getenv("CHUNK_OVERLAP", "20")) 42 | -------------------------------------------------------------------------------- /backend/main.py: -------------------------------------------------------------------------------- 1 | from dotenv import load_dotenv 2 | 3 | load_dotenv() 4 | 5 | import logging 6 | import os 7 | import uvicorn 8 | from fastapi import FastAPI 9 | from fastapi.middleware.cors import CORSMiddleware 10 | from app.api.routers.chat import chat_router 11 | from app.settings import init_settings 12 | 13 | 14 | app = FastAPI() 15 | 16 | init_settings() 17 | 18 | environment = os.getenv("ENVIRONMENT", "dev") # Default to 'development' if not set 19 | 20 | 21 | if environment == "dev": 22 | logger = logging.getLogger("uvicorn") 23 | logger.warning("Running in development mode - allowing CORS for all origins") 24 | app.add_middleware( 25 | CORSMiddleware, 26 | allow_origins=["*"], 27 | allow_credentials=True, 28 | allow_methods=["*"], 29 | allow_headers=["*"], 30 | expose_headers=[ "X-Experimental-Stream-Data"], 31 | ) 32 | 33 | app.include_router(chat_router, prefix="/api/chat") 34 | 35 | 36 | if __name__ == "__main__": 37 | app_host = os.getenv("APP_HOST", "0.0.0.0") 38 | app_port = int(os.getenv("APP_PORT", "8000")) 39 | reload = True if environment == "dev" else False 40 
--------------------------------------------------------------------------------
/backend/app/settings.py:
--------------------------------------------------------------------------------
import os
from typing import Dict

from llama_index.core.settings import Settings
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding


def llm_config_from_env() -> Dict:
    from llama_index.core.constants import DEFAULT_TEMPERATURE

    model = os.getenv("MODEL")
    temperature = os.getenv("LLM_TEMPERATURE", DEFAULT_TEMPERATURE)
    max_tokens = os.getenv("LLM_MAX_TOKENS")

    config = {
        "model": model,
        "temperature": float(temperature),
        "max_tokens": int(max_tokens) if max_tokens is not None else None,
    }
    return config


def embedding_config_from_env() -> Dict:
    model = os.getenv("EMBEDDING_MODEL")
    dimension = os.getenv("EMBEDDING_DIM")

    config = {
        "model": model,
        "dimension": int(dimension) if dimension is not None else None,
    }
    return config


def init_settings():
    llm_configs = llm_config_from_env()
    embedding_configs = embedding_config_from_env()

    Settings.llm = OpenAI(**llm_configs)
    Settings.embed_model = OpenAIEmbedding(**embedding_configs)
    Settings.chunk_size = int(os.getenv("CHUNK_SIZE", "1024"))
    Settings.chunk_overlap = int(os.getenv("CHUNK_OVERLAP", "20"))
--------------------------------------------------------------------------------
/backend/main.py:
--------------------------------------------------------------------------------
from dotenv import load_dotenv

load_dotenv()

import logging
import os
import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from app.api.routers.chat import chat_router
from app.settings import init_settings


app = FastAPI()

init_settings()

environment = os.getenv("ENVIRONMENT", "dev")  # default to 'dev' if not set


if environment == "dev":
    logger = logging.getLogger("uvicorn")
    logger.warning("Running in development mode - allowing CORS for all origins")
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
        # expose the custom header so the browser lets the AI SDK read stream data
        expose_headers=["X-Experimental-Stream-Data"],
    )

app.include_router(chat_router, prefix="/api/chat")


if __name__ == "__main__":
    app_host = os.getenv("APP_HOST", "0.0.0.0")
    app_port = int(os.getenv("APP_PORT", "8000"))
    reload = environment == "dev"

    uvicorn.run(app="main:app", host=app_host, port=app_port, reload=reload)
--------------------------------------------------------------------------------
/backend/pyproject.toml:
--------------------------------------------------------------------------------
[tool]
[tool.poetry]
name = "app"
version = "0.1.0"
description = ""
authors = [ "Marcus Schiesser" ]
readme = "README.md"

[tool.poetry.dependencies]
python = "^3.11,<3.12"
fastapi = "^0.109.1"
python-dotenv = "^1.0.0"
llama-index = "^0.10.15"
llama-index-core = "^0.10.15"
llama-index-program-openai = "^0.1.6"

[tool.poetry.dependencies.uvicorn]
extras = [ "standard" ]
version = "^0.23.2"

[tool.poetry.dependencies.docx2txt]
version = "^0.8"

[build-system]
requires = [ "poetry-core" ]
build-backend = "poetry.core.masonry.api"
--------------------------------------------------------------------------------
/backend/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rsrohan99/ai-diagram-generator/cf711e3baa39e94db4b7ce1d14a9179e395ac089/backend/tests/__init__.py
--------------------------------------------------------------------------------
/frontend/.env.example:
--------------------------------------------------------------------------------
NEXT_PUBLIC_CHAT_API=http://localhost:8000/api/chat
--------------------------------------------------------------------------------
/frontend/.gitignore:
--------------------------------------------------------------------------------
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.

# dependencies
/node_modules
/.pnp
.pnp.js

# testing
/coverage

# next.js
/.next/
/out/

# production
/build

# misc
.DS_Store
*.pem

# debug
npm-debug.log*
yarn-debug.log*
yarn-error.log*

# local env files
.env*.local

# vercel
.vercel

# typescript
*.tsbuildinfo
next-env.d.ts
--------------------------------------------------------------------------------
/frontend/README.md:
--------------------------------------------------------------------------------
This is a [LlamaIndex](https://www.llamaindex.ai/) project using [Next.js](https://nextjs.org/) bootstrapped with [`create-llama`](https://github.com/run-llama/LlamaIndexTS/tree/main/packages/create-llama).

## Getting Started

First, install the dependencies:

```
npm install
```

Then, run the development server:

```
npm run dev
```

Open [http://localhost:3000](http://localhost:3000) in your browser to see the result.

You can start editing the page by modifying `app/page.tsx`. The page auto-updates as you edit the file.

This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-optimization) to automatically optimize and load Inter, a custom Google Font.

## Learn More

To learn more about LlamaIndex, take a look at the following resources:

- [LlamaIndex Documentation](https://docs.llamaindex.ai) - learn about LlamaIndex (Python features).
- [LlamaIndexTS Documentation](https://ts.llamaindex.ai) - learn about LlamaIndex (Typescript features).

You can check out [the LlamaIndexTS GitHub repository](https://github.com/run-llama/LlamaIndexTS) - your feedback and contributions are welcome!
--------------------------------------------------------------------------------
/frontend/app/components/diagram-section.tsx:
--------------------------------------------------------------------------------
"use client";

import { useCompletion } from "ai/react";

import TopicInput from "./topic-input";
import DiagramVis, { Diagram } from "./diagram-vis";
import { useEffect, useState } from "react";

export default function DiagramSection() {
  const { input, isLoading, handleInputChange, handleSubmit, data } =
    useCompletion({
      api: process.env.NEXT_PUBLIC_CHAT_API,
    });

  const [diagram, setDiagram] = useState<Diagram | null>(null);

  useEffect(() => {
    // the AI SDK appends each streamed data line to `data`;
    // the last entry is the most complete partial diagram so far
    if (data && data.length > 0) {
      setDiagram(data[data.length - 1] as unknown as Diagram);
    }
  }, [data]);

  return (
    <div>
      <TopicInput
        input={input}
        handleInputChange={handleInputChange}
        handleSubmit={handleSubmit}
        isLoading={isLoading}
      />
      {diagram && <DiagramVis diagram={diagram} />}
    </div>
  );
}
--------------------------------------------------------------------------------
/frontend/app/components/diagram-vis.tsx:
--------------------------------------------------------------------------------
"use client";

import Graph from "react-graph-vis";

export interface Diagram {
  nodes: {
    id: number;
    label: string;
    shape: string;
  }[];
  edges: {
    from: number;
    to: number;
    label: string;
    id: string;
  }[];
}

interface GraphProps {
  diagram: Diagram;
}

const DiagramVis: React.FC<GraphProps> = ({ diagram: graph }) => {
  const options = {
    nodes: {
      font: {
        size: 15,
      },
    },
    edges: {
      font: {
        size: 12,
      },
      arrows: {
        to: {
          enabled: true,
        },
      },
    },
    physics: {
      repulsion: {
        nodeDistance: 90,
      },
      minVelocity: 0.75,
      solver: "repulsion",
    },
  };
  return (
    // vis-network needs a container with an explicit height to render into
    <div style={{ height: "90vh" }}>
      <Graph graph={graph} options={options} />
    </div>
  );
};

export default DiagramVis;
--------------------------------------------------------------------------------
/frontend/app/components/topic-input.tsx:
--------------------------------------------------------------------------------
"use client";

import { FormEvent, ChangeEvent } from "react";
import { Loader2 } from "lucide-react";

interface TopicInputProps {
  handleSubmit: (e: FormEvent<HTMLFormElement>) => void;
  handleInputChange: (
    e: ChangeEvent<HTMLInputElement> | ChangeEvent<HTMLTextAreaElement>
  ) => void;
  isLoading: boolean;
  input: string;
}

const TopicInput: React.FC<TopicInputProps> = ({
  handleSubmit,
  isLoading,
  handleInputChange,
  input,
}) => {
  return (
    <form onSubmit={handleSubmit}>
      <input
        type="text"
        value={input}
        onChange={handleInputChange}
        placeholder="Enter a topic"
        disabled={isLoading}
      />
      <button type="submit" disabled={isLoading}>
        Generate
      </button>
      {isLoading && <Loader2 className="animate-spin" />}
    </form>
  );
};

export default TopicInput;
--------------------------------------------------------------------------------
/frontend/app/components/ui/lib/utils.ts:
--------------------------------------------------------------------------------
import { clsx, type ClassValue } from "clsx";
import { twMerge } from "tailwind-merge";

export function cn(...inputs: ClassValue[]) {
  return twMerge(clsx(inputs));
}
--------------------------------------------------------------------------------
/frontend/app/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rsrohan99/ai-diagram-generator/cf711e3baa39e94db4b7ce1d14a9179e395ac089/frontend/app/favicon.ico
--------------------------------------------------------------------------------
/frontend/app/globals.css:
--------------------------------------------------------------------------------
@tailwind base;
@tailwind components;
@tailwind utilities;

@layer base {
  :root {
    --background: 0 0% 100%;
    --foreground: 222.2 47.4% 11.2%;

    --muted: 210 40% 96.1%;
    --muted-foreground: 215.4 16.3% 46.9%;

    --popover: 0 0% 100%;
    --popover-foreground: 222.2 47.4% 11.2%;

    --border: 214.3 31.8% 91.4%;
    --input: 214.3 31.8% 91.4%;

    --card: 0 0% 100%;
    --card-foreground: 222.2 47.4% 11.2%;

    --primary: 222.2 47.4% 11.2%;
    --primary-foreground: 210 40% 98%;

    --secondary: 210 40% 96.1%;
    --secondary-foreground: 222.2 47.4% 11.2%;

    --accent: 210 40% 96.1%;
    --accent-foreground: 222.2 47.4% 11.2%;

    --destructive: 0 100% 50%;
    --destructive-foreground: 210 40% 98%;

    --ring: 215 20.2% 65.1%;

    --radius: 0.5rem;
  }

  .dark {
    --background: 224 71% 4%;
    --foreground: 213 31% 91%;

    --muted: 223 47% 11%;
    --muted-foreground: 215.4 16.3% 56.9%;

    --accent: 216 34% 17%;
    --accent-foreground: 210 40% 98%;

    --popover: 224 71% 4%;
    --popover-foreground: 215 20.2% 65.1%;

    --border: 216 34% 17%;
    --input: 216 34% 17%;

    --card: 224 71% 4%;
    --card-foreground: 213 31% 91%;

    --primary: 210 40% 98%;
    --primary-foreground: 222.2 47.4% 1.2%;

    --secondary: 222.2 47.4% 11.2%;
    --secondary-foreground: 210 40% 98%;

    --destructive: 0 63% 31%;
    --destructive-foreground: 210 40% 98%;

    --ring: 216 34% 17%;

    --radius: 0.5rem;
  }
}

@layer base {
  * {
    @apply border-border;
  }
  body {
    @apply bg-background text-foreground;
    font-feature-settings:
      "rlig" 1,
      "calt" 1;
  }
  .background-gradient {
    background-color: #fff;
    background-image: radial-gradient(
        at 21% 11%,
        rgba(186, 186, 233, 0.53) 0,
        transparent 50%
      ),
      radial-gradient(at 85% 0, hsla(46, 57%, 78%, 0.52) 0, transparent 50%),
      radial-gradient(at 91% 36%, rgba(194, 213, 255, 0.68) 0, transparent 50%),
      radial-gradient(at 8% 40%, rgba(251, 218, 239, 0.46) 0, transparent 50%);
  }
}
--------------------------------------------------------------------------------
/frontend/app/layout.tsx:
--------------------------------------------------------------------------------
import type { Metadata } from "next";
import { Inter } from "next/font/google";
import "./globals.css";

const inter = Inter({ subsets: ["latin"] });

export const metadata: Metadata = {
  title: "Diagram Generator",
  description:
    "Diagram generator with partial diagram streaming, created by Rohan(@clusteredbytes) using LlamaIndex, vercel-ai-sdk.",
};

export default function RootLayout({
  children,
}: {
  children: React.ReactNode;
}) {
  return (
    <html lang="en">
      <body className={inter.className}>{children}</body>
    </html>
  );
}
--------------------------------------------------------------------------------
/frontend/app/observability/index.ts:
--------------------------------------------------------------------------------
export const initObservability = () => {};
--------------------------------------------------------------------------------
/frontend/app/page.tsx:
--------------------------------------------------------------------------------
import DiagramSection from "./components/diagram-section";

export default function Home() {
  return (
    <main>
      <DiagramSection />
    </main>
  );
}
--------------------------------------------------------------------------------
/frontend/bun.lockb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rsrohan99/ai-diagram-generator/cf711e3baa39e94db4b7ce1d14a9179e395ac089/frontend/bun.lockb
--------------------------------------------------------------------------------
/frontend/next.config.js:
--------------------------------------------------------------------------------
module.exports = {
  reactStrictMode: false,
};
--------------------------------------------------------------------------------
/frontend/next.config.json:
--------------------------------------------------------------------------------
{
  "experimental": {
    "outputFileTracingIncludes": {
      "/*": [
        "./cache/**/*"
      ]
    }
  },
  "output": "export",
  "images": {
    "unoptimized": true
  }
}
--------------------------------------------------------------------------------
/frontend/next.config.mjs:
--------------------------------------------------------------------------------
/** @type {import('next').NextConfig} */
import fs from "fs";
import webpack from "./webpack.config.mjs";

const nextConfig = JSON.parse(fs.readFileSync("./next.config.json", "utf-8"));
nextConfig.webpack = webpack;

export default nextConfig;
--------------------------------------------------------------------------------
/frontend/package.json:
--------------------------------------------------------------------------------
{
  "name": "event-handler-test-2",
  "version": "0.1.0",
  "scripts": {
    "dev": "next dev",
    "build": "next build",
    "start": "next start",
    "lint": "next lint",
    "generate": "node app/api/chat/engine/generate.mjs"
  },
  "dependencies": {
    "@radix-ui/react-slot": "^1.0.2",
    "ai": "^2.2.27",
    "class-variance-authority": "^0.7.0",
    "clsx": "^1.2.1",
    "dotenv": "^16.3.1",
    "llamaindex": "latest",
    "lucide-react": "^0.294.0",
    "next": "^14.0.3",
    "react": "^18.2.0",
    "react-dom": "^18.2.0",
    "react-graph-vis": "^1.0.7",
    "react-markdown": "^8.0.7",
    "react-syntax-highlighter": "^15.5.0",
    "remark": "^14.0.3",
    "remark-code-import": "^1.2.0",
    "remark-gfm": "^3.0.1",
    "remark-math": "^5.1.1",
    "supports-color": "^8.1.1",
    "tailwind-merge": "^2.1.0"
  },
  "devDependencies": {
    "@types/node": "^20.10.3",
    "@types/react": "^18.2.42",
    "@types/react-dom": "^18.2.17",
    "autoprefixer": "^10.4.16",
    "postcss": "^8.4.32",
    "tailwindcss": "^3.3.6",
    "typescript": "^5.3.2",
    "@types/react-syntax-highlighter": "^15.5.11",
    "cross-env": "^7.0.3"
  }
}
--------------------------------------------------------------------------------
/frontend/postcss.config.js:
--------------------------------------------------------------------------------
module.exports = {
  plugins: {
    tailwindcss: {},
    autoprefixer: {},
  },
};
--------------------------------------------------------------------------------
/frontend/public/llama.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rsrohan99/ai-diagram-generator/cf711e3baa39e94db4b7ce1d14a9179e395ac089/frontend/public/llama.png
--------------------------------------------------------------------------------
/frontend/tailwind.config.ts:
--------------------------------------------------------------------------------
import type { Config } from "tailwindcss";
import { fontFamily } from "tailwindcss/defaultTheme";

const config: Config = {
  darkMode: ["class"],
  content: ["app/**/*.{ts,tsx}", "components/**/*.{ts,tsx}"],
  theme: {
    container: {
      center: true,
      padding: "2rem",
      screens: {
        "2xl": "1400px",
      },
    },
    extend: {
      colors: {
        border: "hsl(var(--border))",
        input: "hsl(var(--input))",
        ring: "hsl(var(--ring))",
        background: "hsl(var(--background))",
        foreground: "hsl(var(--foreground))",
        primary: {
          DEFAULT: "hsl(var(--primary))",
          foreground: "hsl(var(--primary-foreground))",
        },
        secondary: {
          DEFAULT: "hsl(var(--secondary))",
          foreground: "hsl(var(--secondary-foreground))",
        },
        destructive: {
          DEFAULT: "hsl(var(--destructive) / <alpha-value>)",
          foreground: "hsl(var(--destructive-foreground) / <alpha-value>)",
        },
        muted: {
          DEFAULT: "hsl(var(--muted))",
          foreground: "hsl(var(--muted-foreground))",
        },
        accent: {
          DEFAULT: "hsl(var(--accent))",
          foreground: "hsl(var(--accent-foreground))",
        },
        popover: {
          DEFAULT: "hsl(var(--popover))",
          foreground: "hsl(var(--popover-foreground))",
        },
        card: {
          DEFAULT: "hsl(var(--card))",
          foreground: "hsl(var(--card-foreground))",
        },
      },
      borderRadius: {
        xl: `calc(var(--radius) + 4px)`,
        lg: `var(--radius)`,
        md: `calc(var(--radius) - 2px)`,
        sm: "calc(var(--radius) - 4px)",
      },
      fontFamily: {
        sans: ["var(--font-sans)", ...fontFamily.sans],
      },
      keyframes: {
        "accordion-down": {
          from: { height: "0" },
          to: { height: "var(--radix-accordion-content-height)" },
        },
        "accordion-up": {
          from: { height: "var(--radix-accordion-content-height)" },
          to: { height: "0" },
        },
      },
      animation: {
        "accordion-down": "accordion-down 0.2s ease-out",
        "accordion-up": "accordion-up 0.2s ease-out",
      },
    },
  },
  plugins: [],
};
export default config;
--------------------------------------------------------------------------------
/frontend/tsconfig.json:
--------------------------------------------------------------------------------
{
  "compilerOptions": {
    "target": "es5",
    "lib": ["dom", "dom.iterable", "esnext"],
    "allowJs": true,
    "skipLibCheck": true,
    "strict": true,
    "noEmit": true,
    "esModuleInterop": true,
    "module": "esnext",
    "moduleResolution": "bundler",
    "resolveJsonModule": true,
    "isolatedModules": true,
    "jsx": "preserve",
    "incremental": true,
    "plugins": [
      {
        "name": "next"
      }
    ],
    "paths": {
      "@/*": ["./*"]
    },
    "forceConsistentCasingInFileNames": true
  },
  "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"],
  "exclude": ["node_modules"]
}
--------------------------------------------------------------------------------
/frontend/webpack.config.mjs:
--------------------------------------------------------------------------------
// The webpack config must be a function in Next.js; it patches the default
// webpack config provided by Next.js, see https://nextjs.org/docs/pages/api-reference/next-config-js/webpack
export default function webpack(config) {
  // See https://webpack.js.org/configuration/resolve/#resolvealias
  config.resolve.alias = {
    ...config.resolve.alias,
    sharp$: false,
    "onnxruntime-node$": false,
  };
  return config;
}
--------------------------------------------------------------------------------