├── .env.example ├── .gitattributes ├── .gitignore ├── Dockerfile ├── LICENSE ├── README.md ├── app ├── api │ ├── generate-code │ │ └── route.ts │ ├── get-default-provider │ │ └── route.ts │ └── get-models │ │ └── route.ts ├── globals.css ├── layout.tsx └── page.tsx ├── components.json ├── components ├── code-editor.tsx ├── generation-view.tsx ├── loading-screen.tsx ├── provider-selector.tsx ├── theme-provider.tsx ├── thinking-indicator.tsx ├── ui │ ├── accordion.tsx │ ├── alert-dialog.tsx │ ├── alert.tsx │ ├── aspect-ratio.tsx │ ├── avatar.tsx │ ├── badge.tsx │ ├── breadcrumb.tsx │ ├── button.tsx │ ├── calendar.tsx │ ├── card.tsx │ ├── carousel.tsx │ ├── chart.tsx │ ├── checkbox.tsx │ ├── collapsible.tsx │ ├── command.tsx │ ├── context-menu.tsx │ ├── dialog.tsx │ ├── drawer.tsx │ ├── dropdown-menu.tsx │ ├── form.tsx │ ├── hover-card.tsx │ ├── input-otp.tsx │ ├── input.tsx │ ├── label.tsx │ ├── menubar.tsx │ ├── navigation-menu.tsx │ ├── origin-button.tsx │ ├── pagination.tsx │ ├── popover.tsx │ ├── progress.tsx │ ├── radio-group.tsx │ ├── resizable.tsx │ ├── scroll-area.tsx │ ├── select.tsx │ ├── separator.tsx │ ├── sheet.tsx │ ├── sidebar.tsx │ ├── skeleton.tsx │ ├── slider.tsx │ ├── sonner.tsx │ ├── switch.tsx │ ├── table.tsx │ ├── tabs.tsx │ ├── textarea.tsx │ ├── toast.tsx │ ├── toaster.tsx │ ├── toggle-group.tsx │ ├── toggle.tsx │ ├── tooltip.tsx │ ├── use-mobile.tsx │ └── use-toast.ts ├── welcome-view.tsx └── work-steps.tsx ├── docker-compose.yml ├── hooks ├── use-mobile.tsx └── use-toast.ts ├── lib ├── providers │ ├── config.ts │ └── provider.ts └── utils.ts ├── next.config.mjs ├── package.json ├── postcss.config.mjs ├── public ├── placeholder-logo.png ├── placeholder-logo.svg ├── placeholder-user.jpg ├── placeholder.jpg └── placeholder.svg ├── tailwind.config.ts ├── tsconfig.json └── vercel.json /.env.example: -------------------------------------------------------------------------------- 1 | # DeepSeek API 2 | DEEPSEEK_API_KEY=your_deepseek_api_key_here 3 | DEEPSEEK_API_BASE=https://api.deepseek.com/v1 4 | 5 | # Custom OpenAI-compatible API 6 | OPENAI_COMPATIBLE_API_KEY=your_api_key_here 7 | OPENAI_COMPATIBLE_API_BASE=https://api.openai.com/v1 8 | # Other possible services for the custom API: 9 | # OPENAI_COMPATIBLE_API_BASE=https://api.together.xyz/v1 # Together AI 10 | # OPENAI_COMPATIBLE_API_BASE=https://api.groq.com/openai/v1 # Groq 11 | # OPENAI_COMPATIBLE_API_BASE=https://api.anthropic.com/v1 # Anthropic 12 | 13 | # Ollama Configuration (Local AI models) 14 | OLLAMA_API_BASE=http://localhost:11434 15 | 16 | # LM Studio Configuration (Local AI models) 17 | LM_STUDIO_API_BASE=http://localhost:1234/v1 18 | 19 | # Default Provider (deepseek, openai_compatible, ollama, lm_studio) 20 | DEFAULT_PROVIDER=lm_studio 21 | 22 | # More providers will be added soon -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
2 | 3 | # dependencies 4 | /node_modules 5 | 6 | # next.js 7 | /.next/ 8 | /out/ 9 | 10 | # production 11 | /build 12 | 13 | # debug 14 | npm-debug.log* 15 | yarn-debug.log* 16 | yarn-error.log* 17 | .pnpm-debug.log* 18 | 19 | # env files 20 | .env 21 | .env.local 22 | .env.development.local 23 | .env.test.local 24 | .env.production.local 25 | # but keep example file 26 | !.env.example 27 | 28 | # vercel 29 | .vercel 30 | 31 | # typescript 32 | *.tsbuildinfo 33 | next-env.d.ts 34 | 35 | package-lock.json 36 | unused-components.md 37 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:20-alpine 2 | 3 | # Install required dependencies 4 | RUN apk add --no-cache git 5 | 6 | # Clone repository 7 | RUN git clone https://github.com/weise25/LocalSite-ai.git /app 8 | 9 | # Set working directory 10 | WORKDIR /app 11 | 12 | # Install dependencies 13 | RUN npm install 14 | 15 | # Create script that generates the .env.local file at startup 16 | RUN echo '#!/bin/sh' > /app/entrypoint.sh 17 | RUN echo 'echo "# Configuration generated at startup" > .env.local' >> /app/entrypoint.sh 18 | RUN echo 'echo "DEFAULT_PROVIDER=${DEFAULT_PROVIDER:-lm_studio}" >> .env.local' >> /app/entrypoint.sh 19 | RUN echo 'echo "" >> .env.local' >> /app/entrypoint.sh 20 | RUN echo 'echo "# Ollama Configuration (Local AI models)" >> .env.local' >> /app/entrypoint.sh 21 | RUN echo 'echo "OLLAMA_API_BASE=http://host.docker.internal:11434" >> .env.local' >> /app/entrypoint.sh 22 | RUN echo 'echo "" >> .env.local' >> /app/entrypoint.sh 23 | RUN echo 'echo "# LM Studio Configuration (Local AI models)" >> .env.local' >> /app/entrypoint.sh 24 | RUN echo 'echo "LM_STUDIO_API_BASE=http://host.docker.internal:1234/v1" >> .env.local' >> /app/entrypoint.sh 25 | RUN echo 'exec "$@"' >> /app/entrypoint.sh 26 | RUN chmod +x /app/entrypoint.sh 27 | 28 | # Expose port 29 | EXPOSE 3000 30 | 31 | # Use the entrypoint script 32 | ENTRYPOINT ["/app/entrypoint.sh"] 33 | 34 | # Start the application 35 | CMD ["npm", "run", "dev"] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # LocalSite AI - now with Thinking Model Support! 2 | 3 | 4 | A modern web application that uses AI to generate HTML, CSS, and JavaScript code based on natural language prompts. Simply describe what you want to build, and the AI will create a complete, self-contained web page for you. 5 | 6 | ## Features 7 | 8 | - **AI-Powered Code Generation**: Generate complete web pages from text descriptions 9 | - **Live Preview**: See your generated code in action with desktop, tablet, and mobile views 10 | - **Code Editing**: Edit the generated code directly in the browser 11 | - **Multiple AI Providers**: Support for DeepSeek, custom OpenAI-compatible APIs, and local models 12 | - **Responsive Design**: Works on desktop and mobile devices 13 | - **Modern UI**: Clean, dark-themed interface with a focus on usability 14 | 15 | ## Tech Stack 16 | 17 | - [Next.js 15](https://nextjs.org/) with App Router 18 | - [React 19](https://react.dev/) 19 | - [Tailwind CSS](https://tailwindcss.com/) 20 | - [Shadcn UI](https://ui.shadcn.com/) 21 | - [OpenAI SDK](https://github.com/openai/openai-node) (for API compatibility) 22 | - [Monaco Editor](https://microsoft.github.io/monaco-editor/) 23 | 24 | ## Getting Started 25 | 26 | ### Prerequisites 27 | 28 | - [Node.js](https://nodejs.org/) (version 18.17 or higher) 29 | - [npm](https://www.npmjs.com/) or [yarn](https://yarnpkg.com/) 30 | - [Ollama](https://ollama.com/download/) or [LM Studio](https://lmstudio.ai/) installed 31 | - OR an API key from one of the supported providers (see below) 32 | 33 | ### Installation 34 | 35 | 1. Clone the repository: 36 | ```bash 37 | git clone https://github.com/weise25/LocalSite-ai.git 38 | cd LocalSite-ai 39 | ``` 40 | 41 | 2. Install the dependencies: 42 | ```bash 43 | npm install 44 | # or 45 | yarn install 46 | ``` 47 | 48 | 3. Rename the `.env.example` file in the root directory to `.env.local` and add your API key: 49 | ``` 50 | # Choose one of the following providers: 51 | 52 | # DeepSeek API 53 | DEEPSEEK_API_KEY=your_deepseek_api_key_here 54 | DEEPSEEK_API_BASE=https://api.deepseek.com/v1 55 | 56 | # Custom OpenAI-compatible API 57 | # OPENAI_COMPATIBLE_API_KEY=your_api_key_here 58 | # OPENAI_COMPATIBLE_API_BASE=https://api.openai.com/v1 59 | 60 | # Default Provider (deepseek, openai_compatible, ollama, lm_studio) 61 | DEFAULT_PROVIDER=lm_studio 62 | ``` 63 | 64 | 4. Start the development server: 65 | ```bash 66 | npm run dev 67 | # or 68 | yarn dev 69 | ``` 70 | 71 | 5. Open [http://localhost:3000](http://localhost:3000) in your browser. 72 | 73 | ## Supported AI Providers 74 | 75 | ### Local Models 76 | 77 | #### Ollama 78 | 79 | 1. Install [Ollama](https://ollama.ai/) on your local machine. 80 | 2. Pull a model like `llama2` or `codellama`. 81 | 3. Start the Ollama server. 82 | 4. Set in your `.env.local` file: 83 | ``` 84 | OLLAMA_API_BASE=http://localhost:11434 85 | DEFAULT_PROVIDER=ollama 86 | ``` 87 | 88 | #### LM Studio 89 | 90 | 1. Install [LM Studio](https://lmstudio.ai/) on your local machine. 91 | 2. Download a model and start the local server. 92 | 3. Set in your `.env.local` file: 93 | ``` 94 | LM_STUDIO_API_BASE=http://localhost:1234/v1 95 | DEFAULT_PROVIDER=lm_studio 96 | ``` 97 | 98 | ### DeepSeek 99 | 100 | 1. Visit [DeepSeek](https://platform.deepseek.com) and create an account or sign in. 101 | 2. 
Navigate to the API keys section. 102 | 3. Create a new API key and copy it. 103 | 4. Set in your `.env.local` file: 104 | ``` 105 | DEEPSEEK_API_KEY=your_deepseek_api_key 106 | DEEPSEEK_API_BASE=https://api.deepseek.com/v1 107 | ``` 108 | 109 | ### Custom OpenAI-compatible API 110 | 111 | You can use any OpenAI-compatible API: 112 | 113 | 1. Obtain an API key from your desired provider (OpenAI, Together AI, Groq, etc.). 114 | 2. Set in your `.env.local` file: 115 | ``` 116 | OPENAI_COMPATIBLE_API_KEY=your_api_key 117 | OPENAI_COMPATIBLE_API_BASE=https://api.your-provider.com/v1 118 | ``` 119 | 120 | ## Deployment 121 | 122 | ### Deploying on Vercel 123 | 124 | [Vercel](https://vercel.com) is the recommended platform for hosting your Next.js application: 125 | 126 | 1. Create an account on Vercel and connect it to your GitHub account. 127 | 2. Import your repository. 128 | 3. Add the environment variables for your desired provider, e.g.: 129 | - `DEEPSEEK_API_KEY` 130 | - `DEEPSEEK_API_BASE` 131 | - `DEFAULT_PROVIDER` 132 | 4. Click "Deploy". 133 | 134 | ### Other Hosting Options 135 | 136 | The application can also be deployed on: 137 | - [Netlify](https://netlify.com) 138 | - [Cloudflare Pages](https://pages.cloudflare.com) 139 | - Any platform that supports Next.js applications 140 | 141 | **Keep in mind that if you host the app on a platform such as Vercel or Netlify, you cannot use local models through Ollama or LM Studio unless you expose them with a tunneling service like [ngrok](https://ngrok.com).** 142 | 143 | ## Usage 144 | 145 | 1. Enter a prompt describing what kind of website you want to create. 146 | 2. Select an AI provider and model from the dropdown menu. 147 | 3. Click "GENERATE". 148 | 4. Wait for the code to be generated. 149 | 5. View the live preview and adjust the viewport (Desktop, Tablet, Mobile). 150 | 6. Toggle edit mode to modify the code if needed. 151 | 7. Copy the code or download it as an HTML file. 152 | 153 | ## Roadmap 154 | 155 | ### AI Models and Providers 156 | - [x] Integration with [Ollama](https://ollama.ai) for local model execution 157 | - [x] Support for [LM Studio](https://lmstudio.ai) to use local models 158 | - [x] Predefined provider: DeepSeek 159 | - [x] Custom OpenAI-compatible API support 160 | - [x] Support for thinking models (Qwen3, DeepCoder, etc.) 161 | - [ ] Adding more predefined providers (Anthropic, Groq, etc.) 162 | 163 | ### Advanced Code Generation 164 | - [ ] Choose between different frameworks and libraries (React, Vue, Angular, etc.) 165 | - [ ] File-based code generation (multiple files) 166 | - [ ] Save and load projects 167 | - [ ] Agentic diff-editing capabilities 168 | 169 | ### UI/UX Improvements 170 | - [ ] Dark/Light theme toggle 171 | - [ ] Customizable code editor settings 172 | - [ ] Drag-and-drop interface for UI components 173 | - [ ] History of generated code 174 | 175 | ### Accessibility 176 | - [ ] Transcription and voice input for prompts 177 | - [ ] Anything; feel free to make suggestions 178 | 179 | ### Desktop App 180 | - [ ] Turning into a cross-platform desktop app (Electron) 181 | 182 | 183 | ## Contributing 184 | 185 | Contributions are welcome! Please feel free to submit a Pull Request. 186 | 187 | 1. Fork the repository 188 | 2. Create your feature branch (`git checkout -b feature/amazing-feature`) 189 | 3. Commit your changes (`git commit -m 'Add some amazing feature'`) 190 | 4. Push to the branch (`git push origin feature/amazing-feature`) 191 | 5.
Open a Pull Request 192 | 193 | ## License 194 | 195 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. 196 | 197 | -------------------------------------------------------------------------------- /app/api/generate-code/route.ts: -------------------------------------------------------------------------------- 1 | import { NextRequest } from 'next/server'; 2 | import { LLMProvider } from '@/lib/providers/config'; 3 | import { createProviderClient } from '@/lib/providers/provider'; 4 | 5 | export async function POST(request: NextRequest) { 6 | try { 7 | // Parse the JSON body 8 | const { prompt, model, provider: providerParam, customSystemPrompt, maxTokens } = await request.json(); 9 | 10 | // Check if prompt and model are provided 11 | if (!prompt || !model) { 12 | return new Response( 13 | JSON.stringify({ error: 'Prompt and model are required' }), 14 | { status: 400, headers: { 'Content-Type': 'application/json' } } 15 | ); 16 | } 17 | 18 | // Parse maxTokens as a number if it's provided 19 | const parsedMaxTokens = maxTokens ? parseInt(maxTokens.toString(), 10) : undefined; 20 | 21 | // Determine the provider to use 22 | let provider: LLMProvider; 23 | 24 | if (providerParam && Object.values(LLMProvider).includes(providerParam as LLMProvider)) { 25 | provider = providerParam as LLMProvider; 26 | } else { 27 | // Use the default provider from environment variables or DeepSeek as fallback 28 | provider = (process.env.DEFAULT_PROVIDER as LLMProvider) || LLMProvider.DEEPSEEK; 29 | } 30 | 31 | // Create the provider client 32 | const providerClient = createProviderClient(provider); 33 | 34 | // Generate code with the selected provider and custom system prompt if provided 35 | const stream = await providerClient.generateCode(prompt, model, customSystemPrompt || null, parsedMaxTokens); 36 | 37 | // Return the stream as response 38 | return new Response(stream, { 39 | headers: { 40 | 'Content-Type': 'text/plain; charset=utf-8', 41 | 'Cache-Control': 'no-cache', 42 | }, 43 | }); 44 | } catch (error) { 45 | console.error('Error generating code:', error); 46 | 47 | // Return a more specific error message if available 48 | const errorMessage = error instanceof Error ? 
error.message : 'Error generating code'; 49 | 50 | return new Response( 51 | JSON.stringify({ error: errorMessage }), 52 | { status: 500, headers: { 'Content-Type': 'application/json' } } 53 | ); 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /app/api/get-default-provider/route.ts: -------------------------------------------------------------------------------- 1 | import { NextResponse } from 'next/server'; 2 | import { LLMProvider } from '@/lib/providers/config'; 3 | 4 | export async function GET() { 5 | try { 6 | // Use the default provider from environment variables or DeepSeek as fallback 7 | const defaultProvider = (process.env.DEFAULT_PROVIDER as LLMProvider) || LLMProvider.DEEPSEEK; 8 | 9 | return NextResponse.json({ defaultProvider }); 10 | } catch (error) { 11 | console.error('Error fetching default provider:', error); 12 | 13 | return NextResponse.json( 14 | { error: 'Error fetching default provider' }, 15 | { status: 500 } 16 | ); 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /app/api/get-models/route.ts: -------------------------------------------------------------------------------- 1 | import { NextRequest, NextResponse } from 'next/server'; 2 | import { LLMProvider, getAvailableProviders } from '@/lib/providers/config'; 3 | import { createProviderClient } from '@/lib/providers/provider'; 4 | 5 | export async function GET(request: NextRequest) { 6 | try { 7 | // Get the provider from the request or use the default provider 8 | const searchParams = request.nextUrl.searchParams; 9 | const providerParam = searchParams.get('provider'); 10 | 11 | let provider: LLMProvider; 12 | 13 | if (providerParam && Object.values(LLMProvider).includes(providerParam as LLMProvider)) { 14 | provider = providerParam as LLMProvider; 15 | } else { 16 | // Use the default provider from environment variables or DeepSeek as fallback 17 | provider = (process.env.DEFAULT_PROVIDER as LLMProvider) || LLMProvider.DEEPSEEK; 18 | } 19 | 20 | // Create the provider client 21 | const providerClient = createProviderClient(provider); 22 | 23 | // Get the available models 24 | const models = await providerClient.getModels(); 25 | 26 | // Return the list of models as JSON response 27 | return NextResponse.json(models); 28 | } catch (error) { 29 | console.error('Error fetching models:', error); 30 | 31 | // Return a more specific error message if available 32 | const errorMessage = error instanceof Error ? 
error.message : 'Error fetching models'; 33 | 34 | return NextResponse.json( 35 | { error: errorMessage }, 36 | { status: 500 } 37 | ); 38 | } 39 | } 40 | 41 | // Endpoint to get available providers 42 | export async function POST() { 43 | try { 44 | const providers = getAvailableProviders().map(provider => ({ 45 | id: provider.id, 46 | name: provider.name, 47 | description: provider.description, 48 | isLocal: provider.isLocal, 49 | })); 50 | 51 | return NextResponse.json(providers); 52 | } catch (error) { 53 | console.error('Error fetching providers:', error); 54 | 55 | return NextResponse.json( 56 | { error: 'Error fetching providers' }, 57 | { status: 500 } 58 | ); 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /app/globals.css: -------------------------------------------------------------------------------- 1 | @tailwind base; 2 | @tailwind components; 3 | @tailwind utilities; 4 | 5 | :root { 6 | --background: 0 0% 3.9%; 7 | --foreground: 0 0% 98%; 8 | --card: 0 0% 3.9%; 9 | --card-foreground: 0 0% 98%; 10 | --popover: 0 0% 3.9%; 11 | --popover-foreground: 0 0% 98%; 12 | --primary: 0 0% 100%; 13 | --primary-foreground: 0 0% 0%; 14 | --secondary: 240 3.7% 15.9%; 15 | --secondary-foreground: 0 0% 98%; 16 | --muted: 0 0% 14.9%; 17 | --muted-foreground: 0 0% 63.9%; 18 | --accent: 0 0% 100%; 19 | --accent-foreground: 0 0% 9%; 20 | --destructive: 0 62.8% 30.6%; 21 | --destructive-foreground: 0 0% 98%; 22 | --border: 240 3.7% 15.9%; 23 | --input: 240 3.7% 15.9%; 24 | --ring: 0 0% 80%; 25 | --radius: 0.5rem; 26 | } 27 | 28 | * { 29 | @apply border-border; 30 | } 31 | 32 | body { 33 | @apply bg-background text-foreground; 34 | font-feature-settings: "rlig" 1, "calt" 1; 35 | } 36 | 37 | /* Custom scrollbar */ 38 | ::-webkit-scrollbar { 39 | width: 8px; 40 | height: 8px; 41 | } 42 | 43 | ::-webkit-scrollbar-track { 44 | background: #111; 45 | } 46 | 47 | ::-webkit-scrollbar-thumb { 48 | background: #333; 49 | border-radius: 4px; 50 | } 51 | 52 | ::-webkit-scrollbar-thumb:hover { 53 | background: #444; 54 | } 55 | 56 | /* Tech Button Styles */ 57 | .tech-button:hover { 58 | box-shadow: 0 0 20px rgba(42, 87, 139, 0.6), inset 0 1px 2px rgba(135, 207, 255, 0.2), inset 0 -1px 2px rgba(0, 0, 0, 0.3); 59 | transform: translateY(-2px); 60 | } 61 | 62 | .tech-button:active { 63 | transform: translateY(0); 64 | box-shadow: 0 0 10px rgba(42, 87, 139, 0.4), inset 0 2px 3px rgba(0, 0, 0, 0.4), inset 0 -1px 1px rgba(135, 207, 255, 0.1); 65 | } 66 | 67 | .tech-button:disabled { 68 | opacity: 0.6; 69 | cursor: not-allowed; 70 | } 71 | 72 | /* Import Roboto font for tech button */ 73 | @import url('https://fonts.googleapis.com/css2?family=Roboto:wght@500&display=swap'); 74 | -------------------------------------------------------------------------------- /app/layout.tsx: -------------------------------------------------------------------------------- 1 | import type React from "react" 2 | import "@/app/globals.css" 3 | import { Inter } from "next/font/google" 4 | import { ThemeProvider } from "@/components/theme-provider" 5 | 6 | const inter = Inter({ subsets: ["latin"] }) 7 | 8 | export const metadata = { 9 | title: "AI Code Generator", 10 | description: "Modern AI-powered code generation web app", 11 | generator: 'v0.dev' 12 | } 13 | 14 | export default function RootLayout({ 15 | children, 16 | }: Readonly<{ 17 | children: React.ReactNode 18 | }>) { 19 | return ( 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | {children} 29 | 30 | 31 | 32 | ) 33 | } 34 | 
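
Usage note: the API routes earlier in this listing behave differently on the client side — `/api/get-models` returns JSON, while `/api/generate-code` returns a plain-text stream. The sketch below shows one way a client component could consume that stream incrementally. It is a hypothetical helper (the function name and the `onChunk` callback are not part of the repository) and relies only on the request and response shapes visible in the route handlers above.

```ts
// Hypothetical client-side helper, not a file from the repository.
// Assumes only what the /api/generate-code route handler accepts and returns.
export async function generateCodeStream(
  prompt: string,
  model: string,
  provider?: string,
  onChunk?: (text: string) => void
): Promise<string> {
  const response = await fetch("/api/generate-code", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ prompt, model, provider }),
  });

  if (!response.ok || !response.body) {
    // On failure the route responds with a JSON body of the form { error }
    const { error } = await response
      .json()
      .catch(() => ({ error: "Request failed" }));
    throw new Error(error ?? "Request failed");
  }

  // The route streams text/plain, so read it chunk by chunk
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let code = "";
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    const chunk = decoder.decode(value, { stream: true });
    code += chunk;
    onChunk?.(chunk);
  }
  return code;
}
```

A component could call `generateCodeStream(prompt, model, provider, chunk => setCode(prev => prev + chunk))` to update the editor as tokens arrive, and load the model list separately with a plain GET request to `/api/get-models?provider=...`.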
-------------------------------------------------------------------------------- /components.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://ui.shadcn.com/schema.json", 3 | "style": "default", 4 | "rsc": true, 5 | "tsx": true, 6 | "tailwind": { 7 | "config": "tailwind.config.ts", 8 | "css": "app/globals.css", 9 | "baseColor": "neutral", 10 | "cssVariables": true, 11 | "prefix": "" 12 | }, 13 | "aliases": { 14 | "components": "@/components", 15 | "utils": "@/lib/utils", 16 | "ui": "@/components/ui", 17 | "lib": "@/lib", 18 | "hooks": "@/hooks" 19 | }, 20 | "iconLibrary": "lucide" 21 | } -------------------------------------------------------------------------------- /components/code-editor.tsx: -------------------------------------------------------------------------------- 1 | "use client" 2 | 3 | import { useEffect, useRef, useState } from "react" 4 | import Editor from "@monaco-editor/react" 5 | 6 | interface CodeEditorProps { 7 | code: string; 8 | isEditable?: boolean; 9 | onChange?: (value: string) => void; 10 | } 11 | 12 | export function CodeEditor({ code, isEditable = false, onChange }: CodeEditorProps) { 13 | const editorRef = useRef(null); 14 | const [isInitialMount, setIsInitialMount] = useState(true); 15 | const [isUserEditing, setIsUserEditing] = useState(false); 16 | 17 | const handleEditorDidMount = (editor: any) => { 18 | editorRef.current = editor; 19 | 20 | // Only scroll to the end on initial load 21 | if (isInitialMount) { 22 | editor.revealLine(editor.getModel().getLineCount()); 23 | setIsInitialMount(false); 24 | } 25 | 26 | // Add event listener for user interactions 27 | editor.onDidChangeCursorPosition(() => { 28 | if (isEditable) { 29 | setIsUserEditing(true); 30 | } 31 | }); 32 | }; 33 | 34 | const handleEditorChange = (value: string | undefined) => { 35 | if (onChange && value !== undefined) { 36 | onChange(value); 37 | } 38 | }; 39 | 40 | useEffect(() => { 41 | if (editorRef.current) { 42 | // Only scroll to the end if the user is not actively editing 43 | // or if the editor is not in edit mode 44 | if (!isUserEditing && !isEditable) { 45 | editorRef.current.revealLine(editorRef.current.getModel().getLineCount()); 46 | } 47 | } 48 | }, [code, isUserEditing, isEditable]); 49 | 50 | // Reset the isUserEditing status when edit mode is disabled 51 | useEffect(() => { 52 | if (!isEditable) { 53 | setIsUserEditing(false); 54 | } 55 | }, [isEditable]); 56 | 57 | return ( 58 |
59 | 75 |
76 | ) 77 | } 78 | -------------------------------------------------------------------------------- /components/loading-screen.tsx: -------------------------------------------------------------------------------- 1 | "use client" 2 | 3 | import { useEffect, useState } from "react" 4 | 5 | export function LoadingScreen() { 6 | const [glitchClass, setGlitchClass] = useState("") 7 | 8 | useEffect(() => { 9 | // Apply glitch effect at intervals 10 | const glitchInterval = setInterval(() => { 11 | setGlitchClass("glitch") 12 | setTimeout(() => setGlitchClass(""), 200) 13 | }, 2000) 14 | 15 | return () => clearInterval(glitchInterval) 16 | }, []) 17 | 18 | return ( 19 |
20 |

24 | LOADING, PLEASE WAIT... 25 |

26 | 27 |
28 |
29 |
30 | 31 | 63 |
64 | ) 65 | } 66 | -------------------------------------------------------------------------------- /components/provider-selector.tsx: -------------------------------------------------------------------------------- 1 | "use client" 2 | 3 | import { useState, useEffect } from "react" 4 | import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@/components/ui/select" 5 | // Import only the icons that are actually used 6 | import { Loader2 } from "lucide-react" 7 | import { toast } from "sonner" 8 | import { LLMProvider } from "@/lib/providers/config" 9 | 10 | interface Provider { 11 | id: LLMProvider 12 | name: string 13 | description: string 14 | isLocal: boolean 15 | } 16 | 17 | interface ProviderSelectorProps { 18 | selectedProvider: string 19 | setSelectedProvider: (value: string) => void 20 | onProviderChange: () => void 21 | } 22 | 23 | export function ProviderSelector({ 24 | selectedProvider, 25 | setSelectedProvider, 26 | onProviderChange 27 | }: ProviderSelectorProps) { 28 | const [providers, setProviders] = useState([]) 29 | const [isLoading, setIsLoading] = useState(false) 30 | 31 | useEffect(() => { 32 | const fetchProviders = async () => { 33 | setIsLoading(true) 34 | try { 35 | // Get the providers 36 | const response = await fetch('/api/get-models', { 37 | method: 'POST', 38 | }) 39 | if (!response.ok) { 40 | throw new Error('Error fetching providers') 41 | } 42 | const data = await response.json() 43 | setProviders(data) 44 | 45 | // If no provider is selected, get the default provider 46 | if (!selectedProvider && data.length > 0) { 47 | try { 48 | // Get the default provider from the API 49 | const defaultResponse = await fetch('/api/get-default-provider') 50 | if (defaultResponse.ok) { 51 | const { defaultProvider } = await defaultResponse.json() 52 | 53 | // Check if the default provider is in the list of available providers 54 | const providerExists = data.some(p => p.id === defaultProvider) 55 | 56 | if (providerExists) { 57 | setSelectedProvider(defaultProvider) 58 | } else { 59 | // Fallback to the first provider if the default provider is not available 60 | setSelectedProvider(data[0].id) 61 | } 62 | } else { 63 | // Fallback to the first provider on error 64 | setSelectedProvider(data[0].id) 65 | } 66 | } catch (error) { 67 | console.error('Error fetching default provider:', error) 68 | // Fallback to the first provider on error 69 | setSelectedProvider(data[0].id) 70 | } 71 | } 72 | } catch (error) { 73 | console.error('Error fetching providers:', error) 74 | toast.error('Providers could not be loaded.') 75 | } finally { 76 | setIsLoading(false) 77 | } 78 | } 79 | 80 | fetchProviders() 81 | }, [selectedProvider, setSelectedProvider]) 82 | 83 | const handleProviderChange = (value: string) => { 84 | setSelectedProvider(value) 85 | onProviderChange() 86 | } 87 | 88 | return ( 89 |
90 | 91 | 126 |
127 | ) 128 | } 129 | -------------------------------------------------------------------------------- /components/theme-provider.tsx: -------------------------------------------------------------------------------- 1 | 'use client' 2 | 3 | import * as React from 'react' 4 | import { 5 | ThemeProvider as NextThemesProvider, 6 | type ThemeProviderProps, 7 | } from 'next-themes' 8 | 9 | export function ThemeProvider({ children, ...props }: ThemeProviderProps) { 10 | return {children} 11 | } 12 | -------------------------------------------------------------------------------- /components/thinking-indicator.tsx: -------------------------------------------------------------------------------- 1 | "use client" 2 | 3 | import { useState, useEffect, useRef } from "react" 4 | // Import only the icons that are actually used 5 | import { Loader2, Brain, ChevronDown } from "lucide-react" 6 | 7 | interface ThinkingIndicatorProps { 8 | thinkingOutput: string 9 | isThinking: boolean 10 | position?: "top-left" | "top-right" | "bottom-left" | "bottom-right" 11 | } 12 | 13 | export function ThinkingIndicator({ 14 | thinkingOutput, 15 | isThinking, 16 | position = "top-left" 17 | }: ThinkingIndicatorProps) { 18 | const [isOpen, setIsOpen] = useState(false) 19 | const dropdownRef = useRef(null) 20 | const indicatorRef = useRef(null) 21 | 22 | // Automatically scroll to the end of the thinking output 23 | useEffect(() => { 24 | if (isOpen && dropdownRef.current) { 25 | dropdownRef.current.scrollTop = dropdownRef.current.scrollHeight 26 | } 27 | }, [isOpen, thinkingOutput]) 28 | 29 | if (!thinkingOutput && !isThinking) return null 30 | 31 | // Format the thinking output for better readability 32 | const formattedThinking = thinkingOutput 33 | .split('\n') 34 | .map((line, index) =>
{line}
) 35 | 36 | // Determine dropdown position based on the position prop 37 | let dropdownPosition = "left-0 top-full" 38 | if (position === "top-right") dropdownPosition = "right-0 top-full" 39 | if (position === "bottom-left") dropdownPosition = "left-0 bottom-full" 40 | if (position === "bottom-right") dropdownPosition = "right-0 bottom-full" 41 | 42 | // Animation for the dots 43 | const [dots, setDots] = useState("") 44 | 45 | // Animated dots for "Thinking..." 46 | useEffect(() => { 47 | if (!isThinking) return 48 | 49 | const interval = setInterval(() => { 50 | setDots(prev => { 51 | if (prev === "") return "." 52 | if (prev === ".") return ".." 53 | if (prev === "..") return "..." 54 | return "" 55 | }) 56 | }, 500) // Change every 500ms 57 | 58 | return () => clearInterval(interval) 59 | }, [isThinking]) 60 | 61 | // Status for "Finished thinking" 62 | const [hasFinished, setHasFinished] = useState(false) 63 | 64 | useEffect(() => { 65 | if (isThinking) { 66 | setHasFinished(false) 67 | } else if (thinkingOutput && !hasFinished) { 68 | // When the thinking process is complete, set hasFinished to true 69 | setHasFinished(true) 70 | } 71 | }, [isThinking, thinkingOutput, hasFinished]) 72 | 73 | return ( 74 |
75 |
setIsOpen(!isOpen)} 78 | > 79 | {isThinking ? ( 80 |
81 | 82 |
83 | ) : ( 84 | 85 | )} 86 | 87 | {isThinking ? `Thinking${dots}` : 88 | hasFinished ? ( 89 | Finished thinking 90 | ) : "Thinking"} 91 | 92 | 93 |
94 | 95 | {isOpen && ( 96 |
100 |

THINKING PROCESS:

101 |
102 | {formattedThinking.length > 0 ? ( 103 | formattedThinking 104 | ) : ( 105 |
Waiting for thinking output...
106 | )} 107 |
108 |
109 | )} 110 |
111 | ) 112 | } 113 | -------------------------------------------------------------------------------- /components/ui/accordion.tsx: -------------------------------------------------------------------------------- 1 | "use client" 2 | 3 | import * as React from "react" 4 | import * as AccordionPrimitive from "@radix-ui/react-accordion" 5 | import { ChevronDown } from "lucide-react" 6 | 7 | import { cn } from "@/lib/utils" 8 | 9 | const Accordion = AccordionPrimitive.Root 10 | 11 | const AccordionItem = React.forwardRef< 12 | React.ElementRef, 13 | React.ComponentPropsWithoutRef 14 | >(({ className, ...props }, ref) => ( 15 | 20 | )) 21 | AccordionItem.displayName = "AccordionItem" 22 | 23 | const AccordionTrigger = React.forwardRef< 24 | React.ElementRef, 25 | React.ComponentPropsWithoutRef 26 | >(({ className, children, ...props }, ref) => ( 27 | 28 | svg]:rotate-180", 32 | className 33 | )} 34 | {...props} 35 | > 36 | {children} 37 | 38 | 39 | 40 | )) 41 | AccordionTrigger.displayName = AccordionPrimitive.Trigger.displayName 42 | 43 | const AccordionContent = React.forwardRef< 44 | React.ElementRef, 45 | React.ComponentPropsWithoutRef 46 | >(({ className, children, ...props }, ref) => ( 47 | 52 |
{children}
53 |
54 | )) 55 | 56 | AccordionContent.displayName = AccordionPrimitive.Content.displayName 57 | 58 | export { Accordion, AccordionItem, AccordionTrigger, AccordionContent } 59 | -------------------------------------------------------------------------------- /components/ui/alert-dialog.tsx: -------------------------------------------------------------------------------- 1 | "use client" 2 | 3 | import * as React from "react" 4 | import * as AlertDialogPrimitive from "@radix-ui/react-alert-dialog" 5 | 6 | import { cn } from "@/lib/utils" 7 | import { buttonVariants } from "@/components/ui/button" 8 | 9 | const AlertDialog = AlertDialogPrimitive.Root 10 | 11 | const AlertDialogTrigger = AlertDialogPrimitive.Trigger 12 | 13 | const AlertDialogPortal = AlertDialogPrimitive.Portal 14 | 15 | const AlertDialogOverlay = React.forwardRef< 16 | React.ElementRef, 17 | React.ComponentPropsWithoutRef 18 | >(({ className, ...props }, ref) => ( 19 | 27 | )) 28 | AlertDialogOverlay.displayName = AlertDialogPrimitive.Overlay.displayName 29 | 30 | const AlertDialogContent = React.forwardRef< 31 | React.ElementRef, 32 | React.ComponentPropsWithoutRef 33 | >(({ className, ...props }, ref) => ( 34 | 35 | 36 | 44 | 45 | )) 46 | AlertDialogContent.displayName = AlertDialogPrimitive.Content.displayName 47 | 48 | const AlertDialogHeader = ({ 49 | className, 50 | ...props 51 | }: React.HTMLAttributes) => ( 52 |
59 | ) 60 | AlertDialogHeader.displayName = "AlertDialogHeader" 61 | 62 | const AlertDialogFooter = ({ 63 | className, 64 | ...props 65 | }: React.HTMLAttributes) => ( 66 |
73 | ) 74 | AlertDialogFooter.displayName = "AlertDialogFooter" 75 | 76 | const AlertDialogTitle = React.forwardRef< 77 | React.ElementRef, 78 | React.ComponentPropsWithoutRef 79 | >(({ className, ...props }, ref) => ( 80 | 85 | )) 86 | AlertDialogTitle.displayName = AlertDialogPrimitive.Title.displayName 87 | 88 | const AlertDialogDescription = React.forwardRef< 89 | React.ElementRef, 90 | React.ComponentPropsWithoutRef 91 | >(({ className, ...props }, ref) => ( 92 | 97 | )) 98 | AlertDialogDescription.displayName = 99 | AlertDialogPrimitive.Description.displayName 100 | 101 | const AlertDialogAction = React.forwardRef< 102 | React.ElementRef, 103 | React.ComponentPropsWithoutRef 104 | >(({ className, ...props }, ref) => ( 105 | 110 | )) 111 | AlertDialogAction.displayName = AlertDialogPrimitive.Action.displayName 112 | 113 | const AlertDialogCancel = React.forwardRef< 114 | React.ElementRef, 115 | React.ComponentPropsWithoutRef 116 | >(({ className, ...props }, ref) => ( 117 | 126 | )) 127 | AlertDialogCancel.displayName = AlertDialogPrimitive.Cancel.displayName 128 | 129 | export { 130 | AlertDialog, 131 | AlertDialogPortal, 132 | AlertDialogOverlay, 133 | AlertDialogTrigger, 134 | AlertDialogContent, 135 | AlertDialogHeader, 136 | AlertDialogFooter, 137 | AlertDialogTitle, 138 | AlertDialogDescription, 139 | AlertDialogAction, 140 | AlertDialogCancel, 141 | } 142 | -------------------------------------------------------------------------------- /components/ui/alert.tsx: -------------------------------------------------------------------------------- 1 | import * as React from "react" 2 | import { cva, type VariantProps } from "class-variance-authority" 3 | 4 | import { cn } from "@/lib/utils" 5 | 6 | const alertVariants = cva( 7 | "relative w-full rounded-lg border p-4 [&>svg~*]:pl-7 [&>svg+div]:translate-y-[-3px] [&>svg]:absolute [&>svg]:left-4 [&>svg]:top-4 [&>svg]:text-foreground", 8 | { 9 | variants: { 10 | variant: { 11 | default: "bg-background text-foreground", 12 | destructive: 13 | "border-destructive/50 text-destructive dark:border-destructive [&>svg]:text-destructive", 14 | }, 15 | }, 16 | defaultVariants: { 17 | variant: "default", 18 | }, 19 | } 20 | ) 21 | 22 | const Alert = React.forwardRef< 23 | HTMLDivElement, 24 | React.HTMLAttributes & VariantProps 25 | >(({ className, variant, ...props }, ref) => ( 26 |
32 | )) 33 | Alert.displayName = "Alert" 34 | 35 | const AlertTitle = React.forwardRef< 36 | HTMLParagraphElement, 37 | React.HTMLAttributes 38 | >(({ className, ...props }, ref) => ( 39 |
44 | )) 45 | AlertTitle.displayName = "AlertTitle" 46 | 47 | const AlertDescription = React.forwardRef< 48 | HTMLParagraphElement, 49 | React.HTMLAttributes 50 | >(({ className, ...props }, ref) => ( 51 |
56 | )) 57 | AlertDescription.displayName = "AlertDescription" 58 | 59 | export { Alert, AlertTitle, AlertDescription } 60 | -------------------------------------------------------------------------------- /components/ui/aspect-ratio.tsx: -------------------------------------------------------------------------------- 1 | "use client" 2 | 3 | import * as AspectRatioPrimitive from "@radix-ui/react-aspect-ratio" 4 | 5 | const AspectRatio = AspectRatioPrimitive.Root 6 | 7 | export { AspectRatio } 8 | -------------------------------------------------------------------------------- /components/ui/avatar.tsx: -------------------------------------------------------------------------------- 1 | "use client" 2 | 3 | import * as React from "react" 4 | import * as AvatarPrimitive from "@radix-ui/react-avatar" 5 | 6 | import { cn } from "@/lib/utils" 7 | 8 | const Avatar = React.forwardRef< 9 | React.ElementRef, 10 | React.ComponentPropsWithoutRef 11 | >(({ className, ...props }, ref) => ( 12 | 20 | )) 21 | Avatar.displayName = AvatarPrimitive.Root.displayName 22 | 23 | const AvatarImage = React.forwardRef< 24 | React.ElementRef, 25 | React.ComponentPropsWithoutRef 26 | >(({ className, ...props }, ref) => ( 27 | 32 | )) 33 | AvatarImage.displayName = AvatarPrimitive.Image.displayName 34 | 35 | const AvatarFallback = React.forwardRef< 36 | React.ElementRef, 37 | React.ComponentPropsWithoutRef 38 | >(({ className, ...props }, ref) => ( 39 | 47 | )) 48 | AvatarFallback.displayName = AvatarPrimitive.Fallback.displayName 49 | 50 | export { Avatar, AvatarImage, AvatarFallback } 51 | -------------------------------------------------------------------------------- /components/ui/badge.tsx: -------------------------------------------------------------------------------- 1 | import * as React from "react" 2 | import { cva, type VariantProps } from "class-variance-authority" 3 | 4 | import { cn } from "@/lib/utils" 5 | 6 | const badgeVariants = cva( 7 | "inline-flex items-center rounded-full border px-2.5 py-0.5 text-xs font-semibold transition-colors focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2", 8 | { 9 | variants: { 10 | variant: { 11 | default: 12 | "border-transparent bg-primary text-primary-foreground hover:bg-primary/80", 13 | secondary: 14 | "border-transparent bg-secondary text-secondary-foreground hover:bg-secondary/80", 15 | destructive: 16 | "border-transparent bg-destructive text-destructive-foreground hover:bg-destructive/80", 17 | outline: "text-foreground", 18 | }, 19 | }, 20 | defaultVariants: { 21 | variant: "default", 22 | }, 23 | } 24 | ) 25 | 26 | export interface BadgeProps 27 | extends React.HTMLAttributes, 28 | VariantProps {} 29 | 30 | function Badge({ className, variant, ...props }: BadgeProps) { 31 | return ( 32 |
33 | ) 34 | } 35 | 36 | export { Badge, badgeVariants } 37 | -------------------------------------------------------------------------------- /components/ui/breadcrumb.tsx: -------------------------------------------------------------------------------- 1 | import * as React from "react" 2 | import { Slot } from "@radix-ui/react-slot" 3 | import { ChevronRight, MoreHorizontal } from "lucide-react" 4 | 5 | import { cn } from "@/lib/utils" 6 | 7 | const Breadcrumb = React.forwardRef< 8 | HTMLElement, 9 | React.ComponentPropsWithoutRef<"nav"> & { 10 | separator?: React.ReactNode 11 | } 12 | >(({ ...props }, ref) =>