├── .env.example
├── .gitignore
├── .prettierignore
├── LICENSE
├── README.md
├── app
    ├── api
    │   ├── feedback
    │   │   └── route.ts
    │   ├── keys
    │   │   └── route.ts
    │   └── research
    │   │   └── route.ts
    ├── favicon.ico
    ├── globals.css
    ├── layout.tsx
    └── page.tsx
├── components.json
├── components
    ├── chat
    │   ├── api-key-dialog.tsx
    │   ├── chat.tsx
    │   ├── download-txt.tsx
    │   ├── input.tsx
    │   ├── markdown.tsx
    │   ├── message.tsx
    │   ├── research-progress.tsx
    │   └── site-header.tsx
    └── ui
    │   ├── button.tsx
    │   ├── dialog.tsx
    │   ├── input.tsx
    │   ├── slider.tsx
    │   └── tooltip.tsx
├── lib
    ├── deep-research
    │   ├── ai
    │   │   ├── providers.ts
    │   │   ├── text-splitter.test.ts
    │   │   └── text-splitter.ts
    │   ├── deep-research.ts
    │   ├── feedback.ts
    │   ├── index.ts
    │   └── prompt.ts
    ├── hooks
    │   └── use-scroll-to-bottom.ts
    └── utils.ts
├── middleware.ts
├── next-env.d.ts
├── next.config.js
├── package.json
├── postcss.config.js
├── public
    ├── favicon-16x16.png
    ├── favicon-32x32.png
    ├── favicon.ico
    ├── logo-bg.png
    ├── logo-text.png
    ├── og.png
    ├── providers
    │   └── openai.webp
    └── site.webmanifest
├── tailwind.config.ts
└── tsconfig.json
/.env.example:
--------------------------------------------------------------------------------
 1 | 
 2 | #### AI API KEYS
 3 | OPENAI_API_KEY=your-openai-api-key
 4 | FIRECRAWL_KEY=your-firecrawl-api-key
 5 | NEXT_PUBLIC_ENABLE_API_KEYS=false
 6 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
 2 | 
 3 | # Output files
 4 | output.md
 5 | 
 6 | # Dependencies
 7 | node_modules
 8 | .pnp
 9 | .pnp.js
10 | 
11 | # Local env files
12 | .env
13 | .env.local
14 | .env.development.local
15 | .env.test.local
16 | .env.production.local
17 | 
18 | # Testing
19 | coverage
20 | 
21 | # Turbo
22 | .turbo
23 | 
24 | # Vercel
25 | .vercel
26 | 
27 | # Build Outputs
28 | .next/
29 | out/
30 | build
31 | dist
32 | 
33 | 
34 | # Debug
35 | npm-debug.log*
36 | yarn-debug.log*
37 | yarn-error.log*
38 | 
39 | # Misc
40 | .DS_Store
41 | *.pem
42 | 
43 | # Package Manager Lock Files
44 | package-lock.json
45 | pnpm-lock.yaml
46 | yarn.lock
47 | 
48 | # IDE/Editor specific
49 | .idea/
50 | .vscode/
51 | *.swp
52 | *.swo
--------------------------------------------------------------------------------
/.prettierignore:
--------------------------------------------------------------------------------
 1 | *.hbs
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
 1 | MIT License
 2 | 
 3 | Copyright (c) 2025 David Zhang
 4 | Copyright (c) 2025 Fekri
 5 | 
 6 | Permission is hereby granted, free of charge, to any person obtaining a copy
 7 | of this software and associated documentation files (the "Software"), to deal
 8 | in the Software without restriction, including without limitation the rights
 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 | 
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 | 
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE.
23 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | # Open Deep Research Web UI
 2 | 
 3 | A modern, interactive web interface built on top of the original [Deep Research CLI](https://github.com/dzhng/deep-research) project. This web app transforms the command-line research assistant into an intuitive, visually appealing experience using Next.js and shadcn/ui.
 4 | 
 5 | ## Overview
 6 | 
 7 | Open Deep Research Web UI is an AI-powered research assistant that transforms the original CLI tool into a modern web interface using Next.js and shadcn/ui. Try it out at [anotherwrapper.com/open-deep-research](https://anotherwrapper.com/open-deep-research) with your own API keys, or host it yourself.
 8 | 
 9 | The system combines search engines (via FireCrawl), web scraping, and language models (via OpenAI) to perform deep research on any topic. Key features include:
10 | 
11 | - **Intelligent Research Process:**
12 | 
13 |   - Performs iterative research by recursively exploring topics in depth
14 |   - Uses LLMs to generate targeted search queries based on research goals
15 |   - Creates follow-up questions to better understand research needs
16 |   - Processes multiple searches and results in parallel for efficiency
17 |   - Configurable depth and breadth parameters to control research scope
18 | 
19 | - **Research Output:**
20 | 
21 |   - Produces detailed markdown reports with findings and sources
22 |   - Real-time progress tracking of research steps
23 |   - Built-in markdown viewer for reviewing results
24 |   - Downloadable research reports
25 | 
26 | - **Modern Interface:**
27 |   - Interactive controls for adjusting research parameters
28 |   - Visual feedback for ongoing research progress
29 |   - HTTP-only cookie storage for API keys
30 | 
31 | The system maintains the core research capabilities of the original CLI while providing an intuitive visual interface for controlling and monitoring the research process.
32 | 
33 | ## Sponsors
34 | 
35 | This project is proudly sponsored by [AnotherWrapper](https://anotherwrapper.com).
36 | 
37 | [![Sponsored by Anotherwrapper](https://anotherwrapper.com/og.png)](https://anotherwrapper.com)
38 | 
39 | ## Getting Started
40 | 
41 | ### Prerequisites
42 | 
43 | - Node.js v14 or later
44 | - API keys for OpenAI and FireCrawl
45 | 
46 | ### Installation
47 | 
48 | 1. **Clone and Install**
49 | 
50 |    ```bash
51 |    git clone https://github.com/fdarkaou/open-deep-research.git
52 |    cd open-deep-research
53 |    npm install
54 |    ```
55 | 
56 | 2. **Configure Environment**
57 | 
58 |    Create `.env.local` and add:
59 | 
60 |    ```bash
61 |    OPENAI_API_KEY=your-openai-api-key
62 |    FIRECRAWL_KEY=your-firecrawl-api-key
63 |    NEXT_PUBLIC_ENABLE_API_KEYS=false # Set to false to disable API key dialog
64 |    ```
65 | 
66 | 3. **Run the App**
67 |    ```bash
68 |    npm run dev
69 |    ```
70 |    Visit [http://localhost:3000](http://localhost:3000)
71 | 
72 | ## API Key Management
73 | 
74 | By default (`NEXT_PUBLIC_ENABLE_API_KEYS=true`), the app includes an API key input dialog that allows users to try out the research assistant directly in their browser using their own API keys. Keys are stored securely in HTTP-only cookies and are never exposed to client-side JavaScript.
75 | 
76 | For your own deployment, you can disable this dialog by setting `NEXT_PUBLIC_ENABLE_API_KEYS=false` and configure the API keys directly in your `.env.local` file instead.
77 | 
78 | ## License
79 | 
80 | MIT License. Feel free to use and modify the code for your own projects as you wish.
81 | 
82 | ## Acknowledgements
83 | 
84 | - **Original CLI:** [dzhng/deep-research](https://github.com/dzhng/deep-research)
85 | - **Sponsor:** [AnotherWrapper](https://anotherwrapper.com)
86 | - **Tools:** Next.js, shadcn/ui, anotherwrapper, Vercel AI SDK
87 | 
88 | Happy researching!
89 | 
--------------------------------------------------------------------------------
/app/api/feedback/route.ts:
--------------------------------------------------------------------------------
 1 | import { NextRequest, NextResponse } from "next/server";
 2 | 
 3 | import { AIModel } from "@/lib/deep-research/ai/providers";
 4 | import { generateFeedback } from "@/lib/deep-research/feedback";
 5 | 
 6 | export async function POST(req: NextRequest) {
 7 |   try {
 8 |     const { query, numQuestions, modelId = "o3-mini" } = await req.json();
 9 | 
10 |     // Retrieve API key(s) from secure cookies
11 |     const openaiKey = req.cookies.get("openai-key")?.value;
12 |     const firecrawlKey = req.cookies.get("firecrawl-key")?.value;
13 | 
14 |     // Add API key validation
15 |     if (process.env.NEXT_PUBLIC_ENABLE_API_KEYS === "true") {
16 |       if (!openaiKey || !firecrawlKey) {
17 |         return NextResponse.json(
18 |           { error: "API keys are required but not provided" },
19 |           { status: 401 }
20 |         );
21 |       }
22 |     }
23 | 
24 |     console.log("\n🔍 [FEEDBACK ROUTE] === Request Started ===");
25 |     console.log("Query:", query);
26 |     console.log("Model ID:", modelId);
27 |     console.log("Number of Questions:", numQuestions);
28 |     console.log("API Keys Present:", {
29 |       OpenAI: openaiKey ? "✅" : "❌",
30 |       FireCrawl: firecrawlKey ? "✅" : "❌",
31 |     });
32 | 
33 |     try {
34 |       const questions = await generateFeedback({
35 |         query,
36 |         numQuestions,
37 |         modelId: modelId as AIModel,
38 |         apiKey: openaiKey,
39 |       });
40 | 
41 |       console.log("\n✅ [FEEDBACK ROUTE] === Success ===");
42 |       console.log("Generated Questions:", questions);
43 |       console.log("Number of Questions Generated:", questions.length);
44 | 
45 |       return NextResponse.json({ questions });
46 |     } catch (error) {
47 |       console.error("\n❌ [FEEDBACK ROUTE] === Generation Error ===");
48 |       console.error("Error:", error);
49 |       throw error;
50 |     }
51 |   } catch (error) {
52 |     console.error("\n💥 [FEEDBACK ROUTE] === Route Error ===");
53 |     console.error("Error:", error);
54 | 
55 |     return NextResponse.json(
56 |       {
57 |         error: "Feedback generation failed",
58 |         details: error instanceof Error ? error.message : String(error),
59 |       },
60 |       { status: 500 }
61 |     );
62 |   }
63 | }
64 | 
--------------------------------------------------------------------------------
/app/api/keys/route.ts:
--------------------------------------------------------------------------------
 1 | import { NextRequest, NextResponse } from 'next/server';
 2 | 
 3 | // Handle GET requests to check keys and POST requests to set keys
 4 | export async function GET(req: NextRequest) {
 5 |   const openaiKey = req.cookies.get('openai-key')?.value;
 6 |   const firecrawlKey = req.cookies.get('firecrawl-key')?.value;
 7 |   const keysPresent = Boolean(openaiKey && firecrawlKey);
 8 |   return NextResponse.json({ keysPresent });
 9 | }
10 | 
11 | export async function POST(req: NextRequest) {
12 |   try {
13 |     const { openaiKey, firecrawlKey } = await req.json();
14 |     const response = NextResponse.json({ success: true });
15 |     response.cookies.set('openai-key', openaiKey, {
16 |       httpOnly: true,
17 |       secure: process.env.NODE_ENV === 'production',
18 |       path: '/',
19 |       sameSite: 'strict',
20 |     });
21 |     response.cookies.set('firecrawl-key', firecrawlKey, {
22 |       httpOnly: true,
23 |       secure: process.env.NODE_ENV === 'production',
24 |       path: '/',
25 |       sameSite: 'strict',
26 |     });
27 |     return response;
28 |   } catch (error) {
29 |     console.error(error);
30 |     return NextResponse.json(
31 |       { error: 'Failed to set API keys' },
32 |       { status: 500 },
33 |     );
34 |   }
35 | }
36 | 
37 | // New: DELETE handler to remove API keys
38 | export async function DELETE(req: NextRequest) {
39 |   try {
40 |     const response = NextResponse.json({ success: true });
41 |     response.cookies.delete('openai-key');
42 |     response.cookies.delete('firecrawl-key');
43 |     return response;
44 |   } catch (error) {
45 |     console.error(error);
46 |     return NextResponse.json(
47 |       { error: 'Failed to remove API keys' },
48 |       { status: 500 },
49 |     );
50 |   }
51 | }
52 | 
--------------------------------------------------------------------------------
/app/api/research/route.ts:
--------------------------------------------------------------------------------
 1 | import { NextRequest } from "next/server";
 2 | 
 3 | import {
 4 |   deepResearch,
 5 |   generateFeedback,
 6 |   writeFinalReport,
 7 | } from "@/lib/deep-research";
 8 | import { createModel, type AIModel } from "@/lib/deep-research/ai/providers";
 9 | 
10 | export async function POST(req: NextRequest) {
11 |   try {
12 |     const {
13 |       query,
14 |       breadth = 3,
15 |       depth = 2,
16 |       modelId = "o3-mini",
17 |     } = await req.json();
18 | 
19 |     // Retrieve API keys from secure cookies
20 |     const openaiKey = req.cookies.get("openai-key")?.value;
21 |     const firecrawlKey = req.cookies.get("firecrawl-key")?.value;
22 | 
23 |     // Add API key validation
24 |     if (process.env.NEXT_PUBLIC_ENABLE_API_KEYS === "true") {
25 |       if (!openaiKey || !firecrawlKey) {
26 |         return Response.json(
27 |           { error: "API keys are required but not provided" },
28 |           { status: 401 }
29 |         );
30 |       }
31 |     }
32 | 
33 |     console.log("\n🔬 [RESEARCH ROUTE] === Request Started ===");
34 |     console.log("Query:", query);
35 |     console.log("Model ID:", modelId);
36 |     console.log("Configuration:", {
37 |       breadth,
38 |       depth,
39 |     });
40 |     console.log("API Keys Present:", {
41 |       OpenAI: openaiKey ? "✅" : "❌",
42 |       FireCrawl: firecrawlKey ?
"✅" : "❌", 43 | }); 44 | 45 | try { 46 | const model = createModel(modelId as AIModel, openaiKey); 47 | console.log("\n🤖 [RESEARCH ROUTE] === Model Created ==="); 48 | console.log("Using Model:", modelId); 49 | 50 | const encoder = new TextEncoder(); 51 | const stream = new TransformStream(); 52 | const writer = stream.writable.getWriter(); 53 | 54 | (async () => { 55 | try { 56 | console.log("\n🚀 [RESEARCH ROUTE] === Research Started ==="); 57 | 58 | const feedbackQuestions = await generateFeedback({ 59 | query, 60 | modelId, 61 | apiKey: openaiKey, 62 | }); 63 | await writer.write( 64 | encoder.encode( 65 | `data: ${JSON.stringify({ 66 | type: "progress", 67 | step: { 68 | type: "query", 69 | content: "Generated feedback questions", 70 | }, 71 | })}\n\n` 72 | ) 73 | ); 74 | 75 | const { learnings, visitedUrls } = await deepResearch({ 76 | query, 77 | breadth, 78 | depth, 79 | model, 80 | firecrawlKey, 81 | onProgress: async (update: string) => { 82 | console.log("\n📊 [RESEARCH ROUTE] Progress Update:", update); 83 | await writer.write( 84 | encoder.encode( 85 | `data: ${JSON.stringify({ 86 | type: "progress", 87 | step: { 88 | type: "research", 89 | content: update, 90 | }, 91 | })}\n\n` 92 | ) 93 | ); 94 | }, 95 | }); 96 | 97 | console.log("\n✅ [RESEARCH ROUTE] === Research Completed ==="); 98 | console.log("Learnings Count:", learnings.length); 99 | console.log("Visited URLs Count:", visitedUrls.length); 100 | 101 | const report = await writeFinalReport({ 102 | prompt: query, 103 | learnings, 104 | visitedUrls, 105 | model, 106 | }); 107 | 108 | await writer.write( 109 | encoder.encode( 110 | `data: ${JSON.stringify({ 111 | type: "result", 112 | feedbackQuestions, 113 | learnings, 114 | visitedUrls, 115 | report, 116 | })}\n\n` 117 | ) 118 | ); 119 | } catch (error) { 120 | console.error("\n❌ [RESEARCH ROUTE] === Research Process Error ==="); 121 | console.error("Error:", error); 122 | await writer.write( 123 | encoder.encode( 124 | `data: ${JSON.stringify({ 125 | type: "error", 126 | message: "Research failed", 127 | })}\n\n` 128 | ) 129 | ); 130 | } finally { 131 | await writer.close(); 132 | } 133 | })(); 134 | 135 | return new Response(stream.readable, { 136 | headers: { 137 | "Content-Type": "text/event-stream", 138 | "Cache-Control": "no-cache", 139 | Connection: "keep-alive", 140 | }, 141 | }); 142 | } catch (error) { 143 | console.error("\n💥 [RESEARCH ROUTE] === Route Error ==="); 144 | console.error("Error:", error); 145 | return Response.json({ error: "Research failed" }, { status: 500 }); 146 | } 147 | } catch (error) { 148 | console.error("\n💥 [RESEARCH ROUTE] === Parse Error ==="); 149 | console.error("Error:", error); 150 | return Response.json({ error: "Research failed" }, { status: 500 }); 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /app/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fdarkaou/open-deep-research/ce15e95589b55dd6811cc0c3564b4380ae0c7519/app/favicon.ico -------------------------------------------------------------------------------- /app/globals.css: -------------------------------------------------------------------------------- 1 | /* @import url("https://fonts.googleapis.com/css2?family=Poetsen+One&display=swap"); */ 2 | 3 | @tailwind base; 4 | @tailwind components; 5 | @tailwind utilities; 6 | 7 | /* Hide scroll bar */ 8 | @layer utilities { 9 | /* Hide scrollbar for Chrome, Safari and Opera */ 10 | 
.no-scrollbar::-webkit-scrollbar { 11 | display: none; 12 | } 13 | 14 | /* Hide scrollbar for IE, Edge and Firefox */ 15 | .no-scrollbar { 16 | -ms-overflow-style: none; /* IE and Edge */ 17 | scrollbar-width: none; /* Firefox */ 18 | } 19 | } 20 | 21 | @layer base { 22 | :root { 23 | --background: 0 0% 100%; 24 | --foreground: 222.2 84% 4.9%; 25 | 26 | --card: 0 0% 100%; 27 | --card-foreground: 222.2 84% 4.9%; 28 | 29 | --popover: 0 0% 100%; 30 | --popover-foreground: 222.2 84% 4.9%; 31 | 32 | --primary: 222.2 47.4% 11.2%; 33 | --primary-foreground: 210 40% 98%; 34 | 35 | --secondary: 210 40% 96.1%; 36 | --secondary-foreground: 222.2 47.4% 11.2%; 37 | 38 | --muted: 210 40% 96.1%; 39 | --muted-foreground: 215.4 16.3% 46.9%; 40 | 41 | --accent: 0 0% 9%; /* #171717 */ 42 | --accent-foreground: 0 0% 100%; /* White text */ 43 | 44 | --destructive: 0 84.2% 60.2%; 45 | --destructive-foreground: 210 40% 98%; 46 | 47 | --border: 214.3 31.8% 91.4%; 48 | --input: 214.3 31.8% 91.4%; 49 | --ring: 222.2 84% 4.9%; 50 | 51 | --radius: 0.5rem; 52 | 53 | --sidebar-background: 0 0% 98%; 54 | 55 | --sidebar-foreground: 240 5.3% 26.1%; 56 | 57 | --sidebar-primary: 240 5.9% 10%; 58 | 59 | --sidebar-primary-foreground: 0 0% 98%; 60 | 61 | --sidebar-accent: 240 4.8% 95.9%; 62 | 63 | --sidebar-accent-foreground: 240 5.9% 10%; 64 | 65 | --sidebar-border: 220 13% 91%; 66 | 67 | --sidebar-ring: 217.2 91.2% 59.8%; 68 | } 69 | 70 | .dark { 71 | --background: 222.2 84% 4.9%; 72 | --foreground: 210 40% 98%; 73 | 74 | --card: 222.2 84% 4.9%; 75 | --card-foreground: 210 40% 98%; 76 | 77 | --popover: 222.2 84% 4.9%; 78 | --popover-foreground: 210 40% 98%; 79 | 80 | --primary: 210 40% 98%; 81 | --primary-foreground: 222.2 47.4% 11.2%; 82 | 83 | --secondary: 217.2 32.6% 17.5%; 84 | --secondary-foreground: 210 40% 98%; 85 | 86 | --muted: 217.2 32.6% 17.5%; 87 | --muted-foreground: 215 20.2% 65.1%; 88 | 89 | --accent: 217.2 32.6% 17.5%; 90 | --accent-foreground: 210 40% 98%; 91 | 92 | --destructive: 0 62.8% 30.6%; 93 | --destructive-foreground: 210 40% 98%; 94 | 95 | --border: 217.2 32.6% 17.5%; 96 | --input: 217.2 32.6% 17.5%; 97 | --ring: 212.7 26.8% 83.9%; 98 | --sidebar-background: 240 5.9% 10%; 99 | --sidebar-foreground: 240 4.8% 95.9%; 100 | --sidebar-primary: 224.3 76.3% 48%; 101 | --sidebar-primary-foreground: 0 0% 100%; 102 | --sidebar-accent: 240 3.7% 15.9%; 103 | --sidebar-accent-foreground: 240 4.8% 95.9%; 104 | --sidebar-border: 240 3.7% 15.9%; 105 | --sidebar-ring: 217.2 91.2% 59.8%; 106 | } 107 | } 108 | 109 | @layer base { 110 | * { 111 | @apply border-border; 112 | } 113 | body { 114 | @apply bg-background text-foreground; 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /app/layout.tsx: -------------------------------------------------------------------------------- 1 | import "./globals.css"; 2 | 3 | import type { Metadata } from "next"; 4 | import { Inter } from "next/font/google"; 5 | 6 | const inter = Inter({ subsets: ["latin"] }); 7 | 8 | export const metadata: Metadata = { 9 | title: "Open Deep Research - Open Source AI Research Assistant", 10 | description: 11 | "Open Deep Research is a free, open-source alternative to OpenAI's Deep Research, Google's Gemini, and Anthropic's Claude Research. Powered by o3-mini, this advanced AI research assistant delivers comprehensive market analysis, competitive intelligence, and academic research capabilities. 
Experience enterprise-grade research automation with features like recursive exploration, multi-source validation, and structured insights extraction. Perfect for startups, researchers, and businesses seeking a transparent, customizable, and powerful research solution without vendor lock-in. Dive deep into any topic with our state-of-the-art natural language processing and automated insight generation.", 12 | }; 13 | export default function RootLayout({ 14 | children, 15 | }: { 16 | children: React.ReactNode; 17 | }) { 18 | return ( 19 | 20 | {children} 21 | 22 | ); 23 | } 24 | -------------------------------------------------------------------------------- /app/page.tsx: -------------------------------------------------------------------------------- 1 | 'use client'; 2 | 3 | import { Chat } from '@/components/chat/chat'; 4 | import { Header } from '@/components/chat/site-header'; 5 | 6 | export default function ResearchPage() { 7 | return ( 8 |
9 |
10 | 11 |
12 | ); 13 | } 14 | -------------------------------------------------------------------------------- /components.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://ui.shadcn.com/schema.json", 3 | "style": "default", 4 | "rsc": true, 5 | "tsx": true, 6 | "tailwind": { 7 | "config": "tailwind.config.ts", 8 | "css": "app/globals.css", 9 | "baseColor": "slate", 10 | "cssVariables": true, 11 | "prefix": "" 12 | }, 13 | "aliases": { 14 | "components": "@/components", 15 | "utils": "@/lib/utils" 16 | } 17 | } -------------------------------------------------------------------------------- /components/chat/api-key-dialog.tsx: -------------------------------------------------------------------------------- 1 | "use client"; 2 | 3 | import { useState } from "react"; 4 | import Image from "next/image"; 5 | import { 6 | LockIcon, 7 | KeyIcon, 8 | Loader2Icon, 9 | ShieldCheckIcon, 10 | GithubIcon, 11 | } from "lucide-react"; 12 | 13 | import { 14 | Dialog, 15 | DialogContent, 16 | DialogHeader, 17 | DialogTitle, 18 | DialogDescription, 19 | DialogFooter, 20 | } from "@/components/ui/dialog"; 21 | import { Input } from "@/components/ui/input"; 22 | import { Button } from "@/components/ui/button"; 23 | 24 | interface ApiKeyDialogProps { 25 | show: boolean; 26 | onClose: (open: boolean) => void; 27 | onSuccess: () => void; 28 | } 29 | 30 | export function ApiKeyDialog({ show, onClose, onSuccess }: ApiKeyDialogProps) { 31 | const [openaiKey, setOpenaiKey] = useState(""); 32 | const [firecrawlKey, setFirecrawlKey] = useState(""); 33 | const [loading, setLoading] = useState(false); 34 | 35 | const handleApiKeySubmit = async () => { 36 | if (!openaiKey || !firecrawlKey) return; 37 | setLoading(true); 38 | const res = await fetch("/api/keys", { 39 | method: "POST", 40 | headers: { "Content-Type": "application/json" }, 41 | body: JSON.stringify({ openaiKey, firecrawlKey }), 42 | }); 43 | if (res.ok) { 44 | onClose(false); 45 | onSuccess(); 46 | } 47 | setLoading(false); 48 | }; 49 | 50 | return ( 51 | 52 | 53 | 54 | 55 | Open Deep Research 56 | 57 | 58 |
59 |

60 | 61 | Secure API Key Setup 62 |

63 |

64 | To use Deep Research, you'll need to provide your API keys. 65 | These keys are stored securely using HTTP-only cookies and are 66 | never exposed to client-side JavaScript. 67 |

68 |
69 |
70 |

71 | Self-hosting option:{" "} 72 | You can clone the repository and host this application on 73 | your own infrastructure. This gives you complete control 74 | over your data and API key management. 75 |

76 | 82 | View self-hosting instructions 83 | 89 | 95 | 96 | 97 |
98 |
99 |
100 | 101 |
102 |
103 |

104 | OpenAI Logo 111 | OpenAI API Key 112 |

113 |

114 | Powers our advanced language models for research analysis and 115 | synthesis. 116 | 122 | Get your OpenAI key → 123 | 124 |

125 |
126 | 127 |
128 |

129 | 🔥 FireCrawl API Key 130 |

131 |

132 | Enables real-time web crawling and data gathering 133 | capabilities. 134 | 140 | Get your FireCrawl key → 141 | 142 |

143 |
144 |
145 |
146 |
147 |
148 |
149 |
150 | 153 |
154 | setOpenaiKey(e.target.value)} 158 | placeholder="sk-..." 159 | className="pr-10 font-mono text-sm bg-white/50 border-zinc-200 focus:border-zinc-400 focus:ring-zinc-400 h-9 sm:h-10" 160 | /> 161 |
162 | 163 |
164 |
165 |

166 | Starts with 'sk-' and contains about 50 characters 167 |

168 |
169 | 170 |
171 | 174 |
175 | setFirecrawlKey(e.target.value)} 179 | placeholder="fc-..." 180 | className="pr-10 font-mono text-sm bg-white/50 border-zinc-200 focus:border-zinc-400 focus:ring-zinc-400 h-9 sm:h-10" 181 | /> 182 |
183 | 184 |
185 |
186 |

187 | Usually starts with 'fc-' for production keys 188 |

189 |
190 |
191 |
192 | 193 |
194 | 195 | Your keys are stored securely 196 |
197 |
198 | 204 | 205 | Get source code 206 | 207 |
208 | 223 |
224 |
225 |
226 | ); 227 | } 228 | -------------------------------------------------------------------------------- /components/chat/chat.tsx: -------------------------------------------------------------------------------- 1 | "use client"; 2 | 3 | import { useState, useEffect } from "react"; 4 | import { Message } from "ai"; 5 | import { motion } from "framer-motion"; 6 | import { BrainCircuitIcon, GithubIcon, PanelRightOpen } from "lucide-react"; 7 | 8 | import { useScrollToBottom } from "@/lib/hooks/use-scroll-to-bottom"; 9 | 10 | import DownloadTxtButton from "./download-txt"; 11 | import { MultimodalInput } from "./input"; 12 | import { PreviewMessage, ProgressStep } from "./message"; 13 | import { ResearchProgress } from "./research-progress"; 14 | 15 | export function Chat({ 16 | id, 17 | initialMessages, 18 | }: { 19 | id: string; 20 | initialMessages: Message[]; 21 | }) { 22 | const [messages, setMessages] = useState(initialMessages); 23 | const [isLoading, setIsLoading] = useState(false); 24 | const [progress, setProgress] = useState([]); 25 | const [containerRef, messagesEndRef] = useScrollToBottom(); 26 | 27 | // New state to store the final report text 28 | const [finalReport, setFinalReport] = useState(null); 29 | 30 | // States for interactive feedback workflow 31 | const [stage, setStage] = useState<"initial" | "feedback" | "researching">( 32 | "initial" 33 | ); 34 | const [initialQuery, setInitialQuery] = useState(""); 35 | 36 | // Add state for mobile progress panel visibility 37 | const [showProgress, setShowProgress] = useState(false); 38 | 39 | // New state to track if we're on mobile (using 768px as breakpoint for md) 40 | const [isMobile, setIsMobile] = useState(false); 41 | useEffect(() => { 42 | const handleResize = () => { 43 | setIsMobile(window.innerWidth < 768); 44 | }; 45 | handleResize(); 46 | window.addEventListener("resize", handleResize); 47 | return () => window.removeEventListener("resize", handleResize); 48 | }, []); 49 | 50 | // Update the condition to only be true when there are actual research steps 51 | const hasStartedResearch = 52 | progress.filter( 53 | (step) => 54 | // Only count non-report steps or initial report steps 55 | step.type !== "report" || 56 | step.content.includes("Generating") || 57 | step.content.includes("Synthesizing") 58 | ).length > 0; 59 | 60 | // Helper function to call the research endpoint 61 | const sendResearchQuery = async ( 62 | query: string, 63 | config: { breadth: number; depth: number; modelId: string } 64 | ) => { 65 | try { 66 | setIsLoading(true); 67 | setProgress([]); 68 | // Inform the user that research has started 69 | setMessages((prev) => [ 70 | ...prev, 71 | { 72 | id: Date.now().toString(), 73 | role: "assistant", 74 | content: "Starting in-depth research based on your inputs...", 75 | }, 76 | ]); 77 | 78 | const response = await fetch("/api/research", { 79 | method: "POST", 80 | headers: { 81 | "Content-Type": "application/json", 82 | }, 83 | body: JSON.stringify({ 84 | query, 85 | breadth: config.breadth, 86 | depth: config.depth, 87 | modelId: config.modelId, 88 | }), 89 | }); 90 | 91 | const reader = response.body?.getReader(); 92 | if (!reader) throw new Error("No reader available"); 93 | 94 | const textDecoder = new TextDecoder(); 95 | let buffer = ""; 96 | const reportParts: string[] = []; 97 | 98 | while (true) { 99 | const { done, value } = await reader.read(); 100 | if (done) break; 101 | 102 | buffer += textDecoder.decode(value, { stream: true }); 103 | const parts = buffer.split("\n\n"); 104 | buffer = 
parts.pop() || ""; 105 | 106 | for (const part of parts) { 107 | if (part.startsWith("data: ")) { 108 | const jsonStr = part.substring(6).trim(); 109 | if (!jsonStr) continue; 110 | try { 111 | const event = JSON.parse(jsonStr); 112 | if (event.type === "progress") { 113 | if (event.step.type !== "report") { 114 | // Check for duplicates before adding this progress step. 115 | setProgress((prev) => { 116 | if ( 117 | prev.length > 0 && 118 | prev[prev.length - 1].content === event.step.content 119 | ) { 120 | return prev; 121 | } 122 | return [...prev, event.step]; 123 | }); 124 | } 125 | } else if (event.type === "result") { 126 | // Save the final report so we can download it later 127 | setFinalReport(event.report); 128 | setMessages((prev) => [ 129 | ...prev, 130 | { 131 | id: Date.now().toString(), 132 | role: "assistant", 133 | content: event.report, 134 | }, 135 | ]); 136 | } else if (event.type === "report_part") { 137 | reportParts.push(event.content); 138 | } 139 | } catch (e) { 140 | console.error("Error parsing event:", e); 141 | } 142 | } 143 | } 144 | } 145 | 146 | if (reportParts.length > 0) { 147 | // In case the report was sent in parts 148 | const fullReport = reportParts.join("\n"); 149 | setFinalReport(fullReport); 150 | setMessages((prev) => [ 151 | ...prev, 152 | { 153 | id: Date.now().toString(), 154 | role: "assistant", 155 | content: fullReport, 156 | }, 157 | ]); 158 | } 159 | } catch (error) { 160 | console.error("Research error:", error); 161 | setMessages((prev) => [ 162 | ...prev, 163 | { 164 | id: Date.now().toString(), 165 | role: "assistant", 166 | content: "Sorry, there was an error conducting the research.", 167 | }, 168 | ]); 169 | } finally { 170 | setIsLoading(false); 171 | } 172 | }; 173 | 174 | const handleSubmit = async ( 175 | userInput: string, 176 | config: { breadth: number; depth: number; modelId: string } 177 | ) => { 178 | if (!userInput.trim() || isLoading) return; 179 | 180 | // Add user message immediately 181 | setMessages((prev) => [ 182 | ...prev, 183 | { 184 | id: Date.now().toString(), 185 | role: "user", 186 | content: userInput, 187 | }, 188 | ]); 189 | 190 | setIsLoading(true); 191 | 192 | if (stage === "initial") { 193 | // Add thinking message only for initial query 194 | setMessages((prev) => [ 195 | ...prev, 196 | { 197 | id: "thinking", 198 | role: "assistant", 199 | content: "Thinking...", 200 | }, 201 | ]); 202 | 203 | // Handle the user's initial query 204 | setInitialQuery(userInput); 205 | 206 | try { 207 | const response = await fetch("/api/feedback", { 208 | method: "POST", 209 | headers: { "Content-Type": "application/json" }, 210 | body: JSON.stringify({ 211 | query: userInput, 212 | numQuestions: 3, 213 | modelId: config.modelId, 214 | }), 215 | }); 216 | const data = await response.json(); 217 | const questions: string[] = data.questions || []; 218 | setMessages((prev) => { 219 | const filtered = prev.filter((m) => m.id !== "thinking"); 220 | if (questions.length > 0) { 221 | const formattedQuestions = questions 222 | .map((q, index) => `${index + 1}. 
${q}`) 223 | .join("\n\n"); 224 | return [ 225 | ...filtered, 226 | { 227 | id: Date.now().toString(), 228 | role: "assistant", 229 | content: `Please answer the following follow-up questions to help clarify your research:\n\n${formattedQuestions}`, 230 | }, 231 | ]; 232 | } 233 | return filtered; 234 | }); 235 | setStage("feedback"); 236 | } catch (error) { 237 | console.error("Feedback generation error:", error); 238 | setMessages((prev) => [ 239 | ...prev.filter((m) => m.id !== "thinking"), 240 | { 241 | id: Date.now().toString(), 242 | role: "assistant", 243 | content: "Sorry, there was an error generating feedback questions.", 244 | }, 245 | ]); 246 | } finally { 247 | setIsLoading(false); 248 | } 249 | } else if (stage === "feedback") { 250 | // In feedback stage, combine the initial query and follow-up answers 251 | const combined = `Initial Query: ${initialQuery}\nFollow-up Answers:\n${userInput}`; 252 | setStage("researching"); 253 | try { 254 | await sendResearchQuery(combined, config); 255 | } finally { 256 | setIsLoading(false); 257 | // Reset the stage so further messages will be processed 258 | setStage("initial"); 259 | // Inform the user that a new research session can be started 260 | setMessages((prev) => [ 261 | ...prev, 262 | { 263 | id: Date.now().toString(), 264 | role: "assistant", 265 | content: 266 | "Research session complete. You can now ask another question to begin a new research session.", 267 | }, 268 | ]); 269 | } 270 | } 271 | }; 272 | 273 | return ( 274 |
275 | {/* Main container with dynamic width */} 276 | 287 | {/* Messages Container */} 288 |
294 | {/* Welcome Message (if no research started and no messages) */} 295 | {!hasStartedResearch && messages.length === 0 && ( 296 |
297 | 313 | 325 | 338 | 339 | 340 | 341 |
342 | 349 | Open Deep Research 350 | 351 | 352 | 358 | An open source alternative to OpenAI and Gemini's deep 359 | research capabilities. Ask any question to generate a 360 | comprehensive report. 361 | 362 | 363 | 369 | 379 | 380 | View source code 381 | 382 | 383 |
384 |
385 |
386 | )} 387 | 388 | {/* Messages */} 389 |
390 | {messages.map((message) => ( 391 | 392 | ))} 393 |
394 | {finalReport && ( 395 |
396 | 397 |
398 | )} 399 |
400 |
401 | 402 | {/* Input - Fixed to bottom */} 403 |
404 |
405 | 416 |
417 |
418 | 419 | 420 | {/* Research Progress Panel */} 421 | 434 | 435 | 436 | 437 | {/* Mobile Toggle Button - Only show when research has started */} 438 | {hasStartedResearch && ( 439 | 459 | )} 460 |
461 | ); 462 | } 463 | -------------------------------------------------------------------------------- /components/chat/download-txt.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import { DownloadIcon } from 'lucide-react'; 3 | 4 | interface DownloadTxtButtonProps { 5 | reportText: string; 6 | fileName?: string; 7 | } 8 | 9 | const DownloadTxtButton: React.FC = ({ 10 | reportText, 11 | fileName = 'research_report.txt', 12 | }) => { 13 | const handleDownload = () => { 14 | // Create a blob from the report text content. 15 | const blob = new Blob([reportText], { type: 'text/plain;charset=utf-8' }); 16 | // Create a temporary URL for the blob. 17 | const url = window.URL.createObjectURL(blob); 18 | // Create a temporary anchor element. 19 | const link = document.createElement('a'); 20 | link.href = url; 21 | link.download = fileName; 22 | // Append the link, trigger click, remove it, and revoke the URL. 23 | document.body.appendChild(link); 24 | link.click(); 25 | document.body.removeChild(link); 26 | window.URL.revokeObjectURL(url); 27 | }; 28 | 29 | return ( 30 | 64 | ); 65 | }; 66 | 67 | export default DownloadTxtButton; 68 | -------------------------------------------------------------------------------- /components/chat/input.tsx: -------------------------------------------------------------------------------- 1 | "use client"; 2 | 3 | import { useEffect, useRef, useState } from "react"; 4 | import Image from "next/image"; 5 | import cx from "classnames"; 6 | import { motion } from "framer-motion"; 7 | import { 8 | ArrowUpIcon, 9 | CheckCircleIcon, 10 | ChevronDown, 11 | DownloadIcon, 12 | Settings2, 13 | XCircleIcon, 14 | } from "lucide-react"; 15 | 16 | import { Button } from "@/components/ui/button"; 17 | import { Slider } from "@/components/ui/slider"; 18 | import { 19 | availableModels, 20 | type AIModelDisplayInfo, 21 | } from "@/lib/deep-research/ai/providers"; 22 | import { ApiKeyDialog } from "@/components/chat/api-key-dialog"; 23 | 24 | interface MultimodalInputProps { 25 | onSubmit: ( 26 | input: string, 27 | config: { 28 | breadth: number; 29 | depth: number; 30 | modelId: string; 31 | } 32 | ) => void; 33 | isLoading: boolean; 34 | placeholder?: string; 35 | isAuthenticated?: boolean; 36 | onDownload?: () => void; 37 | canDownload?: boolean; 38 | } 39 | 40 | export function MultimodalInput({ 41 | onSubmit, 42 | isLoading, 43 | placeholder = "What would you like to research?", 44 | onDownload, 45 | canDownload = false, 46 | }: MultimodalInputProps) { 47 | const [input, setInput] = useState(""); 48 | const [breadth, setBreadth] = useState(4); 49 | const [depth, setDepth] = useState(2); 50 | const [selectedModel, setSelectedModel] = useState( 51 | availableModels.find((model) => model.id === "o3-mini") || 52 | availableModels[0] 53 | ); 54 | const [isModelDropdownOpen, setIsModelDropdownOpen] = useState(false); 55 | const [showApiKeyPrompt, setShowApiKeyPrompt] = useState(false); 56 | const [hasKeys, setHasKeys] = useState(false); 57 | const textareaRef = useRef(null); 58 | 59 | // Read the feature flag from environment variables. 60 | const enableApiKeys = process.env.NEXT_PUBLIC_ENABLE_API_KEYS === "true"; 61 | // When API keys are disabled via env flag, always consider keys as present. 62 | const effectiveHasKeys = enableApiKeys ? 
hasKeys : true; 63 | 64 | // Check for keys using the consolidated endpoint 65 | useEffect(() => { 66 | const checkKeys = async () => { 67 | const res = await fetch("/api/keys"); 68 | const data = await res.json(); 69 | setHasKeys(data.keysPresent); 70 | if (!data.keysPresent && enableApiKeys) { 71 | setShowApiKeyPrompt(true); 72 | } else { 73 | setShowApiKeyPrompt(false); 74 | } 75 | }; 76 | checkKeys(); 77 | }, [enableApiKeys]); 78 | 79 | // New: Remove API keys handler 80 | const handleRemoveKeys = async () => { 81 | if (!window.confirm("Are you sure you want to remove your API keys?")) 82 | return; 83 | try { 84 | const res = await fetch("/api/keys", { 85 | method: "DELETE", 86 | }); 87 | if (res.ok) { 88 | setHasKeys(false); 89 | } 90 | } catch (error) { 91 | console.error("Error removing keys:", error); 92 | } 93 | }; 94 | 95 | const handleSubmit = () => { 96 | if (!input.trim() || isLoading) return; 97 | if (enableApiKeys && !effectiveHasKeys) { 98 | // Re-open the API key modal if keys are missing 99 | setShowApiKeyPrompt(true); 100 | return; 101 | } 102 | onSubmit(input, { 103 | breadth, 104 | depth, 105 | modelId: selectedModel.id, 106 | }); 107 | setInput(""); 108 | }; 109 | 110 | useEffect(() => { 111 | if (textareaRef.current) { 112 | textareaRef.current.style.height = "inherit"; 113 | textareaRef.current.style.height = `${textareaRef.current.scrollHeight}px`; 114 | } 115 | }, [input]); 116 | 117 | const DownloadButton = () => ( 118 | 127 | ); 128 | 129 | return ( 130 |
131 | {/* Conditionally render API key dialog only if enabled */} 132 | {enableApiKeys && ( 133 | { 137 | setShowApiKeyPrompt(false); 138 | setHasKeys(true); 139 | }} 140 | /> 141 | )} 142 | 143 |