├── .env.example
├── .gitignore
├── .nvmrc
├── .prettierignore
├── LICENSE
├── README.md
├── app
├── PostHogPageView.tsx
├── api
│ └── research
│ │ └── route.ts
├── favicon.ico
├── globals.css
├── layout.tsx
├── opengraph-image.png
├── page.tsx
├── posthog.ts
├── providers.tsx
└── twitter-image.png
├── components.json
├── components
├── chat
│ ├── api-key-dialog.tsx
│ ├── chat.tsx
│ ├── download-txt.tsx
│ ├── input.tsx
│ ├── markdown.tsx
│ ├── message.tsx
│ └── research-progress.tsx
├── site-header.tsx
├── theme-provider.tsx
└── ui
│ ├── button.tsx
│ ├── dialog.tsx
│ ├── input.tsx
│ ├── slider.tsx
│ └── tooltip.tsx
├── lib
├── constants.ts
├── deep-research
│ ├── ai
│ │ ├── providers.ts
│ │ ├── text-splitter.test.ts
│ │ └── text-splitter.ts
│ ├── deep-research.ts
│ ├── feedback.ts
│ ├── index.ts
│ └── prompt.ts
├── hooks
│ └── use-scroll-to-bottom.ts
└── utils.ts
├── middleware.ts
├── next-env.d.ts
├── next.config.ts
├── package.json
├── pnpm-lock.yaml
├── postcss.config.js
├── public
├── logo.png
└── providers
│ └── openai.webp
├── supabase
├── .gitignore
├── config.toml
├── functions
│ ├── _shared
│ │ ├── cors.ts
│ │ ├── feedback.ts
│ │ └── types.ts
│ ├── feedback
│ │ └── index.ts
│ ├── keys
│ │ └── index.ts
│ └── research
│ │ ├── deep-research
│ │ ├── ai
│ │ │ ├── providers.ts
│ │ │ ├── text-splitter.test.ts
│ │ │ └── text-splitter.ts
│ │ ├── deep-research.ts
│ │ ├── feedback.ts
│ │ ├── index.ts
│ │ └── prompt.ts
│ │ └── index.ts
└── seed.sql
├── tailwind.config.ts
└── tsconfig.json
/.env.example:
--------------------------------------------------------------------------------
1 |
2 | #### AI API KEYS
3 | OPENAI_API_KEY=your-openai-api-key
4 | FIRECRAWL_KEY=your-firecrawl-api-key
5 | NEXT_PUBLIC_ENABLE_API_KEYS=false
6 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
2 |
3 | # Output files
4 | output.md
5 |
6 | # Dependencies
7 | node_modules
8 | .pnp
9 | .pnp.js
10 |
11 | # Local env files
12 | .env
13 | .env.local
14 | .env.development.local
15 | .env.test.local
16 | .env.production.local
17 |
18 | # Testing
19 | coverage
20 |
21 | # Turbo
22 | .turbo
23 |
24 | # Vercel
25 | .vercel
26 |
27 | # Build Outputs
28 | .next/
29 | out/
30 | build
31 | dist
32 |
33 | .open-next
34 |
35 | # Debug
36 | npm-debug.log*
37 | yarn-debug.log*
38 | yarn-error.log*
39 |
40 | # Misc
41 | .DS_Store
42 | *.pem
43 |
44 |
45 | # IDE/Editor specific
46 | .idea/
47 | .vscode/
48 | *.swp
49 | *.swo
50 |
--------------------------------------------------------------------------------
/.nvmrc:
--------------------------------------------------------------------------------
1 | 20.10.0
--------------------------------------------------------------------------------
/.prettierignore:
--------------------------------------------------------------------------------
1 | *.hbs
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2025 David Zhang
4 | Copyright (c) 2025 Fekri
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE.
23 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Supa Deep Research
2 |
3 | This app was created by the [Supavec team](https://www.supavec.com), based on [open-deep-research](https://github.com/fdarkaou/open-deep-research).
4 |
5 | I wanted to run this app on affordable + OSS stack, which is Supabase.
6 |
7 | There are three main APIs:
8 | - /api/feedback: Supabase Edge Functions
9 | - /api/keys: Supabase Edge Functions
10 | - /api/research: (Still) Vercel
11 |
12 | I really want to move `/api/research` to Supabase Edge Functions, but the CPU time exceeded error prevents me from doing so.
13 |
14 | Hopefully soon!
15 |
16 | (`/supabase/functions/api/research` does work locally, though, with `$ supabase functions serve research --no-verify-jwt`)
17 |
18 | Also, I tried to [host this app on Cloudflare Workers](https://x.com/martindonadieu/status/1889630161819074988), but it was too slow. A simple API call takes about 10 secs (sry I'm a CF noob).
19 |
20 | ## Overview
21 |
22 | Supa Deep Research Web UI is an AI-powered research assistant that transforms the original CLI tool into a modern web interface using Next.js and shadcn/ui. Try it out at [supa-deep-research.com](https://www.supa-deep-research.com) with your own API keys, or host it yourself.
23 |
24 | The system combines search engines (via FireCrawl), web scraping, and language models (via OpenAI) to perform deep research on any topic. Key features include:
25 |
26 | - **Intelligent Research Process:**
27 |
28 | - Performs iterative research by recursively exploring topics in depth
29 | - Uses LLMs to generate targeted search queries based on research goals
30 | - Creates follow-up questions to better understand research needs
31 | - Processes multiple searches and results in parallel for efficiency
32 | - Configurable depth and breadth parameters to control research scope
33 |
34 | - **Research Output:**
35 |
36 | - Produces detailed markdown reports with findings and sources
37 | - Real-time progress tracking of research steps
38 | - Built-in markdown viewer for reviewing results
39 | - Downloadable research reports
40 |
41 | - **Modern Interface:**
42 | - Interactive controls for adjusting research parameters
43 | - Visual feedback for ongoing research progress
44 | - HTTP-only cookie storage for API keys
45 |
46 | The system maintains the core research capabilities of the original CLI while providing an intuitive visual interface for controlling and monitoring the research process.
47 |
48 |
49 | ### Installation
50 |
51 | 1. **Clone and Install**
52 |
53 | ```bash
54 | git clone https://github.com/taishikato/supa-deep-research.git
55 | cd supa-deep-research
56 | npm install
57 | ```
58 |
59 | 2. **Configure Environment**
60 |
61 | Create `.env.local` and add:
62 |
63 | ```bash
64 | OPENAI_API_KEY=your-openai-api-key
65 | FIRECRAWL_KEY=your-firecrawl-api-key
66 | NEXT_PUBLIC_ENABLE_API_KEYS=false # Set to false to disable API key dialog
67 | ```
68 |
69 | 3. **Run the App**
70 | ```bash
71 | npm run dev
72 | ```
73 | Visit [http://localhost:3000](http://localhost:3000)
74 |
75 | ## API Key Management
76 |
77 | By default (`NEXT_PUBLIC_ENABLE_API_KEYS=true`), the app includes an API key input dialog that allows users to try out the research assistant directly in their browser using their own API keys. Keys are stored securely in HTTP-only cookies and are never exposed to client-side JavaScript.
78 |
79 | For your own deployment, you can disable this dialog by setting `NEXT_PUBLIC_ENABLE_API_KEYS=false` and configure the API keys directly in your `.env.local` file instead.
80 |
81 | ## License
82 |
83 | MIT License. Feel free to use and modify the code for your own projects as you wish.
84 |
85 | ## Acknowledgements
86 |
87 | - **Original CLI:** [dzhng/deep-research](https://github.com/dzhng/deep-research)
88 | - **Original Web UI:** [Open Deep Research](https://anotherwrapper.com/open-deep-research)
89 | - **Tools:** Next.js, shadcn/ui, Vercel AI SDK, Supabase
90 |
91 | Happy researching!
92 |
--------------------------------------------------------------------------------
/app/PostHogPageView.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | import { usePathname, useSearchParams } from "next/navigation";
4 | import { useEffect, Suspense } from "react";
5 | import { usePostHog } from "posthog-js/react";
6 |
7 | function PostHogPageView(): null {
8 | const pathname = usePathname();
9 | const searchParams = useSearchParams();
10 | const posthog = usePostHog();
11 |
12 | // Track pageviews
13 | useEffect(() => {
14 | if (pathname && posthog) {
15 | let url = window.origin + pathname;
16 | if (searchParams.toString()) {
17 | url = url + `?${searchParams.toString()}`;
18 | }
19 |
20 | posthog.capture("$pageview", { $current_url: url });
21 | }
22 | }, [pathname, searchParams, posthog]);
23 |
24 | return null;
25 | }
26 |
27 | // Wrap this in Suspense to avoid the `useSearchParams` usage above
28 | // from de-opting the whole app into client-side rendering
29 | // See: https://nextjs.org/docs/messages/deopted-into-client-rendering
30 | export function SuspendedPostHogPageView() {
31 | return (
32 |
33 |
34 |
35 | );
36 | }
37 |
--------------------------------------------------------------------------------
/app/api/research/route.ts:
--------------------------------------------------------------------------------
1 | import { NextRequest } from "next/server";
2 |
3 | export const maxDuration = 300;
4 |
5 | import {
6 | deepResearch,
7 | generateFeedback,
8 | writeFinalReport,
9 | } from "@/lib/deep-research";
10 | import { type AIModel, createModel } from "@/lib/deep-research/ai/providers";
11 | import PostHogClient from "@/app/posthog";
12 |
13 | export async function POST(req: NextRequest) {
14 | const posthog = PostHogClient();
15 |
16 | posthog.capture({
17 | distinctId: req.headers.get("x-forwarded-for") ?? "unknown",
18 | event: "Research started",
19 | });
20 |
21 | try {
22 | const {
23 | query,
24 | breadth = 3,
25 | depth = 2,
26 | modelId = "o3-mini",
27 | } = await req.json();
28 |
29 | // Retrieve API keys from secure cookies
30 | const openaiKey = req.cookies.get("openai-key")?.value;
31 | const firecrawlKey = req.cookies.get("firecrawl-key")?.value;
32 |
33 | // Add API key validation
34 | if (process.env.NEXT_PUBLIC_ENABLE_API_KEYS === "true") {
35 | if (!openaiKey || !firecrawlKey) {
36 | return Response.json(
37 | { error: "API keys are required but not provided" },
38 | { status: 401 },
39 | );
40 | }
41 | }
42 |
43 | console.log("\n🔬 [RESEARCH ROUTE] === Request Started ===");
44 | console.log("Query:", query);
45 | console.log("Model ID:", modelId);
46 | console.log("Configuration:", {
47 | breadth,
48 | depth,
49 | });
50 | console.log("API Keys Present:", {
51 | OpenAI: openaiKey ? "✅" : "❌",
52 | FireCrawl: firecrawlKey ? "✅" : "❌",
53 | });
54 |
55 | try {
56 | const model = createModel(modelId as AIModel, openaiKey);
57 | console.log("\n🤖 [RESEARCH ROUTE] === Model Created ===");
58 | console.log("Using Model:", modelId);
59 |
60 | // Create a ReadableStream with a custom controller
61 | const stream = new ReadableStream({
62 | start: async (controller) => {
63 | const encoder = new TextEncoder();
64 |
65 | const writeToStream = async (data: any) => {
66 | try {
67 | const encodedData = encoder.encode(
68 | `data: ${JSON.stringify(data)}\n\n`,
69 | );
70 | controller.enqueue(encodedData);
71 | } catch (error) {
72 | console.error("Stream write error:", error);
73 | }
74 | };
75 |
76 | try {
77 | console.log("\n🚀 [RESEARCH ROUTE] === Research Started ===");
78 |
79 | const feedbackQuestions = await generateFeedback({
80 | query,
81 | apiKey: openaiKey,
82 | modelId,
83 | });
84 | console.log("before writeToStream");
85 |
86 | await writeToStream({
87 | type: "progress",
88 | step: {
89 | type: "query",
90 | content: "Generated feedback questions",
91 | },
92 | });
93 | console.log("after writeToStream");
94 |
95 | const { learnings, visitedUrls } = await deepResearch({
96 | query,
97 | breadth,
98 | depth,
99 | model,
100 | firecrawlKey,
101 | onProgress: async (update: string) => {
102 | console.log("\n📊 [RESEARCH ROUTE] Progress Update:", update);
103 | await writeToStream({
104 | type: "progress",
105 | step: {
106 | type: "research",
107 | content: update,
108 | },
109 | });
110 | },
111 | });
112 |
113 | console.log("\n✅ [RESEARCH ROUTE] === Research Completed ===");
114 | console.log("Learnings Count:", learnings.length);
115 | console.log("Visited URLs Count:", visitedUrls.length);
116 |
117 | const report = await writeFinalReport({
118 | prompt: query,
119 | learnings,
120 | visitedUrls,
121 | model,
122 | });
123 |
124 | await writeToStream({
125 | type: "result",
126 | feedbackQuestions,
127 | learnings,
128 | visitedUrls,
129 | report,
130 | });
131 | } catch (error) {
132 | console.error(
133 | "\n❌ [RESEARCH ROUTE] === Research Process Error ===",
134 | );
135 | console.error("Error:", error);
136 | await writeToStream({
137 | type: "error",
138 | message: "Research failed",
139 | });
140 | } finally {
141 | controller.close();
142 | }
143 | },
144 | });
145 |
146 | return new Response(stream, {
147 | headers: {
148 | "Content-Type": "text/event-stream",
149 | "Cache-Control": "no-cache",
150 | Connection: "keep-alive",
151 | },
152 | });
153 | } catch (error) {
154 | console.error("\n💥 [RESEARCH ROUTE] === Route Error ===");
155 | console.error("Error:", error);
156 | return Response.json({ error: "Research failed" }, { status: 500 });
157 | }
158 | } catch (error) {
159 | console.error("\n💥 [RESEARCH ROUTE] === Parse Error ===");
160 | console.error("Error:", error);
161 | return Response.json({ error: "Research failed" }, { status: 500 });
162 | }
163 | }
164 |
--------------------------------------------------------------------------------
/app/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/supavec/supa-deep-research/8a51cdf82e0e04cf51cfc0c28854a2701ee6a456/app/favicon.ico
--------------------------------------------------------------------------------
/app/globals.css:
--------------------------------------------------------------------------------
1 | /* @import url("https://fonts.googleapis.com/css2?family=Poetsen+One&display=swap"); */
2 |
3 | @tailwind base;
4 | @tailwind components;
5 | @tailwind utilities;
6 |
7 | /* Hide scroll bar */
8 | @layer utilities {
9 | /* Hide scrollbar for Chrome, Safari and Opera */
10 | .no-scrollbar::-webkit-scrollbar {
11 | display: none;
12 | }
13 |
14 | /* Hide scrollbar for IE, Edge and Firefox */
15 | .no-scrollbar {
16 | -ms-overflow-style: none; /* IE and Edge */
17 | scrollbar-width: none; /* Firefox */
18 | }
19 | }
20 |
21 | @layer base {
22 | :root {
23 | --background: 0 0% 100%;
24 | --foreground: 222.2 84% 4.9%;
25 |
26 | --card: 0 0% 100%;
27 | --card-foreground: 222.2 84% 4.9%;
28 |
29 | --popover: 0 0% 100%;
30 | --popover-foreground: 222.2 84% 4.9%;
31 |
32 | --primary: 222.2 47.4% 11.2%;
33 | --primary-foreground: 210 40% 98%;
34 |
35 | --secondary: 210 40% 96.1%;
36 | --secondary-foreground: 222.2 47.4% 11.2%;
37 |
38 | --muted: 210 40% 96.1%;
39 | --muted-foreground: 215.4 16.3% 46.9%;
40 |
41 | --accent: 0 0% 9%; /* #171717 */
42 | --accent-foreground: 0 0% 100%; /* White text */
43 |
44 | --destructive: 0 84.2% 60.2%;
45 | --destructive-foreground: 210 40% 98%;
46 |
47 | --border: 214.3 31.8% 91.4%;
48 | --input: 214.3 31.8% 91.4%;
49 | --ring: 222.2 84% 4.9%;
50 |
51 | --radius: 0.5rem;
52 |
53 | --sidebar-background: 0 0% 98%;
54 |
55 | --sidebar-foreground: 240 5.3% 26.1%;
56 |
57 | --sidebar-primary: 240 5.9% 10%;
58 |
59 | --sidebar-primary-foreground: 0 0% 98%;
60 |
61 | --sidebar-accent: 240 4.8% 95.9%;
62 |
63 | --sidebar-accent-foreground: 240 5.9% 10%;
64 |
65 | --sidebar-border: 220 13% 91%;
66 |
67 | --sidebar-ring: 217.2 91.2% 59.8%;
68 | }
69 |
70 | .dark {
71 | --background: 222.2 84% 4.9%;
72 | --foreground: 210 40% 98%;
73 |
74 | --card: 222.2 84% 4.9%;
75 | --card-foreground: 210 40% 98%;
76 |
77 | --popover: 222.2 84% 4.9%;
78 | --popover-foreground: 210 40% 98%;
79 |
80 | --primary: 210 40% 98%;
81 | --primary-foreground: 222.2 47.4% 11.2%;
82 |
83 | --secondary: 217.2 32.6% 17.5%;
84 | --secondary-foreground: 210 40% 98%;
85 |
86 | --muted: 217.2 32.6% 17.5%;
87 | --muted-foreground: 215 20.2% 65.1%;
88 |
89 | --accent: 217.2 32.6% 17.5%;
90 | --accent-foreground: 210 40% 98%;
91 |
92 | --destructive: 0 62.8% 30.6%;
93 | --destructive-foreground: 210 40% 98%;
94 |
95 | --border: 217.2 32.6% 17.5%;
96 | --input: 217.2 32.6% 17.5%;
97 | --ring: 212.7 26.8% 83.9%;
98 | --sidebar-background: 240 5.9% 10%;
99 | --sidebar-foreground: 240 4.8% 95.9%;
100 | --sidebar-primary: 224.3 76.3% 48%;
101 | --sidebar-primary-foreground: 0 0% 100%;
102 | --sidebar-accent: 240 3.7% 15.9%;
103 | --sidebar-accent-foreground: 240 4.8% 95.9%;
104 | --sidebar-border: 240 3.7% 15.9%;
105 | --sidebar-ring: 217.2 91.2% 59.8%;
106 | }
107 | }
108 |
109 | @layer base {
110 | * {
111 | @apply border-border;
112 | }
113 | body {
114 | @apply bg-background text-foreground;
115 | }
116 | }
117 |
--------------------------------------------------------------------------------
/app/layout.tsx:
--------------------------------------------------------------------------------
1 | import "./globals.css";
2 |
3 | import type { Metadata } from "next";
4 | import { ThemeProvider } from "@/components/theme-provider";
5 | import { APP_NAME, APP_DESCRIPTION } from "@/lib/constants";
6 | import { GeistMono } from "geist/font/mono";
7 | import { GeistSans } from "geist/font/sans";
8 | import { CSPostHogProvider } from "./providers";
9 | import { GoogleAnalytics } from "@next/third-parties/google";
10 |
11 | export const metadata: Metadata = {
12 | title: APP_NAME,
13 | description: APP_DESCRIPTION,
14 | };
15 |
16 | export default function RootLayout({
17 | children,
18 | }: {
19 | children: React.ReactNode;
20 | }) {
21 | return (
22 |
27 |
29 |
34 | {children}
35 |
36 |
37 |
38 |
39 | );
40 | }
41 |
--------------------------------------------------------------------------------
/app/opengraph-image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/supavec/supa-deep-research/8a51cdf82e0e04cf51cfc0c28854a2701ee6a456/app/opengraph-image.png
--------------------------------------------------------------------------------
/app/page.tsx:
--------------------------------------------------------------------------------
1 | import { Chat } from "@/components/chat/chat";
2 | import { Header } from "@/components/site-header";
3 |
4 | export default function ResearchPage() {
5 | return (
6 |
7 |
8 |
9 |
10 |
11 |
12 | );
13 | }
14 |
--------------------------------------------------------------------------------
/app/posthog.ts:
--------------------------------------------------------------------------------
1 | import { PostHog } from "posthog-node";
2 |
3 | export default function PostHogClient() {
4 | const posthogClient = new PostHog(process.env.NEXT_PUBLIC_POSTHOG_KEY!, {
5 | host: process.env.NEXT_PUBLIC_POSTHOG_HOST!,
6 | flushAt: 1,
7 | flushInterval: 0,
8 | });
9 | return posthogClient;
10 | }
11 |
--------------------------------------------------------------------------------
/app/providers.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | import posthog from "posthog-js";
4 | import { PostHogProvider } from "posthog-js/react";
5 | import { SuspendedPostHogPageView } from "./PostHogPageView";
6 |
7 | if (typeof window !== "undefined") {
8 | posthog.init(process.env.NEXT_PUBLIC_POSTHOG_KEY as string, {
9 | api_host: "/ingest",
10 | ui_host: "https://us.posthog.com",
11 | person_profiles: "identified_only",
12 | });
13 | }
14 | export function CSPostHogProvider({ children }: { children: React.ReactNode }) {
15 | return (
16 |
17 |
18 | {children}
19 |
20 | );
21 | }
22 |
--------------------------------------------------------------------------------
/app/twitter-image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/supavec/supa-deep-research/8a51cdf82e0e04cf51cfc0c28854a2701ee6a456/app/twitter-image.png
--------------------------------------------------------------------------------
/components.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "https://ui.shadcn.com/schema.json",
3 | "style": "default",
4 | "rsc": true,
5 | "tsx": true,
6 | "tailwind": {
7 | "config": "tailwind.config.ts",
8 | "css": "app/globals.css",
9 | "baseColor": "slate",
10 | "cssVariables": true,
11 | "prefix": ""
12 | },
13 | "aliases": {
14 | "components": "@/components",
15 | "utils": "@/lib/utils"
16 | }
17 | }
--------------------------------------------------------------------------------
/components/chat/api-key-dialog.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | import { useState } from "react";
4 | import Image from "next/image";
5 | import {
6 | LockIcon,
7 | KeyIcon,
8 | Loader2Icon,
9 | ShieldCheckIcon,
10 | GithubIcon,
11 | } from "lucide-react";
12 |
13 | import {
14 | Dialog,
15 | DialogContent,
16 | DialogHeader,
17 | DialogTitle,
18 | DialogDescription,
19 | DialogFooter,
20 | } from "@/components/ui/dialog";
21 | import { Input } from "@/components/ui/input";
22 | import { Button } from "@/components/ui/button";
23 | import { APP_NAME } from "@/lib/constants";
24 |
25 | interface ApiKeyDialogProps {
26 | show: boolean;
27 | onClose: (open: boolean) => void;
28 | onSuccess: () => void;
29 | }
30 |
31 | export function ApiKeyDialog({ show, onClose, onSuccess }: ApiKeyDialogProps) {
32 | const [openaiKey, setOpenaiKey] = useState("");
33 | const [firecrawlKey, setFirecrawlKey] = useState("");
34 | const [loading, setLoading] = useState(false);
35 |
36 | const handleApiKeySubmit = async () => {
37 | if (!openaiKey || !firecrawlKey) return;
38 | setLoading(true);
39 | const res = await fetch("/api/keys", {
40 | method: "POST",
41 | headers: { "Content-Type": "application/json" },
42 | body: JSON.stringify({ openaiKey, firecrawlKey }),
43 | });
44 | if (res.ok) {
45 | onClose(false);
46 | onSuccess();
47 | }
48 | setLoading(false);
49 | };
50 |
51 | return (
52 |
53 |
54 |
55 |
56 | {APP_NAME}
57 |
58 |
59 |
60 |
61 |
62 |
63 | Secure API Key Setup
64 |
65 |
66 | To use Deep Research, you'll need to provide your API keys.
67 | These keys are stored securely using HTTP-only cookies and are
68 | never exposed to client-side JavaScript.
69 |
70 |
71 |
72 |
73 | Self-hosting option: {" "}
74 | You can clone the repository and host this application on
75 | your own infrastructure. This gives you complete control
76 | over your data and API key management.
77 |
78 |
84 | View self-hosting instructions
85 |
91 |
97 |
98 |
99 |
100 |
101 |
102 |
103 |
104 |
105 |
106 |
113 | OpenAI API Key
114 |
115 |
116 | Powers our advanced language models for research analysis
117 | and synthesis.
118 |
124 | Get your OpenAI key →
125 |
126 |
127 |
128 |
129 |
146 |
147 |
148 |
149 |
150 |
151 |
152 |
153 |
154 | OpenAI API Key
155 |
156 |
157 |
setOpenaiKey(e.target.value)}
161 | placeholder="sk-..."
162 | className="pr-10 font-mono text-sm bg-background/50 border-border focus:border-primary focus:ring-primary h-9 sm:h-10"
163 | />
164 |
165 |
166 |
167 |
168 |
169 | Starts with 'sk-' and contains about 50 characters
170 |
171 |
172 |
173 |
174 |
175 | FireCrawl API Key
176 |
177 |
178 |
setFirecrawlKey(e.target.value)}
182 | placeholder="fc-..."
183 | className="pr-10 font-mono text-sm bg-background/50 border-border focus:border-primary focus:ring-primary h-9 sm:h-10"
184 | />
185 |
186 |
187 |
188 |
189 |
190 | Usually starts with 'fc-' for production keys
191 |
192 |
193 |
194 |
195 |
196 |
197 |
198 | Your keys are stored securely
199 |
200 |
211 |
217 | {loading ? (
218 |
219 |
220 | Setting up...
221 |
222 | ) : (
223 | "Start Researching"
224 | )}
225 |
226 |
227 |
228 |
229 | );
230 | }
231 |
--------------------------------------------------------------------------------
/components/chat/chat.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | import { useState, useEffect } from "react";
4 | import { type Message } from "ai";
5 | import { motion } from "framer-motion";
6 | import { GithubIcon, PanelRightOpen } from "lucide-react";
7 |
8 | import { useScrollToBottom } from "@/lib/hooks/use-scroll-to-bottom";
9 |
10 | import DownloadTxtButton from "./download-txt";
11 | import { MultimodalInput } from "./input";
12 | import { PreviewMessage, ProgressStep } from "./message";
13 | import { ResearchProgress } from "./research-progress";
14 |
15 | export function Chat({
16 | id,
17 | initialMessages,
18 | }: {
19 | id: string;
20 | initialMessages: Message[];
21 | }) {
22 | const [messages, setMessages] = useState(initialMessages);
23 | const [isLoading, setIsLoading] = useState(false);
24 | const [progress, setProgress] = useState([]);
25 | const [containerRef, messagesEndRef] = useScrollToBottom();
26 |
27 | // New state to store the final report text
28 | const [finalReport, setFinalReport] = useState(null);
29 |
30 | // States for interactive feedback workflow
31 | const [stage, setStage] = useState<"initial" | "feedback" | "researching">(
32 | "initial"
33 | );
34 | const [initialQuery, setInitialQuery] = useState("");
35 |
36 | // Add state for mobile progress panel visibility
37 | const [showProgress, setShowProgress] = useState(false);
38 |
39 | // New state to track if we're on mobile (using 768px as breakpoint for md)
40 | const [isMobile, setIsMobile] = useState(false);
41 | useEffect(() => {
42 | const handleResize = () => {
43 | setIsMobile(window.innerWidth < 768);
44 | };
45 | handleResize();
46 | window.addEventListener("resize", handleResize);
47 | return () => window.removeEventListener("resize", handleResize);
48 | }, []);
49 |
50 | // Update the condition to only be true when there are actual research steps
51 | const hasStartedResearch =
52 | progress.filter(
53 | (step) =>
54 | // Only count non-report steps or initial report steps
55 | step.type !== "report" ||
56 | step.content.includes("Generating") ||
57 | step.content.includes("Synthesizing")
58 | ).length > 0;
59 |
60 | // Helper function to call the research endpoint
61 | const sendResearchQuery = async (
62 | query: string,
63 | config: { breadth: number; depth: number; modelId: string }
64 | ) => {
65 | try {
66 | setIsLoading(true);
67 | setProgress([]);
68 |
69 | // Create the EventSource for SSE
70 | const response = await fetch("/api/research", {
71 | method: "POST",
72 | headers: {
73 | "Content-Type": "application/json",
74 | Accept: "text/event-stream",
75 | },
76 | body: JSON.stringify({
77 | query,
78 | breadth: config.breadth,
79 | depth: config.depth,
80 | modelId: config.modelId,
81 | }),
82 | });
83 |
84 | if (!response.ok) {
85 | throw new Error(`HTTP error! status: ${response.status}`);
86 | }
87 |
88 | const reader = response.body?.getReader();
89 | if (!reader) throw new Error("No reader available");
90 |
91 | const decoder = new TextDecoder();
92 | let buffer = "";
93 |
94 | while (true) {
95 | const { value, done } = await reader.read();
96 | if (done) break;
97 |
98 | buffer += decoder.decode(value, { stream: true });
99 |
100 | // Process complete messages
101 | const messages = buffer.split("\n\n");
102 | buffer = messages.pop() || ""; // Keep the last incomplete message in buffer
103 |
104 | for (const message of messages) {
105 | const lines = message.split("\n");
106 | const dataLine = lines.find((line) => line.startsWith("data: "));
107 | if (!dataLine) continue;
108 |
109 | try {
110 | const event = JSON.parse(dataLine.slice(6));
111 | console.log("Received event:", event); // Debug log
112 |
113 | switch (event.type) {
114 | case "connected":
115 | console.log("SSE Connection established");
116 | break;
117 |
118 | case "progress":
119 | setProgress((prev) => {
120 | // Avoid duplicate progress updates
121 | if (
122 | prev.length > 0 &&
123 | prev[prev.length - 1].content === event.step.content
124 | ) {
125 | return prev;
126 | }
127 | return [...prev, event.step];
128 | });
129 | break;
130 |
131 | case "result":
132 | setFinalReport(event.report);
133 | setMessages((prev) => [
134 | ...prev,
135 | {
136 | id: crypto.randomUUID(),
137 | role: "assistant",
138 | content: event.report,
139 | },
140 | ]);
141 | break;
142 |
143 | case "error":
144 | throw new Error(event.message);
145 | }
146 | } catch (e) {
147 | console.error("Error parsing SSE message:", e);
148 | }
149 | }
150 | }
151 | } catch (error) {
152 | console.error("Research error:", error);
153 | setMessages((prev) => [
154 | ...prev,
155 | {
156 | id: crypto.randomUUID(),
157 | role: "assistant",
158 | content: "Sorry, there was an error conducting the research.",
159 | },
160 | ]);
161 | } finally {
162 | setIsLoading(false);
163 | }
164 | };
165 |
166 | const handleSubmit = async (
167 | userInput: string,
168 | config: { breadth: number; depth: number; modelId: string }
169 | ) => {
170 | if (!userInput.trim() || isLoading) return;
171 |
172 | // Add user message immediately
173 | setMessages((prev) => [
174 | ...prev,
175 | {
176 | id: crypto.randomUUID(),
177 | role: "user",
178 | content: userInput,
179 | },
180 | ]);
181 |
182 | setIsLoading(true);
183 |
184 | console.log({ stage });
185 |
186 | if (stage === "initial") {
187 | // Add thinking message only for initial query
188 | setMessages((prev) => [
189 | ...prev,
190 | {
191 | id: "thinking",
192 | role: "assistant",
193 | content: "Thinking...",
194 | },
195 | ]);
196 |
197 | // Handle the user's initial query
198 | setInitialQuery(userInput);
199 |
200 | try {
201 | const response = await fetch("/api/feedback", {
202 | method: "POST",
203 | headers: {
204 | "Content-Type": "application/json",
205 | },
206 | body: JSON.stringify({
207 | query: userInput,
208 | numQuestions: 3,
209 | modelId: config.modelId,
210 | }),
211 | });
212 | const data = await response.json();
213 | const questions: string[] = data.questions || [];
214 | setMessages((prev) => {
215 | const filtered = prev.filter((m) => m.id !== "thinking");
216 | if (questions.length > 0) {
217 | const formattedQuestions = questions
218 | .map((q, index) => `${index + 1}. ${q}`)
219 | .join("\n\n");
220 | return [
221 | ...filtered,
222 | {
223 | id: crypto.randomUUID(),
224 | role: "assistant",
225 | content: `Please answer the following follow-up questions to help clarify your research:\n\n${formattedQuestions}`,
226 | },
227 | ];
228 | }
229 | return filtered;
230 | });
231 | setStage("feedback");
232 | } catch (error) {
233 | console.error("Feedback generation error:", error);
234 | setMessages((prev) => [
235 | ...prev.filter((m) => m.id !== "thinking"),
236 | {
237 | id: crypto.randomUUID(),
238 | role: "assistant",
239 | content: "Sorry, there was an error generating feedback questions.",
240 | },
241 | ]);
242 | } finally {
243 | setIsLoading(false);
244 | }
245 | } else if (stage === "feedback") {
246 | // In feedback stage, combine the initial query and follow-up answers
247 | const combined = `Initial Query: ${initialQuery}\nFollow-up Answers:\n${userInput}`;
248 | setStage("researching");
249 | try {
250 | await sendResearchQuery(combined, config);
251 | } finally {
252 | setIsLoading(false);
253 | // Reset the stage so further messages will be processed
254 | setStage("initial");
255 | // Inform the user that a new research session can be started
256 | setMessages((prev) => [
257 | ...prev,
258 | {
259 | id: crypto.randomUUID(),
260 | role: "assistant",
261 | content:
262 | "Research session complete. You can now ask another question to begin a new research session.",
263 | },
264 | ]);
265 | }
266 | }
267 | };
268 |
269 | return (
270 |
271 | {/* Main container with dynamic width */}
272 |
283 | {/* Messages Container */}
284 |
290 | {/* Welcome Message (if no research started and no messages) */}
291 | {!hasStartedResearch && messages.length === 0 && (
292 |
293 |
309 |
321 |
334 |
335 |
336 |
337 |
338 |
345 | Supa Deep Research
346 |
347 |
348 |
354 | An open source alternative to OpenAI and Gemini's deep
355 | research capabilities with Supabase Edge Functions. Ask any
356 | question to generate a comprehensive report.
357 |
358 |
359 |
365 |
375 |
376 | View source code
377 |
378 |
379 |
380 |
381 |
382 | )}
383 |
384 | {/* Messages */}
385 |
386 | {messages.map((message) => (
387 |
388 | ))}
389 |
390 | {finalReport && (
391 |
392 |
393 |
394 | )}
395 |
396 |
397 |
398 | {/* Input - Fixed to bottom */}
399 |
414 |
415 |
416 | {/* Research Progress Panel */}
417 |
430 |
431 |
432 |
433 | {/* Mobile Toggle Button - Only show when research has started */}
434 | {hasStartedResearch && (
435 |
setShowProgress(!showProgress)}
437 | className={`
438 | md:hidden
439 | fixed
440 | bottom-24
441 | right-4
442 | z-50
443 | p-3
444 | bg-primary
445 | text-primary-foreground
446 | rounded-full
447 | shadow-lg
448 | transition-transform
449 | ${showProgress ? "rotate-180" : ""}
450 | `}
451 | aria-label="Toggle research progress"
452 | >
453 |
454 |
455 | )}
456 |
457 | );
458 | }
459 |
--------------------------------------------------------------------------------
/components/chat/download-txt.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { DownloadIcon } from 'lucide-react';
3 |
4 | interface DownloadTxtButtonProps {
5 | reportText: string;
6 | fileName?: string;
7 | }
8 |
9 | const DownloadTxtButton: React.FC = ({
10 | reportText,
11 | fileName = 'research_report.txt',
12 | }) => {
13 | const handleDownload = () => {
14 | // Create a blob from the report text content.
15 | const blob = new Blob([reportText], { type: 'text/plain;charset=utf-8' });
16 | // Create a temporary URL for the blob.
17 | const url = window.URL.createObjectURL(blob);
18 | // Create a temporary anchor element.
19 | const link = document.createElement('a');
20 | link.href = url;
21 | link.download = fileName;
22 | // Append the link, trigger click, remove it, and revoke the URL.
23 | document.body.appendChild(link);
24 | link.click();
25 | document.body.removeChild(link);
26 | window.URL.revokeObjectURL(url);
27 | };
28 |
29 | return (
30 |
60 |
61 |
62 | Download Report
63 |
64 | );
65 | };
66 |
67 | export default DownloadTxtButton;
68 |
--------------------------------------------------------------------------------
/components/chat/input.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | import { useEffect, useRef, useState } from "react";
4 | import Image from "next/image";
5 | import cx from "classnames";
6 | import {
7 | ArrowUpIcon,
8 | CheckCircleIcon,
9 | ChevronDown,
10 | DownloadIcon,
11 | Loader2,
12 | XCircleIcon,
13 | } from "lucide-react";
14 |
15 | import { Button } from "@/components/ui/button";
16 | import { Slider } from "@/components/ui/slider";
17 | import {
18 | availableModels,
19 | type AIModelDisplayInfo,
20 | } from "@/lib/deep-research/ai/providers";
21 | import { ApiKeyDialog } from "@/components/chat/api-key-dialog";
22 |
// Props for the research input bar (prompt textarea + config controls).
type MultimodalInputProps = {
  // Invoked with the raw prompt text plus the research configuration the
  // user chose in the controls (breadth/depth sliders, model picker).
  onSubmit: (
    input: string,
    config: {
      breadth: number;
      depth: number;
      modelId: string;
    }
  ) => void;
  // Disables submission while a research run is in flight.
  isLoading: boolean;
  placeholder?: string;
  isAuthenticated?: boolean;
  // Optional report-download affordance; the button renders when
  // canDownload is true and onDownload is provided.
  onDownload?: () => void;
  canDownload?: boolean;
};
38 |
39 | export function MultimodalInput({
40 | onSubmit,
41 | isLoading,
42 | placeholder = "What would you like to research?",
43 | onDownload,
44 | canDownload = false,
45 | }: MultimodalInputProps) {
46 | const [input, setInput] = useState("");
47 | const [breadth, setBreadth] = useState(4);
48 | const [depth, setDepth] = useState(2);
49 | const [selectedModel, setSelectedModel] = useState(
50 | availableModels.find((model) => model.id === "o3-mini") ||
51 | availableModels[0]
52 | );
53 | const [isModelDropdownOpen, setIsModelDropdownOpen] = useState(false);
54 | const [showApiKeyPrompt, setShowApiKeyPrompt] = useState(false);
55 | const [hasKeys, setHasKeys] = useState(false);
56 | const textareaRef = useRef(null);
57 |
58 | // Read the feature flag from environment variables.
59 | const enableApiKeys = process.env.NEXT_PUBLIC_ENABLE_API_KEYS === "true";
60 |
61 | // When API keys are disabled via env flag, always consider keys as present.
62 | const effectiveHasKeys = enableApiKeys ? hasKeys : true;
63 |
64 | // Check for keys using the consolidated endpoint
65 | useEffect(() => {
66 | const checkKeys = async () => {
67 | const res = await fetch("/api/keys");
68 | const data = await res.json();
69 | setHasKeys(data.keysPresent);
70 | if (!data.keysPresent && enableApiKeys) {
71 | setShowApiKeyPrompt(true);
72 | } else {
73 | setShowApiKeyPrompt(false);
74 | }
75 | };
76 | checkKeys();
77 | }, [enableApiKeys]);
78 |
79 | // New: Remove API keys handler
80 | const handleRemoveKeys = async () => {
81 | if (!window.confirm("Are you sure you want to remove your API keys?"))
82 | return;
83 | try {
84 | const res = await fetch("/api/keys", {
85 | method: "DELETE",
86 | });
87 | if (res.ok) {
88 | setHasKeys(false);
89 | }
90 | } catch (error) {
91 | console.error("Error removing keys:", error);
92 | }
93 | };
94 |
95 | const handleSubmit = () => {
96 | if (!input.trim() || isLoading) return;
97 | if (enableApiKeys && !effectiveHasKeys) {
98 | // Re-open the API key modal if keys are missing
99 | setShowApiKeyPrompt(true);
100 | return;
101 | }
102 | onSubmit(input, {
103 | breadth,
104 | depth,
105 | modelId: selectedModel.id,
106 | });
107 | setInput("");
108 | };
109 |
110 | useEffect(() => {
111 | if (textareaRef.current) {
112 | textareaRef.current.style.height = "inherit";
113 | textareaRef.current.style.height = `${textareaRef.current.scrollHeight}px`;
114 | }
115 | }, [input]);
116 |
117 | const DownloadButton = () => (
118 |
124 |
125 | Download Report
126 |
127 | );
128 |
129 | return (
130 |
131 | {/* Conditionally render API key dialog only if enabled */}
132 | {enableApiKeys && (
133 |
{
137 | setShowApiKeyPrompt(false);
138 | setHasKeys(true);
139 | }}
140 | />
141 | )}
142 |
143 |
420 | );
421 | }
422 |
--------------------------------------------------------------------------------
/components/chat/markdown.tsx:
--------------------------------------------------------------------------------
1 | import React, { memo } from 'react';
2 | import Link from 'next/link';
3 | import ReactMarkdown from 'react-markdown';
4 | import remarkGfm from 'remark-gfm';
5 |
6 | const NonMemoizedMarkdown = ({ children }: { children: string }) => {
7 | const components = {
8 | // Root wrapper
9 | root: ({ children }: any) => (
10 |
11 | {children}
12 |
13 | ),
14 |
15 | // Code blocks remain the same size
16 | code: ({ node, inline, className, children, ...props }: any) => {
17 | const match = /language-(\w+)/.exec(className || '');
18 | return !inline && match ? (
19 |
34 | {children}
35 |
36 | ) : (
37 |
48 | {children}
49 |
50 | );
51 | },
52 |
53 | // Headings keep their larger sizes
54 | h1: ({ node, children, ...props }: any) => {
55 | return (
56 |
68 | {children}
69 |
70 | );
71 | },
72 |
73 | h2: ({ node, children, ...props }: any) => {
74 | return (
75 |
85 | {children}
86 |
87 | );
88 | },
89 |
90 | h3: ({ node, children, ...props }: any) => {
91 | return (
92 |
102 | {children}
103 |
104 | );
105 | },
106 |
107 | // All regular text elements use text-sm
108 | p: ({ node, children, ...props }: any) => {
109 | return (
110 |
119 | {children}
120 |
121 | );
122 | },
123 |
124 | ul: ({ node, children, ...props }: any) => {
125 | return (
126 |
140 | );
141 | },
142 |
143 | ol: ({ node, children, ...props }: any) => {
144 | return (
145 |
157 | {children}
158 |
159 | );
160 | },
161 |
162 | li: ({ node, children, ...props }: any) => {
163 | return (
164 |
172 | {children}
173 |
174 | );
175 | },
176 |
177 | // Table elements use text-sm
178 | table: ({ node, children, ...props }: any) => {
179 | return (
180 |
193 | );
194 | },
195 |
196 | th: ({ node, children, ...props }: any) => {
197 | return (
198 |
211 | {children}
212 |
213 | );
214 | },
215 |
216 | td: ({ node, children, ...props }: any) => {
217 | return (
218 |
228 | {children}
229 |
230 | );
231 | },
232 |
233 | // Blockquotes use text-sm
234 | blockquote: ({ node, children, ...props }: any) => {
235 | return (
236 |
248 | {children}
249 |
250 | );
251 | },
252 |
253 | // Links
254 | a: ({ node, children, ...props }: any) => {
255 | return (
256 |
270 | {children}
271 |
272 | );
273 | },
274 |
275 | // Bold text
276 | strong: ({ node, children, ...props }: any) => {
277 | return (
278 |
285 | {children}
286 |
287 | );
288 | },
289 | };
290 |
291 | return (
292 |
293 |
294 | {children}
295 |
296 |
297 | );
298 | };
299 |
300 | export const Markdown = memo(
301 | NonMemoizedMarkdown,
302 | (prevProps, nextProps) => prevProps.children === nextProps.children,
303 | );
304 |
--------------------------------------------------------------------------------
/components/chat/message.tsx:
--------------------------------------------------------------------------------
1 | 'use client';
2 |
3 | import { Message } from 'ai';
4 | import { motion } from 'framer-motion';
5 | import {
6 | BookOpenIcon,
7 | BrainCircuitIcon,
8 | GavelIcon,
9 | SearchIcon,
10 | } from 'lucide-react';
11 |
12 | import { Markdown } from './markdown';
13 |
14 | export type ProgressStep = {
15 | type: 'query' | 'research' | 'learning' | 'report';
16 | content: string;
17 | queries?: Array<{
18 | query: string;
19 | researchGoal: string;
20 | }>;
21 | };
22 |
23 | export function PreviewMessage({ message }: { message: Message }) {
24 | // Helper function to format follow-up questions into markdown
25 | const formatFollowUpQuestions = (content: string) => {
26 | if (content.includes('follow-up questions')) {
27 | // Split the content into introduction and questions
28 | const [intro, ...questions] = content.split('\n').filter(Boolean);
29 |
30 | // Format as markdown
31 | return `${intro}\n\n${questions
32 | .map(q => {
33 | // If the line starts with a number, format it as a markdown list item
34 | if (/^\d+\./.test(q)) {
35 | return q.trim();
36 | }
37 | return q;
38 | })
39 | .join('\n\n')}`;
40 | }
41 | return content;
42 | };
43 |
44 | return (
45 |
50 |
57 |
64 | {message.role === 'assistant' ? (
65 |
66 | {formatFollowUpQuestions(message.content)}
67 |
68 | ) : (
69 |
{message.content}
70 | )}
71 |
72 |
73 |
74 | );
75 | }
76 |
77 | export function ResearchProgress({
78 | progress,
79 | isLoading,
80 | }: {
81 | progress: ProgressStep[];
82 | isLoading: boolean;
83 | }) {
84 | // Filter out individual report word updates
85 | const filteredProgress = progress.filter(step => {
86 | if (step.type === 'report') {
87 | // Only show the initial "Generating report" step
88 | return (
89 | step.content.includes('Generating') ||
90 | step.content.includes('Synthesizing')
91 | );
92 | }
93 | return true;
94 | });
95 |
96 | if (!isLoading && filteredProgress.length === 0) {
97 | return (
98 |
99 |
100 |
101 |
102 | Begin your research journey
103 |
104 |
105 |
106 | );
107 | }
108 |
109 | return (
110 |
111 | {filteredProgress.map((step, index) => (
112 |
118 |
119 | {step.type === 'query' && }
120 | {step.type === 'research' && }
121 | {step.type === 'learning' && }
122 | {step.type === 'report' && }
123 |
124 |
125 |
{step.type}
126 |
{step.content}
127 |
128 | {step.queries && (
129 |
130 | {step.queries.map((query, idx) => (
131 |
138 | {query.query}
139 |
140 | {query.researchGoal}
141 |
142 |
143 | ))}
144 |
145 | )}
146 |
147 |
148 | ))}
149 |
150 | );
151 | }
152 |
--------------------------------------------------------------------------------
/components/chat/research-progress.tsx:
--------------------------------------------------------------------------------
1 | 'use client';
2 |
3 | import { useEffect, useRef, useState } from 'react';
4 | import { AnimatePresence, motion } from 'framer-motion';
5 | import {
6 | BrainCircuitIcon,
7 | FileSearchIcon,
8 | Loader2Icon,
9 | PlayIcon,
10 | SearchIcon,
11 | SparklesIcon,
12 | } from 'lucide-react';
13 |
14 | import { ProgressStep } from './message';
15 |
16 | // Simplified configuration with a more minimal color palette
17 | const actionConfig = {
18 | 'Generating up to': {
19 | icon: ,
20 | text: 'Generating',
21 | },
22 | Created: {
23 | icon: ,
24 | text: 'Created',
25 | },
26 | Researching: {
27 | icon: ,
28 | text: 'Researching',
29 | },
30 | Found: {
31 | icon: ,
32 | text: 'Found',
33 | },
34 | Ran: {
35 | icon: ,
36 | text: 'Processing',
37 | },
38 | Generated: {
39 | icon: ,
40 | text: 'Generated',
41 | },
42 | };
43 |
44 | export function ResearchProgress({
45 | progress,
46 | isLoading,
47 | }: {
48 | progress: ProgressStep[];
49 | isLoading: boolean;
50 | }) {
51 | const containerRef = useRef(null);
52 | const [userHasScrolled, setUserHasScrolled] = useState(false);
53 |
54 | // Handle auto-scrolling
55 | useEffect(() => {
56 | const container = containerRef.current;
57 | if (!container || userHasScrolled) return;
58 |
59 | container.scrollTop = container.scrollHeight;
60 | }, [progress, userHasScrolled]);
61 |
62 | // Handle scroll events
63 | const handleScroll = () => {
64 | const container = containerRef.current;
65 | if (!container) return;
66 |
67 | const isAtBottom =
68 | Math.abs(
69 | container.scrollHeight - container.scrollTop - container.clientHeight,
70 | ) < 10;
71 |
72 | setUserHasScrolled(!isAtBottom);
73 | };
74 |
75 | const getConfig = (content: string) => {
76 | const firstWord = content.split('\n')[0].split(' ')[0];
77 | for (const [key, config] of Object.entries(actionConfig)) {
78 | if (firstWord.startsWith(key)) {
79 | return config;
80 | }
81 | }
82 | return actionConfig['Researching'];
83 | };
84 |
85 | // Remove the empty state UI since it's now in the main chat
86 | if (!isLoading && progress.length === 0) {
87 | return null;
88 | }
89 |
90 | return (
91 |
96 |
97 | {progress.map((step, index) => {
98 | const [title, ...details] = step.content.split('\n');
99 | const config = getConfig(title);
100 |
101 | return (
102 |
112 |
136 |
137 |
138 |
139 | {config.icon}
140 |
141 |
142 |
143 |
144 |
145 | {config.text}
146 | {' '}
147 |
148 | {title.split(' ').slice(1).join(' ')}
149 |
150 |
151 |
152 | {details.length > 0 && (
153 |
154 | {details.join('\n')}
155 |
156 | )}
157 |
158 | {step.queries && (
159 |
160 | {step.queries.map((query, idx) => (
161 |
183 |
184 | {query.query}
185 |
186 |
187 | {query.researchGoal}
188 |
189 |
190 | ))}
191 |
192 | )}
193 |
194 |
195 |
196 | );
197 | })}
198 |
199 |
200 | );
201 | }
202 |
--------------------------------------------------------------------------------
/components/site-header.tsx:
--------------------------------------------------------------------------------
1 | import { APP_NAME } from "@/lib/constants";
2 |
3 | export function Header() {
4 | return (
5 | <>
6 |
24 | >
25 | );
26 | }
27 |
--------------------------------------------------------------------------------
/components/theme-provider.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | import * as React from "react";
4 | import { ThemeProvider as NextThemesProvider } from "next-themes";
5 |
6 | export function ThemeProvider({
7 | children,
8 | ...props
9 | }: React.ComponentProps) {
10 | return {children} ;
11 | }
12 |
--------------------------------------------------------------------------------
/components/ui/button.tsx:
--------------------------------------------------------------------------------
1 | import * as React from "react"
2 | import { Slot } from "@radix-ui/react-slot"
3 | import { cva, type VariantProps } from "class-variance-authority"
4 |
5 | import { cn } from "@/lib/utils"
6 |
7 | const buttonVariants = cva(
8 | "inline-flex items-center justify-center whitespace-nowrap rounded-md text-sm font-medium ring-offset-background transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:pointer-events-none disabled:opacity-50",
9 | {
10 | variants: {
11 | variant: {
12 | default: "bg-primary text-primary-foreground hover:bg-primary/90",
13 | destructive:
14 | "bg-destructive text-destructive-foreground hover:bg-destructive/90",
15 | outline:
16 | "border border-input bg-background hover:bg-accent hover:text-accent-foreground",
17 | secondary:
18 | "bg-secondary text-secondary-foreground hover:bg-secondary/80",
19 | ghost: "hover:bg-accent hover:text-accent-foreground",
20 | link: "text-primary underline-offset-4 hover:underline",
21 | },
22 | size: {
23 | default: "h-10 px-4 py-2",
24 | sm: "h-9 rounded-md px-3",
25 | lg: "h-11 rounded-md px-8",
26 | icon: "h-10 w-10",
27 | },
28 | },
29 | defaultVariants: {
30 | variant: "default",
31 | size: "default",
32 | },
33 | }
34 | )
35 |
36 | export interface ButtonProps
37 | extends React.ButtonHTMLAttributes,
38 | VariantProps {
39 | asChild?: boolean
40 | }
41 |
42 | const Button = React.forwardRef(
43 | ({ className, variant, size, asChild = false, ...props }, ref) => {
44 | const Comp = asChild ? Slot : "button"
45 | return (
46 |
51 | )
52 | }
53 | )
54 | Button.displayName = "Button"
55 |
56 | export { Button, buttonVariants }
57 |
--------------------------------------------------------------------------------
/components/ui/dialog.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | import * as React from "react";
4 | import * as DialogPrimitive from "@radix-ui/react-dialog";
5 | import { X } from "lucide-react";
6 |
7 | import { cn } from "@/lib/utils";
8 |
9 | const Dialog = DialogPrimitive.Root;
10 |
11 | const DialogTrigger = DialogPrimitive.Trigger;
12 |
13 | const DialogPortal = DialogPrimitive.Portal;
14 |
15 | const DialogClose = DialogPrimitive.Close;
16 |
17 | const DialogOverlay = React.forwardRef<
18 | React.ElementRef,
19 | React.ComponentPropsWithoutRef
20 | >(({ className, ...props }, ref) => (
21 |
29 | ));
30 | DialogOverlay.displayName = DialogPrimitive.Overlay.displayName;
31 |
32 | const DialogContent = React.forwardRef<
33 | React.ElementRef,
34 | React.ComponentPropsWithoutRef
35 | >(({ className, children, ...props }, ref) => (
36 |
37 |
38 |
46 | {children}
47 |
48 |
49 | Close
50 |
51 |
52 |
53 | ));
54 | DialogContent.displayName = DialogPrimitive.Content.displayName;
55 |
56 | const DialogHeader = ({
57 | className,
58 | ...props
59 | }: React.HTMLAttributes) => (
60 |
67 | );
68 | DialogHeader.displayName = "DialogHeader";
69 |
70 | const DialogFooter = ({
71 | className,
72 | ...props
73 | }: React.HTMLAttributes) => (
74 |
81 | );
82 | DialogFooter.displayName = "DialogFooter";
83 |
84 | const DialogTitle = React.forwardRef<
85 | React.ElementRef,
86 | React.ComponentPropsWithoutRef
87 | >(({ className, ...props }, ref) => (
88 |
96 | ));
97 | DialogTitle.displayName = DialogPrimitive.Title.displayName;
98 |
99 | const DialogDescription = React.forwardRef<
100 | React.ElementRef,
101 | React.ComponentPropsWithoutRef
102 | >(({ className, ...props }, ref) => (
103 |
108 | ));
109 | DialogDescription.displayName = DialogPrimitive.Description.displayName;
110 |
111 | export {
112 | Dialog,
113 | DialogPortal,
114 | DialogOverlay,
115 | DialogClose,
116 | DialogTrigger,
117 | DialogContent,
118 | DialogHeader,
119 | DialogFooter,
120 | DialogTitle,
121 | DialogDescription,
122 | };
123 |
--------------------------------------------------------------------------------
/components/ui/input.tsx:
--------------------------------------------------------------------------------
1 | import * as React from "react"
2 |
3 | import { cn } from "@/lib/utils"
4 |
5 | export interface InputProps
6 | extends React.InputHTMLAttributes {}
7 |
8 | const Input = React.forwardRef(
9 | ({ className, type, ...props }, ref) => {
10 | return (
11 |
20 | )
21 | }
22 | )
23 | Input.displayName = "Input"
24 |
25 | export { Input }
26 |
--------------------------------------------------------------------------------
/components/ui/slider.tsx:
--------------------------------------------------------------------------------
1 | 'use client';
2 |
3 | import * as React from 'react';
4 | import * as SliderPrimitive from '@radix-ui/react-slider';
5 |
6 | import { cn } from '@/lib/utils';
7 |
8 | const Slider = React.forwardRef<
9 | React.ElementRef,
10 | React.ComponentPropsWithoutRef
11 | >(({ className, ...props }, ref) => (
12 |
20 |
21 |
22 |
23 |
24 |
25 | ));
26 | Slider.displayName = SliderPrimitive.Root.displayName;
27 |
28 | export { Slider };
29 |
--------------------------------------------------------------------------------
/components/ui/tooltip.tsx:
--------------------------------------------------------------------------------
1 | 'use client';
2 |
3 | import * as React from 'react';
4 | import * as TooltipPrimitive from '@radix-ui/react-tooltip';
5 |
6 | import { cn } from '@/lib/utils';
7 |
8 | const TooltipProvider = TooltipPrimitive.Provider;
9 |
10 | const Tooltip = TooltipPrimitive.Root;
11 |
12 | const TooltipTrigger = TooltipPrimitive.Trigger;
13 |
14 | const TooltipContent = React.forwardRef<
15 | React.ElementRef,
16 | React.ComponentPropsWithoutRef
17 | >(({ className, sideOffset = 4, ...props }, ref) => (
18 |
19 |
28 |
29 | ));
30 | TooltipContent.displayName = TooltipPrimitive.Content.displayName;
31 |
32 | export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider };
33 |
--------------------------------------------------------------------------------
/lib/constants.ts:
--------------------------------------------------------------------------------
// Application-wide branding strings, shared by the site header and metadata.
export const APP_NAME = "Supa Deep Research";
export const APP_DESCRIPTION = "AI-powered research assistant";
3 |
--------------------------------------------------------------------------------
/lib/deep-research/ai/providers.ts:
--------------------------------------------------------------------------------
1 | import { createOpenAI } from '@ai-sdk/openai';
2 | import { getEncoding } from 'js-tiktoken';
3 |
4 | import { RecursiveCharacterTextSplitter } from './text-splitter';
5 |
// Model Display Information
// Static catalog of the OpenAI models exposed in the UI's model picker.
// `logo` is a public asset path; `vision` marks image-input support.
export const AI_MODEL_DISPLAY = {
  'gpt-4o': {
    id: 'gpt-4o',
    name: 'GPT-4o',
    logo: '/providers/openai.webp',
    vision: true,
  },
  'gpt-4o-mini': {
    id: 'gpt-4o-mini',
    name: 'GPT-4o mini',
    logo: '/providers/openai.webp',
    vision: true,
  },
  'o3-mini': {
    id: 'o3-mini',
    name: 'o3 mini',
    logo: '/providers/openai.webp',
    vision: false,
  },
} as const;

// Union of catalog keys: 'gpt-4o' | 'gpt-4o-mini' | 'o3-mini'.
export type AIModel = keyof typeof AI_MODEL_DISPLAY;
// Shape of a single catalog entry (id/name/logo/vision).
export type AIModelDisplayInfo = (typeof AI_MODEL_DISPLAY)[AIModel];
// Array form of the catalog, convenient for rendering pickers.
export const availableModels = Object.values(AI_MODEL_DISPLAY);
31 |
// OpenAI Client
// NOTE(review): this module-level client appears unused within this file —
// createModel() builds its own client per call. Also, .env.example documents
// OPENAI_API_KEY while this reads OPENAI_KEY; confirm which name deployments
// actually set before relying on the `!` assertion here.
const openai = createOpenAI({
  apiKey: process.env.OPENAI_KEY!,
});
36 |
37 | // Create model instances with configurations
38 | export function createModel(modelId: AIModel, apiKey?: string) {
39 | const client = createOpenAI({
40 | apiKey: apiKey || process.env.OPENAI_KEY!,
41 | });
42 |
43 | return client(modelId, {
44 | structuredOutputs: true,
45 | ...(modelId === 'o3-mini' ? { reasoningEffort: 'medium' } : {}),
46 | });
47 | }
48 |
// Token handling
// Floor for the trimmed length: never cut a prompt below this many characters.
const MinChunkSize = 140;
// o200k_base is the tokenizer used by current OpenAI models.
const encoder = getEncoding('o200k_base');

// trim prompt to maximum context size
// Recursively shortens `prompt` until its token count fits `contextSize`,
// preferring natural split points (via RecursiveCharacterTextSplitter) over
// hard character cuts. Returns '' for empty/falsy input.
export function trimPrompt(prompt: string, contextSize = 120_000) {
  if (!prompt) {
    return '';
  }

  const length = encoder.encode(prompt).length;
  if (length <= contextSize) {
    return prompt;
  }

  const overflowTokens = length - contextSize;
  // on average it's 3 characters per token, so multiply by 3 to get a rough estimate of the number of characters
  const chunkSize = prompt.length - overflowTokens * 3;
  if (chunkSize < MinChunkSize) {
    // Overflow estimate would leave almost nothing; take the minimal slice.
    return prompt.slice(0, MinChunkSize);
  }

  const splitter = new RecursiveCharacterTextSplitter({
    chunkSize,
    chunkOverlap: 0,
  });
  // First chunk is the longest prefix ending at a natural boundary.
  const trimmedPrompt = splitter.splitText(prompt)[0] ?? '';

  // last catch, there's a chance that the trimmed prompt is same length as the original prompt, due to how tokens are split & innerworkings of the splitter, handle this case by just doing a hard cut
  if (trimmedPrompt.length === prompt.length) {
    return trimPrompt(prompt.slice(0, chunkSize), contextSize);
  }

  // recursively trim until the prompt is within the context size
  return trimPrompt(trimmedPrompt, contextSize);
}
85 |
--------------------------------------------------------------------------------
/lib/deep-research/ai/text-splitter.test.ts:
--------------------------------------------------------------------------------
import assert from 'node:assert';
import { describe, it } from 'node:test';

import { RecursiveCharacterTextSplitter } from './text-splitter';

// Exercises the separator-priority and chunk-budget behavior of the splitter.
describe('RecursiveCharacterTextSplitter', () => {
  it('Should correctly split text by separators', () => {
    const splitter = new RecursiveCharacterTextSplitter({
      chunkSize: 50,
      chunkOverlap: 10,
    });
    // 50-char budget: the comma boundary is used and chunks are trimmed.
    assert.deepEqual(
      splitter.splitText(
        'Hello world, this is a test of the recursive text splitter.',
      ),
      ['Hello world', 'this is a test of the recursive text splitter'],
    );

    // Larger budget: the sentence ('.') boundary wins over the comma.
    splitter.chunkSize = 100;
    assert.deepEqual(
      splitter.splitText(
        'Hello world, this is a test of the recursive text splitter. If I have a period, it should split along the period.',
      ),
      [
        'Hello world, this is a test of the recursive text splitter',
        'If I have a period, it should split along the period.',
      ],
    );

    // Newlines rank above periods in the separator list, so they split first.
    splitter.chunkSize = 110;
    assert.deepEqual(
      splitter.splitText(
        'Hello world, this is a test of the recursive text splitter. If I have a period, it should split along the period.\nOr, if there is a new line, it should prioritize splitting on new lines instead.',
      ),
      [
        'Hello world, this is a test of the recursive text splitter',
        'If I have a period, it should split along the period.',
        'Or, if there is a new line, it should prioritize splitting on new lines instead.',
      ],
    );
  });

  it('Should handle empty string', () => {
    const splitter = new RecursiveCharacterTextSplitter({
      chunkSize: 50,
      chunkOverlap: 10,
    });
    // Empty input yields no chunks rather than [''].
    assert.deepEqual(splitter.splitText(''), []);
  });
});
51 |
--------------------------------------------------------------------------------
/lib/deep-research/ai/text-splitter.ts:
--------------------------------------------------------------------------------
1 | interface TextSplitterParams {
2 | chunkSize: number;
3 |
4 | chunkOverlap: number;
5 | }
6 |
7 | abstract class TextSplitter implements TextSplitterParams {
8 | chunkSize = 1000;
9 | chunkOverlap = 200;
10 |
11 | constructor(fields?: Partial) {
12 | this.chunkSize = fields?.chunkSize ?? this.chunkSize;
13 | this.chunkOverlap = fields?.chunkOverlap ?? this.chunkOverlap;
14 | if (this.chunkOverlap >= this.chunkSize) {
15 | throw new Error('Cannot have chunkOverlap >= chunkSize');
16 | }
17 | }
18 |
19 | abstract splitText(text: string): string[];
20 |
21 | createDocuments(texts: string[]): string[] {
22 | const documents: string[] = [];
23 | for (let i = 0; i < texts.length; i += 1) {
24 | const text = texts[i];
25 | for (const chunk of this.splitText(text!)) {
26 | documents.push(chunk);
27 | }
28 | }
29 | return documents;
30 | }
31 |
32 | splitDocuments(documents: string[]): string[] {
33 | return this.createDocuments(documents);
34 | }
35 |
36 | private joinDocs(docs: string[], separator: string): string | null {
37 | const text = docs.join(separator).trim();
38 | return text === '' ? null : text;
39 | }
40 |
41 | mergeSplits(splits: string[], separator: string): string[] {
42 | const docs: string[] = [];
43 | const currentDoc: string[] = [];
44 | let total = 0;
45 | for (const d of splits) {
46 | const _len = d.length;
47 | if (total + _len >= this.chunkSize) {
48 | if (total > this.chunkSize) {
49 | console.warn(
50 | `Created a chunk of size ${total}, +
51 | which is longer than the specified ${this.chunkSize}`,
52 | );
53 | }
54 | if (currentDoc.length > 0) {
55 | const doc = this.joinDocs(currentDoc, separator);
56 | if (doc !== null) {
57 | docs.push(doc);
58 | }
59 | // Keep on popping if:
60 | // - we have a larger chunk than in the chunk overlap
61 | // - or if we still have any chunks and the length is long
62 | while (
63 | total > this.chunkOverlap ||
64 | (total + _len > this.chunkSize && total > 0)
65 | ) {
66 | total -= currentDoc[0]!.length;
67 | currentDoc.shift();
68 | }
69 | }
70 | }
71 | currentDoc.push(d);
72 | total += _len;
73 | }
74 | const doc = this.joinDocs(currentDoc, separator);
75 | if (doc !== null) {
76 | docs.push(doc);
77 | }
78 | return docs;
79 | }
80 | }
81 |
82 | export interface RecursiveCharacterTextSplitterParams
83 | extends TextSplitterParams {
84 | separators: string[];
85 | }
86 |
87 | export class RecursiveCharacterTextSplitter
88 | extends TextSplitter
89 | implements RecursiveCharacterTextSplitterParams
90 | {
91 | separators: string[] = ['\n\n', '\n', '.', ',', '>', '<', ' ', ''];
92 |
93 | constructor(fields?: Partial) {
94 | super(fields);
95 | this.separators = fields?.separators ?? this.separators;
96 | }
97 |
98 | splitText(text: string): string[] {
99 | const finalChunks: string[] = [];
100 |
101 | // Get appropriate separator to use
102 | let separator: string = this.separators[this.separators.length - 1]!;
103 | for (const s of this.separators) {
104 | if (s === '') {
105 | separator = s;
106 | break;
107 | }
108 | if (text.includes(s)) {
109 | separator = s;
110 | break;
111 | }
112 | }
113 |
114 | // Now that we have the separator, split the text
115 | let splits: string[];
116 | if (separator) {
117 | splits = text.split(separator);
118 | } else {
119 | splits = text.split('');
120 | }
121 |
122 | // Now go merging things, recursively splitting longer texts.
123 | let goodSplits: string[] = [];
124 | for (const s of splits) {
125 | if (s.length < this.chunkSize) {
126 | goodSplits.push(s);
127 | } else {
128 | if (goodSplits.length) {
129 | const mergedText = this.mergeSplits(goodSplits, separator);
130 | finalChunks.push(...mergedText);
131 | goodSplits = [];
132 | }
133 | const otherInfo = this.splitText(s);
134 | finalChunks.push(...otherInfo);
135 | }
136 | }
137 | if (goodSplits.length) {
138 | const mergedText = this.mergeSplits(goodSplits, separator);
139 | finalChunks.push(...mergedText);
140 | }
141 | return finalChunks;
142 | }
143 | }
144 |
--------------------------------------------------------------------------------
/lib/deep-research/deep-research.ts:
--------------------------------------------------------------------------------
1 | import FirecrawlApp, { SearchResponse } from '@mendable/firecrawl-js';
2 | import { generateObject } from 'ai';
3 | import { compact } from 'lodash-es';
4 | import { z } from 'zod';
5 |
6 | import { createModel, trimPrompt } from './ai/providers';
7 | import { systemPrompt } from './prompt';
8 |
9 | type ResearchResult = {
10 | learnings: string[];
11 | visitedUrls: string[];
12 | };
13 |
14 | type DeepResearchOptions = {
15 | query: string;
16 | breadth?: number;
17 | depth?: number;
18 | learnings?: string[];
19 | visitedUrls?: string[];
20 | onProgress?: (update: string) => Promise;
21 | model: ReturnType;
22 | firecrawlKey?: string;
23 | };
24 |
25 | // Update the firecrawl initialization to use the provided key
26 | const getFirecrawl = (apiKey?: string) =>
27 | new FirecrawlApp({
28 | apiKey: apiKey ?? process.env.FIRECRAWL_KEY ?? '',
29 | apiUrl: process.env.FIRECRAWL_BASE_URL,
30 | });
31 |
32 | // Helper function to format progress messages consistently
33 | const formatProgress = {
34 | generating: (count: number, query: string) =>
35 | `Generating up to ${count} SERP queries\n${query}`,
36 |
37 | created: (count: number, queries: string) =>
38 | `Created ${count} SERP queries\n${queries}`,
39 |
40 | researching: (query: string) => `Researching\n${query}`,
41 |
42 | found: (count: number, query: string) => `Found ${count} results\n${query}`,
43 |
44 | ran: (query: string, count: number) =>
45 | `Ran "${query}"\n${count} content items found`,
46 |
47 | generated: (count: number, query: string) =>
48 | `Generated ${count} learnings\n${query}`,
49 | };
50 |
51 | // Helper function to log and stream messages
52 | async function logProgress(
53 | message: string,
54 | onProgress?: (update: string) => Promise,
55 | ) {
56 | if (onProgress) {
57 | await onProgress(message);
58 | }
59 | }
60 |
61 | // take en user query, return a list of SERP queries
62 | async function generateSerpQueries({
63 | query,
64 | numQueries = 3,
65 | learnings,
66 | onProgress,
67 | model,
68 | }: {
69 | query: string;
70 | numQueries?: number;
71 |
72 | // optional, if provided, the research will continue from the last learning
73 | learnings?: string[];
74 | onProgress?: (update: string) => Promise;
75 | model: ReturnType;
76 | }) {
77 | await logProgress(formatProgress.generating(numQueries, query), onProgress);
78 |
79 | const res = await generateObject({
80 | model,
81 | system: systemPrompt(),
82 | prompt: `Given the following prompt from the user, generate a list of SERP queries to research the topic. Return a maximum of ${numQueries} queries, but feel free to return less if the original prompt is clear. Make sure each query is unique and not similar to each other: ${query} \n\n${
83 | learnings
84 | ? `Here are some learnings from previous research, use them to generate more specific queries: ${learnings.join(
85 | '\n',
86 | )}`
87 | : ''
88 | }`,
89 | schema: z.object({
90 | queries: z
91 | .array(
92 | z.object({
93 | query: z.string().describe('The SERP query'),
94 | researchGoal: z
95 | .string()
96 | .describe(
97 | 'First talk about the goal of the research that this query is meant to accomplish, then go deeper into how to advance the research once the results are found, mention additional research directions. Be as specific as possible, especially for additional research directions.',
98 | ),
99 | }),
100 | )
101 | .describe(`List of SERP queries, max of ${numQueries}`),
102 | }),
103 | });
104 |
105 | const queriesList = res.object.queries.map(q => q.query).join(', ');
106 | await logProgress(
107 | formatProgress.created(res.object.queries.length, queriesList),
108 | onProgress,
109 | );
110 |
111 | return res.object.queries.slice(0, numQueries).map(q => q.query);
112 | }
113 |
114 | async function processSerpResult({
115 | query,
116 | result,
117 | numLearnings = 3,
118 | numFollowUpQuestions = 3,
119 | onProgress,
120 | model,
121 | }: {
122 | query: string;
123 | result: SearchResponse;
124 | numLearnings?: number;
125 | numFollowUpQuestions?: number;
126 | onProgress?: (update: string) => Promise;
127 | model: ReturnType;
128 | }) {
129 | const contents = compact(result.data.map(item => item.markdown)).map(
130 | content => trimPrompt(content, 25_000),
131 | );
132 |
133 | await logProgress(formatProgress.ran(query, contents.length), onProgress);
134 |
135 | const res = await generateObject({
136 | model,
137 | abortSignal: AbortSignal.timeout(60_000),
138 | system: systemPrompt(),
139 | prompt: `Given the following contents from a SERP search for the query ${query} , generate a list of learnings from the contents. Return a maximum of ${numLearnings} learnings, but feel free to return less if the contents are clear. Make sure each learning is unique and not similar to each other. The learnings should be concise and to the point, as detailed and information dense as possible. Make sure to include any entities like people, places, companies, products, things, etc in the learnings, as well as any exact metrics, numbers, or dates. The learnings will be used to research the topic further.\n\n${contents
140 | .map(content => `\n${content}\n `)
141 | .join('\n')} `,
142 | schema: z.object({
143 | learnings: z
144 | .array(z.string())
145 | .describe(`List of learnings, max of ${numLearnings}`),
146 | followUpQuestions: z
147 | .array(z.string())
148 | .describe(
149 | `List of follow-up questions to research the topic further, max of ${numFollowUpQuestions}`,
150 | ),
151 | }),
152 | });
153 |
154 | await logProgress(
155 | formatProgress.generated(res.object.learnings.length, query),
156 | onProgress,
157 | );
158 |
159 | return res.object;
160 | }
161 |
162 | export async function writeFinalReport({
163 | prompt,
164 | learnings,
165 | visitedUrls,
166 | model,
167 | }: {
168 | prompt: string;
169 | learnings: string[];
170 | visitedUrls: string[];
171 | model: ReturnType;
172 | }) {
173 | const learningsString = trimPrompt(
174 | learnings
175 | .map(learning => `\n${learning}\n `)
176 | .join('\n'),
177 | 150_000,
178 | );
179 |
180 | const res = await generateObject({
181 | model,
182 | system: systemPrompt(),
183 | prompt: `Given the following prompt from the user, write a final report on the topic using the learnings from research and format it in proper Markdown. Use Markdown syntax (headings, lists, horizontal rules, etc.) to structure the document. Aim for a detailed report of at least 3 pages.\n\n${prompt} \n\nHere are all the learnings from previous research:\n\n\n${learningsString}\n `,
184 | schema: z.object({
185 | reportMarkdown: z
186 | .string()
187 | .describe('Final report on the topic in Markdown'),
188 | }),
189 | });
190 |
191 | // Append the visited URLs as a markdown formatted Sources section
192 | const urlsSection = `\n\n## Sources\n\n${visitedUrls
193 | .map(url => `- ${url}`)
194 | .join('\n')}`;
195 |
196 | // Prepend a primary markdown heading to make sure the UI renders it as markdown
197 | return `# Research Report\n\n${res.object.reportMarkdown}${urlsSection}`;
198 | }
199 |
200 | export async function deepResearch({
201 | query,
202 | breadth = 3,
203 | depth = 2,
204 | learnings = [],
205 | visitedUrls = [],
206 | onProgress,
207 | model,
208 | firecrawlKey,
209 | }: DeepResearchOptions): Promise {
210 | const firecrawl = getFirecrawl(firecrawlKey);
211 | const results: ResearchResult[] = [];
212 |
213 | // Generate SERP queries
214 | await logProgress(formatProgress.generating(breadth, query), onProgress);
215 |
216 | const serpQueries = await generateSerpQueries({
217 | query,
218 | learnings,
219 | numQueries: breadth,
220 | onProgress,
221 | model,
222 | });
223 |
224 | await logProgress(
225 | formatProgress.created(serpQueries.length, serpQueries.join(', ')),
226 | onProgress,
227 | );
228 |
229 | // Process each SERP query
230 | for (const serpQuery of serpQueries) {
231 | try {
232 | await logProgress(formatProgress.researching(serpQuery), onProgress);
233 |
234 | const searchResults = await firecrawl.search(serpQuery, {
235 | timeout: 15000,
236 | limit: 5,
237 | scrapeOptions: { formats: ['markdown'] },
238 | });
239 |
240 | await logProgress(
241 | formatProgress.found(searchResults.data.length, serpQuery),
242 | onProgress,
243 | );
244 |
245 | if (searchResults.data.length > 0) {
246 | await logProgress(
247 | formatProgress.ran(serpQuery, searchResults.data.length),
248 | onProgress,
249 | );
250 |
251 | const newLearnings = await processSerpResult({
252 | query: serpQuery,
253 | result: searchResults,
254 | numLearnings: Math.ceil(breadth / 2),
255 | numFollowUpQuestions: Math.ceil(breadth / 2),
256 | onProgress,
257 | model,
258 | });
259 |
260 | await logProgress(
261 | formatProgress.generated(newLearnings.learnings.length, serpQuery),
262 | onProgress,
263 | );
264 |
265 | results.push({
266 | learnings: newLearnings.learnings,
267 | visitedUrls: searchResults.data
268 | .map(r => r.url)
269 | .filter((url): url is string => url != null),
270 | });
271 | }
272 | } catch (e) {
273 | console.error(`Error running query: ${serpQuery}: `, e);
274 | await logProgress(`Error running "${serpQuery}": ${e}`, onProgress);
275 | results.push({
276 | learnings: [],
277 | visitedUrls: [],
278 | });
279 | }
280 | }
281 |
282 | return {
283 | learnings: Array.from(new Set(results.flatMap(r => r.learnings))),
284 | visitedUrls: Array.from(new Set(results.flatMap(r => r.visitedUrls))),
285 | };
286 | }
287 |
--------------------------------------------------------------------------------
/lib/deep-research/feedback.ts:
--------------------------------------------------------------------------------
1 | import { type AIModel } from "./ai/providers";
2 |
3 | export async function generateFeedback({
4 | query,
5 | numQuestions = 3,
6 | modelId = "o3-mini",
7 | apiKey,
8 | }: {
9 | query: string;
10 | numQuestions?: number;
11 | modelId?: AIModel;
12 | apiKey?: string;
13 | }) {
14 | try {
15 | console.log("\n📝 [FEEDBACK] Preparing prompts...");
16 | const systemPrompt =
17 | `You are a helpful AI assistant that generates insightful follow-up questions based on a given query or statement. Your questions should:
18 | - Be relevant to the original query
19 | - Encourage deeper thinking
20 | - Be clear and concise
21 | - Avoid yes/no questions
22 | - Each be unique and explore different aspects`;
23 |
24 | const userPrompt =
25 | `Generate ${numQuestions} insightful follow-up questions for this query: "${query}"`;
26 |
27 | const response = await fetch("https://api.openai.com/v1/chat/completions", {
28 | method: "POST",
29 | headers: {
30 | "Content-Type": "application/json",
31 | "Authorization": `Bearer ${apiKey}`,
32 | },
33 | body: JSON.stringify({
34 | model: "gpt-3.5-turbo",
35 | messages: [
36 | { role: "system", content: systemPrompt },
37 | { role: "user", content: userPrompt },
38 | ],
39 | temperature: 0.7,
40 | }),
41 | });
42 |
43 | if (!response.ok) {
44 | const error = await response.json();
45 | console.error("\n❌ [FEEDBACK] API Error:", error);
46 | throw new Error(`OpenAI API error: ${JSON.stringify(error)}`);
47 | }
48 |
49 | const data = await response.json();
50 |
51 | const content = data.choices?.[0]?.message?.content;
52 |
53 | if (!content) {
54 | console.error("\n❌ [FEEDBACK] No content in response");
55 | throw new Error("No content received from OpenAI");
56 | }
57 |
58 | console.log("\n✂️ [FEEDBACK] Processing questions...");
59 | // Split the content into individual questions and clean them up
60 | const questions = content
61 | .split(/\d+\.\s+/)
62 | .filter(Boolean)
63 | .map((q: string) => q.trim())
64 | .slice(0, numQuestions);
65 |
66 | return questions;
67 | } catch (error) {
68 | console.error("\n💥 [FEEDBACK] Function Error:", error);
69 | throw error;
70 | }
71 | }
72 |
--------------------------------------------------------------------------------
/lib/deep-research/index.ts:
--------------------------------------------------------------------------------
1 | export { deepResearch } from './deep-research';
2 | export { generateFeedback } from './feedback';
3 | export { writeFinalReport } from './deep-research';
4 | export { systemPrompt } from './prompt';
5 |
--------------------------------------------------------------------------------
/lib/deep-research/prompt.ts:
--------------------------------------------------------------------------------
1 | export const systemPrompt = () => {
2 | const now = new Date().toISOString();
3 | return `You are an expert researcher. Today is ${now}. Follow these instructions when responding:
4 | - You may be asked to research subjects that is after your knowledge cutoff, assume the user is right when presented with news.
5 | - The user is a highly experienced analyst, no need to simplify it, be as detailed as possible and make sure your response is correct.
6 | - Be highly organized.
7 | - Suggest solutions that I didn't think about.
8 | - Be proactive and anticipate my needs.
9 | - Treat me as an expert in all subject matter.
10 | - Mistakes erode my trust, so be accurate and thorough.
11 | - Provide detailed explanations, I'm comfortable with lots of detail.
12 | - Value good arguments over authorities, the source is irrelevant.
13 | - Consider new technologies and contrarian ideas, not just the conventional wisdom.
14 | - You may use high levels of speculation or prediction, just flag it for me.`;
15 | };
16 |
--------------------------------------------------------------------------------
/lib/hooks/use-scroll-to-bottom.ts:
--------------------------------------------------------------------------------
1 | import { useEffect, useRef, RefObject } from 'react';
2 |
3 | export function useScrollToBottom(): [
4 | RefObject,
5 | RefObject,
6 | ] {
7 | const containerRef = useRef(null);
8 | const endRef = useRef(null);
9 |
10 | useEffect(() => {
11 | const container = containerRef.current;
12 | const end = endRef.current;
13 |
14 | if (container && end) {
15 | const observer = new MutationObserver(() => {
16 | end.scrollIntoView({ behavior: 'instant', block: 'end' });
17 | });
18 |
19 | observer.observe(container, {
20 | childList: true,
21 | subtree: true,
22 | attributes: true,
23 | characterData: true,
24 | });
25 |
26 | return () => observer.disconnect();
27 | }
28 | }, []);
29 |
30 | return [containerRef, endRef];
31 | }
32 |
--------------------------------------------------------------------------------
/lib/utils.ts:
--------------------------------------------------------------------------------
1 | import { type ClassValue, clsx } from "clsx";
2 | import { twMerge } from "tailwind-merge";
3 |
4 | export function cn(...inputs: ClassValue[]) {
5 | return twMerge(clsx(inputs));
6 | }
7 |
8 | export const isMobile = () => {
9 | if (typeof window === "undefined") return false;
10 | const width = window.innerWidth;
11 | return width <= 1024;
12 | };
13 |
14 | export function getCurrentFormattedDate(): string {
15 | const currentDate = new Date();
16 | const options: Intl.DateTimeFormatOptions = {
17 | year: "numeric",
18 | month: "long",
19 | day: "numeric",
20 | hour: "numeric",
21 | minute: "numeric",
22 | hour12: true,
23 | };
24 | return new Intl.DateTimeFormat("en-US", options).format(currentDate);
25 | }
26 |
27 | export function formatTimestamp(timestamp: number): string {
28 | const date = new Date(timestamp);
29 | const dateString = date.toLocaleDateString("en-US", {
30 | year: "numeric",
31 | month: "long",
32 | day: "numeric",
33 | });
34 | const timeString = date.toLocaleTimeString("en-US", {
35 | hour: "numeric",
36 | minute: "2-digit",
37 | hour12: true,
38 | });
39 |
40 | return `${dateString} at ${timeString}`;
41 | }
42 |
--------------------------------------------------------------------------------
/middleware.ts:
--------------------------------------------------------------------------------
1 | import { type NextRequest } from 'next/server';
2 |
// Intentionally a no-op: requests matched by `config.matcher` pass through
// unchanged. Kept as a hook point for future auth/redirect logic.
export async function middleware(request: NextRequest) {}
4 |
// Next.js middleware matcher configuration.
export const config = {
  matcher: [
    /*
     * Match all request paths except for the ones starting with:
     * - _next/static (static files)
     * - _next/image (image optimization files)
     * - favicon.ico (favicon file)
     * Feel free to modify this pattern to include more paths.
     */
    // Also skips common static image extensions (svg/png/jpg/jpeg/gif/webp).
    '/((?!_next/static|_next/image|favicon.ico|.*\\.(?:svg|png|jpg|jpeg|gif|webp)$).*)',
  ],
};
17 |
--------------------------------------------------------------------------------
/next-env.d.ts:
--------------------------------------------------------------------------------
/// <reference types="next" />
/// <reference types="next/image-types/global" />
3 |
4 | // NOTE: This file should not be edited
5 | // see https://nextjs.org/docs/app/api-reference/config/typescript for more information.
6 |
--------------------------------------------------------------------------------
/next.config.ts:
--------------------------------------------------------------------------------
1 | const nextConfig = {
2 | async rewrites() {
3 | return [
4 | {
5 | source: "/ingest/static/:path*",
6 | destination: "https://us-assets.i.posthog.com/static/:path*",
7 | },
8 | {
9 | source: "/ingest/:path*",
10 | destination: "https://us.i.posthog.com/:path*",
11 | },
12 | {
13 | source: "/ingest/decide",
14 | destination: "https://us.i.posthog.com/decide",
15 | },
16 | {
17 | source: "/api/feedback",
18 | destination: `${process.env.SUPABASE_FUNCTION_URL}/feedback`,
19 | },
20 | {
21 | source: "/api/keys",
22 | destination: `${process.env.SUPABASE_FUNCTION_URL}/keys`,
23 | },
24 | // until Supabase Edge Functions doesn't do `CPU Time exceeded`
25 | // {
26 | // source: "/api/research",
27 | // destination: `${SUPABASE_FUNCTION_URL}/research`,
28 | // },
29 | ];
30 | },
31 | };
32 |
33 | export default nextConfig;
34 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "supa-deep-research",
3 | "private": true,
4 | "scripts": {
5 | "dev": "next dev --turbopack",
6 | "build": "next build",
7 | "start": "next start"
8 | },
9 | "dependencies": {
10 | "@ai-sdk/openai": "^1.1.9",
11 | "@edge-runtime/primitives": "^6.0.0",
12 | "@mendable/firecrawl-js": "^1.16.0",
13 | "@next/third-parties": "^15.1.7",
14 | "@opennextjs/aws": "^3.4.2",
15 | "@opennextjs/cloudflare": "^0.4.8",
16 | "@radix-ui/react-dialog": "^1.1.6",
17 | "@radix-ui/react-select": "^2.1.6",
18 | "@radix-ui/react-slider": "^1.2.3",
19 | "@radix-ui/react-slot": "^1.1.2",
20 | "@radix-ui/react-tooltip": "^1.1.8",
21 | "ai": "^4.1.17",
22 | "autoprefixer": "10.4.17",
23 | "class-variance-authority": "^0.7.0",
24 | "classnames": "^2.5.1",
25 | "framer-motion": "^11.18.1",
26 | "geist": "^1.3.1",
27 | "js-tiktoken": "^1.0.17",
28 | "lodash-es": "^4.17.21",
29 | "lucide-react": "^0.378.0",
30 | "next": "15.1.7",
31 | "next-themes": "^0.4.4",
32 | "p-limit": "^6.2.0",
33 | "postcss": "8.4.33",
34 | "posthog-js": "^1.217.2",
35 | "posthog-node": "^4.5.0",
36 | "react": "19.0.0",
37 | "react-dom": "19.0.0",
38 | "react-markdown": "^9.0.1",
39 | "redaxios": "^0.5.1",
40 | "remark-gfm": "^4.0.0",
41 | "tailwindcss": "3.4.1",
42 | "tailwindcss-animate": "^1.0.7",
43 | "typescript": "5.3.3",
44 | "usehooks-ts": "^3.1.0",
45 | "zod": "^3.24.1"
46 | },
47 | "devDependencies": {
48 | "@tailwindcss/typography": "^0.5.12",
49 | "@types/lodash-es": "^4.17.12",
50 | "@types/node": "20.11.5",
51 | "@types/react": "19.0.8",
52 | "@types/react-dom": "19.0.3",
53 | "clsx": "^2.1.1",
54 | "tailwind-merge": "^2.3.0"
55 | },
56 | "overrides": {
57 | "@types/react": "19.0.8",
58 | "@types/react-dom": "19.0.3"
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/postcss.config.js:
--------------------------------------------------------------------------------
// PostCSS pipeline: Tailwind runs first (generates utility CSS), then
// Autoprefixer adds vendor prefixes to the generated output. Order matters.
module.exports = {
  plugins: {
    tailwindcss: {},
    autoprefixer: {},
  },
};
7 |
--------------------------------------------------------------------------------
/public/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/supavec/supa-deep-research/8a51cdf82e0e04cf51cfc0c28854a2701ee6a456/public/logo.png
--------------------------------------------------------------------------------
/public/providers/openai.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/supavec/supa-deep-research/8a51cdf82e0e04cf51cfc0c28854a2701ee6a456/public/providers/openai.webp
--------------------------------------------------------------------------------
/supabase/.gitignore:
--------------------------------------------------------------------------------
1 | # Supabase
2 | .branches
3 | .temp
4 | .env
5 |
--------------------------------------------------------------------------------
/supabase/config.toml:
--------------------------------------------------------------------------------
1 | # A string used to distinguish different Supabase projects on the same host. Defaults to the
2 | # working directory name when running `supabase init`.
3 | project_id = "open-deep-research"
4 |
5 | [api]
6 | enabled = true
7 | # Port to use for the API URL.
8 | port = 54321
9 | # Schemas to expose in your API. Tables, views and stored procedures in this schema will get API
10 | # endpoints. `public` is always included.
11 | schemas = ["public", "graphql_public"]
12 | # Extra schemas to add to the search_path of every request. `public` is always included.
13 | extra_search_path = ["public", "extensions"]
14 | # The maximum number of rows returns from a view, table, or stored procedure. Limits payload size
15 | # for accidental or malicious requests.
16 | max_rows = 1000
17 |
18 | [db]
19 | # Port to use for the local database URL.
20 | port = 54322
21 | # Port used by db diff command to initialize the shadow database.
22 | shadow_port = 54320
23 | # The database major version to use. This has to be the same as your remote database's. Run `SHOW
24 | # server_version;` on the remote database to check.
25 | major_version = 15
26 |
27 | [db.pooler]
28 | enabled = false
29 | # Port to use for the local connection pooler.
30 | port = 54329
31 | # Specifies when a server connection can be reused by other clients.
32 | # Configure one of the supported pooler modes: `transaction`, `session`.
33 | pool_mode = "transaction"
34 | # How many server connections to allow per user/database pair.
35 | default_pool_size = 20
36 | # Maximum number of client connections allowed.
37 | max_client_conn = 100
38 |
39 | [realtime]
40 | enabled = true
41 | # Bind realtime via either IPv4 or IPv6. (default: IPv4)
42 | # ip_version = "IPv6"
43 | # The maximum length in bytes of HTTP request headers. (default: 4096)
44 | # max_header_length = 4096
45 |
46 | [studio]
47 | enabled = true
48 | # Port to use for Supabase Studio.
49 | port = 54323
50 | # External URL of the API server that frontend connects to.
51 | api_url = "http://127.0.0.1"
52 | # OpenAI API Key to use for Supabase AI in the Supabase Studio.
53 | openai_api_key = "env(OPENAI_API_KEY)"
54 |
55 | # Email testing server. Emails sent with the local dev setup are not actually sent - rather, they
56 | # are monitored, and you can view the emails that would have been sent from the web interface.
57 | [inbucket]
58 | enabled = true
59 | # Port to use for the email testing server web interface.
60 | port = 54324
61 | # Uncomment to expose additional ports for testing user applications that send emails.
62 | # smtp_port = 54325
63 | # pop3_port = 54326
64 |
65 | [storage]
66 | enabled = true
67 | # The maximum file size allowed (e.g. "5MB", "500KB").
68 | file_size_limit = "50MiB"
69 |
70 | [storage.image_transformation]
71 | enabled = true
72 |
73 | [auth]
74 | enabled = true
75 | # The base URL of your website. Used as an allow-list for redirects and for constructing URLs used
76 | # in emails.
77 | site_url = "http://127.0.0.1:3000"
78 | # A list of *exact* URLs that auth providers are permitted to redirect to post authentication.
79 | additional_redirect_urls = ["https://127.0.0.1:3000"]
80 | # How long tokens are valid for, in seconds. Defaults to 3600 (1 hour), maximum 604,800 (1 week).
81 | jwt_expiry = 3600
82 | # If disabled, the refresh token will never expire.
83 | enable_refresh_token_rotation = true
84 | # Allows refresh tokens to be reused after expiry, up to the specified interval in seconds.
85 | # Requires enable_refresh_token_rotation = true.
86 | refresh_token_reuse_interval = 10
87 | # Allow/disallow new user signups to your project.
88 | enable_signup = true
89 | # Allow/disallow anonymous sign-ins to your project.
90 | enable_anonymous_sign_ins = false
91 | # Allow/disallow testing manual linking of accounts
92 | enable_manual_linking = false
93 |
94 | [auth.email]
95 | # Allow/disallow new user signups via email to your project.
96 | enable_signup = true
97 | # If enabled, a user will be required to confirm any email change on both the old, and new email
98 | # addresses. If disabled, only the new email is required to confirm.
99 | double_confirm_changes = true
100 | # If enabled, users need to confirm their email address before signing in.
101 | enable_confirmations = false
102 | # Controls the minimum amount of time that must pass before sending another signup confirmation or password reset email.
103 | max_frequency = "1s"
104 |
105 | # Uncomment to customize email template
106 | # [auth.email.template.invite]
107 | # subject = "You have been invited"
108 | # content_path = "./supabase/templates/invite.html"
109 |
110 | [auth.sms]
111 | # Allow/disallow new user signups via SMS to your project.
112 | enable_signup = true
113 | # If enabled, users need to confirm their phone number before signing in.
114 | enable_confirmations = false
115 | # Template for sending OTP to users
116 | template = "Your code is {{ .Code }} ."
117 | # Controls the minimum amount of time that must pass before sending another sms otp.
118 | max_frequency = "5s"
119 |
120 | # Use pre-defined map of phone number to OTP for testing.
121 | # [auth.sms.test_otp]
122 | # 4152127777 = "123456"
123 |
124 | # This hook runs before a token is issued and allows you to add additional claims based on the authentication method used.
125 | # [auth.hook.custom_access_token]
126 | # enabled = true
# uri = "pg-functions://<database>/<schema>/<hook_name>"
128 |
129 | # Configure one of the supported SMS providers: `twilio`, `twilio_verify`, `messagebird`, `textlocal`, `vonage`.
130 | [auth.sms.twilio]
131 | enabled = false
132 | account_sid = ""
133 | message_service_sid = ""
134 | # DO NOT commit your Twilio auth token to git. Use environment variable substitution instead:
135 | auth_token = "env(SUPABASE_AUTH_SMS_TWILIO_AUTH_TOKEN)"
136 |
137 | # Use an external OAuth provider. The full list of providers are: `apple`, `azure`, `bitbucket`,
138 | # `discord`, `facebook`, `github`, `gitlab`, `google`, `keycloak`, `linkedin_oidc`, `notion`, `twitch`,
139 | # `twitter`, `slack`, `spotify`, `workos`, `zoom`.
140 | [auth.external.apple]
141 | enabled = false
142 | client_id = ""
143 | # DO NOT commit your OAuth provider secret to git. Use environment variable substitution instead:
144 | secret = "env(SUPABASE_AUTH_EXTERNAL_APPLE_SECRET)"
145 | # Overrides the default auth redirectUrl.
146 | redirect_uri = ""
147 | # Overrides the default auth provider URL. Used to support self-hosted gitlab, single-tenant Azure,
148 | # or any other third-party OIDC providers.
149 | url = ""
150 | # If enabled, the nonce check will be skipped. Required for local sign in with Google auth.
151 | skip_nonce_check = false
152 |
153 | [analytics]
154 | enabled = false
155 | port = 54327
156 | vector_port = 54328
157 | # Configure one of the supported backends: `postgres`, `bigquery`.
158 | backend = "postgres"
159 |
160 | # Experimental features may be deprecated any time
161 | [experimental]
162 | # Configures Postgres storage engine to use OrioleDB (S3)
163 | orioledb_version = ""
# Configures S3 bucket URL, eg. <bucket_name>.s3-<region>.amazonaws.com
165 | s3_host = "env(S3_HOST)"
166 | # Configures S3 bucket region, eg. us-east-1
167 | s3_region = "env(S3_REGION)"
168 | # Configures AWS_ACCESS_KEY_ID for S3 bucket
169 | s3_access_key = "env(S3_ACCESS_KEY)"
170 | # Configures AWS_SECRET_ACCESS_KEY for S3 bucket
171 | s3_secret_key = "env(S3_SECRET_KEY)"
172 |
--------------------------------------------------------------------------------
/supabase/functions/_shared/cors.ts:
--------------------------------------------------------------------------------
// Shared CORS headers for all Supabase Edge Functions. Allows any origin and
// the custom x-openai-key / x-firecrawl-key headers used to pass API keys.
export const corsHeaders = {
  "Access-Control-Allow-Origin": "*",
  "Access-Control-Allow-Headers":
    "authorization, x-client-info, apikey, content-type, x-openai-key, x-firecrawl-key",
  "Access-Control-Allow-Methods": "POST, OPTIONS",
};
7 |
--------------------------------------------------------------------------------
/supabase/functions/_shared/feedback.ts:
--------------------------------------------------------------------------------
1 | import { AIModel } from "./types.ts";
2 |
// Arguments for generateFeedback().
interface FeedbackParams {
  query: string;
  numQuestions: number;
  // NOTE(review): modelId is accepted but the request below hardcodes
  // gpt-3.5-turbo — confirm whether it should be forwarded.
  modelId: AIModel;
  apiKey?: string; // OpenAI API key, sent as the Bearer token
}
9 |
10 | export async function generateFeedback({
11 | query,
12 | numQuestions,
13 | modelId,
14 | apiKey,
15 | }: FeedbackParams): Promise {
16 | const systemPrompt =
17 | `You are a helpful AI assistant that generates insightful follow-up questions based on a given query or statement. Your questions should:
18 | - Be relevant to the original query
19 | - Encourage deeper thinking
20 | - Be clear and concise
21 | - Avoid yes/no questions
22 | - Each be unique and explore different aspects`;
23 |
24 | const userPrompt =
25 | `Generate ${numQuestions} insightful follow-up questions for this query: "${query}"`;
26 |
27 | const response = await fetch("https://api.openai.com/v1/chat/completions", {
28 | method: "POST",
29 | headers: {
30 | "Content-Type": "application/json",
31 | Authorization: `Bearer ${apiKey}`,
32 | },
33 | body: JSON.stringify({
34 | model: "gpt-3.5-turbo",
35 | messages: [
36 | { role: "system", content: systemPrompt },
37 | { role: "user", content: userPrompt },
38 | ],
39 | temperature: 0.7,
40 | }),
41 | });
42 |
43 | if (!response.ok) {
44 | const error = await response.json();
45 | throw new Error(`OpenAI API error: ${JSON.stringify(error)}`);
46 | }
47 |
48 | const data = await response.json();
49 | const content = data.choices[0]?.message?.content;
50 |
51 | if (!content) {
52 | throw new Error("No content received from OpenAI");
53 | }
54 |
55 | // Split the content into individual questions and clean them up
56 | const questions = content
57 | .split(/\d+\.\s+/)
58 | .filter(Boolean)
59 | .map((q: string) => q.trim());
60 |
61 | return questions;
62 | }
63 |
--------------------------------------------------------------------------------
/supabase/functions/_shared/types.ts:
--------------------------------------------------------------------------------
1 | export type AIModel = "o3-mini" | string;
2 |
--------------------------------------------------------------------------------
/supabase/functions/feedback/index.ts:
--------------------------------------------------------------------------------
1 | import { corsHeaders } from "../_shared/cors.ts";
2 | import { AIModel } from "../_shared/types.ts";
3 | import { getCookies } from "jsr:@std/http/cookie";
4 |
5 | import { generateFeedback } from "../_shared/feedback.ts";
6 |
// Edge Function: POST { query, numQuestions, modelId? } -> { questions }.
// Reads per-user API keys from cookies; returns 401 when key enforcement is
// enabled and keys are missing, 500 on any generation failure.
Deno.serve(async (req) => {
  // Handle CORS preflight requests
  if (req.method === "OPTIONS") {
    return new Response("ok", { headers: corsHeaders });
  }

  try {
    const { query, numQuestions, modelId = "o3-mini" } = await req.json();

    const cookies = getCookies(req.headers);

    // Get API keys from cookies
    const openaiKey = cookies["openai-key"];
    const firecrawlKey = cookies["firecrawl-key"];

    // Add API key validation
    // NOTE(review): firecrawlKey is only validated here, never used below —
    // confirm whether this endpoint really requires it.
    if (Deno.env.get("ENABLE_API_KEYS") === "true") {
      if (!openaiKey || !firecrawlKey) {
        return new Response(
          JSON.stringify({
            error: "API keys are required but not provided",
          }),
          {
            status: 401,
            headers: { ...corsHeaders, "Content-Type": "application/json" },
          },
        );
      }
    }

    console.log("\n🔍 [FEEDBACK FUNCTION] === Request Started ===");
    console.log("Query:", query);
    console.log("Model ID:", modelId);
    console.log("Number of Questions:", numQuestions);
    console.log("API Keys Present:", {
      OpenAI: openaiKey ? "✅" : "❌",
      FireCrawl: firecrawlKey ? "✅" : "❌",
    });

    try {
      const questions = await generateFeedback({
        query,
        numQuestions,
        modelId: modelId as AIModel,
        apiKey: openaiKey ?? undefined,
      });

      console.log("\n✅ [FEEDBACK FUNCTION] === Success ===");
      console.log("Generated Questions:", questions);
      console.log("Number of Questions Generated:", questions.length);

      return new Response(
        JSON.stringify({ questions }),
        {
          headers: { ...corsHeaders, "Content-Type": "application/json" },
        },
      );
    } catch (error) {
      // Logged separately so generation failures are distinguishable from
      // request-parsing failures, then rethrown to the outer handler.
      console.error("\n❌ [FEEDBACK FUNCTION] === Generation Error ===");
      console.error("Error:", error);
      throw error;
    }
  } catch (error) {
    console.error("\n💥 [FEEDBACK FUNCTION] === Function Error ===");
    console.error("Error:", error);

    return new Response(
      JSON.stringify({
        error: "Feedback generation failed",
        details: error instanceof Error ? error.message : String(error),
      }),
      {
        status: 500,
        headers: { ...corsHeaders, "Content-Type": "application/json" },
      },
    );
  }
});
--------------------------------------------------------------------------------
/supabase/functions/keys/index.ts:
--------------------------------------------------------------------------------
1 | import { corsHeaders } from "../_shared/cors.ts";
2 | import { getCookies, setCookie } from "jsr:@std/http/cookie";
3 |
4 | Deno.serve(async (req) => {
5 | // Handle CORS preflight requests
6 | if (req.method === "OPTIONS") {
7 | return new Response(null, {
8 | headers: corsHeaders,
9 | });
10 | }
11 |
12 | if (req.method === "GET") {
13 | const cookies = getCookies(req.headers);
14 |
15 | // Get API keys from cookies
16 | const openaiKey = cookies["openai-key"];
17 | const firecrawlKey = cookies["firecrawl-key"];
18 | const keysPresent = Boolean(openaiKey && firecrawlKey);
19 |
20 | return new Response(
21 | JSON.stringify({ keysPresent }),
22 | {
23 | headers: { "Content-Type": "application/json" },
24 | },
25 | );
26 | }
27 |
28 | if (req.method === "POST") {
29 | try {
30 | const { openaiKey, firecrawlKey } = await req.json();
31 |
32 | const response = new Response(JSON.stringify({ success: true }), {
33 | headers: { "Content-Type": "application/json" },
34 | });
35 |
36 | // Set cookies
37 | setCookie(response.headers, {
38 | name: "openai-key",
39 | value: openaiKey,
40 | httpOnly: true,
41 | secure: true,
42 | path: "/",
43 | sameSite: "Strict",
44 | });
45 | setCookie(response.headers, {
46 | name: "firecrawl-key",
47 | value: firecrawlKey,
48 | httpOnly: true,
49 | secure: true,
50 | path: "/",
51 | sameSite: "Strict",
52 | });
53 |
54 | return response;
55 | } catch (error) {
56 | console.error(error);
57 | return new Response(
58 | JSON.stringify({ error: "Failed to set API keys" }),
59 | {
60 | status: 500,
61 | headers: { "Content-Type": "application/json" },
62 | },
63 | );
64 | }
65 | }
66 |
67 | if (req.method === "DELETE") {
68 | try {
69 | const headers = new Headers({
70 | ...corsHeaders,
71 | "Content-Type": "application/json",
72 | });
73 |
74 | // Delete cookies with the same attributes as when setting them
75 | setCookie(headers, {
76 | name: "openai-key",
77 | value: "",
78 | httpOnly: true,
79 | secure: true,
80 | path: "/",
81 | sameSite: "Strict",
82 | maxAge: 0,
83 | expires: new Date(0),
84 | });
85 | setCookie(headers, {
86 | name: "firecrawl-key",
87 | value: "",
88 | httpOnly: true,
89 | secure: true,
90 | path: "/",
91 | sameSite: "Strict",
92 | maxAge: 0,
93 | expires: new Date(0),
94 | });
95 |
96 | return new Response(JSON.stringify({ success: true }), {
97 | headers,
98 | });
99 | } catch (error) {
100 | console.error(error);
101 | return new Response(
102 | JSON.stringify({ error: "Failed to remove API keys" }),
103 | {
104 | status: 500,
105 | headers: { ...corsHeaders, "Content-Type": "application/json" },
106 | },
107 | );
108 | }
109 | }
110 |
111 | return new Response(
112 | JSON.stringify({ error: "Method not allowed" }),
113 | {
114 | status: 405,
115 | headers: { "Content-Type": "application/json" },
116 | },
117 | );
118 | });
119 |
--------------------------------------------------------------------------------
/supabase/functions/research/deep-research/ai/providers.ts:
--------------------------------------------------------------------------------
1 | import { createOpenAI } from "npm:@ai-sdk/openai";
2 | import { getEncoding } from "npm:js-tiktoken";
3 | import process from "node:process";
4 |
5 | import { RecursiveCharacterTextSplitter } from "./text-splitter.ts";
6 |
// Model Display Information
// Static catalog of the OpenAI models this app offers. Keys double as the
// model ids sent to the API; `as const` keeps ids and names as literal types.
export const AI_MODEL_DISPLAY = {
  "gpt-4o": {
    id: "gpt-4o",
    name: "GPT-4o",
    logo: "/providers/openai.webp",
    vision: true,
  },
  "gpt-4o-mini": {
    id: "gpt-4o-mini",
    name: "GPT-4o mini",
    logo: "/providers/openai.webp",
    vision: true,
  },
  "o3-mini": {
    id: "o3-mini",
    name: "o3 mini",
    logo: "/providers/openai.webp",
    vision: false,
  },
} as const;

// Union of catalog keys: "gpt-4o" | "gpt-4o-mini" | "o3-mini".
export type AIModel = keyof typeof AI_MODEL_DISPLAY;
// Display metadata for a single catalog entry.
export type AIModelDisplayInfo = (typeof AI_MODEL_DISPLAY)[AIModel];
// Array form of the catalog, convenient for UI iteration.
export const availableModels = Object.values(AI_MODEL_DISPLAY);
32 |
33 | // Create model instances with configurations
34 | export function createModel(modelId: AIModel, apiKey?: string) {
35 | const client = createOpenAI({
36 | apiKey: apiKey || process.env.OPENAI_KEY!,
37 | });
38 |
39 | return client(modelId, {
40 | structuredOutputs: true,
41 | ...(modelId === "o3-mini" ? { reasoningEffort: "medium" } : {}),
42 | });
43 | }
44 |
45 | // Token handling
46 | const MinChunkSize = 140;
47 | const encoder = getEncoding("o200k_base");
48 |
49 | // trim prompt to maximum context size
50 | export function trimPrompt(prompt: string, contextSize = 120_000) {
51 | if (!prompt) {
52 | return "";
53 | }
54 |
55 | const length = encoder.encode(prompt).length;
56 | if (length <= contextSize) {
57 | return prompt;
58 | }
59 |
60 | const overflowTokens = length - contextSize;
61 | // on average it's 3 characters per token, so multiply by 3 to get a rough estimate of the number of characters
62 | const chunkSize = prompt.length - overflowTokens * 3;
63 | if (chunkSize < MinChunkSize) {
64 | return prompt.slice(0, MinChunkSize);
65 | }
66 |
67 | const splitter = new RecursiveCharacterTextSplitter({
68 | chunkSize,
69 | chunkOverlap: 0,
70 | });
71 | const trimmedPrompt = splitter.splitText(prompt)[0] ?? "";
72 |
73 | // last catch, there's a chance that the trimmed prompt is same length as the original prompt, due to how tokens are split & innerworkings of the splitter, handle this case by just doing a hard cut
74 | if (trimmedPrompt.length === prompt.length) {
75 | return trimPrompt(prompt.slice(0, chunkSize), contextSize);
76 | }
77 |
78 | // recursively trim until the prompt is within the context size
79 | return trimPrompt(trimmedPrompt, contextSize);
80 | }
81 |
--------------------------------------------------------------------------------
/supabase/functions/research/deep-research/ai/text-splitter.test.ts:
--------------------------------------------------------------------------------
1 | import assert from "node:assert";
2 | import { describe, it } from "node:test";
3 |
4 | import { RecursiveCharacterTextSplitter } from "./text-splitter.ts";
5 |
6 | describe("RecursiveCharacterTextSplitter", () => {
7 | it("Should correctly split text by separators", () => {
8 | const splitter = new RecursiveCharacterTextSplitter({
9 | chunkSize: 50,
10 | chunkOverlap: 10,
11 | });
12 | assert.deepEqual(
13 | splitter.splitText(
14 | "Hello world, this is a test of the recursive text splitter.",
15 | ),
16 | ["Hello world", "this is a test of the recursive text splitter"],
17 | );
18 |
19 | splitter.chunkSize = 100;
20 | assert.deepEqual(
21 | splitter.splitText(
22 | "Hello world, this is a test of the recursive text splitter. If I have a period, it should split along the period.",
23 | ),
24 | [
25 | "Hello world, this is a test of the recursive text splitter",
26 | "If I have a period, it should split along the period.",
27 | ],
28 | );
29 |
30 | splitter.chunkSize = 110;
31 | assert.deepEqual(
32 | splitter.splitText(
33 | "Hello world, this is a test of the recursive text splitter. If I have a period, it should split along the period.\nOr, if there is a new line, it should prioritize splitting on new lines instead.",
34 | ),
35 | [
36 | "Hello world, this is a test of the recursive text splitter",
37 | "If I have a period, it should split along the period.",
38 | "Or, if there is a new line, it should prioritize splitting on new lines instead.",
39 | ],
40 | );
41 | });
42 |
43 | it("Should handle empty string", () => {
44 | const splitter = new RecursiveCharacterTextSplitter({
45 | chunkSize: 50,
46 | chunkOverlap: 10,
47 | });
48 | assert.deepEqual(splitter.splitText(""), []);
49 | });
50 | });
51 |
--------------------------------------------------------------------------------
/supabase/functions/research/deep-research/ai/text-splitter.ts:
--------------------------------------------------------------------------------
1 | interface TextSplitterParams {
2 | chunkSize: number;
3 |
4 | chunkOverlap: number;
5 | }
6 |
7 | abstract class TextSplitter implements TextSplitterParams {
8 | chunkSize = 1000;
9 | chunkOverlap = 200;
10 |
11 | constructor(fields?: Partial) {
12 | this.chunkSize = fields?.chunkSize ?? this.chunkSize;
13 | this.chunkOverlap = fields?.chunkOverlap ?? this.chunkOverlap;
14 | if (this.chunkOverlap >= this.chunkSize) {
15 | throw new Error('Cannot have chunkOverlap >= chunkSize');
16 | }
17 | }
18 |
19 | abstract splitText(text: string): string[];
20 |
21 | createDocuments(texts: string[]): string[] {
22 | const documents: string[] = [];
23 | for (let i = 0; i < texts.length; i += 1) {
24 | const text = texts[i];
25 | for (const chunk of this.splitText(text!)) {
26 | documents.push(chunk);
27 | }
28 | }
29 | return documents;
30 | }
31 |
32 | splitDocuments(documents: string[]): string[] {
33 | return this.createDocuments(documents);
34 | }
35 |
36 | private joinDocs(docs: string[], separator: string): string | null {
37 | const text = docs.join(separator).trim();
38 | return text === '' ? null : text;
39 | }
40 |
41 | mergeSplits(splits: string[], separator: string): string[] {
42 | const docs: string[] = [];
43 | const currentDoc: string[] = [];
44 | let total = 0;
45 | for (const d of splits) {
46 | const _len = d.length;
47 | if (total + _len >= this.chunkSize) {
48 | if (total > this.chunkSize) {
49 | console.warn(
50 | `Created a chunk of size ${total}, +
51 | which is longer than the specified ${this.chunkSize}`,
52 | );
53 | }
54 | if (currentDoc.length > 0) {
55 | const doc = this.joinDocs(currentDoc, separator);
56 | if (doc !== null) {
57 | docs.push(doc);
58 | }
59 | // Keep on popping if:
60 | // - we have a larger chunk than in the chunk overlap
61 | // - or if we still have any chunks and the length is long
62 | while (
63 | total > this.chunkOverlap ||
64 | (total + _len > this.chunkSize && total > 0)
65 | ) {
66 | total -= currentDoc[0]!.length;
67 | currentDoc.shift();
68 | }
69 | }
70 | }
71 | currentDoc.push(d);
72 | total += _len;
73 | }
74 | const doc = this.joinDocs(currentDoc, separator);
75 | if (doc !== null) {
76 | docs.push(doc);
77 | }
78 | return docs;
79 | }
80 | }
81 |
82 | export interface RecursiveCharacterTextSplitterParams
83 | extends TextSplitterParams {
84 | separators: string[];
85 | }
86 |
87 | export class RecursiveCharacterTextSplitter
88 | extends TextSplitter
89 | implements RecursiveCharacterTextSplitterParams
90 | {
91 | separators: string[] = ['\n\n', '\n', '.', ',', '>', '<', ' ', ''];
92 |
93 | constructor(fields?: Partial) {
94 | super(fields);
95 | this.separators = fields?.separators ?? this.separators;
96 | }
97 |
98 | splitText(text: string): string[] {
99 | const finalChunks: string[] = [];
100 |
101 | // Get appropriate separator to use
102 | let separator: string = this.separators[this.separators.length - 1]!;
103 | for (const s of this.separators) {
104 | if (s === '') {
105 | separator = s;
106 | break;
107 | }
108 | if (text.includes(s)) {
109 | separator = s;
110 | break;
111 | }
112 | }
113 |
114 | // Now that we have the separator, split the text
115 | let splits: string[];
116 | if (separator) {
117 | splits = text.split(separator);
118 | } else {
119 | splits = text.split('');
120 | }
121 |
122 | // Now go merging things, recursively splitting longer texts.
123 | let goodSplits: string[] = [];
124 | for (const s of splits) {
125 | if (s.length < this.chunkSize) {
126 | goodSplits.push(s);
127 | } else {
128 | if (goodSplits.length) {
129 | const mergedText = this.mergeSplits(goodSplits, separator);
130 | finalChunks.push(...mergedText);
131 | goodSplits = [];
132 | }
133 | const otherInfo = this.splitText(s);
134 | finalChunks.push(...otherInfo);
135 | }
136 | }
137 | if (goodSplits.length) {
138 | const mergedText = this.mergeSplits(goodSplits, separator);
139 | finalChunks.push(...mergedText);
140 | }
141 | return finalChunks;
142 | }
143 | }
144 |
--------------------------------------------------------------------------------
/supabase/functions/research/deep-research/deep-research.ts:
--------------------------------------------------------------------------------
1 | import FirecrawlApp, { SearchResponse } from "npm:@mendable/firecrawl-js";
2 | import { generateObject } from "npm:ai";
3 | import { compact } from "npm:lodash-es";
4 | import { z } from "npm:zod";
5 | import process from "node:process";
6 |
7 | import { createModel, trimPrompt } from "./ai/providers.ts";
8 | import { systemPrompt } from "./prompt.ts";
9 |
10 | type ResearchResult = {
11 | learnings: string[];
12 | visitedUrls: string[];
13 | };
14 |
15 | type DeepResearchOptions = {
16 | query: string;
17 | breadth?: number;
18 | depth?: number;
19 | learnings?: string[];
20 | visitedUrls?: string[];
21 | onProgress?: (update: string) => Promise;
22 | model: ReturnType;
23 | firecrawlKey?: string;
24 | };
25 |
26 | // Update the firecrawl initialization to use the provided key
27 | const getFirecrawl = (apiKey?: string) =>
28 | new FirecrawlApp({
29 | apiKey: apiKey ?? process.env.FIRECRAWL_KEY ?? "",
30 | apiUrl: process.env.FIRECRAWL_BASE_URL,
31 | });
32 |
33 | // Helper function to format progress messages consistently
34 | const formatProgress = {
35 | generating: (count: number, query: string) =>
36 | `Generating up to ${count} SERP queries\n${query}`,
37 |
38 | created: (count: number, queries: string) =>
39 | `Created ${count} SERP queries\n${queries}`,
40 |
41 | researching: (query: string) => `Researching\n${query}`,
42 |
43 | found: (count: number, query: string) => `Found ${count} results\n${query}`,
44 |
45 | ran: (query: string, count: number) =>
46 | `Ran "${query}"\n${count} content items found`,
47 |
48 | generated: (count: number, query: string) =>
49 | `Generated ${count} learnings\n${query}`,
50 | };
51 |
52 | // Helper function to log and stream messages
53 | async function logProgress(
54 | message: string,
55 | onProgress?: (update: string) => Promise,
56 | ) {
57 | if (onProgress) {
58 | await onProgress(message);
59 | }
60 | }
61 |
62 | // take en user query, return a list of SERP queries
63 | async function generateSerpQueries({
64 | query,
65 | numQueries = 3,
66 | learnings,
67 | onProgress,
68 | model,
69 | }: {
70 | query: string;
71 | numQueries?: number;
72 |
73 | // optional, if provided, the research will continue from the last learning
74 | learnings?: string[];
75 | onProgress?: (update: string) => Promise;
76 | model: ReturnType;
77 | }) {
78 | await logProgress(formatProgress.generating(numQueries, query), onProgress);
79 |
80 | const res = await generateObject({
81 | model,
82 | system: systemPrompt(),
83 | prompt:
84 | `Given the following prompt from the user, generate a list of SERP queries to research the topic. Return a maximum of ${numQueries} queries, but feel free to return less if the original prompt is clear. Make sure each query is unique and not similar to each other: ${query} \n\n${
85 | learnings
86 | ? `Here are some learnings from previous research, use them to generate more specific queries: ${
87 | learnings.join(
88 | "\n",
89 | )
90 | }`
91 | : ""
92 | }`,
93 | schema: z.object({
94 | queries: z
95 | .array(
96 | z.object({
97 | query: z.string().describe("The SERP query"),
98 | researchGoal: z
99 | .string()
100 | .describe(
101 | "First talk about the goal of the research that this query is meant to accomplish, then go deeper into how to advance the research once the results are found, mention additional research directions. Be as specific as possible, especially for additional research directions.",
102 | ),
103 | }),
104 | )
105 | .describe(`List of SERP queries, max of ${numQueries}`),
106 | }),
107 | });
108 |
109 | const queriesList = res.object.queries.map((q) => q.query).join(", ");
110 | await logProgress(
111 | formatProgress.created(res.object.queries.length, queriesList),
112 | onProgress,
113 | );
114 |
115 | return res.object.queries.slice(0, numQueries).map((q) => q.query);
116 | }
117 |
118 | async function processSerpResult({
119 | query,
120 | result,
121 | numLearnings = 3,
122 | numFollowUpQuestions = 3,
123 | onProgress,
124 | model,
125 | }: {
126 | query: string;
127 | result: SearchResponse;
128 | numLearnings?: number;
129 | numFollowUpQuestions?: number;
130 | onProgress?: (update: string) => Promise;
131 | model: ReturnType;
132 | }) {
133 | const contents = compact(result.data.map((item) => item.markdown)).map(
134 | (content) => trimPrompt(content, 25_000),
135 | );
136 |
137 | await logProgress(formatProgress.ran(query, contents.length), onProgress);
138 |
139 | const res = await generateObject({
140 | model,
141 | abortSignal: AbortSignal.timeout(60_000),
142 | system: systemPrompt(),
143 | prompt:
144 | `Given the following contents from a SERP search for the query ${query} , generate a list of learnings from the contents. Return a maximum of ${numLearnings} learnings, but feel free to return less if the contents are clear. Make sure each learning is unique and not similar to each other. The learnings should be concise and to the point, as detailed and information dense as possible. Make sure to include any entities like people, places, companies, products, things, etc in the learnings, as well as any exact metrics, numbers, or dates. The learnings will be used to research the topic further.\n\n${
145 | contents
146 | .map((content) => `\n${content}\n `)
147 | .join("\n")
148 | } `,
149 | schema: z.object({
150 | learnings: z
151 | .array(z.string())
152 | .describe(`List of learnings, max of ${numLearnings}`),
153 | followUpQuestions: z
154 | .array(z.string())
155 | .describe(
156 | `List of follow-up questions to research the topic further, max of ${numFollowUpQuestions}`,
157 | ),
158 | }),
159 | });
160 |
161 | await logProgress(
162 | formatProgress.generated(res.object.learnings.length, query),
163 | onProgress,
164 | );
165 |
166 | return res.object;
167 | }
168 |
169 | export async function writeFinalReport({
170 | prompt,
171 | learnings,
172 | visitedUrls,
173 | model,
174 | }: {
175 | prompt: string;
176 | learnings: string[];
177 | visitedUrls: string[];
178 | model: ReturnType;
179 | }) {
180 | const learningsString = trimPrompt(
181 | learnings
182 | .map((learning) => `\n${learning}\n `)
183 | .join("\n"),
184 | 150_000,
185 | );
186 |
187 | const res = await generateObject({
188 | model,
189 | system: systemPrompt(),
190 | prompt:
191 | `Given the following prompt from the user, write a final report on the topic using the learnings from research and format it in proper Markdown. Use Markdown syntax (headings, lists, horizontal rules, etc.) to structure the document. Aim for a detailed report of at least 3 pages.\n\n${prompt} \n\nHere are all the learnings from previous research:\n\n\n${learningsString}\n `,
192 | schema: z.object({
193 | reportMarkdown: z
194 | .string()
195 | .describe("Final report on the topic in Markdown"),
196 | }),
197 | });
198 |
199 | // Append the visited URLs as a markdown formatted Sources section
200 | const urlsSection = `\n\n## Sources\n\n${
201 | visitedUrls
202 | .map((url) => `- ${url}`)
203 | .join("\n")
204 | }`;
205 |
206 | // Prepend a primary markdown heading to make sure the UI renders it as markdown
207 | return `# Research Report\n\n${res.object.reportMarkdown}${urlsSection}`;
208 | }
209 |
210 | export async function deepResearch({
211 | query,
212 | breadth = 3,
213 | depth = 2,
214 | learnings = [],
215 | visitedUrls = [],
216 | onProgress,
217 | model,
218 | firecrawlKey,
219 | }: DeepResearchOptions): Promise {
220 | const firecrawl = getFirecrawl(firecrawlKey);
221 | const results: ResearchResult[] = [];
222 |
223 | // Generate SERP queries
224 | await logProgress(formatProgress.generating(breadth, query), onProgress);
225 |
226 | const serpQueries = await generateSerpQueries({
227 | query,
228 | learnings,
229 | numQueries: breadth,
230 | onProgress,
231 | model,
232 | });
233 |
234 | await logProgress(
235 | formatProgress.created(serpQueries.length, serpQueries.join(", ")),
236 | onProgress,
237 | );
238 |
239 | // Process each SERP query
240 | for (const serpQuery of serpQueries) {
241 | try {
242 | await logProgress(formatProgress.researching(serpQuery), onProgress);
243 |
244 | const searchResults = await firecrawl.search(serpQuery, {
245 | timeout: 15000,
246 | limit: 5,
247 | scrapeOptions: { formats: ["markdown"] },
248 | });
249 |
250 | await logProgress(
251 | formatProgress.found(searchResults.data.length, serpQuery),
252 | onProgress,
253 | );
254 |
255 | if (searchResults.data.length > 0) {
256 | await logProgress(
257 | formatProgress.ran(serpQuery, searchResults.data.length),
258 | onProgress,
259 | );
260 |
261 | const newLearnings = await processSerpResult({
262 | query: serpQuery,
263 | result: searchResults,
264 | numLearnings: Math.ceil(breadth / 2),
265 | numFollowUpQuestions: Math.ceil(breadth / 2),
266 | onProgress,
267 | model,
268 | });
269 |
270 | await logProgress(
271 | formatProgress.generated(newLearnings.learnings.length, serpQuery),
272 | onProgress,
273 | );
274 |
275 | results.push({
276 | learnings: newLearnings.learnings,
277 | visitedUrls: searchResults.data
278 | .map((r) => r.url)
279 | .filter((url): url is string => url != null),
280 | });
281 | }
282 | } catch (e) {
283 | console.error(`Error running query: ${serpQuery}: `, e);
284 | await logProgress(`Error running "${serpQuery}": ${e}`, onProgress);
285 | results.push({
286 | learnings: [],
287 | visitedUrls: [],
288 | });
289 | }
290 | }
291 |
292 | return {
293 | learnings: Array.from(new Set(results.flatMap((r) => r.learnings))),
294 | visitedUrls: Array.from(new Set(results.flatMap((r) => r.visitedUrls))),
295 | };
296 | }
297 |
--------------------------------------------------------------------------------
/supabase/functions/research/deep-research/feedback.ts:
--------------------------------------------------------------------------------
1 | import { generateObject } from "npm:ai";
2 | import { z } from "npm:zod";
3 |
4 | import { type AIModel, createModel } from "./ai/providers.ts";
5 | import { systemPrompt } from "./prompt.ts";
6 |
7 | export async function generateFeedback({
8 | query,
9 | numQuestions = 3,
10 | modelId = "o3-mini",
11 | apiKey,
12 | }: {
13 | query: string;
14 | numQuestions?: number;
15 | modelId?: AIModel;
16 | apiKey?: string;
17 | }) {
18 | const model = createModel(modelId, apiKey);
19 |
20 | const userFeedback = await generateObject({
21 | model,
22 | system: systemPrompt(),
23 | prompt:
24 | `Given the following query from the user, ask some follow up questions to clarify the research direction. Return a maximum of ${numQuestions} questions, but feel free to return less if the original query is clear: ${query} `,
25 | schema: z.object({
26 | questions: z
27 | .array(z.string())
28 | .describe(
29 | `Follow up questions to clarify the research direction, max of ${numQuestions}`,
30 | ),
31 | }),
32 | });
33 |
34 | return userFeedback.object.questions.slice(0, numQuestions);
35 | }
36 |
--------------------------------------------------------------------------------
/supabase/functions/research/deep-research/index.ts:
--------------------------------------------------------------------------------
1 | export { deepResearch } from "./deep-research.ts";
2 | export { generateFeedback } from "./feedback.ts";
3 | export { writeFinalReport } from "./deep-research.ts";
4 | export { systemPrompt } from "./prompt.ts";
5 |
--------------------------------------------------------------------------------
/supabase/functions/research/deep-research/prompt.ts:
--------------------------------------------------------------------------------
1 | export const systemPrompt = () => {
2 | const now = new Date().toISOString();
3 | return `You are an expert researcher. Today is ${now}. Follow these instructions when responding:
4 | - You may be asked to research subjects that is after your knowledge cutoff, assume the user is right when presented with news.
5 | - The user is a highly experienced analyst, no need to simplify it, be as detailed as possible and make sure your response is correct.
6 | - Be highly organized.
7 | - Suggest solutions that I didn't think about.
8 | - Be proactive and anticipate my needs.
9 | - Treat me as an expert in all subject matter.
10 | - Mistakes erode my trust, so be accurate and thorough.
11 | - Provide detailed explanations, I'm comfortable with lots of detail.
12 | - Value good arguments over authorities, the source is irrelevant.
13 | - Consider new technologies and contrarian ideas, not just the conventional wisdom.
14 | - You may use high levels of speculation or prediction, just flag it for me.`;
15 | };
16 |
--------------------------------------------------------------------------------
/supabase/functions/research/index.ts:
--------------------------------------------------------------------------------
1 | import process from "node:process";
2 | import {
3 | deepResearch,
4 | generateFeedback,
5 | writeFinalReport,
6 | } from "./deep-research/index.ts";
7 | import { type AIModel, createModel } from "./deep-research/ai/providers.ts";
8 | import { getCookies } from "jsr:@std/http/cookie";
9 |
10 | Deno.serve(async (req) => {
11 | try {
12 | const {
13 | query,
14 | breadth = 3,
15 | depth = 2,
16 | modelId = "o3-mini",
17 | } = await req.json();
18 |
19 | // Retrieve API keys from secure cookies
20 | const cookies = getCookies(req.headers);
21 |
22 | // Get API keys from cookies
23 | const openaiKey = cookies["openai-key"];
24 | const firecrawlKey = cookies["firecrawl-key"];
25 |
26 | // Add API key validation
27 | if (process.env.NEXT_PUBLIC_ENABLE_API_KEYS === "true") {
28 | if (!openaiKey || !firecrawlKey) {
29 | return Response.json(
30 | { error: "API keys are required but not provided" },
31 | { status: 401 },
32 | );
33 | }
34 | }
35 |
36 | console.log("\n🔬 [RESEARCH ROUTE] === Request Started ===");
37 | console.log("Query:", query);
38 | console.log("Model ID:", modelId);
39 | console.log("Configuration:", {
40 | breadth,
41 | depth,
42 | });
43 | console.log("API Keys Present:", {
44 | OpenAI: openaiKey ? "✅" : "❌",
45 | FireCrawl: firecrawlKey ? "✅" : "❌",
46 | });
47 |
48 | try {
49 | const model = createModel(modelId as AIModel, openaiKey);
50 | console.log("\n🤖 [RESEARCH ROUTE] === Model Created ===");
51 | console.log("Using Model:", modelId);
52 |
53 | const encoder = new TextEncoder();
54 | const stream = new TransformStream();
55 | const writer = stream.writable.getWriter();
56 |
57 | // Helper function to write and flush
58 | // deno-lint-ignore no-explicit-any
59 | const writeAndFlush = async (data: Record) => {
60 | const encoded = encoder.encode(`data: ${JSON.stringify(data)}\n\n`);
61 | await writer.write(encoded);
62 | };
63 |
64 | (async () => {
65 | try {
66 | console.log("\n🚀 [RESEARCH ROUTE] === Research Started ===");
67 |
68 | const feedbackQuestions = await generateFeedback({
69 | query,
70 | apiKey: openaiKey,
71 | });
72 | await writeAndFlush({
73 | type: "progress",
74 | step: {
75 | type: "query",
76 | content: "Generated feedback questions",
77 | },
78 | });
79 |
80 | const { learnings, visitedUrls } = await deepResearch({
81 | query,
82 | breadth,
83 | depth,
84 | model,
85 | firecrawlKey,
86 | onProgress: async (update: string) => {
87 | console.log("\n📊 [RESEARCH ROUTE] Progress Update:", update);
88 | await writeAndFlush({
89 | type: "progress",
90 | step: {
91 | type: "research",
92 | content: update,
93 | },
94 | });
95 | },
96 | });
97 |
98 | const report = await writeFinalReport({
99 | prompt: query,
100 | learnings,
101 | visitedUrls,
102 | model,
103 | });
104 |
105 | await writeAndFlush({
106 | type: "result",
107 | feedbackQuestions,
108 | learnings,
109 | visitedUrls,
110 | report,
111 | });
112 | } catch (error) {
113 | console.error("\n❌ [RESEARCH ROUTE] === Research Process Error ===");
114 | console.error("Error:", error);
115 | await writeAndFlush({
116 | type: "error",
117 | message: "Research failed",
118 | });
119 | } finally {
120 | await writer.close();
121 | }
122 | })();
123 |
124 | return new Response(stream.readable, {
125 | headers: {
126 | "Content-Type": "text/event-stream",
127 | "Cache-Control": "no-cache, no-transform",
128 | Connection: "keep-alive",
129 | "X-Accel-Buffering": "no",
130 | "Transfer-Encoding": "chunked",
131 | },
132 | });
133 | } catch (error) {
134 | console.error("\n💥 [RESEARCH ROUTE] === Route Error ===");
135 | console.error("Error:", error);
136 | return Response.json({ error: "Research failed" }, { status: 500 });
137 | }
138 | } catch (error) {
139 | console.error("\n💥 [RESEARCH ROUTE] === Parse Error ===");
140 | console.error("Error:", error);
141 | return Response.json({ error: "Research failed" }, { status: 500 });
142 | }
143 | });
144 |
--------------------------------------------------------------------------------
/supabase/seed.sql:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/supavec/supa-deep-research/8a51cdf82e0e04cf51cfc0c28854a2701ee6a456/supabase/seed.sql
--------------------------------------------------------------------------------
/tailwind.config.ts:
--------------------------------------------------------------------------------
1 | import type { Config } from 'tailwindcss';
2 |
// Tailwind configuration (shadcn/ui-style theme driven by CSS variables).
const config = {
  // Dark mode toggled by adding the `dark` class strategy's class to the DOM.
  darkMode: ['class'],
  // Files scanned for class names at build time.
  content: [
    './pages/**/*.{ts,tsx}',
    './components/**/*.{ts,tsx}',
    './app/**/*.{ts,tsx}',
    './src/**/*.{ts,tsx}',
  ],
  theme: {
    container: {
      center: true,
      padding: '2rem',
      screens: {
        '2xl': '1400px',
      },
    },
    extend: {
      // Semantic color tokens resolved from CSS variables (see globals.css),
      // so themes can swap at runtime without rebuilding.
      colors: {
        border: 'hsl(var(--border))',
        input: 'hsl(var(--input))',
        ring: 'hsl(var(--ring))',
        background: 'hsl(var(--background))',
        foreground: 'hsl(var(--foreground))',
        primary: {
          DEFAULT: 'hsl(var(--primary))',
          foreground: 'hsl(var(--primary-foreground))',
        },
        secondary: {
          DEFAULT: 'hsl(var(--secondary))',
          foreground: 'hsl(var(--secondary-foreground))',
        },
        destructive: {
          DEFAULT: 'hsl(var(--destructive))',
          foreground: 'hsl(var(--destructive-foreground))',
        },
        muted: {
          DEFAULT: 'hsl(var(--muted))',
          foreground: 'hsl(var(--muted-foreground))',
        },
        accent: {
          DEFAULT: 'hsl(var(--accent))',
          foreground: 'hsl(var(--accent-foreground))',
        },
        popover: {
          DEFAULT: 'hsl(var(--popover))',
          foreground: 'hsl(var(--popover-foreground))',
        },
        card: {
          DEFAULT: 'hsl(var(--card))',
          foreground: 'hsl(var(--card-foreground))',
        },
      },
      // Radii derived from a single --radius variable for consistent corners.
      borderRadius: {
        lg: 'var(--radius)',
        md: 'calc(var(--radius) - 2px)',
        sm: 'calc(var(--radius) - 4px)',
      },
      // Accordion open/close animations used by Radix-based components.
      keyframes: {
        'accordion-down': {
          from: { height: '0' },
          to: { height: 'var(--radix-accordion-content-height)' },
        },
        'accordion-up': {
          from: { height: 'var(--radix-accordion-content-height)' },
          to: { height: '0' },
        },
      },
      animation: {
        'accordion-down': 'accordion-down 0.2s ease-out',
        'accordion-up': 'accordion-up 0.2s ease-out',
      },
    },
  },
  plugins: [require('tailwindcss-animate')],
} satisfies Config;

export default config;
80 |
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "target": "es5",
4 | "lib": ["dom", "dom.iterable", "esnext"],
5 | "allowJs": true,
6 | "skipLibCheck": true,
7 | "strict": true,
8 | "forceConsistentCasingInFileNames": true,
9 | "noEmit": true,
10 | "esModuleInterop": true,
11 | "module": "esnext",
12 | "moduleResolution": "bundler",
13 | "resolveJsonModule": true,
14 | "isolatedModules": true,
15 | "jsx": "preserve",
16 | "incremental": true,
17 | "plugins": [
18 | {
19 | "name": "next"
20 | }
21 | ],
22 | "paths": {
23 | "@/*": ["./*"],
24 | "contentlayer/generated": ["./.contentlayer/generated"]
25 | },
26 | "baseUrl": "."
27 | // ^^^^^^^^^^^
28 | },
29 | "include": [
30 | "next-env.d.ts",
31 | "**/*.ts",
32 | "**/*.tsx",
33 | ".next/types/**/*.ts",
34 | ".contentlayer/generated"
35 | , "next.config.ts" // ^^^^^^^^^^^^^^^^^^^^^^
36 | ],
37 | "exclude": ["node_modules", "supabase"]
38 | }
39 |
--------------------------------------------------------------------------------