├── .env.example
├── .eslintrc.json
├── .gitignore
├── .prettierrc
├── CONTRIBUTING.md
├── Dockerfile
├── LICENSE
├── README.md
├── components.json
├── docker-compose.yml
├── index.html
├── next.config.js
├── next.config.mjs
├── package.json
├── pnpm-lock.yaml
├── postcss.config.mjs
├── prisma
│   ├── migrations
│   │   ├── 20240815195611_init
│   │   │   └── migration.sql
│   │   ├── 20240815203248_add_metadata
│   │   │   └── migration.sql
│   │   ├── 20240815204542_add_config
│   │   │   └── migration.sql
│   │   ├── 20240815210443_add_provider_to_ai_configuration
│   │   │   └── migration.sql
│   │   ├── 20240816193734_add_api_key
│   │   │   └── migration.sql
│   │   ├── 20240816194453_add_configuration_relation
│   │   │   └── migration.sql
│   │   ├── 20240824081904_add_cost_model
│   │   │   └── migration.sql
│   │   ├── 20241012125518_convert_fields_to_json
│   │   │   └── migration.sql
│   │   └── migration_lock.toml
│   ├── schema.prisma
│   └── seed.ts
├── public
│   ├── ant-cache-create.png
│   ├── ant-cache-read.png
│   ├── anthropicCashedXConfig.png
│   ├── cl-dashboard.png
│   ├── cl-settings.png
│   ├── cl-stats.jpeg
│   └── cl-stats.png
├── scripts
│   └── update-log-costs.ts
├── src
│   ├── app
│   │   ├── [...openai]
│   │   │   └── route.ts
│   │   ├── actions.ts
│   │   ├── api
│   │   │   ├── configurations
│   │   │   │   └── route.ts
│   │   │   ├── logs
│   │   │   │   ├── [id]
│   │   │   │   │   └── route.ts
│   │   │   │   └── route.ts
│   │   │   └── stats
│   │   │       └── route.ts
│   │   ├── configurations
│   │   │   └── page.tsx
│   │   ├── favicon.ico
│   │   ├── globals.css
│   │   ├── layout.tsx
│   │   ├── logs
│   │   │   └── page.tsx
│   │   ├── opengraph-image.png
│   │   ├── page.tsx
│   │   ├── stats
│   │   │   └── page.tsx
│   │   └── twitter-image.png
│   ├── components
│   │   ├── ConfigurationModal.tsx
│   │   ├── LogDetails.test.tsx
│   │   ├── LogDetails.tsx
│   │   ├── LogsList.tsx
│   │   ├── NavBar.tsx
│   │   ├── theme-provider.tsx
│   │   ├── theme-toggle.tsx
│   │   └── ui
│   │       ├── alert.tsx
│   │       ├── badge.tsx
│   │       ├── button.tsx
│   │       ├── calendar.tsx
│   │       ├── card.tsx
│   │       ├── chart.tsx
│   │       ├── checkbox.tsx
│   │       ├── dialog.tsx
│   │       ├── input.tsx
│   │       ├── label.tsx
│   │       ├── popover.tsx
│   │       ├── progress.tsx
│   │       ├── scroll-area.tsx
│   │       ├── select.tsx
│   │       ├── skeleton.tsx
│   │       ├── sonner.tsx
│   │       ├── switch.tsx
│   │       └── table.tsx
│   ├── env.ts
│   ├── lib
│   │   ├── cost-calculator.ts
│   │   ├── db.ts
│   │   ├── model-config.ts
│   │   ├── prisma.ts
│   │   └── utils.ts
│   └── types
│       └── logs.ts
├── start.sh
├── tailwind.config.ts
├── tsconfig.json
├── vitest.config.mts
└── vitest.setup.ts
/.env.example:
--------------------------------------------------------------------------------
1 | OPENAI_API_KEY=your_openai_api_key_here
2 | ANTHROPIC_API_KEY=your_anthropic_api_key_here
3 | COHERE_API_KEY=your_cohere_api_key_here
4 | MISTRAL_API_KEY=your_mistral_api_key_here
5 | GROQ_API_KEY=your_groq_api_key_here
6 |
7 | DATABASE_URL=
8 |
--------------------------------------------------------------------------------
/.eslintrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "next/core-web-vitals"
3 | }
4 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
2 |
3 | # dependencies
4 | /node_modules
5 | /.pnp
6 | .pnp.js
7 | .yarn/install-state.gz
8 |
9 | # testing
10 | /coverage
11 |
12 | # next.js
13 | /.next/
14 | /out/
15 |
16 | # production
17 | /build
18 |
19 | # misc
20 | .DS_Store
21 | *.pem
22 |
23 | # debug
24 | npm-debug.log*
25 | yarn-debug.log*
26 | yarn-error.log*
27 |
28 | # local env files
29 | .env*.local
30 |
31 | # vercel
32 | .vercel
33 |
34 | # typescript
35 | *.tsbuildinfo
36 | next-env.d.ts
37 |
38 | .env
39 | certificates
--------------------------------------------------------------------------------
/.prettierrc:
--------------------------------------------------------------------------------
1 | {
2 | "plugins": ["prettier-plugin-tailwindcss"]
3 | }
4 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to Cursor Lens
2 |
3 | We love your input! We want to make contributing to this project as easy and transparent as possible, whether it's:
4 |
5 | - Reporting a bug
6 | - Discussing the current state of the code
7 | - Submitting a fix
8 | - Proposing new features
9 | - Becoming a maintainer
10 |
11 | ## We Develop with GitHub
12 |
13 | We use GitHub to host code, track issues and feature requests, and accept pull requests.
14 |
15 | ## We Use [GitHub Flow](https://guides.github.com/introduction/flow/index.html), So All Code Changes Happen Through Pull Requests
16 |
17 | Pull requests are the best way to propose changes to the codebase. We actively welcome your pull requests:
18 |
19 | 1. Fork the repo and create your branch from `main`.
20 | 2. If you've added code that should be tested, add tests.
21 | 3. If you've changed APIs, update the documentation.
22 | 4. Ensure the test suite passes.
23 | 5. Make sure your code lints.
24 | 6. Issue that pull request!
25 |
26 | ## Any contributions you make will be under the Apache License 2.0
27 |
28 | In short, when you submit code changes, your submissions are understood to be under the same [Apache License 2.0](http://www.apache.org/licenses/LICENSE-2.0) that covers the project. Feel free to contact the maintainers if that's a concern.
29 |
30 | ## Report bugs using GitHub's [issues](https://github.com/HamedMP/CursorLens/issues)
31 |
32 | We use GitHub issues to track public bugs. Report a bug by [opening a new issue](https://github.com/HamedMP/CursorLens/issues/new); it's that easy!
33 |
34 | ## Write bug reports with detail, background, and sample code
35 |
36 | **Great Bug Reports** tend to have:
37 |
38 | - A quick summary and/or background
39 | - Steps to reproduce
40 |   - Be specific!
41 |   - Give sample code if you can.
42 | - What you expected would happen
43 | - What actually happens
44 | - Notes (possibly including why you think this might be happening, or stuff you tried that didn't work)
45 |
46 | ## Use a Consistent Coding Style
47 |
48 | - 2 spaces for indentation rather than tabs
49 | - You can try running `pnpm lint` for style unification
50 |
51 | ## License
52 |
53 | By contributing, you agree that your contributions will be licensed under the project's Apache License 2.0.
54 |
55 | ## References
56 |
57 | This document was adapted from the open-source contribution guidelines for [Facebook's Draft](https://github.com/facebook/draft-js/blob/a9316a723f9e918afde44dea68b5f9f39b7d9b00/CONTRIBUTING.md)
58 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM node:18-alpine
2 |
3 | WORKDIR /app
4 |
5 | RUN apk add --no-cache libc6-compat
6 | RUN apk update
7 |
8 | # Install pnpm
9 | RUN npm install -g pnpm
10 |
11 | # Copy package.json and pnpm-lock.yaml
12 | COPY package.json pnpm-lock.yaml ./
13 |
14 | # Install dependencies
15 | RUN pnpm install
16 |
17 | # Copy the rest of the application
18 | COPY . .
19 |
20 | # Generate Prisma Client
21 | RUN pnpm prisma generate
22 |
23 | # Build the application
24 | RUN pnpm run build
25 |
26 | # Expose the port the app runs on
27 | EXPOSE 3000
28 |
29 | # Start the application
30 | CMD ["sh", "./start.sh"]
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Cursor Lens ✨
2 |
3 | Cursor Lens is an open-source tool designed to provide insights into AI-assisted coding sessions using Cursor AI. It acts as a proxy between Cursor and various AI providers, logging interactions and providing detailed analytics to help developers optimize their use of AI in their coding workflow.
4 |
5 | We are live on Product Hunt today; please upvote us if you find this useful! 🙏
6 |
7 |
8 |
9 | 
10 |
11 | ## Features
12 |
13 | - **AI Provider Integration**: Supports multiple AI providers including OpenAI, Anthropic, and more.
14 | - **Request Logging**: Captures and logs all requests between Cursor and AI providers.
15 | - **Analytics Dashboard**: Provides visual analytics on AI usage, including token consumption and request patterns.
16 | - **Configurable AI Models**: Allows users to set up and switch between different AI configurations.
17 | - **Real-time Monitoring**: Offers a live view of ongoing AI interactions.
18 | - **Token Usage Tracking**: Monitors and reports on token usage across different models.
19 | - **Cost Estimation**: Provides estimated costs based on token usage and model pricing.
20 |
21 | ## Technology Stack
22 |
23 | - **Frontend/Backend**: Next.js with React
24 | - **Database**: PostgreSQL with Prisma ORM
25 | - **AI Library**: Vercel AI SDK
26 | - **Styling**: Tailwind CSS with shadcn/ui components
27 |
28 | ## Getting Started
29 |
30 | For detailed installation instructions, please refer to our [Installation Guide](https://www.cursorlens.com/docs/getting-started/installation).
31 |
32 | ### Prerequisites
33 |
34 | - Node.js (v18 or later)
35 | - pnpm
36 | - PostgreSQL
37 | - ngrok
38 |
39 | ### Quick Installation Steps
40 |
41 | 1. Clone the repository
42 | 2. Install dependencies with `pnpm install`
43 | 3. Set up environment variables
44 | 4. Set up the database with `pnpm prisma migrate dev`
45 | 5. Build the project with `pnpm build`
46 | 6. Set up ngrok
47 | 7. Configure Cursor to use your ngrok URL as the API endpoint
48 |
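A minimal command-line sketch of steps 2–6, assuming the defaults from `.env.example` and `docker-compose.yml` (your `DATABASE_URL`, API keys, and ngrok URL will differ):

```bash
pnpm install

# Environment variables: copy the template and fill in the provider keys you use
cp .env.example .env
# e.g. DATABASE_URL=postgresql://postgres:postgres@localhost:5432/postgres

# Create the database schema
pnpm prisma migrate dev

# Build and start the proxy
pnpm build
pnpm start

# Expose the local server so Cursor can reach it
ngrok http 3000
```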
49 | For full details on each step, please see the [Installation Guide](https://www.cursorlens.com/docs/getting-started/installation).
50 |
51 | ## Usage
52 |
53 | 1. Configure Cursor to use Cursor Lens as its API endpoint by overriding `OpenAI Base URL`.
54 | 2. Choose a `gpt-` model, then use Cursor as normal for AI-assisted coding.
55 | 3. Visit the Cursor Lens dashboard to view logs, statistics, and insights.
56 |
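Once a default configuration exists, you can sanity-check the proxy outside Cursor with a plain OpenAI-style request (the URL below is a placeholder; substitute your own ngrok domain):

```bash
curl "https://<your-ngrok-domain>/chat/completions" \
  -H "Content-Type: application/json" \
  -d '{
    "model": "gpt-4o",
    "stream": false,
    "messages": [{ "role": "user", "content": "Hello from Cursor Lens" }]
  }'
```

The catch-all handler in `src/app/[...openai]/route.ts` ignores the `model` sent by the client and routes the request to whichever configuration is marked as default.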
57 | 
58 |
59 | ## Stats page
60 |
61 | 
62 |
63 | ## Prompt caching with Anthropic (v0.1.2):
64 |
65 | 1. Create a new config on the `/configurations` page, choose `anthropicCached` with Sonnet 3.5, and name it as you like.
66 | 2. Mark it as default.
67 | 3. Use Cursor with CursorLens as normal. The system and context messages in `CMD+L` and `CMD+i` chats will be cached from now on.
68 |
69 | > Note that TTL for the cache is 5 minutes.
70 |
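Under the hood, the proxy tags Cursor's `potential_context` messages with Anthropic's ephemeral cache control before forwarding them. A simplified, self-contained sketch of that logic from `src/app/[...openai]/route.ts` (sample messages are made up for illustration):

```typescript
// Simplified from src/app/[...openai]/route.ts: tag context messages so Anthropic caches them
type ChatMessage = { role: string; content: string; name?: string };

const messages: ChatMessage[] = [
  { role: "system", content: "You are a coding assistant." },
  { role: "user", content: "<repository context>", name: "potential_context" },
  { role: "user", content: "Refactor this function." },
];

const modifiedMessages = messages.map((message) =>
  message.name === "potential_context"
    ? {
        ...message,
        // Anthropic-specific metadata understood by the Vercel AI SDK
        experimental_providerMetadata: {
          anthropic: { cacheControl: { type: "ephemeral" } },
        },
      }
    : message,
);

console.log(modifiedMessages);
```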
71 | 
72 | 
73 | 
74 |
75 | # Releases
76 |
77 | ## Nightly - 2024-08-24
78 |
79 | - Add new cost calculation
80 |
81 | To run it, make sure to run:
82 |
83 | - `npx prisma db seed` and then
84 | - `pnpm run update-log-costs` to add cost info in metadata for all previous logs
85 |
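The update script prices each log from the per-million-token rates stored in the `ModelCost` table. As an illustrative example (the prices below are placeholders, not the seeded values), a log with 2,000 input tokens and 500 output tokens would be priced as:

```typescript
// Illustrative only: mirrors the formula used in scripts/update-log-costs.ts
const inputTokens = 2_000;
const outputTokens = 500;
const inputCostPerMillion = 3; // placeholder price, USD per 1M input tokens
const outputCostPerMillion = 15; // placeholder price, USD per 1M output tokens

const inputCost = (inputTokens / 1_000_000) * inputCostPerMillion; // 0.006
const outputCost = (outputTokens / 1_000_000) * outputCostPerMillion; // 0.0075
const totalCost = inputCost + outputCost; // 0.0135

console.log({ inputCost, outputCost, totalCost });
```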
86 | ## [0.1.2-alpha] - 2024-08-22
87 |
88 | ### ⚠️ ALPHA RELEASE
89 |
90 | ### Added
91 |
92 | - Add Anthropic Cache support for context messages
93 | - Increase Token limit for Anthropic to 8192 tokens
94 | - Improved statistics page: Now you can select the data points you want to see
95 |
96 | ### Improved and fixed
97 |
98 | - Log details are now collapsible
99 | - Full response is captured in the logs
100 |
101 | ## [0.1.1-alpha] - 2024-08-18
102 |
103 | ### ⚠️ ALPHA RELEASE
104 |
105 | ### Added
106 |
107 | - Added support for Mistral AI, Cohere, Groq, and Ollama
108 |
109 | ## [0.1.0-alpha] - 2024-08-17
110 |
111 | This is the initial alpha release of CursorLens. As an alpha version, it may contain bugs and is not yet feature-complete. Use with caution in non-production environments.
112 |
113 | ### Added
114 |
115 | - Initial project setup with Next.js
116 | - Basic proxy functionality between Cursor and AI providers (OpenAI, Anthropic)
117 | - Simple dashboard for viewing AI interaction logs
118 | - Token usage tracking for OpenAI and Anthropic models
119 | - Basic cost estimation based on token usage
120 | - Support for PostgreSQL database with Prisma ORM
121 | - Environment variable configuration for API keys and database connection
122 | - Basic error handling and logging
123 |
124 | ### Known Issues
125 |
126 | - Limited error handling for edge cases
127 | - Incomplete test coverage
128 | - Basic UI with limited customization options
129 | - Potential performance issues with large volumes of requests
130 | - Cost calculation for cached messages in Anthropic is not correct
131 |
132 | ## Contributing
133 |
134 | We welcome contributions to Cursor Lens! Please see our [Contributing Guide](CONTRIBUTING.md) for more details on how to get started.
135 |
136 | ## License
137 |
138 | Cursor Lens is licensed under the GNU Affero General Public License v3.0 (AGPL-3.0). See the [LICENSE](LICENSE) file for details.
139 |
140 | ## Support
141 |
142 | If you encounter any issues or have questions, please file an issue on the GitHub repository or contact the maintainers directly.
143 |
144 | For more detailed information, please visit our [documentation](https://www.cursorlens.com/docs/project/introduction).
145 |
146 | ---
147 |
148 | Happy coding with Cursor Lens!
149 |
--------------------------------------------------------------------------------
/components.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "https://ui.shadcn.com/schema.json",
3 | "style": "new-york",
4 | "rsc": true,
5 | "tsx": true,
6 | "tailwind": {
7 | "config": "tailwind.config.ts",
8 | "css": "src/app/globals.css",
9 | "baseColor": "stone",
10 | "cssVariables": true,
11 | "prefix": ""
12 | },
13 | "aliases": {
14 | "components": "@/components",
15 | "utils": "@/lib/utils"
16 | }
17 | }
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | db:
3 | image: postgres:14
4 | environment:
5 | - POSTGRES_USER=postgres
6 | - POSTGRES_PASSWORD=postgres
7 | - POSTGRES_DB=postgres
8 | volumes:
9 | - postgres_data:/var/lib/postgresql/data
10 | healthcheck:
11 | test: ["CMD-SHELL", "pg_isready -U postgres"]
12 | interval: 5s
13 | timeout: 5s
14 | retries: 5
15 | ports:
16 | - "5432:5432"
17 |
18 | app:
19 | build:
20 | context: .
21 | dockerfile: Dockerfile
22 | ports:
23 | - "3000:3000"
24 | environment:
25 | - DATABASE_URL=postgresql://postgres:postgres@db:5432/postgres
26 | depends_on:
27 | db:
28 | condition: service_healthy
29 |
30 | ngrok:
31 | image: ngrok/ngrok:latest
32 | environment:
33 | NGROK_AUTHTOKEN: ${NGROK_AUTHTOKEN}
34 | command: http app:3000
35 | ports:
36 | - "4040:4040"
37 | depends_on:
38 | - app
39 |
40 | volumes:
41 | postgres_data:
42 |
--------------------------------------------------------------------------------
/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Cursor Dashboard
7 |
29 |
30 |
31 | Cursor Dashboard
32 |
33 | Cursor Dashboard is a powerful Next.js application for managing and
34 | analyzing AI configurations and logs.
35 |
36 | Features:
37 |
38 | - View total logs and tokens used
39 | - Manage AI configurations
40 | - Browse recent logs
41 |
42 | Getting Started:
43 |
44 | - Clone the repository
45 | - Install dependencies with npm install
46 | - Run the development server with npm run dev
47 |
48 | View on GitHub
51 |
52 |
53 |
--------------------------------------------------------------------------------
/next.config.js:
--------------------------------------------------------------------------------
1 | /** @type {import('next').NextConfig} */
2 | const nextConfig = {
3 | typescript: {
4 | ignoreBuildErrors: true,
5 | },
6 | };
7 |
8 | module.exports = nextConfig;
9 |
--------------------------------------------------------------------------------
/next.config.mjs:
--------------------------------------------------------------------------------
1 | /** @type {import('next').NextConfig} */
2 | const nextConfig = {};
3 |
4 | export default nextConfig;
5 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "cursor-analytics",
3 | "version": "0.1.0",
4 | "private": true,
5 | "scripts": {
6 | "dev": "next dev",
7 | "build": "next build",
8 | "start": "next start",
9 | "lint": "next lint",
10 | "test": "vitest",
11 | "test:watch": "vitest --watch",
12 | "test:coverage": "vitest run --coverage",
13 | "test:ui": "vitest --ui",
14 | "seed": "tsx prisma/seed.ts",
15 | "update-log-costs": "tsx scripts/update-log-costs.ts"
16 | },
17 | "prisma": {
18 | "seed": "tsx prisma/seed.ts"
19 | },
20 | "dependencies": {
21 | "@ai-sdk/amazon-bedrock": "^0.0.17",
22 | "@ai-sdk/anthropic": "^0.0.46",
23 | "@ai-sdk/cohere": "^0.0.17",
24 | "@ai-sdk/google-vertex": "^0.0.28",
25 | "@ai-sdk/mistral": "^0.0.34",
26 | "@ai-sdk/openai": "^0.0.46",
27 | "@prisma/client": "^5.18.0",
28 | "@radix-ui/react-checkbox": "^1.1.1",
29 | "@radix-ui/react-dialog": "^1.1.1",
30 | "@radix-ui/react-icons": "^1.3.0",
31 | "@radix-ui/react-label": "^2.1.0",
32 | "@radix-ui/react-popover": "^1.1.1",
33 | "@radix-ui/react-progress": "^1.1.0",
34 | "@radix-ui/react-scroll-area": "^1.1.0",
35 | "@radix-ui/react-select": "^2.1.1",
36 | "@radix-ui/react-slot": "^1.1.0",
37 | "@radix-ui/react-switch": "^1.1.0",
38 | "@t3-oss/env-nextjs": "^0.11.0",
39 | "@types/react-syntax-highlighter": "^15.5.13",
40 | "ai": "^3.3.14",
41 | "class-variance-authority": "^0.7.0",
42 | "clsx": "^2.1.1",
43 | "date-fns": "^3.6.0",
44 | "lodash": "^4.17.21",
45 | "lucide-react": "^0.427.0",
46 | "next": "14.2.5",
47 | "next-themes": "^0.3.0",
48 | "ollama-ai-provider": "^0.12.0",
49 | "openai": "^4.55.7",
50 | "react": "^18",
51 | "react-day-picker": "8.10.1",
52 | "react-dom": "^18",
53 | "react-markdown": "^9.0.1",
54 | "react-syntax-highlighter": "^15.5.0",
55 | "recharts": "^2.12.7",
56 | "sonner": "^1.5.0",
57 | "sqlite": "^5.1.1",
58 | "sqlite3": "^5.1.7",
59 | "tailwind-merge": "^2.5.2",
60 | "tailwindcss-animate": "^1.0.7",
61 | "zod": "^3.23.8"
62 | },
63 | "devDependencies": {
64 | "@testing-library/jest-dom": "^6.5.0",
65 | "@testing-library/react": "^16.0.1",
66 | "@testing-library/user-event": "^14.5.2",
67 | "@types/jsdom": "^21.1.7",
68 | "@types/lodash": "^4.17.7",
69 | "@types/node": "^20",
70 | "@types/react": "^18",
71 | "@types/react-dom": "^18",
72 | "@vitejs/plugin-react": "^4.3.2",
73 | "@vitest/ui": "^2.1.2",
74 | "eslint": "^8",
75 | "eslint-config-next": "14.2.5",
76 | "jsdom": "^25.0.1",
77 | "postcss": "^8",
78 | "prettier": "^3.3.3",
79 | "prettier-plugin-tailwindcss": "^0.6.6",
80 | "prisma": "^5.18.0",
81 | "tailwindcss": "^3.4.1",
82 | "tsx": "^4.7.1",
83 | "typescript": "^5",
84 | "vitest": "^2.1.2"
85 | }
86 | }
87 |
--------------------------------------------------------------------------------
/postcss.config.mjs:
--------------------------------------------------------------------------------
1 | /** @type {import('postcss-load-config').Config} */
2 | const config = {
3 | plugins: {
4 | tailwindcss: {},
5 | },
6 | };
7 |
8 | export default config;
9 |
--------------------------------------------------------------------------------
/prisma/migrations/20240815195611_init/migration.sql:
--------------------------------------------------------------------------------
1 | -- CreateTable
2 | CREATE TABLE "Log" (
3 | "id" TEXT NOT NULL,
4 | "method" TEXT NOT NULL,
5 | "url" TEXT NOT NULL,
6 | "headers" TEXT NOT NULL,
7 | "body" TEXT NOT NULL,
8 | "response" TEXT NOT NULL,
9 | "timestamp" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
10 |
11 | CONSTRAINT "Log_pkey" PRIMARY KEY ("id")
12 | );
13 |
--------------------------------------------------------------------------------
/prisma/migrations/20240815203248_add_metadata/migration.sql:
--------------------------------------------------------------------------------
1 | -- AlterTable
2 | ALTER TABLE "Log" ADD COLUMN "metadata" JSONB;
3 |
--------------------------------------------------------------------------------
/prisma/migrations/20240815204542_add_config/migration.sql:
--------------------------------------------------------------------------------
1 | -- CreateTable
2 | CREATE TABLE "AIConfiguration" (
3 | "id" TEXT NOT NULL,
4 | "name" TEXT NOT NULL,
5 | "model" TEXT NOT NULL,
6 | "temperature" DOUBLE PRECISION,
7 | "maxTokens" INTEGER,
8 | "topP" DOUBLE PRECISION,
9 | "frequencyPenalty" DOUBLE PRECISION,
10 | "presencePenalty" DOUBLE PRECISION,
11 | "isDefault" BOOLEAN NOT NULL DEFAULT false,
12 | "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
13 | "updatedAt" TIMESTAMP(3) NOT NULL,
14 |
15 | CONSTRAINT "AIConfiguration_pkey" PRIMARY KEY ("id")
16 | );
17 |
18 | -- CreateIndex
19 | CREATE UNIQUE INDEX "AIConfiguration_name_key" ON "AIConfiguration"("name");
20 |
--------------------------------------------------------------------------------
/prisma/migrations/20240815210443_add_provider_to_ai_configuration/migration.sql:
--------------------------------------------------------------------------------
1 | -- AlterTable
2 | ALTER TABLE "AIConfiguration" ADD COLUMN "provider" TEXT NOT NULL DEFAULT 'openai';
3 |
--------------------------------------------------------------------------------
/prisma/migrations/20240816193734_add_api_key/migration.sql:
--------------------------------------------------------------------------------
1 | -- AlterTable
2 | ALTER TABLE "AIConfiguration" ADD COLUMN "apiKey" TEXT;
3 |
--------------------------------------------------------------------------------
/prisma/migrations/20240816194453_add_configuration_relation/migration.sql:
--------------------------------------------------------------------------------
1 | -- AlterTable
2 | ALTER TABLE "Log" ADD COLUMN "configurationId" TEXT;
3 |
4 | -- AddForeignKey
5 | ALTER TABLE "Log" ADD CONSTRAINT "Log_configurationId_fkey" FOREIGN KEY ("configurationId") REFERENCES "AIConfiguration"("id") ON DELETE SET NULL ON UPDATE CASCADE;
6 |
--------------------------------------------------------------------------------
/prisma/migrations/20240824081904_add_cost_model/migration.sql:
--------------------------------------------------------------------------------
1 | /*
2 | Warnings:
3 |
4 | - You are about to drop the column `configurationId` on the `Log` table. All the data in the column will be lost.
5 | - Made the column `metadata` on table `Log` required. This step will fail if there are existing NULL values in that column.
6 |
7 | */
8 | -- DropForeignKey
9 | ALTER TABLE "Log" DROP CONSTRAINT "Log_configurationId_fkey";
10 |
11 | -- AlterTable
12 | ALTER TABLE "Log" DROP COLUMN "configurationId",
13 | ALTER COLUMN "metadata" SET NOT NULL;
14 |
15 | -- CreateTable
16 | CREATE TABLE "ModelCost" (
17 | "id" TEXT NOT NULL,
18 | "provider" TEXT NOT NULL,
19 | "model" TEXT NOT NULL,
20 | "inputTokenCost" DOUBLE PRECISION NOT NULL,
21 | "outputTokenCost" DOUBLE PRECISION NOT NULL,
22 | "validFrom" TIMESTAMP(3) NOT NULL,
23 | "validTo" TIMESTAMP(3),
24 |
25 | CONSTRAINT "ModelCost_pkey" PRIMARY KEY ("id")
26 | );
27 |
28 | -- CreateIndex
29 | CREATE UNIQUE INDEX "ModelCost_provider_model_validFrom_key" ON "ModelCost"("provider", "model", "validFrom");
30 |
31 | -- CreateIndex
32 | CREATE INDEX "Log_timestamp_idx" ON "Log"("timestamp");
33 |
--------------------------------------------------------------------------------
/prisma/migrations/20241012125518_convert_fields_to_json/migration.sql:
--------------------------------------------------------------------------------
1 | -- AlterTable
2 | ALTER TABLE "Log"
3 | ALTER COLUMN "headers" TYPE JSONB USING headers::jsonb,
4 | ALTER COLUMN "body" TYPE JSONB USING body::jsonb,
5 | ALTER COLUMN "response" TYPE JSONB USING response::jsonb;
6 |
--------------------------------------------------------------------------------
/prisma/migrations/migration_lock.toml:
--------------------------------------------------------------------------------
1 | # Please do not edit this file manually
2 | # It should be added in your version-control system (i.e. Git)
3 | provider = "postgresql"
--------------------------------------------------------------------------------
/prisma/schema.prisma:
--------------------------------------------------------------------------------
1 | // This is your Prisma schema file,
2 | // learn more about it in the docs: https://pris.ly/d/prisma-schema
3 |
4 | generator client {
5 | provider = "prisma-client-js"
6 | binaryTargets = ["native", "linux-musl-openssl-3.0.x"]
7 | }
8 |
9 | datasource db {
10 | provider = "postgresql"
11 | url = env("DATABASE_URL")
12 | }
13 |
14 | model Log {
15 | id String @id @default(cuid())
16 | method String
17 | url String
18 | headers Json
19 | body Json
20 | response Json?
21 | timestamp DateTime @default(now())
22 | metadata Json
23 |
24 | @@index([timestamp])
25 | }
26 |
27 | model AIConfiguration {
28 | id String @id @default(cuid())
29 | name String @unique
30 | provider String @default("openai")
31 | model String
32 | temperature Float?
33 | maxTokens Int?
34 | topP Float?
35 | frequencyPenalty Float?
36 | presencePenalty Float?
37 | isDefault Boolean @default(false)
38 | apiKey String?
39 | createdAt DateTime @default(now())
40 | updatedAt DateTime @updatedAt
41 | }
42 |
43 | model ModelCost {
44 | id String @id @default(cuid())
45 | provider String
46 | model String
47 | inputTokenCost Float
48 | outputTokenCost Float
49 | validFrom DateTime
50 | validTo DateTime?
51 |
52 | @@unique([provider, model, validFrom])
53 | }
--------------------------------------------------------------------------------
/prisma/seed.ts:
--------------------------------------------------------------------------------
1 | import { PrismaClient } from "@prisma/client";
2 | import { getModelConfigurations } from "../src/lib/model-config";
3 |
4 | const prisma = new PrismaClient();
5 |
6 | function convertToCostPerMillionTokens(cost: number): number {
7 | return cost * 1_000_000;
8 | }
9 |
10 | async function main() {
11 | const modelConfigurations = getModelConfigurations();
12 |
13 | for (const [provider, models] of Object.entries(modelConfigurations)) {
14 | for (const [model, config] of Object.entries(models)) {
15 | if (config && "inputTokenCost" in config && "outputTokenCost" in config) {
16 | await prisma.modelCost.upsert({
17 | where: {
18 | provider_model_validFrom: {
19 | provider,
20 | model,
21 | validFrom: new Date(),
22 | },
23 | },
24 | update: {
25 | inputTokenCost: convertToCostPerMillionTokens(
26 | config.inputTokenCost,
27 | ),
28 | outputTokenCost: convertToCostPerMillionTokens(
29 | config.outputTokenCost,
30 | ),
31 | },
32 | create: {
33 | provider,
34 | model,
35 | inputTokenCost: convertToCostPerMillionTokens(
36 | config.inputTokenCost,
37 | ),
38 | outputTokenCost: convertToCostPerMillionTokens(
39 | config.outputTokenCost,
40 | ),
41 | validFrom: new Date(),
42 | },
43 | });
44 | }
45 | }
46 | }
47 |
48 | console.log("Seed data inserted successfully");
49 | }
50 |
51 | main()
52 | .catch((e) => {
53 | console.error(e);
54 | process.exit(1);
55 | })
56 | .finally(async () => {
57 | await prisma.$disconnect();
58 | });
59 |
--------------------------------------------------------------------------------
/public/ant-cache-create.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HamedMP/CursorLens/6577121583bae3c0349a2958d81cdbcefec79f89/public/ant-cache-create.png
--------------------------------------------------------------------------------
/public/ant-cache-read.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HamedMP/CursorLens/6577121583bae3c0349a2958d81cdbcefec79f89/public/ant-cache-read.png
--------------------------------------------------------------------------------
/public/anthropicCashedXConfig.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HamedMP/CursorLens/6577121583bae3c0349a2958d81cdbcefec79f89/public/anthropicCashedXConfig.png
--------------------------------------------------------------------------------
/public/cl-dashboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HamedMP/CursorLens/6577121583bae3c0349a2958d81cdbcefec79f89/public/cl-dashboard.png
--------------------------------------------------------------------------------
/public/cl-settings.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HamedMP/CursorLens/6577121583bae3c0349a2958d81cdbcefec79f89/public/cl-settings.png
--------------------------------------------------------------------------------
/public/cl-stats.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HamedMP/CursorLens/6577121583bae3c0349a2958d81cdbcefec79f89/public/cl-stats.jpeg
--------------------------------------------------------------------------------
/public/cl-stats.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HamedMP/CursorLens/6577121583bae3c0349a2958d81cdbcefec79f89/public/cl-stats.png
--------------------------------------------------------------------------------
/scripts/update-log-costs.ts:
--------------------------------------------------------------------------------
1 | import { PrismaClient } from "@prisma/client";
2 | import { getModelCost } from "../src/lib/cost-calculator";
3 |
4 | const prisma = new PrismaClient();
5 |
6 | async function updateLogCosts() {
7 | const logs = await prisma.log.findMany();
8 |
9 | console.log(`Found ${logs.length} logs to update`);
10 |
11 | for (const log of logs) {
12 | try {
13 | const metadata = log.metadata as any;
14 | const { provider, model } = metadata;
15 |
16 | if (!provider || !model) {
17 | console.warn(`Skipping log ${log.id}: Missing provider or model`);
18 | continue;
19 | }
20 |
21 | const modelCost = await getModelCost(provider, model);
22 |
23 | let updatedMetadata = { ...metadata };
24 | let response =
25 | typeof log.response === "string"
26 | ? JSON.parse(log.response)
27 | : log.response;
28 |
29 | // Extract token usage from response
30 | const usage = response?.usage || {};
31 | const inputTokens = usage.promptTokens || metadata.inputTokens || 0;
32 | const outputTokens = usage.completionTokens || metadata.outputTokens || 0;
33 | const totalTokens = usage.totalTokens || inputTokens + outputTokens;
34 |
35 | // Calculate costs
36 | const inputCost = (inputTokens / 1000000) * modelCost.inputTokenCost;
37 | const outputCost = (outputTokens / 1000000) * modelCost.outputTokenCost;
38 | const totalCost = inputCost + outputCost;
39 |
40 | updatedMetadata = {
41 | ...updatedMetadata,
42 | inputTokens,
43 | outputTokens,
44 | totalTokens,
45 | inputCost,
46 | outputCost,
47 | totalCost,
48 | };
49 |
50 | await prisma.log.update({
51 | where: { id: log.id },
52 | data: { metadata: updatedMetadata },
53 | });
54 |
55 | console.log(
56 | `Updated log ${log.id}: inputTokens=${inputTokens}, outputTokens=${outputTokens}, totalCost=${totalCost}`,
57 | );
58 | } catch (error) {
59 | console.error(`Error updating log ${log.id}:`, error);
60 | }
61 | }
62 |
63 | console.log("Finished updating logs");
64 | }
65 |
66 | updateLogCosts()
67 | .catch((error) => {
68 | console.error("Error in updateLogCosts:", error);
69 | })
70 | .finally(async () => {
71 | await prisma.$disconnect();
72 | });
73 |
--------------------------------------------------------------------------------
/src/app/[...openai]/route.ts:
--------------------------------------------------------------------------------
1 | import { anthropic, createAnthropic } from "@ai-sdk/anthropic";
2 | import { createCohere } from "@ai-sdk/cohere";
3 | import { createMistral } from "@ai-sdk/mistral";
4 | import { createOpenAI, openai } from "@ai-sdk/openai";
5 | import { ollama } from "ollama-ai-provider";
6 |
7 | import { env } from "@/env";
8 | import { calculateCost, getModelCost } from "@/lib/cost-calculator";
9 | import { getDefaultConfiguration, insertLog } from "@/lib/db";
10 | import { generateText, streamText } from "ai";
11 | import { type NextRequest, NextResponse } from "next/server";
12 | import OpenAI from "openai";
13 |
14 | const openaiClient = new OpenAI({
15 | apiKey: env.OPENAI_API_KEY,
16 | });
17 |
18 | // Allow streaming responses up to 30 seconds
19 | export const maxDuration = 30;
20 |
21 | async function getAIModelClient(provider: string, model: string) {
22 | switch (provider.toLowerCase()) {
23 | case "openai":
24 | return openai(model);
25 | case "anthropic": {
26 | const anthropicClient = createAnthropic({
27 | apiKey: env.ANTHROPIC_API_KEY,
28 | });
29 | return anthropicClient(model);
30 | }
31 | case "anthropiccached": {
32 | const anthropicClient = createAnthropic({
33 | apiKey: env.ANTHROPIC_API_KEY,
34 | });
35 | return anthropicClient(model, { cacheControl: true });
36 | }
37 | case "cohere": {
38 | const cohereClient = createCohere({
39 | apiKey: env.COHERE_API_KEY,
40 | });
41 | return cohereClient(model);
42 | }
43 | case "mistral": {
44 | const mistralClient = createMistral({
45 | apiKey: env.MISTRAL_API_KEY,
46 | });
47 | return mistralClient(model);
48 | }
49 | case "groq": {
50 | const groqClient = createOpenAI({
51 | apiKey: env.GROQ_API_KEY,
52 | });
53 | return groqClient(model);
54 | }
55 | case "ollama":
56 | return ollama("llama3.1");
57 | case "google-vertex":
58 | throw new Error("Google Vertex AI is not currently supported");
59 | default:
60 | throw new Error(`Unsupported provider: ${provider}`);
61 | }
62 | }
63 |
64 | export async function POST(
65 | request: NextRequest,
66 | { params }: { params: { openai: string[] } },
67 | ) {
68 | const endpoint = params.openai.join("/");
69 | if (endpoint !== "chat/completions" && endpoint !== "v1/chat/completions") {
70 | return NextResponse.json({ error: "Not found", endpoint }, { status: 404 });
71 | }
72 |
73 | const body = await request.json();
74 | const { messages, model: cursorModel, stream = false, ...otherParams } = body;
75 |
76 | try {
77 | const defaultConfig = await getDefaultConfiguration();
78 | if (!defaultConfig) {
79 | throw new Error("No default configuration found");
80 | }
81 |
82 | const {
83 | id: configId,
84 | provider,
85 | model,
86 | temperature,
87 | maxTokens,
88 | topP,
89 | frequencyPenalty,
90 | presencePenalty,
91 | } = defaultConfig;
92 |
93 | if (!provider) {
94 | throw new Error("Provider is not defined in the default configuration");
95 | }
96 |
97 | const aiModel = await getAIModelClient(provider, model);
98 |
99 | let modifiedMessages = messages;
100 |
101 | if (provider.toLowerCase() === "anthropiccached") {
102 | const hasPotentialContext = messages.some(
103 | (message: any) => message.name === "potential_context",
104 | );
105 |
106 | modifiedMessages = messages.map((message: any) => {
107 | if (message.name === "potential_context") {
108 | return {
109 | ...message,
110 | experimental_providerMetadata: {
111 | anthropic: { cacheControl: { type: "ephemeral" } },
112 | },
113 | };
114 | }
115 | return message;
116 | });
117 |
118 | if (!hasPotentialContext && modifiedMessages.length >= 2) {
119 | modifiedMessages[1] = {
120 | ...modifiedMessages[1],
121 | experimental_providerMetadata: {
122 | anthropic: { cacheControl: { type: "ephemeral" } },
123 | },
124 | };
125 | }
126 | }
127 |
128 | const streamTextOptions = {
129 | model: aiModel,
130 | messages: modifiedMessages,
131 | maxTokens: ["anthropic", "anthropiccached"].includes(
132 | provider.toLowerCase(),
133 | )
134 | ? 8192
135 | : undefined,
136 | // Add other parameters from defaultConfig if needed
137 | };
138 |
139 | const logEntry = {
140 | method: "POST",
141 | url: `/api/${endpoint}`,
142 | headers: Object.fromEntries(request.headers),
143 | body: {
144 | ...body,
145 | ...streamTextOptions,
146 | model: model,
147 | },
148 | response: {},
149 | timestamp: new Date(),
150 | metadata: {
151 | configId,
152 | provider,
153 | model,
154 | temperature,
155 | maxTokens,
156 | topP,
157 | frequencyPenalty,
158 | presencePenalty,
159 | inputTokens: 0,
160 | outputTokens: 0,
161 | totalTokens: 0,
162 | inputCost: 0,
163 | outputCost: 0,
164 | totalCost: 0,
165 | },
166 | };
167 |
168 | if (stream) {
169 | const result = await streamText({
170 | ...streamTextOptions,
171 | async onFinish({
172 | text,
173 | toolCalls,
174 | toolResults,
175 | usage,
176 | finishReason,
177 | ...otherProps
178 | }) {
179 | const inputTokens = usage?.promptTokens ?? 0;
180 | const outputTokens = usage?.completionTokens ?? 0;
181 | const totalTokens = usage?.totalTokens ?? 0;
182 |
183 | const modelCost = await getModelCost(provider, model);
184 | const inputCost = (inputTokens / 1000000) * modelCost.inputTokenCost;
185 | const outputCost =
186 | (outputTokens / 1000000) * modelCost.outputTokenCost;
187 | const totalCost = inputCost + outputCost;
188 |
189 | logEntry.response = {
190 | text,
191 | toolCalls,
192 | toolResults,
193 | usage,
194 | finishReason,
195 | ...otherProps,
196 | };
197 | logEntry.metadata = {
198 | ...logEntry.metadata,
199 | inputTokens,
200 | outputTokens,
201 | totalTokens,
202 | inputCost,
203 | outputCost,
204 | totalCost,
205 | };
206 | await insertLog(logEntry);
207 | },
208 | });
209 |
210 | // Convert the result to a ReadableStream in OpenAI's format
211 | const stream = new ReadableStream({
212 | async start(controller) {
213 | for await (const chunk of result.textStream) {
214 | const data = JSON.stringify({
215 | id: `chatcmpl-${Math.random().toString(36).substr(2, 9)}`,
216 | object: "chat.completion.chunk",
217 | created: Math.floor(Date.now() / 1000),
218 | model: model,
219 | choices: [
220 | {
221 | delta: { content: chunk },
222 | index: 0,
223 | finish_reason: null,
224 | },
225 | ],
226 | });
227 | controller.enqueue(new TextEncoder().encode(`data: ${data}\n\n`));
228 | }
229 | controller.enqueue(new TextEncoder().encode("data: [DONE]\n\n"));
230 | controller.close();
231 | },
232 | });
233 |
234 | // Return a streaming response
235 | return new Response(stream, {
236 | headers: {
237 | "Content-Type": "text/event-stream",
238 | "Cache-Control": "no-cache",
239 | Connection: "keep-alive",
240 | },
241 | });
242 | }
243 | // For non-streaming requests, use the AI SDK
244 | const result = await generateText({
245 | model: aiModel,
246 | messages,
247 | });
248 |
249 | const inputTokens = result.usage?.promptTokens ?? 0;
250 | const outputTokens = result.usage?.completionTokens ?? 0;
251 | const totalTokens = result.usage?.totalTokens ?? 0;
252 |
253 | const modelCost = await getModelCost(provider, model);
254 |     const inputCost = (inputTokens / 1000000) * modelCost.inputTokenCost; // ModelCost stores per-million-token prices, as in the streaming path
255 |     const outputCost = (outputTokens / 1000000) * modelCost.outputTokenCost;
256 | const totalCost = inputCost + outputCost;
257 |
258 | logEntry.response = result;
259 | logEntry.metadata = {
260 | ...logEntry.metadata,
261 | inputTokens,
262 | outputTokens,
263 | totalTokens,
264 | inputCost,
265 | outputCost,
266 | totalCost,
267 | };
268 | await insertLog(logEntry);
269 |
270 | return NextResponse.json(result);
271 | } catch (error) {
272 | console.error("Error in chat completion:", error);
273 | const errorMessage = error instanceof Error ? error.message : String(error);
274 | const errorLogEntry = {
275 | method: "POST",
276 | url: `/api/${endpoint}`,
277 | headers: Object.fromEntries(request.headers),
278 | body: body,
279 | response: { error: errorMessage },
280 | timestamp: new Date(),
281 | metadata: {
282 | error: errorMessage,
283 | stack: (error as Error).stack,
284 | },
285 | };
286 | await insertLog(errorLogEntry);
287 | return NextResponse.json({ error: errorMessage }, { status: 500 });
288 | }
289 | }
290 |
291 | export async function GET(
292 | request: NextRequest,
293 | { params }: { params: { openai: string[] } },
294 | ) {
295 | const endpoint = params.openai.join("/");
296 |
297 | // Existing 'models' endpoint
298 | if (endpoint === "models") {
299 | const logEntry = {
300 | method: "GET",
301 | url: "/api/v1/models",
302 | headers: Object.fromEntries(request.headers),
303 | body: {},
304 | response: {},
305 | timestamp: new Date(),
306 | };
307 |
308 | try {
309 | const models = await openaiClient.models.list();
310 | logEntry.response = models;
311 | await insertLog(logEntry);
312 | return NextResponse.json(models);
313 | } catch (error) {
314 | console.error("Error fetching models:", error);
315 | logEntry.response = { error: String(error) };
316 | await insertLog(logEntry);
317 | return NextResponse.json({ error: String(error) }, { status: 500 });
318 | }
319 | }
320 |
321 | // New test routes
322 | else if (endpoint === "test/openai") {
323 | return testOpenAI();
324 | } else if (endpoint === "test/anthropic") {
325 | return testAnthropic();
326 | } else if (endpoint === "test/anthropiccached") {
327 | return testAnthropicCached();
328 | } else if (endpoint === "test/cohere") {
329 | return testCohere();
330 | } else if (endpoint === "test/mistral") {
331 | return testMistral();
332 | } else if (endpoint === "test/groq") {
333 | return testGroq();
334 | }
335 |
336 | return NextResponse.json({ error: "Not found" }, { status: 404 });
337 | }
338 |
339 | async function testOpenAI() {
340 | try {
341 | const model = openai("gpt-3.5-turbo");
342 | const result = await generateText({
343 | model,
344 | messages: [{ role: "user", content: 'Say "Hello from OpenAI!"' }],
345 | });
346 | return NextResponse.json({ provider: "OpenAI", result });
347 | } catch (error) {
348 | console.error("Error testing OpenAI:", error);
349 | return NextResponse.json({ error: String(error) }, { status: 500 });
350 | }
351 | }
352 |
353 | async function testAnthropicCached() {
354 | try {
355 | const model = anthropic("claude-3-5-sonnet-20240620", {
356 | cacheControl: true,
357 | });
358 |
359 | const result = await generateText({
360 | model,
361 | messages: [
362 | { role: "user", content: 'Say "Hello from Anthropic and Vercel"' },
363 | ],
364 | });
365 | return NextResponse.json({ provider: "Anthropic Cached", result });
366 | } catch (error) {
367 | console.error("Error testing Anthropic:", error);
368 | return NextResponse.json({ error: String(error) }, { status: 500 });
369 | }
370 | }
371 |
372 | async function testAnthropic() {
373 | try {
374 | const anthropicClient = createAnthropic({
375 | apiKey: env.ANTHROPIC_API_KEY,
376 | });
377 | const model = anthropicClient("claude-3-haiku-20240307");
378 | const result = await generateText({
379 | model,
380 | messages: [{ role: "user", content: 'Say "Hello from Anthropic!"' }],
381 | });
382 | return NextResponse.json({ provider: "Anthropic", result });
383 | } catch (error) {
384 | console.error("Error testing Anthropic:", error);
385 | return NextResponse.json({ error: String(error) }, { status: 500 });
386 | }
387 | }
388 |
389 | async function testCohere() {
390 | try {
391 | const cohereClient = createCohere({
392 | apiKey: env.COHERE_API_KEY,
393 | });
394 | const model = cohereClient("command");
395 | const result = await generateText({
396 | model,
397 | messages: [{ role: "user", content: 'Say "Hello from Cohere!"' }],
398 | });
399 | return NextResponse.json({ provider: "Cohere", result });
400 | } catch (error) {
401 | console.error("Error testing Cohere:", error);
402 | return NextResponse.json({ error: String(error) }, { status: 500 });
403 | }
404 | }
405 |
406 | async function testMistral() {
407 | try {
408 | const mistralClient = createMistral({
409 | apiKey: env.MISTRAL_API_KEY,
410 | });
411 | const model = mistralClient("mistral-small-latest");
412 | const result = await generateText({
413 | model,
414 | messages: [{ role: "user", content: 'Say "Hello from Mistral!"' }],
415 | });
416 | return NextResponse.json({ provider: "Mistral", result });
417 | } catch (error) {
418 | console.error("Error testing Mistral:", error);
419 | return NextResponse.json({ error: String(error) }, { status: 500 });
420 | }
421 | }
422 |
423 | async function testGroq() {
424 | try {
425 | const groqClient = createOpenAI({
426 | apiKey: env.GROQ_API_KEY,
427 | });
428 | const model = groqClient("llama-3.1-70b-versatile");
429 | const result = await generateText({
430 | model,
431 | messages: [{ role: "user", content: 'Say "Hello from Groq!"' }],
432 | });
433 | return NextResponse.json({ provider: "Groq", result });
434 | } catch (error) {
435 | console.error("Error testing Groq:", error);
436 | return NextResponse.json({ error: String(error) }, { status: 500 });
437 | }
438 | }
439 |
--------------------------------------------------------------------------------
/src/app/actions.ts:
--------------------------------------------------------------------------------
1 | "use server";
2 |
3 | import { getModelConfigurations } from "@/lib/model-config";
4 | import prisma from "@/lib/prisma";
5 | import type { Log, AIConfiguration, Prisma } from "@prisma/client";
6 |
7 | // Helper function to serialize dates
8 | function serializeDates<T>(obj: T): T {
9 | return JSON.parse(JSON.stringify(obj));
10 | }
11 |
12 | // Add the metadata type definition
13 | type LogMetadata = {
14 | topP: number;
15 | model: string;
16 | configId: string;
17 | provider: string;
18 | maxTokens: number;
19 | temperature: number;
20 | presencePenalty: number;
21 | frequencyPenalty: number;
22 | totalTokens: number;
23 | inputTokens: number;
24 | outputTokens: number;
25 | totalCost: number;
26 | };
27 |
28 | export async function getLogs({
29 | provider = "all",
30 | startDate = "",
31 | endDate = "",
32 | }: { provider?: string; startDate?: string; endDate?: string } = {}) {
33 | try {
34 | const query: Prisma.LogFindManyArgs = {
35 | orderBy: {
36 | timestamp: "desc",
37 | },
38 | };
39 |
40 | const whereConditions: Prisma.LogWhereInput = {};
41 |
42 | if (provider !== "all") {
43 | whereConditions.metadata = {
44 | path: ["provider"],
45 | equals: provider,
46 | };
47 | }
48 |
49 | if (startDate || endDate) {
50 | whereConditions.timestamp = {};
51 | if (startDate) {
52 | whereConditions.timestamp.gte = new Date(startDate);
53 | }
54 | if (endDate) {
55 | whereConditions.timestamp.lte = new Date(endDate);
56 | }
57 | }
58 |
59 | // Only add the where clause if we have conditions
60 | if (Object.keys(whereConditions).length > 0) {
61 | query.where = whereConditions;
62 | }
63 |
64 | const logs = await prisma.log.findMany(query);
65 |
66 | // Cast the metadata to the correct type
67 | return logs.map((log) => ({
68 | ...log,
69 | metadata: log.metadata as LogMetadata,
70 | }));
71 | } catch (error) {
72 | console.error("Error fetching logs:", error);
73 | throw error;
74 | }
75 | }
76 |
77 | export async function getStats(timeFilter = "all"): Promise<{
78 | totalLogs: number;
79 | totalTokens: number;
80 | totalPromptTokens: number;
81 | totalCompletionTokens: number;
82 | perModelProviderStats: {
83 | [key: string]: {
84 | logs: number;
85 | tokens: number;
86 | promptTokens: number;
87 | completionTokens: number;
88 | cost: number;
89 | provider: string;
90 | model: string;
91 | };
92 | };
93 | tokenUsageOverTime: {
94 | date: string;
95 | tokens: number;
96 | promptTokens: number;
97 | completionTokens: number;
98 | }[];
99 | }> {
100 | let startDate = new Date(0); // Default to all time
101 |
102 | switch (timeFilter) {
103 | case "day":
104 | startDate = new Date(Date.now() - 24 * 60 * 60 * 1000);
105 | break;
106 | case "week":
107 | startDate = new Date(Date.now() - 7 * 24 * 60 * 60 * 1000);
108 | break;
109 | case "month":
110 | startDate = new Date(Date.now() - 30 * 24 * 60 * 60 * 1000);
111 | break;
112 | }
113 |
114 | const logs = await prisma.log.findMany({
115 | where: {
116 | timestamp: {
117 | gte: startDate,
118 | },
119 | },
120 | orderBy: {
121 | timestamp: "asc",
122 | },
123 | });
124 |
125 | const perModelProviderStats: {
126 | [key: string]: {
127 | logs: number;
128 | tokens: number;
129 | promptTokens: number;
130 | completionTokens: number;
131 | cost: number;
132 | provider: string;
133 | model: string;
134 | };
135 | } = {};
136 |
137 | let totalTokens = 0;
138 | let totalPromptTokens = 0;
139 | let totalCompletionTokens = 0;
140 | const tokenUsageOverTime: {
141 | date: string;
142 | tokens: number;
143 | promptTokens: number;
144 | completionTokens: number;
145 | }[] = [];
146 |
147 | for (const log of logs) {
148 | const metadata = log.metadata as LogMetadata;
149 | const model = metadata.model || "unknown";
150 | const provider = metadata.provider || "unknown";
151 | const key = `${provider}:${model}`;
152 |
153 | if (!perModelProviderStats[key]) {
154 | perModelProviderStats[key] = {
155 | logs: 0,
156 | tokens: 0,
157 | promptTokens: 0,
158 | completionTokens: 0,
159 | cost: 0,
160 | provider,
161 | model,
162 | };
163 | }
164 | perModelProviderStats[key].logs += 1;
165 |
166 | const tokens = metadata.totalTokens || 0;
167 | const promptTokens = metadata.inputTokens || 0;
168 | const completionTokens = metadata.outputTokens || 0;
169 | const cost = metadata.totalCost || 0;
170 |
171 | perModelProviderStats[key].tokens += tokens;
172 | perModelProviderStats[key].promptTokens += promptTokens;
173 | perModelProviderStats[key].completionTokens += completionTokens;
174 | perModelProviderStats[key].cost += cost;
175 |
176 | totalTokens += tokens;
177 | totalPromptTokens += promptTokens;
178 | totalCompletionTokens += completionTokens;
179 |
180 | const date = log.timestamp.toISOString().split("T")[0];
181 | const existingEntry = tokenUsageOverTime.find(
182 | (entry) => entry.date === date,
183 | );
184 | if (existingEntry) {
185 | existingEntry.tokens += tokens;
186 | existingEntry.promptTokens += promptTokens;
187 | existingEntry.completionTokens += completionTokens;
188 | } else {
189 | tokenUsageOverTime.push({
190 | date,
191 | tokens,
192 | promptTokens,
193 | completionTokens,
194 | });
195 | }
196 | }
197 |
198 | return {
199 | totalLogs: logs.length,
200 | totalTokens,
201 | totalPromptTokens,
202 | totalCompletionTokens,
203 | perModelProviderStats,
204 | tokenUsageOverTime,
205 | };
206 | }
207 |
208 | export async function getConfigurations(): Promise<AIConfiguration[]> {
209 | const configs = await prisma.aIConfiguration.findMany();
210 | return serializeDates(configs);
211 | }
212 |
213 | export async function updateDefaultConfiguration(
214 | configId: string,
215 | ): Promise<void> {
216 | await prisma.aIConfiguration.updateMany({
217 | where: { isDefault: true },
218 | data: { isDefault: false },
219 | });
220 | await prisma.aIConfiguration.update({
221 | where: { id: configId },
222 | data: { isDefault: true },
223 | });
224 | }
225 |
226 | export async function createConfiguration(config: Partial<AIConfiguration>) {
227 | const {
228 | name,
229 | provider,
230 | model,
231 | temperature,
232 | maxTokens,
233 | topP,
234 | frequencyPenalty,
235 | presencePenalty,
236 | isDefault,
237 | apiKey,
238 | } = config;
239 |
240 | // TODO: Consider using Zod schemas for validation and potentially integrate
241 | // https://github.com/vantezzen/auto-form for form generation and validation
242 |
243 | // Guard clause to ensure required fields are present
244 | if (!name || !provider || !model) {
245 | throw new Error("Name, provider, and model are required fields");
246 | }
247 |
248 | const newConfig = await prisma.aIConfiguration.create({
249 | data: {
250 | name,
251 | provider,
252 | model,
253 | temperature: temperature,
254 | maxTokens: maxTokens,
255 | topP: topP,
256 | frequencyPenalty: frequencyPenalty,
257 | presencePenalty: presencePenalty,
258 | isDefault: isDefault,
259 | apiKey: apiKey,
260 | },
261 | });
262 |
263 | return serializeDates(newConfig);
264 | }
265 |
266 | export async function updateConfiguration(
267 | id: string,
268 |   data: Partial<AIConfiguration>,
269 | ): Promise<AIConfiguration> {
270 | const updatedConfig = await prisma.aIConfiguration.update({
271 | where: { id },
272 | data,
273 | });
274 | return serializeDates(updatedConfig);
275 | }
276 |
277 | export async function deleteConfiguration(id: string) {
278 | try {
279 | const deletedConfig = await prisma.aIConfiguration.delete({
280 | where: { id },
281 | });
282 | return serializeDates(deletedConfig);
283 | } catch (error) {
284 | console.error("Error deleting configuration:", error);
285 | throw new Error("Failed to delete configuration");
286 | }
287 | }
288 |
289 | type ConfigurationCost = {
290 | provider: string;
291 | model: string;
292 | inputTokenCost: number;
293 | outputTokenCost: number;
294 | };
295 |
296 | export async function getConfigurationCosts(): Promise<ConfigurationCost[]> {
297 | const modelConfigurations = getModelConfigurations();
298 | return Object.entries(modelConfigurations).flatMap(([provider, models]) =>
299 | Object.entries(models)
300 | .filter(
301 | (entry): entry is [string, NonNullable<(typeof entry)[1]>] =>
302 | entry[1] !== null &&
303 | "inputTokenCost" in entry[1] &&
304 | "outputTokenCost" in entry[1],
305 | )
306 | .map(([model, config]) => ({
307 | provider,
308 | model,
309 | inputTokenCost: config.inputTokenCost,
310 | outputTokenCost: config.outputTokenCost,
311 | })),
312 | );
313 | }
314 |
315 | export { getModelConfigurations };
316 |
317 | export async function setDefaultConfiguration(configId: string): Promise<void> {
318 | try {
319 | await prisma.aIConfiguration.updateMany({
320 | where: { isDefault: true },
321 | data: { isDefault: false },
322 | });
323 | await prisma.aIConfiguration.update({
324 | where: { id: configId },
325 | data: { isDefault: true },
326 | });
327 | } catch (error) {
328 | console.error("Error setting default configuration:", error);
329 | throw error;
330 | }
331 | }
332 |
--------------------------------------------------------------------------------
/src/app/api/configurations/route.ts:
--------------------------------------------------------------------------------
1 | import { NextRequest, NextResponse } from "next/server";
2 | import {
3 | getConfigurations,
4 | createConfiguration,
5 | updateConfiguration,
6 | deleteConfiguration,
7 | } from "@/app/actions";
8 |
9 | export async function GET() {
10 | try {
11 | const configurations = await getConfigurations();
12 | return NextResponse.json(configurations);
13 | } catch (error) {
14 | console.error("Error fetching configurations:", error);
15 | return NextResponse.json(
16 | { error: "Error fetching configurations" },
17 | { status: 500 },
18 | );
19 | }
20 | }
21 |
22 | export async function POST(request: NextRequest) {
23 | try {
24 | const configData = await request.json();
25 | const newConfig = await createConfiguration(configData);
26 | return NextResponse.json(newConfig);
27 | } catch (error) {
28 | console.error("Error creating configuration:", error);
29 | return NextResponse.json(
30 | { error: "Error creating configuration" },
31 | { status: 500 },
32 | );
33 | }
34 | }
35 |
36 | export async function PUT(request: NextRequest) {
37 | try {
38 | const { id, ...data } = await request.json();
39 | const updatedConfig = await updateConfiguration(id, data);
40 | return NextResponse.json(updatedConfig);
41 | } catch (error) {
42 | console.error("Error updating configuration:", error);
43 | return NextResponse.json(
44 | { error: "Error updating configuration" },
45 | { status: 500 },
46 | );
47 | }
48 | }
49 |
50 | export async function DELETE(request: NextRequest) {
51 | const { searchParams } = request.nextUrl;
52 | const id = searchParams.get("id");
53 | if (!id) {
54 | return NextResponse.json(
55 | { error: "Configuration ID is required" },
56 | { status: 400 },
57 | );
58 | }
59 |
60 | try {
61 | await deleteConfiguration(id);
62 | return NextResponse.json({ message: "Configuration deleted successfully" });
63 | } catch (error) {
64 | console.error("Error deleting configuration:", error);
65 | return NextResponse.json(
66 | { error: "Error deleting configuration" },
67 | { status: 500 },
68 | );
69 | }
70 | }
71 |
--------------------------------------------------------------------------------
/src/app/api/logs/[id]/route.ts:
--------------------------------------------------------------------------------
1 | // app/api/logs/[id]/route.ts
2 | import { NextRequest, NextResponse } from 'next/server';
3 | import prisma from '@/lib/prisma';
4 |
5 | interface RouteParams {
6 | params: { id: string };
7 | }
8 |
9 | export async function GET(
10 | request: NextRequest,
11 | { params }: RouteParams
12 | ): Promise<NextResponse> {
13 | try {
14 | const logId = params.id;
15 |
16 | if (!logId) {
17 | return NextResponse.json({ error: 'Invalid log ID' }, { status: 400 });
18 | }
19 |
20 | const log = await prisma.log.findUnique({
21 | where: { id: logId },
22 | });
23 |
24 | if (log) {
25 | return NextResponse.json(log);
26 | } else {
27 | return NextResponse.json({ error: 'Log not found' }, { status: 404 });
28 | }
29 | } catch (error) {
30 | console.error('Error fetching log:', error);
31 | return NextResponse.json(
32 | { error: 'Internal server error' },
33 | { status: 500 }
34 | );
35 | }
36 | }
37 |
--------------------------------------------------------------------------------
/src/app/api/logs/route.ts:
--------------------------------------------------------------------------------
1 | // app/api/logs/route.ts
2 | import { getLogs } from "@/app/actions";
3 | import prisma from "@/lib/prisma"; // Prisma client, used directly for log creation below
4 | import { NextRequest, NextResponse } from "next/server";
5 |
6 | export async function GET(request: NextRequest) {
7 | const { searchParams } = request.nextUrl;
8 | const provider = searchParams.get("provider") || "all";
9 | const startDate = searchParams.get("startDate") || "";
10 | const endDate = searchParams.get("endDate") || "";
11 |
12 | try {
13 | const logs = await getLogs({ provider, startDate, endDate });
14 | return NextResponse.json(logs);
15 | } catch (error) {
16 | console.error("Error fetching logs:", error);
17 | return NextResponse.json({ error: "Error fetching logs" }, { status: 500 });
18 | }
19 | }
20 |
21 | export async function POST(request: NextRequest) {
22 | try {
23 | const logData = await request.json();
24 | const log = await prisma.log.create({
25 | data: {
26 | ...logData,
27 | metadata: logData.metadata as any,
28 | response: logData.response as any,
29 | timestamp: new Date(),
30 | },
31 | });
32 | return NextResponse.json(log);
33 | } catch (error) {
34 | console.error("Error creating log:", error);
35 | return NextResponse.json({ error: "Error creating log" }, { status: 500 });
36 | }
37 | }
38 |
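39 | // Illustrative POST payload (shape inferred from how logs are read elsewhere in
40 | // the app; the authoritative shape is the Prisma `Log` model):
41 | //   {
42 | //     "method": "POST",
43 | //     "url": "/api/test",
44 | //     "headers": { "content-type": "application/json" },
45 | //     "body": { "messages": [] },
46 | //     "response": { "text": "..." },
47 | //     "metadata": { "provider": "openai", "model": "gpt-3.5-turbo", "totalTokens": 30 }
48 | //   }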
--------------------------------------------------------------------------------
/src/app/api/stats/route.ts:
--------------------------------------------------------------------------------
1 | import { NextRequest, NextResponse } from "next/server";
2 | import { getStats } from "@/app/actions";
3 |
4 | export async function GET(request: NextRequest) {
5 | const { searchParams } = request.nextUrl;
6 | const timeFilter = searchParams.get("timeFilter") || "all";
7 |
8 | try {
9 | const stats = await getStats(timeFilter);
10 | return NextResponse.json(stats);
11 | } catch (error) {
12 | console.error("Error fetching stats:", error);
13 | return NextResponse.json(
14 | { error: "Error fetching stats" },
15 | { status: 500 },
16 | );
17 | }
18 | }
19 |
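20 | // Example request (illustrative): GET /api/stats?timeFilter=all
21 | // `timeFilter` defaults to "all" when omitted; the accepted values are whatever
22 | // `getStats` in @/app/actions understands.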
--------------------------------------------------------------------------------
/src/app/configurations/page.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | import { useState, useEffect, useMemo, useCallback } from "react";
4 | import { Card, CardHeader, CardTitle, CardContent } from "@/components/ui/card";
5 | import { Button } from "@/components/ui/button";
6 | import { Input } from "@/components/ui/input";
7 | import { Label } from "@/components/ui/label";
8 | import {
9 | Table,
10 | TableBody,
11 | TableCell,
12 | TableHead,
13 | TableHeader,
14 | TableRow,
15 | } from "@/components/ui/table";
16 | import {
17 | Dialog,
18 | DialogContent,
19 | DialogHeader,
20 | DialogTitle,
21 | DialogTrigger,
22 | } from "@/components/ui/dialog";
23 | import { Switch } from "@/components/ui/switch";
24 | import { PlusIcon, PencilIcon, Trash2Icon } from "lucide-react";
25 | import {
26 | Select,
27 | SelectContent,
28 | SelectItem,
29 | SelectTrigger,
30 | SelectValue,
31 | } from "@/components/ui/select";
32 | import {
33 | getConfigurations,
34 | updateDefaultConfiguration,
35 | createConfiguration,
36 | deleteConfiguration,
37 | updateConfiguration,
38 | } from "../actions";
39 | import {
40 | getModelConfigurations,
41 | type ModelConfigurations,
42 | } from "@/lib/model-config";
43 | import { ConfigurationModal } from "@/components/ConfigurationModal";
44 |
45 | interface AIConfiguration {
46 | id: string;
47 | name: string;
48 | provider: string;
49 | model: string;
50 | temperature: number | null;
51 | maxTokens: number | null;
52 | topP: number | null;
53 | frequencyPenalty: number | null;
54 | presencePenalty: number | null;
55 | isDefault: boolean;
56 | createdAt: Date;
57 | updatedAt: Date;
58 | apiKey: string | null;
59 | }
60 |
61 | export default function ConfigurationsPage() {
62 | const [configurations, setConfigurations] = useState<AIConfiguration[]>([]);
63 | const [isAddModalOpen, setIsAddModalOpen] = useState(false);
64 | const [isEditModalOpen, setIsEditModalOpen] = useState(false);
65 | const [editingConfig, setEditingConfig] = useState<AIConfiguration | null>(
66 | null,
67 | );
68 | const [error, setError] = useState<string | null>(null);
69 | const modelConfigurations: ModelConfigurations = useMemo(
70 | () => getModelConfigurations(),
71 | [],
72 | );
73 |
74 | const sortedConfigurations = useMemo(() => {
75 | return [...configurations].sort((a, b) => {
76 | if (a.provider !== b.provider) {
77 | return a.provider.localeCompare(b.provider);
78 | }
79 | return a.name.localeCompare(b.name);
80 | });
81 | }, [configurations]);
82 |
83 | useEffect(() => {
84 | fetchConfigurations();
85 | }, []);
86 |
87 | const fetchConfigurations = useCallback(async () => {
88 | try {
89 | const configData = await getConfigurations();
90 | setConfigurations(configData as AIConfiguration[]);
91 | } catch (error) {
92 | console.error("Error fetching configurations:", error);
93 | setError("Error loading configurations");
94 | }
95 | }, []);
96 |
97 | const handleCreateConfig = useCallback(
98 | async (newConfig: Partial<AIConfiguration>) => {
99 | try {
100 | await createConfiguration(newConfig);
101 | fetchConfigurations();
102 | } catch (error) {
103 | console.error("Error creating configuration:", error);
104 | setError("Error creating configuration. Please try again.");
105 | }
106 | },
107 | [fetchConfigurations],
108 | );
109 |
110 | const handleToggleDefault = useCallback(
111 | async (configId: string, isDefault: boolean) => {
112 | try {
113 | await updateDefaultConfiguration(configId);
114 | fetchConfigurations();
115 | } catch (error) {
116 | console.error("Error updating default configuration:", error);
117 | setError("Error updating default configuration");
118 | }
119 | },
120 | [fetchConfigurations],
121 | );
122 |
123 | const handleDeleteConfig = useCallback(
124 | async (configId: string) => {
125 | if (confirm("Are you sure you want to delete this configuration?")) {
126 | try {
127 | await deleteConfiguration(configId);
128 | fetchConfigurations();
129 | } catch (error) {
130 | console.error("Error deleting configuration:", error);
131 | setError("Failed to delete configuration. Please try again.");
132 | }
133 | }
134 | },
135 | [fetchConfigurations],
136 | );
137 |
138 | const handleEditConfig = useCallback(
139 | async (updatedConfig: Partial<AIConfiguration>) => {
140 | if (!editingConfig) return;
141 | try {
142 | await updateConfiguration(editingConfig.id, updatedConfig);
143 | fetchConfigurations();
144 | setEditingConfig(null);
145 | } catch (error) {
146 | console.error("Error updating configuration:", error);
147 | setError("Error updating configuration. Please try again.");
148 | }
149 | },
150 | [editingConfig, fetchConfigurations],
151 | );
152 |
153 | const openEditModal = useCallback((config: AIConfiguration) => {
154 | setEditingConfig(config);
155 | setIsEditModalOpen(true);
156 | }, []);
157 |
158 | return (
159 |
160 |
AI Configurations
161 |
162 |
163 |
164 | Configuration List
165 |
166 |
167 |
168 |
169 |
170 | Name
171 | Provider
172 | Model
173 | Temperature
174 | Max Tokens
175 | Default
176 | Actions
177 |
178 |
179 |
180 | {sortedConfigurations.map((config) => (
181 |
182 | {config.name}
183 | {config.provider}
184 | {config.model}
185 | {config.temperature}
186 | {config.maxTokens}
187 |
188 |
191 | handleToggleDefault(config.id, checked)
192 | }
193 | />
194 |
195 |
196 |
197 |
204 |
212 |
213 |
214 |
215 | ))}
216 |
217 |
218 |
219 |
220 |
221 |
225 |
226 |
setIsAddModalOpen(false)}
229 | onSave={handleCreateConfig}
230 | title="Add New Configuration"
231 | />
232 |
233 | {
236 | setIsEditModalOpen(false);
237 | setEditingConfig(null);
238 | }}
239 | onSave={handleEditConfig}
240 | initialConfig={editingConfig || undefined}
241 | title="Edit Configuration"
242 | />
243 |
244 | );
245 | }
246 |
--------------------------------------------------------------------------------
/src/app/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HamedMP/CursorLens/6577121583bae3c0349a2958d81cdbcefec79f89/src/app/favicon.ico
--------------------------------------------------------------------------------
/src/app/globals.css:
--------------------------------------------------------------------------------
1 | @tailwind base;
2 | @tailwind components;
3 | @tailwind utilities;
4 |
5 | @layer base {
6 | :root {
7 | --background: 0 0% 100%;
8 | --foreground: 20 14.3% 4.1%;
9 | --card: 0 0% 100%;
10 | --card-foreground: 20 14.3% 4.1%;
11 | --popover: 0 0% 100%;
12 | --popover-foreground: 20 14.3% 4.1%;
13 | --primary: 24 9.8% 10%;
14 | --primary-foreground: 60 9.1% 97.8%;
15 | --secondary: 60 4.8% 95.9%;
16 | --secondary-foreground: 24 9.8% 10%;
17 | --muted: 60 4.8% 95.9%;
18 | --muted-foreground: 25 5.3% 44.7%;
19 | --accent: 60 4.8% 95.9%;
20 | --accent-foreground: 24 9.8% 10%;
21 | --destructive: 0 84.2% 60.2%;
22 | --destructive-foreground: 60 9.1% 97.8%;
23 | --border: 20 5.9% 90%;
24 | --input: 20 5.9% 90%;
25 | --ring: 20 14.3% 4.1%;
26 | --radius: 0.5rem;
27 | }
28 |
29 | .dark {
30 | --background: 0 0% 10%;
31 | --foreground: 60 9.1% 97.8%;
32 | --card: 20 14.3% 4.1%;
33 | --card-foreground: 60 9.1% 97.8%;
34 | --popover: 20 14.3% 4.1%;
35 | --popover-foreground: 60 9.1% 97.8%;
36 | --primary: 60 9.1% 97.8%;
37 | --primary-foreground: 24 9.8% 10%;
38 | --secondary: 12 6.5% 15.1%;
39 | --secondary-foreground: 60 9.1% 97.8%;
40 | --muted: 12 6.5% 15.1%;
41 | --muted-foreground: 24 5.4% 63.9%;
42 | --accent: 12 6.5% 15.1%;
43 | --accent-foreground: 60 9.1% 97.8%;
44 | --destructive: 0 62.8% 30.6%;
45 | --destructive-foreground: 60 9.1% 97.8%;
46 | --border: 12 6.5% 15.1%;
47 | --input: 12 6.5% 15.1%;
48 | --ring: 24 5.7% 82.9%;
49 | }
50 | }
51 |
52 | @layer base {
53 | * {
54 | @apply border-border;
55 | }
56 | body {
57 | @apply bg-background text-foreground;
58 | }
59 | }
60 |
61 | :root {
62 | --chart-1: 12 76% 61%;
63 | --chart-2: 173 58% 39%;
64 | --chart-3: 197 37% 24%;
65 | --chart-4: 43 74% 66%;
66 | --chart-5: 27 87% 67%;
67 | }
68 |
69 | .dark {
70 | --chart-1: 220 70% 50%;
71 | --chart-2: 160 60% 45%;
72 | --chart-3: 30 80% 55%;
73 | --chart-4: 280 65% 60%;
74 | --chart-5: 340 75% 55%;
75 | }
76 |
--------------------------------------------------------------------------------
/src/app/layout.tsx:
--------------------------------------------------------------------------------
1 | import type { Metadata } from "next";
2 | import { Inter } from "next/font/google";
3 | import "./globals.css";
4 | import NavBar from "@/components/NavBar";
5 | import Link from "next/link";
6 | import { ThemeProvider } from "@/components/theme-provider";
7 | import { Toaster } from "@/components/ui/sonner";
8 |
9 | const inter = Inter({ subsets: ["latin"] });
10 |
11 | export const metadata: Metadata = {
12 | title: "Cursor Lens",
13 | description: "Analytics and Routing for Cursor IDE",
14 | metadataBase: new URL("https://cursorlens.com"),
15 | };
16 |
17 | export default function RootLayout({
18 | children,
19 | }: Readonly<{
20 | children: React.ReactNode;
21 | }>) {
22 | return (
23 |
24 |
25 |
31 |
32 |
33 |
34 |
35 |
50 |
Cursor Lens
51 |
52 |
53 |
54 |
55 |
{children}
56 |
57 |
58 |
59 |
60 |
61 | );
62 | }
63 |
--------------------------------------------------------------------------------
/src/app/logs/page.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | import { useState, useEffect, useCallback, useMemo } from "react";
4 | import { format } from "date-fns";
5 | import { Calendar as CalendarIcon } from "lucide-react";
6 | import { RefreshCw } from "lucide-react";
7 | import LogsList from "@/components/LogsList";
8 | import LogDetails from "@/components/LogDetails";
9 | import { getLogs, getStats } from "@/app/actions";
10 | import { Skeleton } from "@/components/ui/skeleton";
11 | import {
12 | Select,
13 | SelectContent,
14 | SelectItem,
15 | SelectTrigger,
16 | SelectValue,
17 | } from "@/components/ui/select";
18 | import { Button } from "@/components/ui/button";
19 | import { Calendar } from "@/components/ui/calendar";
20 | import {
21 | Popover,
22 | PopoverContent,
23 | PopoverTrigger,
24 | } from "@/components/ui/popover";
25 | import { cn } from "@/lib/utils";
26 | import { debounce } from "lodash";
27 |
28 | interface Stats {
29 | totalLogs: number;
30 | totalTokens: number;
31 | perModelStats: {
32 | [key: string]: {
33 | logs: number;
34 | tokens: number;
35 | };
36 | };
37 | }
38 |
39 | export default function Logs() {
40 | const [logs, setLogs] = useState([]);
41 | const [stats, setStats] = useState<Stats | null>(null);
42 | const [selectedLogId, setSelectedLogId] = useState<string | undefined>(
43 | undefined,
44 | );
45 | const [error, setError] = useState<string | null>(null);
46 | const [provider, setProvider] = useState("all");
47 | const [startDate, setStartDate] = useState<Date | undefined>(undefined);
48 | const [endDate, setEndDate] = useState<Date | undefined>(undefined);
49 | const [isLoading, setIsLoading] = useState(true);
50 |
51 | const handleLogSelect = (logId: string) => {
52 | setSelectedLogId(logId);
53 | };
54 |
55 | const fetchData = useCallback(async () => {
56 | setIsLoading(true);
57 | setError(null);
58 |
59 | try {
60 | const [fetchedLogs, fetchedStats] = await Promise.all([
61 | getLogs({
62 | provider,
63 | startDate: startDate ? format(startDate, "yyyy-MM-dd") : undefined,
64 | endDate: endDate ? format(endDate, "yyyy-MM-dd") : undefined,
65 | }),
66 | getStats(),
67 | ]);
68 |
69 | setLogs(fetchedLogs);
70 | setStats(fetchedStats);
71 | } catch (error) {
72 | setError("Error loading data");
73 | } finally {
74 | setIsLoading(false);
75 | }
76 | }, [provider, startDate, endDate]);
77 |
78 | const debouncedFetchData = useMemo(
79 | () => debounce(fetchData, 500),
80 | [fetchData],
81 | );
82 |
83 | useEffect(() => {
84 | debouncedFetchData();
85 | // Cleanup function to cancel any pending debounced calls
86 | return () => debouncedFetchData.cancel();
87 | }, [debouncedFetchData]);
88 |
89 | const handleRefresh = useCallback(() => {
90 | fetchData();
91 | }, [fetchData]);
92 |
93 | const LoadingSkeleton = () => (
94 | <>
95 | {[...Array(5)].map((_, i) => (
96 |
97 |
98 |
99 |
100 |
101 |
102 |
103 | ))}
104 | >
105 | );
106 |
107 | return (
108 |
109 |
110 |
111 |
112 |
113 |
Logs List
114 |
124 |
125 |
126 |
127 |
141 |
142 |
143 |
157 |
158 |
159 |
165 |
166 |
167 |
168 |
169 |
179 |
180 |
181 |
187 |
188 |
189 |
190 |
191 | {isLoading ? (
192 |
193 | ) : error ? (
194 |
{error}
195 | ) : logs.length > 0 ? (
196 |
201 | ) : (
202 |
No logs found.
203 | )}
204 |
205 |
206 |
207 |
208 | Log Details
209 |
210 |
211 |
212 | {selectedLogId ? (
213 |
214 | ) : (
215 |
Select a log to view details.
216 | )}
217 |
218 |
219 |
220 |
221 |
222 | );
223 | }
224 |
--------------------------------------------------------------------------------
/src/app/opengraph-image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HamedMP/CursorLens/6577121583bae3c0349a2958d81cdbcefec79f89/src/app/opengraph-image.png
--------------------------------------------------------------------------------
/src/app/page.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
4 | import { Label } from "@/components/ui/label";
5 | import {
6 | Select,
7 | SelectContent,
8 | SelectItem,
9 | SelectTrigger,
10 | SelectValue,
11 | } from "@/components/ui/select";
12 | import { Skeleton } from "@/components/ui/skeleton";
13 | import { ArrowRight } from "lucide-react";
14 | import Link from "next/link";
15 | import { useRouter } from "next/navigation";
16 | import { useEffect, useState } from "react";
17 | import LogsList from "../components/LogsList";
18 | import { Log } from "../types/logs";
19 | import {
20 | createConfiguration,
21 | getConfigurations,
22 | getLogs,
23 | getStats,
24 | updateDefaultConfiguration,
25 | } from "./actions";
26 |
27 | interface Stats {
28 | totalLogs: number;
29 | totalTokens: number;
30 | }
31 |
32 | interface AIConfiguration {
33 | id: string;
34 | name: string;
35 | provider: string;
36 | model: string;
37 | temperature: number | null;
38 | maxTokens: number | null;
39 | topP: number | null;
40 | frequencyPenalty: number | null;
41 | presencePenalty: number | null;
42 | isDefault: boolean;
43 | createdAt: Date;
44 | updatedAt: Date;
45 | }
46 |
47 | export default function Home() {
48 | const [logs, setLogs] = useState<Log[]>([]);
49 | const [stats, setStats] = useState<Stats>({ totalLogs: 0, totalTokens: 0 });
50 | const [aiConfigurations, setAIConfigurations] = useState<AIConfiguration[]>(
51 | [],
52 | );
53 | const [selectedConfig, setSelectedConfig] = useState("");
54 | const [loading, setLoading] = useState(true);
55 | const [error, setError] = useState<string | null>(null);
56 | const router = useRouter();
57 | const [newConfigName, setNewConfigName] = useState("");
58 | const [newConfigModel, setNewConfigModel] = useState("");
59 | const [newConfigProvider, setNewConfigProvider] = useState("");
60 | const [isDialogOpen, setIsDialogOpen] = useState(false);
61 |
62 | useEffect(() => {
63 | const fetchData = async () => {
64 | try {
65 | const [logsData, statsData, configData] = await Promise.all([
66 | getLogs(),
67 | getStats(),
68 | getConfigurations(),
69 | ]);
70 |
71 | setLogs(logsData as unknown as Log[]); // Type assertion
72 | setStats(statsData);
73 | setAIConfigurations(configData as AIConfiguration[]); // Type assertion
74 | const defaultConfig = configData.find((config) => config.isDefault);
75 | setSelectedConfig(defaultConfig ? defaultConfig.name : "");
76 |
77 | setLoading(false);
78 | } catch (error) {
79 | console.error("Error fetching data:", error);
80 | setError("Error loading data. Please try again later.");
81 | setLoading(false);
82 | }
83 | };
84 | fetchData();
85 | }, []);
86 |
87 | const handleConfigChange = async (configName: string) => {
88 | setSelectedConfig(configName);
89 | try {
90 | await updateDefaultConfiguration(configName);
91 | router.refresh();
92 | } catch (error) {
93 | console.error("Error updating configuration:", error);
94 | setError("Error updating configuration");
95 | }
96 | };
97 |
98 | const handleCreateConfig = async () => {
99 | if (!newConfigName || !newConfigModel || !newConfigProvider) {
100 | setError(
101 | "Please provide name, model, and provider for the new configuration",
102 | );
103 | return;
104 | }
105 |
106 | try {
107 | await createConfiguration({
108 | name: newConfigName,
109 | model: newConfigModel,
110 | provider: newConfigProvider,
111 | isDefault: false,
112 | });
113 |
114 | const configData = await getConfigurations();
115 | setAIConfigurations(configData as AIConfiguration[]); // Type assertion
116 |
117 | setNewConfigName("");
118 | setNewConfigModel("");
119 | setNewConfigProvider("");
120 |
121 | setError(null);
122 | setIsDialogOpen(false);
123 | } catch (error) {
124 | console.error("Error creating configuration:", error);
125 | setError("Error creating configuration");
126 | }
127 | };
128 |
129 | const handleLogSelect = (logId: string) => {
130 | router.push(`/logs?selectedLogId=${logId}`);
131 | };
132 |
133 | if (loading)
134 | return (
135 |
136 |
137 |
138 |
139 |
140 |
141 |
142 |
143 |
144 | );
145 | if (error) return Error: {error}
;
146 |
147 | return (
148 |
149 |
182 |
183 |
184 |
185 |
186 | Total Logs
187 |
188 |
189 | {stats.totalLogs}
190 |
191 |
192 |
193 |
194 | Total Tokens Used
195 |
196 |
197 |
198 | {stats.totalTokens.toLocaleString()}
199 |
200 |
201 |
202 |
203 |
204 |
205 |
206 | AI Configuration
207 |
208 |
209 |
210 |
211 |
223 |
224 |
228 | Manage Configurations
229 |
230 |
231 |
232 |
233 |
234 |
235 |
236 | Recent Logs
237 |
238 |
239 |
240 |
245 |
246 |
250 | View all logs
251 |
252 |
253 |
254 |
255 | );
256 | }
257 |
--------------------------------------------------------------------------------
/src/app/stats/page.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | import { useState, useEffect } from "react";
4 | import { Card, CardHeader, CardTitle, CardContent } from "@/components/ui/card";
5 | import { getStats, getConfigurations, getConfigurationCosts } from "../actions";
6 | import type { AIConfiguration } from "@prisma/client";
7 | import {
8 | BarChart,
9 | Bar,
10 | XAxis,
11 | YAxis,
12 | CartesianGrid,
13 | ResponsiveContainer,
14 | LineChart,
15 | Line,
16 | PieChart,
17 | Pie,
18 | Cell,
19 | } from "recharts";
20 | import {
21 | ChartContainer,
22 | ChartTooltip,
23 | ChartTooltipContent,
24 | ChartLegend,
25 | ChartLegendContent,
26 | } from "@/components/ui/chart";
27 | import {
28 | Select,
29 | SelectContent,
30 | SelectItem,
31 | SelectTrigger,
32 | SelectValue,
33 | } from "@/components/ui/select";
34 | import { Skeleton } from "@/components/ui/skeleton";
35 | import { Checkbox } from "@/components/ui/checkbox";
36 |
37 | interface Stats {
38 | totalLogs: number;
39 | totalTokens: number;
40 | totalPromptTokens: number;
41 | totalCompletionTokens: number;
42 | perModelProviderStats: {
43 | [key: string]: {
44 | logs: number;
45 | tokens: number;
46 | promptTokens: number;
47 | completionTokens: number;
48 | cost: number;
49 | provider: string;
50 | model: string;
51 | };
52 | };
53 | tokenUsageOverTime: {
54 | date: string;
55 | tokens: number;
56 | promptTokens: number;
57 | completionTokens: number;
58 | }[];
59 | }
60 |
61 | const chartConfig = {
62 | logs: {
63 | label: "Logs",
64 | color: "hsl(var(--chart-1))",
65 | },
66 | tokens: {
67 | label: "Total Tokens",
68 | color: "hsl(var(--chart-2))",
69 | },
70 | promptTokens: {
71 | label: "Input Tokens",
72 | color: "hsl(var(--chart-3))",
73 | },
74 | completionTokens: {
75 | label: "Output Tokens",
76 | color: "hsl(var(--chart-4))",
77 | },
78 | cost: {
79 | label: "Cost ($)",
80 | color: "hsl(var(--chart-5))",
81 | },
82 | };
83 |
84 | export default function StatsPage() {
85 | const [stats, setStats] = useState<Stats | null>(null);
86 | const [configurations, setConfigurations] = useState<AIConfiguration[]>([]);
87 | const [loading, setLoading] = useState(true);
88 | const [error, setError] = useState<string | null>(null);
89 | const [timeFilter, setTimeFilter] = useState("all");
90 | const [tokenUsageOverTime, setTokenUsageOverTime] = useState<
91 | { date: string; tokens: number }[]
92 | >([]);
93 | const [selectedMetrics, setSelectedMetrics] = useState([
94 | "logs",
95 | "tokens",
96 | "cost",
97 | ]);
98 |
99 | useEffect(() => {
100 | const fetchData = async () => {
101 | try {
102 | const [statsData, configData, costsData] = await Promise.all([
103 | getStats(timeFilter),
104 | getConfigurations(),
105 | getConfigurationCosts(),
106 | ]);
107 |
108 | const perModelProviderStats: Stats["perModelProviderStats"] = {};
109 | for (const config of configData) {
110 | perModelProviderStats[`${config.provider}:${config.model}`] = {
111 | logs: 0,
112 | tokens: 0,
113 | promptTokens: 0,
114 | completionTokens: 0,
115 | cost: 0,
116 | provider: config.provider,
117 | model: config.model,
118 | };
119 | }
120 |
121 | for (const [key, modelStats] of Object.entries(
122 | statsData.perModelProviderStats,
123 | )) {
124 | const [provider, model] = key.split(":");
125 | const costData = costsData.find(
126 | (c) => c.provider === provider && c.model === model,
127 | );
128 | const inputTokenCost = costData?.inputTokenCost || 0;
129 | const outputTokenCost = costData?.outputTokenCost || 0;
130 |
131 | perModelProviderStats[key] = {
132 | ...modelStats,
133 | cost:
134 | modelStats.promptTokens * inputTokenCost +
135 | modelStats.completionTokens * outputTokenCost,
136 | };
137 | }
138 |
139 | setStats({
140 | totalLogs: statsData.totalLogs,
141 | totalTokens: statsData.totalTokens,
142 | totalPromptTokens: statsData.totalPromptTokens,
143 | totalCompletionTokens: statsData.totalCompletionTokens,
144 | perModelProviderStats,
145 | tokenUsageOverTime: statsData.tokenUsageOverTime,
146 | });
147 | setTokenUsageOverTime(statsData.tokenUsageOverTime);
148 | setConfigurations(configData);
149 | setLoading(false);
150 | } catch (error) {
151 | console.error("Error fetching data:", error);
152 | setError("Error loading data. Please try again later.");
153 | setLoading(false);
154 | }
155 | };
156 | fetchData();
157 | }, [timeFilter]);
158 |
159 | const handleMetricToggle = (metric: string) => {
160 | setSelectedMetrics((prev) =>
161 | prev.includes(metric)
162 | ? prev.filter((m) => m !== metric)
163 | : [...prev, metric],
164 | );
165 | };
166 |
167 | if (loading) {
168 | return (
169 |
170 |
171 |
172 | {["card1", "card2", "card3"].map((key) => (
173 |
174 |
175 |
176 |
177 |
178 |
179 |
180 |
181 | ))}
182 |
183 | {["card1", "card2", "card3"].map((key) => (
184 |
185 |
186 |
187 |
188 |
189 |
190 |
191 |
192 | ))}
193 |
194 | );
195 | }
196 |
197 | if (error) {
198 | return (
199 |
200 |
201 |
202 | Error
203 |
204 |
205 | {error}
206 |
207 |
208 |
209 | );
210 | }
211 |
212 | if (!stats) return null;
213 |
214 | const chartData = Object.entries(stats.perModelProviderStats).map(
215 | ([key, data]) => ({
216 | provider: data.provider,
217 | model: data.model,
218 | logs: data.logs,
219 | tokens: data.tokens,
220 | promptTokens: data.promptTokens,
221 | completionTokens: data.completionTokens,
222 | cost: data.cost,
223 | }),
224 | );
225 |
226 | const pieChartData = Object.entries(stats.perModelProviderStats).map(
227 | ([key, data]) => ({
228 | name: key,
229 | value: data.logs,
230 | }),
231 | );
232 |
233 | const COLORS = ["#0088FE", "#00C49F", "#FFBB28", "#FF8042", "#8884D8"];
234 |
235 | return (
236 |
237 |
Analytics Dashboard
238 |
239 |
240 |
254 |
255 |
256 |
257 |
258 |
259 | Total Logs
260 |
261 |
262 |
263 | {stats.totalLogs.toLocaleString()}
264 |
265 |
266 |
267 |
268 |
269 | Total Tokens
270 |
271 |
272 |
273 | {stats.totalTokens.toLocaleString()}
274 |
275 |
276 |
277 |
278 |
279 | Total Cost
280 |
281 |
282 |
283 | $
284 | {Object.values(stats.perModelProviderStats)
285 | .reduce((sum, data) => sum + data.cost, 0)
286 | .toFixed(2)}
287 |
288 |
289 |
290 |
291 |
292 |
293 | {Object.entries(chartConfig).map(([key, config]) => (
294 |
295 | handleMetricToggle(key)}
299 | />
300 |
306 |
307 | ))}
308 |
309 |
310 |
311 |
312 | Per Model and Provider Statistics
313 |
314 |
315 |
316 |
317 |
318 |
319 |
326 |
327 |
332 | {
334 | if (active && payload && payload.length) {
335 | return (
336 |
337 |
{`${payload[0].payload.provider}: ${payload[0].payload.model}`}
338 | {payload.map((entry) => (
339 |
340 | {`${entry.name}: ${entry.value}`}
341 |
342 | ))}
343 |
344 | );
345 | }
346 | return null;
347 | }}
348 | />
349 | } />
350 | {selectedMetrics.includes("logs") && (
351 |
357 | )}
358 | {selectedMetrics.includes("tokens") && (
359 |
365 | )}
366 | {selectedMetrics.includes("promptTokens") && (
367 |
373 | )}
374 | {selectedMetrics.includes("completionTokens") && (
375 |
381 | )}
382 | {selectedMetrics.includes("cost") && (
383 |
389 | )}
390 |
391 |
392 |
393 |
394 |
395 |
396 |
397 |
398 | Token Usage Over Time
399 |
400 |
401 |
402 |
403 |
404 |
405 |
406 |
407 | } />
408 | } />
409 |
415 |
416 |
417 |
418 |
419 |
420 |
421 |
422 |
423 | Model and Provider Usage Distribution
424 |
425 |
426 |
427 |
428 |
429 |
438 | `${name} ${(percent * 100).toFixed(0)}%`
439 | }
440 | >
441 | {pieChartData.map((entry) => (
442 | |
446 | ))}
447 |
448 | } />
449 | } />
450 |
451 |
452 |
453 |
454 |
455 |
456 | );
457 | }
458 |
--------------------------------------------------------------------------------
/src/app/twitter-image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HamedMP/CursorLens/6577121583bae3c0349a2958d81cdbcefec79f89/src/app/twitter-image.png
--------------------------------------------------------------------------------
/src/components/ConfigurationModal.tsx:
--------------------------------------------------------------------------------
1 | import React, { useState, useCallback, useMemo } from "react";
2 | import { Button } from "@/components/ui/button";
3 | import { Input } from "@/components/ui/input";
4 | import { Label } from "@/components/ui/label";
5 | import {
6 | Dialog,
7 | DialogContent,
8 | DialogHeader,
9 | DialogTitle,
10 | } from "@/components/ui/dialog";
11 | import {
12 | Select,
13 | SelectContent,
14 | SelectItem,
15 | SelectTrigger,
16 | SelectValue,
17 | } from "@/components/ui/select";
18 | import { AIConfiguration, ModelConfigurations } from "@/types";
19 | import { getModelConfigurations } from "@/lib/model-config";
20 |
21 | interface ConfigurationModalProps {
22 | isOpen: boolean;
23 | onClose: () => void;
24 | onSave: (config: Partial<AIConfiguration>) => Promise<void>;
25 | initialConfig?: Partial<AIConfiguration>;
26 | title: string;
27 | }
28 |
29 | export function ConfigurationModal({
30 | isOpen,
31 | onClose,
32 | onSave,
33 | initialConfig,
34 | title,
35 | }: ConfigurationModalProps) {
36 | const [config, setConfig] = useState<Partial<AIConfiguration>>(
37 | initialConfig || {},
38 | );
39 | const [error, setError] = useState<string | null>(null);
40 | const [selectedProvider, setSelectedProvider] = useState(
41 | initialConfig?.provider || "",
42 | );
43 | const [selectedModel, setSelectedModel] = useState(
44 | initialConfig?.model || "",
45 | );
46 | const [customProvider, setCustomProvider] = useState("");
47 | const [customModel, setCustomModel] = useState("");
48 | const modelConfigurations: ModelConfigurations = useMemo(
49 | () => getModelConfigurations(),
50 | [],
51 | );
52 |
53 | const handleSave = useCallback(async () => {
54 | if (!config.name || !config.provider || !config.model) {
55 | setError("Name, provider, and model are required fields");
56 | return;
57 | }
58 |
59 | try {
60 | await onSave(config);
61 | onClose();
62 | } catch (error) {
63 | console.error("Error saving configuration:", error);
64 | setError("Error saving configuration. Please try again.");
65 | }
66 | }, [config, onSave, onClose]);
67 |
68 | const handleTemplateSelect = useCallback(
69 | (provider: string, model: string): void => {
70 | const providerConfigs = modelConfigurations[provider];
71 |
72 | if (!providerConfigs) {
73 | console.error(`No configurations found for provider: ${provider}`);
74 | return;
75 | }
76 |
77 | const modelConfig = providerConfigs[model];
78 |
79 | if (
80 | !modelConfig ||
81 | !("isTemplate" in modelConfig) ||
82 | !modelConfig.isTemplate
83 | ) {
84 | console.error(
85 | `No valid template configuration found for model: ${model}`,
86 | );
87 | return;
88 | }
89 |
90 | const readableName = `${provider.charAt(0).toUpperCase() + provider.slice(1)} ${model}`;
91 |
92 | setConfig({
93 | ...modelConfig,
94 | name: readableName,
95 | provider,
96 | model,
97 | });
98 | setSelectedProvider(provider);
99 | setSelectedModel(model);
100 | },
101 | [modelConfigurations],
102 | );
103 |
104 | const templateButtons = useMemo(
105 | () =>
106 | Object.entries(modelConfigurations).flatMap(([provider, models]) =>
107 | Object.entries(models)
108 | .filter(
109 | ([_, config]) =>
110 | config && "isTemplate" in config && config.isTemplate,
111 | )
112 | .map(([model, config]) => (
113 |
123 | )),
124 | ),
125 | [modelConfigurations, handleTemplateSelect],
126 | );
127 |
128 | const handleProviderChange = useCallback(
129 | (value: string) => {
130 | setSelectedProvider(value);
131 | setSelectedModel("");
132 | setConfig({
133 | ...config,
134 | provider: value === "other" ? "" : value,
135 | model: "",
136 | });
137 | setCustomProvider("");
138 | },
139 | [config],
140 | );
141 |
142 | const handleModelChange = useCallback(
143 | (value: string) => {
144 | setSelectedModel(value);
145 | setConfig({ ...config, model: value === "other" ? "" : value });
146 | setCustomModel("");
147 | },
148 | [config],
149 | );
150 |
151 | const handleCustomProviderChange = useCallback(
152 | (e: React.ChangeEvent<HTMLInputElement>) => {
153 | setCustomProvider(e.target.value);
154 | setConfig({ ...config, provider: e.target.value });
155 | },
156 | [config],
157 | );
158 |
159 | const handleCustomModelChange = useCallback(
160 | (e: React.ChangeEvent<HTMLInputElement>) => {
161 | setCustomModel(e.target.value);
162 | setConfig({ ...config, model: e.target.value });
163 | },
164 | [config],
165 | );
166 |
167 | return (
168 |
268 | );
269 | }
270 |
--------------------------------------------------------------------------------
/src/components/LogDetails.test.tsx:
--------------------------------------------------------------------------------
1 | import "@testing-library/jest-dom";
2 | import { act, render, screen, waitFor } from "@testing-library/react";
3 | import React from "react";
4 | import { beforeEach, describe, expect, it, Mock, vi } from "vitest";
5 | import LogDetails from "./LogDetails";
6 |
7 | describe("LogDetails", () => {
8 | const mockLogId = "123";
9 | const mockLog = {
10 | id: 123,
11 | method: "POST",
12 | url: "/api/test",
13 | headers: { "Content-Type": "application/json" },
14 | body: { message: "Test body" },
15 | response: { result: "Success" },
16 | timestamp: "2023-04-01T12:00:00Z",
17 | metadata: {
18 | provider: "openai",
19 | model: "gpt-3.5-turbo",
20 | inputTokens: 10,
21 | outputTokens: 20,
22 | totalTokens: 30,
23 | inputCost: 0.0001,
24 | outputCost: 0.0002,
25 | totalCost: 0.0003,
26 | },
27 | };
28 | let fetchResolve: (value: unknown) => void;
29 |
30 | beforeEach(() => {
31 | vi.resetAllMocks();
32 | global.fetch = vi.fn(
33 | () =>
34 | new Promise((resolve) => {
35 | fetchResolve = resolve;
36 | }),
37 | ) as Mock;
38 | });
39 |
40 | it("renders loading skeleton when log is not loaded", async () => {
41 | render(<LogDetails logId={mockLogId} />);
42 |
43 | expect(screen.getAllByRole("status").length).toBeGreaterThan(0);
44 | expect(screen.getAllByLabelText(/loading/i).length).toBeGreaterThan(0);
45 |
46 | fetchResolve({
47 | ok: true,
48 | json: () => Promise.resolve(mockLog),
49 | });
50 |
51 | await waitFor(() => {
52 | expect(screen.queryByRole("status")).not.toBeInTheDocument();
53 | });
54 | });
55 |
56 | it("renders error message when fetch fails", async () => {
57 | (global.fetch as ReturnType<typeof vi.fn>).mockRejectedValueOnce(
58 | new Error("Failed to fetch"),
59 | );
60 |
61 | render(<LogDetails logId={mockLogId} />);
62 |
63 | await waitFor(() => {
64 | expect(screen.getByText("Error fetching log data")).toBeInTheDocument();
65 | });
66 | });
67 |
68 | it("renders log details when fetch is successful", async () => {
69 | render(<LogDetails logId={mockLogId} />);
70 |
71 | fetchResolve({
72 | ok: true,
73 | json: () => Promise.resolve(mockLog),
74 | });
75 |
76 | await waitFor(() => {
77 | expect(screen.getByText("POST /api/test")).toBeInTheDocument();
78 | expect(screen.getByText("2023-04-01T12:00:00Z")).toBeInTheDocument();
79 | expect(screen.getByText("Input Tokens")).toBeInTheDocument();
80 | expect(screen.getByText("10")).toBeInTheDocument();
81 | expect(screen.getByText("Output Tokens")).toBeInTheDocument();
82 | expect(screen.getByText("20")).toBeInTheDocument();
83 | expect(screen.getByText("Total Tokens")).toBeInTheDocument();
84 | expect(screen.getByText("30")).toBeInTheDocument();
85 | expect(screen.getByText("Input Cost")).toBeInTheDocument();
86 | expect(screen.getByText("$0.0001")).toBeInTheDocument();
87 | expect(screen.getByText("Output Cost")).toBeInTheDocument();
88 | expect(screen.getByText("$0.0002")).toBeInTheDocument();
89 | expect(screen.getByText("Total Cost")).toBeInTheDocument();
90 | expect(screen.getByText("$0.0003")).toBeInTheDocument();
91 | });
92 | });
93 | });
94 |
--------------------------------------------------------------------------------
/src/components/LogDetails.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | import { Alert, AlertDescription } from "@/components/ui/alert";
4 | import { Button } from "@/components/ui/button";
5 | import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
6 | import { Skeleton } from "@/components/ui/skeleton";
7 | import {
8 | Table,
9 | TableBody,
10 | TableCell,
11 | TableHead,
12 | TableHeader,
13 | TableRow,
14 | } from "@/components/ui/table";
15 | import { ChevronDown, ChevronUp, Copy } from "lucide-react";
16 | import { useSearchParams } from "next/navigation";
17 | import { useEffect, useState } from "react";
18 | import ReactMarkdown from "react-markdown";
19 | import {
20 | Prism as SyntaxHighlighter,
21 | SyntaxHighlighterProps,
22 | } from "react-syntax-highlighter";
23 | import * as themes from "react-syntax-highlighter/dist/esm/styles/prism";
24 | import { toast } from "sonner";
25 | import { Log } from "../types/logs";
26 |
27 | interface LogDetailsProps {
28 | logId: string;
29 | }
30 |
31 | const formatMetadataValue = (
32 | value: number | undefined,
33 | isMonetary: boolean = false,
34 | ): string => {
35 | if (value === undefined || isNaN(value)) return "N/A";
36 | if (isMonetary) {
37 | return `$${value.toFixed(4)}`;
38 | }
39 | return value.toLocaleString();
40 | };
41 |
42 | export default function LogDetails({ logId }: LogDetailsProps) {
43 | const [log, setLog] = useState<Log | null>(null);
44 | const [error, setError] = useState<string | null>(null);
45 | const [theme, setTheme] = useState<SyntaxHighlighterProps["style"] | null>(
46 | null,
47 | );
48 | const searchParams = useSearchParams();
49 | const [expandedSections, setExpandedSections] = useState({
50 | response: true,
51 | body: true,
52 | headers: true,
53 | });
54 |
55 | useEffect(() => {
56 | const fetchLog = async () => {
57 | if (logId) {
58 | try {
59 | const response = await fetch(`/api/logs/${logId}`);
60 | if (!response.ok) {
61 | throw new Error("Failed to fetch log");
62 | }
63 | const logData: Log = await response.json();
64 | setLog(logData);
65 | } catch (err) {
66 | setError("Error fetching log data");
67 | console.error(err);
68 | }
69 | }
70 | };
71 |
72 | fetchLog();
73 |
74 | const loadTheme = () => {
75 | const themeName = "vscDarkPlus";
76 | setTheme(
77 | themes[
78 | themeName as keyof typeof themes
79 | ] as SyntaxHighlighterProps["style"],
80 | );
81 | };
82 | loadTheme();
83 | }, [logId, searchParams]);
84 |
85 | const toggleSection = (section: "response" | "body" | "headers") => {
86 | setExpandedSections((prev) => ({
87 | ...prev,
88 | [section]: !prev[section],
89 | }));
90 | };
91 |
92 | if (error) {
93 | return (
94 |
95 | {error}
96 |
97 | );
98 | }
99 |
100 | if (!log) {
101 | return (
102 |
103 |
104 |
105 |
106 |
107 |
108 |
109 |
110 |
111 |
112 |
113 |
114 | );
115 | }
116 |
117 | const maskSensitiveInfo = (obj: any) => {
118 | const sensitiveKeys = ["authorization", "api-key", "secret"];
119 | if (typeof obj === "object" && obj !== null) {
120 | Object.keys(obj).forEach((key) => {
121 | if (
122 | sensitiveKeys.some((sensitiveKey) =>
123 | key.toLowerCase().includes(sensitiveKey),
124 | )
125 | ) {
126 | obj[key] = "************************";
127 | } else if (typeof obj[key] === "object") {
128 | obj[key] = maskSensitiveInfo(obj[key]);
129 | }
130 | });
131 | }
132 | return obj;
133 | };
134 |
135 | const JsonHighlight = ({
136 | content,
137 | isExpandable = false,
138 | }: {
139 | content: object | string | null;
140 | isExpandable?: boolean;
141 | }) => {
142 | const [isExpanded, setIsExpanded] = useState(!isExpandable);
143 | const parsedContent =
144 | typeof content === "string" ? JSON.parse(content) : content;
145 | const maskedContent = maskSensitiveInfo(parsedContent);
146 | const jsonString =
147 | JSON.stringify(maskedContent, null, 2) || "No data available";
148 |
149 | const handleCopy = () => {
150 | navigator.clipboard.writeText(jsonString);
151 | toast.success("Copied to clipboard");
152 | };
153 |
154 | const toggleExpand = () => {
155 | setIsExpanded(!isExpanded);
156 | };
157 |
158 | const renderAIResponse = (response: any) => {
159 | return (
160 |
161 |
AI Response
162 |
173 | {String(children).replace(/\n$/, "")}
174 |
175 | ) : (
176 |
177 | {children}
178 |
179 | );
180 | },
181 | }}
182 | >
183 | {response.text}
184 |
185 |
186 | );
187 | };
188 |
189 | const renderMessages = (messages: any[]) => {
190 | return messages
191 | .slice()
192 | .reverse()
193 | .map((message, index) => (
194 |
204 |
205 | {message.role === "user"
206 | ? "You"
207 | : message.role === "system"
208 | ? "System"
209 | : "Assistant"}
210 |
211 |
222 | {String(children).replace(/\n$/, "")}
223 |
224 | ) : (
225 |
226 | {children}
227 |
228 | );
229 | },
230 | }}
231 | >
232 | {message.content}
233 |
234 |
235 | ));
236 | };
237 |
238 | return (
239 |
240 |
248 | {isExpandable && (
249 |
265 | )}
266 |
267 | {(isExpanded || !isExpandable) && (
268 |
279 | {jsonString}
280 |
281 | )}
282 |
283 | {parsedContent && "text" in parsedContent && (
284 |
285 |
AI Response
286 | {renderAIResponse(parsedContent)}
287 |
288 | )}
289 |
290 | {parsedContent && "messages" in parsedContent && (
291 |
292 |
293 | Messages (Most recent on top)
294 |
295 | {renderMessages(parsedContent.messages as object[])}
296 |
297 | )}
298 |
299 | );
300 | };
301 |
302 | const renderUsageTable = (log: Log) => {
303 | return (
304 |
305 |
306 |
307 | Input Tokens
308 | Output Tokens
309 | Total Tokens
310 | Input Cost
311 | Output Cost
312 | Total Cost
313 |
314 |
315 |
316 |
317 |
318 | {formatMetadataValue(log.metadata?.inputTokens)}
319 |
320 |
321 | {formatMetadataValue(log.metadata?.outputTokens)}
322 |
323 |
324 | {formatMetadataValue(log.metadata?.totalTokens)}
325 |
326 |
327 | {formatMetadataValue(log.metadata?.inputCost, true)}
328 |
329 |
330 | {formatMetadataValue(log.metadata?.outputCost, true)}
331 |
332 |
333 | {formatMetadataValue(log.metadata?.totalCost, true)}
334 |
335 |
336 |
337 |
338 | );
339 | };
340 |
341 | return (
342 |
343 |
344 | Request Details
345 |
346 |
347 |
348 | {log.method} {log.url}
349 |
350 | {log.timestamp}
351 |
352 | {renderUsageTable(log)}
353 |
354 |
355 | toggleSection("response")}
358 | >
359 | Response
360 |
367 |
368 | {expandedSections.response && (
369 |
370 |
371 |
372 | )}
373 |
374 |
375 |
376 | toggleSection("body")}
379 | >
380 | Body
381 |
388 |
389 | {expandedSections.body && (
390 |
391 |
392 |
393 | )}
394 |
395 |
396 |
397 | toggleSection("headers")}
400 | >
401 | Headers
402 |
409 |
410 | {expandedSections.headers && (
411 |
412 |
413 |
414 | )}
415 |
416 |
417 |
418 | );
419 | }
420 |
--------------------------------------------------------------------------------
/src/components/LogsList.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | import { getConfigurationCosts } from "@/app/actions";
4 | import { Badge } from "@/components/ui/badge";
5 | import { Button } from "@/components/ui/button";
6 | import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
7 | import { ScrollArea } from "@/components/ui/scroll-area";
8 | import { Clock, DollarSign, Hash, MessageSquare } from "lucide-react";
9 | import { useRouter } from "next/navigation";
10 | import { useEffect, useState } from "react";
11 | import { Log } from "../types/logs";
12 |
13 | interface LogsListProps {
14 | logs: Log[];
15 | onLogSelect: (logId: string) => void;
16 | selectedLogId?: string;
17 | }
18 |
19 | const LogsListComponent: React.FC<LogsListProps> = ({
20 | logs,
21 | onLogSelect,
22 | selectedLogId,
23 | }) => {
24 | const getProviderColor = (provider: string) => {
25 | const colors: Record<string, string> = {
26 | anthropic: "bg-purple-100 text-purple-800 border-purple-300",
27 | anthropicCached: "bg-indigo-100 text-indigo-800 border-indigo-300",
28 | openai: "bg-green-100 text-green-800 border-green-300",
29 | cohere: "bg-blue-100 text-blue-800 border-blue-300",
30 | mistral: "bg-red-100 text-red-800 border-red-300",
31 | groq: "bg-yellow-100 text-yellow-800 border-yellow-300",
32 | ollama: "bg-orange-100 text-orange-800 border-orange-300",
33 | other: "bg-gray-100 text-gray-800 border-gray-300",
34 | };
35 | return colors[provider] || "bg-gray-100 text-gray-800 border-gray-300";
36 | };
37 |
38 | if (!Array.isArray(logs) || logs.length === 0) {
39 | return No logs available.
;
40 | }
41 |
42 | return (
43 |
44 | {logs.map((log) => {
45 | const totalTokens = log.metadata.totalTokens || 0;
46 | const totalCost = log.metadata.totalCost || 0;
47 | const firstUserMessage =
48 | log.body.messages.find((m) => m.role === "user" && !("name" in m))
49 | ?.content || "No message available";
50 | const truncatedMessage =
51 | firstUserMessage.slice(0, 100) +
52 | (firstUserMessage.length > 100 ? "..." : "");
53 | const isSelected = selectedLogId === log.id;
54 | const providerColorClass = getProviderColor(log.metadata.provider);
55 |
56 | return (
57 |
onLogSelect(log.id)}
65 | >
66 |
67 |
68 | {truncatedMessage}
69 |
70 |
71 |
72 |
73 |
74 |
78 | {log.metadata.provider}
79 |
80 |
81 | {log.metadata.model}
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
90 | {new Date(log.timestamp).toLocaleString()}
91 |
92 |
93 |
94 |
95 | {totalCost.toFixed(4)}
96 |
97 |
98 |
99 |
100 | {totalTokens} tokens
101 |
102 |
103 |
104 |
105 |
106 | );
107 | })}
108 |
109 | );
110 | };
111 |
112 | export default function LogsList({
113 | logs,
114 | onLogSelect,
115 | selectedLogId,
116 | }: LogsListProps) {
117 | const router = useRouter();
118 |
119 | return (
120 |
121 |
122 | Requests
123 |
124 |
125 |
126 |
131 |
132 |
133 |
134 | );
135 | }
136 |
--------------------------------------------------------------------------------
/src/components/NavBar.tsx:
--------------------------------------------------------------------------------
1 | 'use client';
2 |
3 | import Link from 'next/link';
4 | import { usePathname } from 'next/navigation';
5 | import { ThemeToggle } from './theme-toggle';
6 |
7 | function NavLink({
8 | href,
9 | children,
10 | }: {
11 | href: string;
12 | children: React.ReactNode;
13 | }) {
14 | const pathname = usePathname();
15 | const active = pathname === href;
16 |
17 | return (
18 |
26 | {children}
27 |
28 | );
29 | }
30 |
31 | export default function NavBar() {
32 | return (
33 |
42 | );
43 | }
44 |
--------------------------------------------------------------------------------
/src/components/theme-provider.tsx:
--------------------------------------------------------------------------------
1 | 'use client';
2 |
3 | import * as React from 'react';
4 | import { ThemeProvider as NextThemesProvider } from 'next-themes';
5 | import { type ThemeProviderProps } from 'next-themes/dist/types';
6 |
7 | export function ThemeProvider({ children, ...props }: ThemeProviderProps) {
8 | return <NextThemesProvider {...props}>{children}</NextThemesProvider>;
9 | }
10 |
--------------------------------------------------------------------------------
/src/components/theme-toggle.tsx:
--------------------------------------------------------------------------------
1 | 'use client';
2 |
3 | import * as React from 'react';
4 | import { Moon, Sun } from 'lucide-react';
5 | import { useTheme } from 'next-themes';
6 |
7 | export function ThemeToggle() {
8 | const { setTheme, theme } = useTheme();
9 |
10 | return (
11 |
19 | );
20 | }
21 |
--------------------------------------------------------------------------------
/src/components/ui/alert.tsx:
--------------------------------------------------------------------------------
1 | import * as React from "react"
2 | import { cva, type VariantProps } from "class-variance-authority"
3 |
4 | import { cn } from "@/lib/utils"
5 |
6 | const alertVariants = cva(
7 | "relative w-full rounded-lg border px-4 py-3 text-sm [&>svg+div]:translate-y-[-3px] [&>svg]:absolute [&>svg]:left-4 [&>svg]:top-4 [&>svg]:text-foreground [&>svg~*]:pl-7",
8 | {
9 | variants: {
10 | variant: {
11 | default: "bg-background text-foreground",
12 | destructive:
13 | "border-destructive/50 text-destructive dark:border-destructive [&>svg]:text-destructive",
14 | },
15 | },
16 | defaultVariants: {
17 | variant: "default",
18 | },
19 | }
20 | )
21 |
22 | const Alert = React.forwardRef<
23 | HTMLDivElement,
24 | React.HTMLAttributes<HTMLDivElement> & VariantProps<typeof alertVariants>
25 | >(({ className, variant, ...props }, ref) => (
26 |
32 | ))
33 | Alert.displayName = "Alert"
34 |
35 | const AlertTitle = React.forwardRef<
36 | HTMLParagraphElement,
37 | React.HTMLAttributes<HTMLHeadingElement>
38 | >(({ className, ...props }, ref) => (
39 |
44 | ))
45 | AlertTitle.displayName = "AlertTitle"
46 |
47 | const AlertDescription = React.forwardRef<
48 | HTMLParagraphElement,
49 | React.HTMLAttributes<HTMLParagraphElement>
50 | >(({ className, ...props }, ref) => (
51 |
56 | ))
57 | AlertDescription.displayName = "AlertDescription"
58 |
59 | export { Alert, AlertTitle, AlertDescription }
60 |
--------------------------------------------------------------------------------
/src/components/ui/badge.tsx:
--------------------------------------------------------------------------------
1 | import * as React from "react"
2 | import { cva, type VariantProps } from "class-variance-authority"
3 |
4 | import { cn } from "@/lib/utils"
5 |
6 | const badgeVariants = cva(
7 | "inline-flex items-center rounded-md border px-2.5 py-0.5 text-xs font-semibold transition-colors focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2",
8 | {
9 | variants: {
10 | variant: {
11 | default:
12 | "border-transparent bg-primary text-primary-foreground shadow hover:bg-primary/80",
13 | secondary:
14 | "border-transparent bg-secondary text-secondary-foreground hover:bg-secondary/80",
15 | destructive:
16 | "border-transparent bg-destructive text-destructive-foreground shadow hover:bg-destructive/80",
17 | outline: "text-foreground",
18 | },
19 | },
20 | defaultVariants: {
21 | variant: "default",
22 | },
23 | }
24 | )
25 |
26 | export interface BadgeProps
27 | extends React.HTMLAttributes<HTMLDivElement>,
28 | VariantProps<typeof badgeVariants> {}
29 |
30 | function Badge({ className, variant, ...props }: BadgeProps) {
31 | return (
32 | <div className={cn(badgeVariants({ variant }), className)} {...props} />
33 | )
34 | }
35 |
36 | export { Badge, badgeVariants }
37 |
--------------------------------------------------------------------------------
/src/components/ui/button.tsx:
--------------------------------------------------------------------------------
1 | import * as React from "react"
2 | import { Slot } from "@radix-ui/react-slot"
3 | import { cva, type VariantProps } from "class-variance-authority"
4 |
5 | import { cn } from "@/lib/utils"
6 |
7 | const buttonVariants = cva(
8 | "inline-flex items-center justify-center whitespace-nowrap rounded-md text-sm font-medium transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-ring disabled:pointer-events-none disabled:opacity-50",
9 | {
10 | variants: {
11 | variant: {
12 | default:
13 | "bg-primary text-primary-foreground shadow hover:bg-primary/90",
14 | destructive:
15 | "bg-destructive text-destructive-foreground shadow-sm hover:bg-destructive/90",
16 | outline:
17 | "border border-input bg-background shadow-sm hover:bg-accent hover:text-accent-foreground",
18 | secondary:
19 | "bg-secondary text-secondary-foreground shadow-sm hover:bg-secondary/80",
20 | ghost: "hover:bg-accent hover:text-accent-foreground",
21 | link: "text-primary underline-offset-4 hover:underline",
22 | },
23 | size: {
24 | default: "h-9 px-4 py-2",
25 | sm: "h-8 rounded-md px-3 text-xs",
26 | lg: "h-10 rounded-md px-8",
27 | icon: "h-9 w-9",
28 | },
29 | },
30 | defaultVariants: {
31 | variant: "default",
32 | size: "default",
33 | },
34 | }
35 | )
36 |
37 | export interface ButtonProps
38 | extends React.ButtonHTMLAttributes<HTMLButtonElement>,
39 | VariantProps<typeof buttonVariants> {
40 | asChild?: boolean
41 | }
42 |
43 | const Button = React.forwardRef<HTMLButtonElement, ButtonProps>(
44 | ({ className, variant, size, asChild = false, ...props }, ref) => {
45 | const Comp = asChild ? Slot : "button"
46 | return (
47 |
52 | )
53 | }
54 | )
55 | Button.displayName = "Button"
56 |
57 | export { Button, buttonVariants }
58 |
--------------------------------------------------------------------------------
/src/components/ui/calendar.tsx:
--------------------------------------------------------------------------------
1 | "use client"
2 |
3 | import * as React from "react"
4 | import { ChevronLeftIcon, ChevronRightIcon } from "@radix-ui/react-icons"
5 | import { DayPicker } from "react-day-picker"
6 |
7 | import { cn } from "@/lib/utils"
8 | import { buttonVariants } from "@/components/ui/button"
9 |
10 | export type CalendarProps = React.ComponentProps
11 |
12 | function Calendar({
13 | className,
14 | classNames,
15 | showOutsideDays = true,
16 | ...props
17 | }: CalendarProps) {
18 | return (
19 | .day-range-end)]:rounded-r-md [&:has(>.day-range-start)]:rounded-l-md first:[&:has([aria-selected])]:rounded-l-md last:[&:has([aria-selected])]:rounded-r-md"
43 | : "[&:has([aria-selected])]:rounded-md"
44 | ),
45 | day: cn(
46 | buttonVariants({ variant: "ghost" }),
47 | "h-8 w-8 p-0 font-normal aria-selected:opacity-100"
48 | ),
49 | day_range_start: "day-range-start",
50 | day_range_end: "day-range-end",
51 | day_selected:
52 | "bg-primary text-primary-foreground hover:bg-primary hover:text-primary-foreground focus:bg-primary focus:text-primary-foreground",
53 | day_today: "bg-accent text-accent-foreground",
54 | day_outside:
55 | "day-outside text-muted-foreground opacity-50 aria-selected:bg-accent/50 aria-selected:text-muted-foreground aria-selected:opacity-30",
56 | day_disabled: "text-muted-foreground opacity-50",
57 | day_range_middle:
58 | "aria-selected:bg-accent aria-selected:text-accent-foreground",
59 | day_hidden: "invisible",
60 | ...classNames,
61 | }}
62 | components={{
63 | IconLeft: ({ ...props }) => ,
64 | IconRight: ({ ...props }) => ,
65 | }}
66 | {...props}
67 | />
68 | )
69 | }
70 | Calendar.displayName = "Calendar"
71 |
72 | export { Calendar }
73 |
--------------------------------------------------------------------------------
/src/components/ui/card.tsx:
--------------------------------------------------------------------------------
1 | import * as React from "react"
2 |
3 | import { cn } from "@/lib/utils"
4 |
5 | const Card = React.forwardRef<
6 | HTMLDivElement,
7 | React.HTMLAttributes<HTMLDivElement>
8 | >(({ className, ...props }, ref) => (
9 |
17 | ))
18 | Card.displayName = "Card"
19 |
20 | const CardHeader = React.forwardRef<
21 | HTMLDivElement,
22 | React.HTMLAttributes<HTMLDivElement>
23 | >(({ className, ...props }, ref) => (
24 |
29 | ))
30 | CardHeader.displayName = "CardHeader"
31 |
32 | const CardTitle = React.forwardRef<
33 | HTMLParagraphElement,
34 | React.HTMLAttributes<HTMLHeadingElement>
35 | >(({ className, ...props }, ref) => (
36 |
41 | ))
42 | CardTitle.displayName = "CardTitle"
43 |
44 | const CardDescription = React.forwardRef<
45 | HTMLParagraphElement,
46 | React.HTMLAttributes<HTMLParagraphElement>
47 | >(({ className, ...props }, ref) => (
48 |
53 | ))
54 | CardDescription.displayName = "CardDescription"
55 |
56 | const CardContent = React.forwardRef<
57 | HTMLDivElement,
58 | React.HTMLAttributes<HTMLDivElement>
59 | >(({ className, ...props }, ref) => (
60 |
61 | ))
62 | CardContent.displayName = "CardContent"
63 |
64 | const CardFooter = React.forwardRef<
65 | HTMLDivElement,
66 | React.HTMLAttributes<HTMLDivElement>
67 | >(({ className, ...props }, ref) => (
68 |
73 | ))
74 | CardFooter.displayName = "CardFooter"
75 |
76 | export { Card, CardHeader, CardFooter, CardTitle, CardDescription, CardContent }
77 |
--------------------------------------------------------------------------------
/src/components/ui/chart.tsx:
--------------------------------------------------------------------------------
1 | 'use client';
2 |
3 | import * as React from 'react';
4 | import * as RechartsPrimitive from 'recharts';
5 | import {
6 | NameType,
7 | Payload,
8 | ValueType,
9 | } from 'recharts/types/component/DefaultTooltipContent';
10 |
11 | import { cn } from '@/lib/utils';
12 |
13 | // Format: { THEME_NAME: CSS_SELECTOR }
14 | const THEMES = { light: '', dark: '.dark' } as const;
15 |
16 | export type ChartConfig = {
17 | [k in string]: {
18 | label?: React.ReactNode;
19 | icon?: React.ComponentType;
20 | } & (
21 | | { color?: string; theme?: never }
22 | | { color?: never; theme: Record }
23 | );
24 | };
25 |
26 | type ChartContextProps = {
27 | config: ChartConfig;
28 | };
29 |
30 | const ChartContext = React.createContext<ChartContextProps | null>(null);
31 |
32 | function useChart() {
33 | const context = React.useContext(ChartContext);
34 |
35 | if (!context) {
36 | throw new Error('useChart must be used within a <ChartContainer />');
37 | }
38 |
39 | return context;
40 | }
41 |
42 | const ChartContainer = React.forwardRef<
43 | HTMLDivElement,
44 | React.ComponentProps<'div'> & {
45 | config: ChartConfig;
46 | children: React.ComponentProps<
47 | typeof RechartsPrimitive.ResponsiveContainer
48 | >['children'];
49 | }
50 | >(({ id, className, children, config, ...props }, ref) => {
51 | const uniqueId = React.useId();
52 | const chartId = `chart-${id || uniqueId.replace(/:/g, '')}`;
53 |
54 | return (
55 |
56 |
65 |
66 |
67 | {children}
68 |
69 |
70 |
71 | );
72 | });
73 | ChartContainer.displayName = 'Chart';
74 |
75 | const ChartStyle = ({ id, config }: { id: string; config: ChartConfig }) => {
76 | const colorConfig = Object.entries(config).filter(
77 | ([_, config]) => config.theme || config.color
78 | );
79 |
80 | if (!colorConfig.length) {
81 | return null;
82 | }
83 |
84 | return (
85 |