├── landingpage ├── .firebaserc ├── pnpm-workspace.yaml ├── metadata.json ├── index.tsx ├── firebase.json ├── package.json ├── tsconfig.json ├── README.md ├── vite.config.ts ├── firebase.ts ├── types.ts ├── components │ ├── TypingAnimation.tsx │ ├── TerminalWindow.tsx │ ├── SupportSection.tsx │ └── BlockLogo.tsx ├── .gitignore ├── App.tsx └── constants.ts ├── src ├── handlers │ ├── types.ts │ └── native-handler.ts ├── adapters │ ├── index.ts │ ├── minimax-adapter.ts │ ├── deepseek-adapter.ts │ ├── adapter-manager.ts │ ├── qwen-adapter.ts │ ├── openai-adapter.ts │ ├── base-adapter.ts │ ├── gemini-adapter.ts │ └── grok-adapter.ts ├── middleware │ ├── index.ts │ ├── types.ts │ └── manager.ts ├── port-manager.ts ├── validation │ ├── types.ts │ └── validator.ts ├── utils.ts ├── types.ts ├── config.ts ├── providers │ └── provider-registry.ts ├── index.ts └── logger.ts ├── scripts ├── postinstall.cjs └── generate-manifest.ts ├── .gitignore ├── tsconfig.json ├── biome.json ├── CLAUDE.md ├── tests ├── monitor-integration-test.sh ├── verify-user-models.ts ├── debug-snapshot.ts ├── fixtures │ ├── example_simple_text.json │ ├── README.md │ └── example_tool_use.json ├── image-handling.test.ts ├── gemini-compatibility.test.ts └── snapshot-workflow.sh ├── ai_docs ├── GEMINI_NO_CONTENT_FIX.md ├── THINKING_ALIGNMENT_SUMMARY.md └── TIMEOUT_CONFIGURATION_CLARIFICATION.md ├── package.json ├── docs ├── advanced │ ├── cost-tracking.md │ ├── environment.md │ └── automation.md ├── usage │ ├── monitor-mode.md │ ├── interactive-mode.md │ └── single-shot-mode.md ├── getting-started │ └── quick-start.md └── models │ ├── model-mapping.md │ └── choosing-models.md ├── install.sh ├── pnpm-lock.yaml └── .github └── workflows └── release.yml /landingpage/.firebaserc: -------------------------------------------------------------------------------- 1 | { 2 | "projects": { 3 | "default": "claudish-6da10" 4 | } 5 | } 6 | -------------------------------------------------------------------------------- 
/landingpage/pnpm-workspace.yaml: -------------------------------------------------------------------------------- 1 | onlyBuiltDependencies: 2 | - '@firebase/util' 3 | - esbuild 4 | - protobufjs 5 | -------------------------------------------------------------------------------- /landingpage/metadata.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Claudish", 3 | "description": "A landing page for Claudish - the universal model wrapper for Claude Code CLI.", 4 | "requestFramePermissions": [] 5 | } -------------------------------------------------------------------------------- /src/handlers/types.ts: -------------------------------------------------------------------------------- 1 | import type { Context } from "hono"; 2 | 3 | export interface ModelHandler { 4 | handle(c: Context, payload: any): Promise; 5 | shutdown(): Promise; 6 | } 7 | -------------------------------------------------------------------------------- /src/adapters/index.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Model adapters for handling model-specific quirks 3 | */ 4 | 5 | export { BaseModelAdapter, DefaultAdapter } from "./base-adapter.js"; 6 | export type { ToolCall, AdapterResult } from "./base-adapter.js"; 7 | export { GrokAdapter } from "./grok-adapter.js"; 8 | export { AdapterManager } from "./adapter-manager.js"; 9 | -------------------------------------------------------------------------------- /src/middleware/index.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Middleware System Exports 3 | * 4 | * Provides a clean middleware system for handling model-specific behavior. 
5 | */ 6 | 7 | export { MiddlewareManager } from "./manager.js"; 8 | export { GeminiThoughtSignatureMiddleware } from "./gemini-thought-signature.js"; 9 | export type { 10 | ModelMiddleware, 11 | RequestContext, 12 | NonStreamingResponseContext, 13 | StreamChunkContext, 14 | } from "./types.js"; 15 | -------------------------------------------------------------------------------- /landingpage/index.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import ReactDOM from 'react-dom/client'; 3 | import App from './App'; 4 | import './firebase'; // Initialize Firebase Analytics 5 | 6 | const rootElement = document.getElementById('root'); 7 | if (!rootElement) { 8 | throw new Error("Could not find root element to mount to"); 9 | } 10 | 11 | const root = ReactDOM.createRoot(rootElement); 12 | root.render( 13 | 14 | 15 | 16 | ); -------------------------------------------------------------------------------- /landingpage/firebase.json: -------------------------------------------------------------------------------- 1 | { 2 | "hosting": { 3 | "public": "dist", 4 | "ignore": [ 5 | "firebase.json", 6 | "**/.*", 7 | "**/node_modules/**" 8 | ], 9 | "rewrites": [ 10 | { 11 | "source": "**", 12 | "destination": "/index.html" 13 | } 14 | ], 15 | "headers": [ 16 | { 17 | "source": "/assets/**", 18 | "headers": [ 19 | { 20 | "key": "Cache-Control", 21 | "value": "public, max-age=31536000, immutable" 22 | } 23 | ] 24 | } 25 | ] 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /scripts/postinstall.cjs: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | console.log('\x1b[32m✓ Claudish installed successfully!\x1b[0m'); 4 | console.log(''); 5 | console.log('\x1b[1mUsage:\x1b[0m'); 6 | console.log(' claudish --model x-ai/grok-code-fast-1 "your prompt"'); 7 | console.log(' claudish --interactive # Interactive 
model selection'); 8 | console.log(' claudish --list-models # List all available models'); 9 | console.log(''); 10 | console.log('\x1b[1mGet started:\x1b[0m'); 11 | console.log(' 1. Set OPENROUTER_API_KEY environment variable'); 12 | console.log(' 2. Run: claudish --interactive'); 13 | console.log(''); 14 | -------------------------------------------------------------------------------- /landingpage/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "claudish", 3 | "private": true, 4 | "version": "0.0.0", 5 | "type": "module", 6 | "scripts": { 7 | "dev": "vite", 8 | "build": "vite build", 9 | "preview": "vite preview", 10 | "firebase:deploy": "pnpm build && firebase deploy --only hosting" 11 | }, 12 | "dependencies": { 13 | "firebase": "^12.6.0", 14 | "react": "^19.2.0", 15 | "react-dom": "^19.2.0" 16 | }, 17 | "devDependencies": { 18 | "@types/node": "^22.14.0", 19 | "@vitejs/plugin-react": "^5.0.0", 20 | "typescript": "~5.8.2", 21 | "vite": "^6.2.0" 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Dependencies 2 | node_modules/ 3 | 4 | # Build output 5 | dist/ 6 | build/ 7 | 8 | # Environment files 9 | .env 10 | .env.local 11 | .env.*.local 12 | 13 | # IDE 14 | .idea/ 15 | .vscode/ 16 | *.swp 17 | *.swo 18 | 19 | # OS files 20 | .DS_Store 21 | Thumbs.db 22 | 23 | # Logs 24 | *.log 25 | npm-debug.log* 26 | yarn-debug.log* 27 | yarn-error.log* 28 | 29 | # Test coverage 30 | coverage/ 31 | 32 | # Temporary files 33 | tmp/ 34 | temp/ 35 | all-models.json 36 | 37 | # Claude Code local files 38 | .claude/ 39 | .claudemem/ 40 | 41 | # npm lockfile (we use bun.lock) 42 | package-lock.json 43 | 44 | # Dev/test files 45 | __tests__/ 46 | *.jinja 47 | logs/ 48 | -------------------------------------------------------------------------------- 
/landingpage/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ES2022", 4 | "experimentalDecorators": true, 5 | "useDefineForClassFields": false, 6 | "module": "ESNext", 7 | "lib": [ 8 | "ES2022", 9 | "DOM", 10 | "DOM.Iterable" 11 | ], 12 | "skipLibCheck": true, 13 | "types": [ 14 | "node" 15 | ], 16 | "moduleResolution": "bundler", 17 | "isolatedModules": true, 18 | "moduleDetection": "force", 19 | "allowJs": true, 20 | "jsx": "react-jsx", 21 | "paths": { 22 | "@/*": [ 23 | "./*" 24 | ] 25 | }, 26 | "allowImportingTsExtensions": true, 27 | "noEmit": true 28 | } 29 | } -------------------------------------------------------------------------------- /landingpage/README.md: -------------------------------------------------------------------------------- 1 | # Claudish Landing Page 2 | 3 | The marketing site for [Claudish](https://github.com/MadAppGang/claudish) - run Claude Code with any AI model via OpenRouter. 4 | 5 | Built with Claudish itself: Opus 4.5 and Gemini 3.0 Pro collaborating in a single session. 6 | 7 | ## Development 8 | 9 | ```bash 10 | pnpm install 11 | pnpm dev 12 | ``` 13 | 14 | Opens at `localhost:3000`. 15 | 16 | ## Deploy 17 | 18 | ```bash 19 | pnpm firebase:deploy 20 | ``` 21 | 22 | Builds and deploys to Firebase Hosting. 
23 | 24 | ## Stack 25 | 26 | - Vite + React 19 + TypeScript 27 | - Tailwind CSS 4 28 | - Firebase Hosting + Analytics 29 | 30 | ## Live 31 | 32 | https://claudish.com 33 | -------------------------------------------------------------------------------- /landingpage/vite.config.ts: -------------------------------------------------------------------------------- 1 | import path from 'path'; 2 | import { defineConfig, loadEnv } from 'vite'; 3 | import react from '@vitejs/plugin-react'; 4 | 5 | export default defineConfig(({ mode }) => { 6 | const env = loadEnv(mode, '.', ''); 7 | return { 8 | server: { 9 | port: 3000, 10 | host: '0.0.0.0', 11 | }, 12 | plugins: [react()], 13 | define: { 14 | 'process.env.API_KEY': JSON.stringify(env.GEMINI_API_KEY), 15 | 'process.env.GEMINI_API_KEY': JSON.stringify(env.GEMINI_API_KEY) 16 | }, 17 | resolve: { 18 | alias: { 19 | '@': path.resolve(__dirname, '.'), 20 | } 21 | } 22 | }; 23 | }); 24 | -------------------------------------------------------------------------------- /landingpage/firebase.ts: -------------------------------------------------------------------------------- 1 | import { initializeApp } from "firebase/app"; 2 | import { getAnalytics, isSupported } from "firebase/analytics"; 3 | 4 | const firebaseConfig = { 5 | apiKey: "AIzaSyCNkRYx0x-dcjPQJSGgCqugOJ17BwOpcDQ", 6 | authDomain: "claudish-6da10.firebaseapp.com", 7 | projectId: "claudish-6da10", 8 | storageBucket: "claudish-6da10.firebasestorage.app", 9 | messagingSenderId: "1095565486978", 10 | appId: "1:1095565486978:web:1ced13f51530bb9c1d3d9b", 11 | measurementId: "G-9PYJS4N8X9", 12 | }; 13 | 14 | export const app = initializeApp(firebaseConfig); 15 | 16 | // Analytics only works in browser, not during SSR/build 17 | export const analytics = isSupported().then((supported) => 18 | supported ? 
getAnalytics(app) : null 19 | ); 20 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ES2022", 4 | "lib": ["ES2022"], 5 | "module": "ESNext", 6 | "moduleResolution": "bundler", 7 | "jsx": "react-jsx", 8 | "strict": true, 9 | "noUnusedLocals": true, 10 | "noUnusedParameters": true, 11 | "noFallthroughCasesInSwitch": true, 12 | "noImplicitReturns": true, 13 | "exactOptionalPropertyTypes": false, 14 | "esModuleInterop": true, 15 | "allowSyntheticDefaultImports": true, 16 | "forceConsistentCasingInFileNames": true, 17 | "isolatedModules": true, 18 | "resolveJsonModule": true, 19 | "types": ["bun-types"], 20 | "skipLibCheck": true 21 | }, 22 | "include": ["src/**/*"], 23 | "exclude": ["node_modules", "dist"] 24 | } 25 | -------------------------------------------------------------------------------- /landingpage/types.ts: -------------------------------------------------------------------------------- 1 | export interface TerminalLine { 2 | id: string; 3 | type: 'input' | 'output' | 'success' | 'info' | 'ascii' | 'progress' | 'system' | 'welcome' | 'rich-input' | 'thinking' | 'tool'; 4 | content: string | any; 5 | prefix?: string; 6 | delay?: number; // Simulated delay before appearing 7 | data?: any; // Extra data for rich components 8 | } 9 | 10 | export interface Feature { 11 | id: string; 12 | title: string; 13 | description: string; 14 | icon?: string; 15 | badge?: string; 16 | key?: string; // Legacy support if needed 17 | value?: string | string[]; // Legacy support if needed 18 | } 19 | 20 | export interface ModelCard { 21 | id: string; 22 | name: string; 23 | provider: string; 24 | description: string; 25 | tags: string[]; 26 | color: string; 27 | } -------------------------------------------------------------------------------- /biome.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://biomejs.dev/schemas/1.9.4/schema.json", 3 | "vcs": { 4 | "enabled": true, 5 | "clientKind": "git", 6 | "useIgnoreFile": true 7 | }, 8 | "files": { 9 | "ignoreUnknown": false, 10 | "ignore": ["node_modules", "dist", ".git"] 11 | }, 12 | "formatter": { 13 | "enabled": true, 14 | "indentStyle": "space", 15 | "indentWidth": 2, 16 | "lineWidth": 100 17 | }, 18 | "organizeImports": { 19 | "enabled": true 20 | }, 21 | "linter": { 22 | "enabled": true, 23 | "rules": { 24 | "recommended": true, 25 | "complexity": { 26 | "noExcessiveCognitiveComplexity": "warn" 27 | }, 28 | "style": { 29 | "noNonNullAssertion": "off", 30 | "useNodejsImportProtocol": "error" 31 | }, 32 | "suspicious": { 33 | "noExplicitAny": "warn" 34 | } 35 | } 36 | }, 37 | "javascript": { 38 | "formatter": { 39 | "quoteStyle": "double", 40 | "semicolons": "always", 41 | "trailingCommas": "es5" 42 | } 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /landingpage/components/TypingAnimation.tsx: -------------------------------------------------------------------------------- 1 | import React, { useState, useEffect } from 'react'; 2 | 3 | interface TypingAnimationProps { 4 | text: string; 5 | speed?: number; 6 | onComplete?: () => void; 7 | className?: string; 8 | } 9 | 10 | export const TypingAnimation: React.FC = ({ 11 | text, 12 | speed = 30, 13 | onComplete, 14 | className = '' 15 | }) => { 16 | const [displayedText, setDisplayedText] = useState(''); 17 | const [currentIndex, setCurrentIndex] = useState(0); 18 | 19 | useEffect(() => { 20 | if (currentIndex < text.length) { 21 | const timeout = setTimeout(() => { 22 | setDisplayedText(prev => prev + text[currentIndex]); 23 | setCurrentIndex(prev => prev + 1); 24 | }, speed + (Math.random() * 20)); // Add slight randomness for realism 25 | 26 | return () => clearTimeout(timeout); 27 | } else if 
(onComplete) { 28 | onComplete(); 29 | } 30 | }, [currentIndex, text, speed, onComplete]); 31 | 32 | return {displayedText}; 33 | }; -------------------------------------------------------------------------------- /src/adapters/minimax-adapter.ts: -------------------------------------------------------------------------------- 1 | import { BaseModelAdapter, AdapterResult } from "./base-adapter"; 2 | import { log } from "../logger"; 3 | 4 | export class MiniMaxAdapter extends BaseModelAdapter { 5 | processTextContent( 6 | textContent: string, 7 | accumulatedText: string 8 | ): AdapterResult { 9 | // MiniMax interleaved thinking is handled by the model 10 | return { 11 | cleanedText: textContent, 12 | extractedToolCalls: [], 13 | wasTransformed: false, 14 | }; 15 | } 16 | 17 | /** 18 | * Handle request preparation - specifically for mapping reasoning parameters 19 | */ 20 | override prepareRequest(request: any, originalRequest: any): any { 21 | if (originalRequest.thinking) { 22 | // MiniMax uses reasoning_split boolean 23 | request.reasoning_split = true; 24 | 25 | log(`[MiniMaxAdapter] Enabled reasoning_split: true`); 26 | 27 | // Cleanup: Remove raw thinking object 28 | delete request.thinking; 29 | } 30 | 31 | return request; 32 | } 33 | 34 | shouldHandle(modelId: string): boolean { 35 | return modelId.includes("minimax"); 36 | } 37 | 38 | getName(): string { 39 | return "MiniMaxAdapter"; 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /src/adapters/deepseek-adapter.ts: -------------------------------------------------------------------------------- 1 | import { BaseModelAdapter, AdapterResult } from "./base-adapter"; 2 | import { log } from "../logger"; 3 | 4 | export class DeepSeekAdapter extends BaseModelAdapter { 5 | processTextContent( 6 | textContent: string, 7 | accumulatedText: string 8 | ): AdapterResult { 9 | return { 10 | cleanedText: textContent, 11 | extractedToolCalls: [], 12 | wasTransformed: 
false, 13 | }; 14 | } 15 | 16 | /** 17 | * Handle request preparation - specifically for stripping unsupported parameters 18 | */ 19 | override prepareRequest(request: any, originalRequest: any): any { 20 | if (originalRequest.thinking) { 21 | // DeepSeek doesn't support thinking params via API options 22 | // It thinks automatically or via other means (R1) 23 | // Stripping thinking object to prevent API errors 24 | 25 | log(`[DeepSeekAdapter] Stripping thinking object (not supported by API)`); 26 | 27 | // Cleanup: Remove raw thinking object 28 | delete request.thinking; 29 | } 30 | 31 | return request; 32 | } 33 | 34 | shouldHandle(modelId: string): boolean { 35 | return modelId.includes("deepseek"); 36 | } 37 | 38 | getName(): string { 39 | return "DeepSeekAdapter"; 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /CLAUDE.md: -------------------------------------------------------------------------------- 1 | # Claudish - Development Notes 2 | 3 | ## Release Process 4 | 5 | **Releases are handled by CI/CD** - do NOT manually run `npm publish`. 6 | 7 | 1. Bump version in `package.json` 8 | 2. Commit with conventional commit message (e.g., `feat!: v3.0.0 - description`) 9 | 3. Create annotated tag: `git tag -a v3.0.0 -m "message"` 10 | 4. Push with tags: `git push origin main --tags` 11 | 5. 
CI/CD will automatically publish to npm 12 | 13 | ## Build Commands 14 | 15 | - `bun run build` - Full build (extracts models + bundles) 16 | - `bun run build:ci` - CI build (bundles only, no model extraction) 17 | - `bun run dev` - Development mode 18 | 19 | ## Local Model Support 20 | 21 | Claudish supports local models via: 22 | - **Ollama**: `claudish --model ollama/llama3.2` 23 | - **LM Studio**: `claudish --model lmstudio/model-name` 24 | - **Custom URLs**: `claudish --model http://localhost:11434/model` 25 | 26 | ### Context Tracking for Local Models 27 | 28 | Local model APIs (LM Studio, Ollama) report `prompt_tokens` as the **full conversation context** each request, not incremental tokens. The `writeTokenFile` function uses assignment (`=`) not accumulation (`+=`) for input tokens to handle this correctly. 29 | 30 | ## Debug Logging 31 | 32 | Debug logging is behind the `--debug` flag and outputs to `logs/` directory. It's disabled by default. 33 | -------------------------------------------------------------------------------- /src/port-manager.ts: -------------------------------------------------------------------------------- 1 | import { createServer } from "node:net"; 2 | 3 | /** 4 | * Find an available port in the given range. 5 | * Uses random selection first to avoid conflicts in parallel runs. 
6 | */ 7 | export async function findAvailablePort(startPort = 3000, endPort = 9000): Promise { 8 | // Try random port first (better for parallel runs) 9 | const randomPort = Math.floor(Math.random() * (endPort - startPort + 1)) + startPort; 10 | 11 | if (await isPortAvailable(randomPort)) { 12 | return randomPort; 13 | } 14 | 15 | // Fallback: sequential search 16 | for (let port = startPort; port <= endPort; port++) { 17 | if (await isPortAvailable(port)) { 18 | return port; 19 | } 20 | } 21 | 22 | throw new Error(`No available ports found in range ${startPort}-${endPort}`); 23 | } 24 | 25 | /** 26 | * Check if a port is available by attempting to bind to it. 27 | */ 28 | export async function isPortAvailable(port: number): Promise { 29 | return new Promise((resolve) => { 30 | const server = createServer(); 31 | 32 | server.once("error", (err: NodeJS.ErrnoException) => { 33 | resolve(err.code !== "EADDRINUSE"); 34 | }); 35 | 36 | server.once("listening", () => { 37 | server.close(); 38 | resolve(true); 39 | }); 40 | 41 | server.listen(port, "127.0.0.1"); 42 | }); 43 | } 44 | -------------------------------------------------------------------------------- /landingpage/components/TerminalWindow.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | 3 | interface TerminalWindowProps { 4 | children: React.ReactNode; 5 | className?: string; 6 | title?: string; 7 | noPadding?: boolean; 8 | } 9 | 10 | export const TerminalWindow: React.FC = ({ 11 | children, 12 | className = '', 13 | title = 'claudish-cli', 14 | noPadding = false 15 | }) => { 16 | return ( 17 |
18 | {/* Window Header */} 19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 | {title} 27 |
28 |
29 | 30 | {/* Terminal Content */} 31 |
32 | {children} 33 |
34 |
35 | ); 36 | }; -------------------------------------------------------------------------------- /src/validation/types.ts: -------------------------------------------------------------------------------- 1 | export type IssueSeverity = "critical" | "high" | "medium" | "low"; 2 | export type ImprovementCategory = "mandatory" | "recommended" | "optional"; 3 | 4 | export interface ValidationIssue { 5 | path: string; 6 | line?: number; 7 | severity: IssueSeverity; 8 | category: ImprovementCategory; 9 | message: string; 10 | hypothesis?: string; 11 | improvement?: string; 12 | details?: string; 13 | } 14 | 15 | export interface ValidationSuggestion { 16 | what: string; 17 | why: string; 18 | impact: number; // 0-1 19 | ease: number; // 0-1 20 | urgency: number; // 0-1 21 | score: number; // impact * 0.4 + ease * 0.3 + urgency * 0.3 22 | } 23 | 24 | export interface ValidationReport { 25 | issues: ValidationIssue[]; 26 | summary: { 27 | total: number; 28 | bySeverity: { 29 | critical: ValidationIssue[]; 30 | high: ValidationIssue[]; 31 | medium: ValidationIssue[]; 32 | low: ValidationIssue[]; 33 | }; 34 | byCategory: { 35 | mandatory: ValidationIssue[]; 36 | recommended: ValidationIssue[]; 37 | optional: ValidationIssue[]; 38 | }; 39 | }; 40 | } 41 | 42 | export interface ValidationResult { 43 | project: ValidationReport; 44 | agents: ValidationReport; 45 | commands: ValidationReport; 46 | skills: ValidationReport; 47 | documentation: ValidationReport; 48 | } 49 | 50 | export interface ImprovementPlan { 51 | suggestions: ValidationSuggestion[]; 52 | todos: string[]; 53 | effort: "low" | "medium" | "high"; 54 | priorityIssues: string[]; 55 | estimatedTime: string; 56 | } 57 | -------------------------------------------------------------------------------- /landingpage/.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | firebase-debug.log* 8 
| firebase-debug.*.log* 9 | 10 | # Firebase cache 11 | .firebase/ 12 | 13 | # Firebase config 14 | 15 | # Uncomment this if you'd like others to create their own Firebase project. 16 | # For a team working on the same Firebase project(s), it is recommended to leave 17 | # it commented so all members can deploy to the same project(s) in .firebaserc. 18 | # .firebaserc 19 | 20 | # Runtime data 21 | pids 22 | *.pid 23 | *.seed 24 | *.pid.lock 25 | 26 | # Directory for instrumented libs generated by jscoverage/JSCover 27 | lib-cov 28 | 29 | # Coverage directory used by tools like istanbul 30 | coverage 31 | 32 | # nyc test coverage 33 | .nyc_output 34 | 35 | # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) 36 | .grunt 37 | 38 | # Bower dependency directory (https://bower.io/) 39 | bower_components 40 | 41 | # node-waf configuration 42 | .lock-wscript 43 | 44 | # Compiled binary addons (http://nodejs.org/api/addons.html) 45 | build/Release 46 | 47 | # Dependency directories 48 | node_modules/ 49 | 50 | # Build output 51 | dist/ 52 | 53 | # Optional npm cache directory 54 | .npm 55 | 56 | # Optional eslint cache 57 | .eslintcache 58 | 59 | # Optional REPL history 60 | .node_repl_history 61 | 62 | # Output of 'npm pack' 63 | *.tgz 64 | 65 | # Yarn Integrity file 66 | .yarn-integrity 67 | 68 | # dotenv environment variables file 69 | .env 70 | 71 | # dataconnect generated files 72 | .dataconnect 73 | -------------------------------------------------------------------------------- /src/adapters/adapter-manager.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Adapter manager for selecting model-specific adapters 3 | * 4 | * This allows us to handle different model quirks: 5 | * - Grok: XML function calls 6 | * - Gemini: Thought signatures in reasoning_details 7 | * - Deepseek: (future) 8 | * - Others: (future) 9 | */ 10 | 11 | import { BaseModelAdapter, DefaultAdapter } from 
"./base-adapter"; 12 | import { GrokAdapter } from "./grok-adapter"; 13 | import { GeminiAdapter } from "./gemini-adapter"; 14 | import { OpenAIAdapter } from "./openai-adapter"; 15 | import { QwenAdapter } from "./qwen-adapter"; 16 | import { MiniMaxAdapter } from "./minimax-adapter"; 17 | import { DeepSeekAdapter } from "./deepseek-adapter"; 18 | 19 | export class AdapterManager { 20 | private adapters: BaseModelAdapter[]; 21 | private defaultAdapter: DefaultAdapter; 22 | 23 | constructor(modelId: string) { 24 | // Register all available adapters 25 | this.adapters = [ 26 | new GrokAdapter(modelId), 27 | new GeminiAdapter(modelId), 28 | new OpenAIAdapter(modelId), 29 | new QwenAdapter(modelId), 30 | new MiniMaxAdapter(modelId), 31 | new DeepSeekAdapter(modelId) 32 | ]; 33 | this.defaultAdapter = new DefaultAdapter(modelId); 34 | } 35 | 36 | /** 37 | * Get the appropriate adapter for the current model 38 | */ 39 | getAdapter(): BaseModelAdapter { 40 | for (const adapter of this.adapters) { 41 | if (adapter.shouldHandle(this.defaultAdapter["modelId"])) { 42 | return adapter; 43 | } 44 | } 45 | return this.defaultAdapter; 46 | } 47 | 48 | /** 49 | * Check if current model needs special handling 50 | */ 51 | needsTransformation(): boolean { 52 | return this.getAdapter() !== this.defaultAdapter; 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /src/utils.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Calculate fuzzy match score for a string against a query 3 | * Returns a score from 0 to 1 (1 being perfect match) 4 | * Returns 0 if no match found 5 | */ 6 | export function fuzzyScore(text: string, query: string): number { 7 | if (!text || !query) return 0; 8 | 9 | const t = text.toLowerCase(); 10 | const q = query.toLowerCase(); 11 | 12 | // Exact match 13 | if (t === q) return 1.0; 14 | 15 | // Start match 16 | if (t.startsWith(q)) return 0.9; 17 | 18 | // Word 
boundary match (e.g. "claude-3" matches "3") 19 | if (t.includes(` ${q}`) || t.includes(`-${q}`) || t.includes(`/${q}`)) return 0.8; 20 | 21 | // Contains match 22 | if (t.includes(q)) return 0.6; // base score for inclusion 23 | 24 | // Subsequence match (fuzzy) 25 | let score = 0; 26 | let tIdx = 0; 27 | let qIdx = 0; 28 | let consecutive = 0; 29 | 30 | while (tIdx < t.length && qIdx < q.length) { 31 | if (t[tIdx] === q[qIdx]) { 32 | score += 1 + (consecutive * 0.5); // Bonus for consecutive matches 33 | consecutive++; 34 | qIdx++; 35 | } else { 36 | consecutive = 0; 37 | } 38 | tIdx++; 39 | } 40 | 41 | // Only count as match if we matched all query chars 42 | if (qIdx === q.length) { 43 | // Normalize score between 0.1 and 0.5 depending on compactness 44 | // Higher score if match spans shorter distance 45 | const compactness = q.length / (tIdx + 1); // +1 to avoid division by zero, though tIdx always >= 1 here 46 | return 0.1 + (0.4 * compactness * (score / (q.length * 2))); // Heuristic 47 | } 48 | 49 | return 0; 50 | } 51 | 52 | /** 53 | * Format a number as currency 54 | */ 55 | export function formatCurrency(amount: number): string { 56 | if (amount === 0) return "FREE"; 57 | return `$${amount.toFixed(2)}`; 58 | } 59 | -------------------------------------------------------------------------------- /tests/monitor-integration-test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Monitor Mode Integration Tests 4 | # This script runs various Claude Code scenarios with monitor mode to analyze the protocol 5 | 6 | set -e 7 | 8 | CLAUDISH="./dist/index.js" 9 | TEST_DIR="$(pwd)/tests" 10 | LOGS_DIR="$(pwd)/logs" 11 | 12 | echo "========================================" 13 | echo "Claudish Monitor Mode Integration Tests" 14 | echo "========================================" 15 | echo "" 16 | 17 | # Create logs directory 18 | mkdir -p "$LOGS_DIR" 19 | 20 | # Test scenarios 21 | declare -a TESTS=( 22 | 
"1:simple:What is 2+2? Answer briefly." 23 | "2:file_read:Read the package.json file and tell me the version" 24 | "3:grep:Search for 'createProxyServer' in the codebase" 25 | "4:multi_tool:List all TypeScript files in src/ and count them" 26 | ) 27 | 28 | # Run each test 29 | for test in "${TESTS[@]}"; do 30 | IFS=':' read -r num name query <<< "$test" 31 | 32 | echo "========================================" 33 | echo "TEST $num: $name" 34 | echo "Query: $query" 35 | echo "========================================" 36 | echo "" 37 | 38 | # Run claudish with monitor mode 39 | LOG_FILE="$LOGS_DIR/test_${num}_${name}.log" 40 | 41 | echo "[TEST] Running: $CLAUDISH --monitor --debug \"$query\"" 42 | echo "[TEST] Logs will be saved to: $LOG_FILE" 43 | echo "" 44 | 45 | # Run the test (redirect stderr to capture logs) 46 | if $CLAUDISH --monitor --debug "$query" 2>&1 | tee "$LOG_FILE"; then 47 | echo "" 48 | echo "[TEST] ✅ Test $num completed successfully" 49 | else 50 | echo "" 51 | echo "[TEST] ❌ Test $num failed" 52 | fi 53 | 54 | echo "" 55 | echo "Waiting 2 seconds before next test..." 56 | sleep 2 57 | echo "" 58 | done 59 | 60 | echo "========================================" 61 | echo "All tests completed!" 62 | echo "========================================" 63 | echo "" 64 | echo "Log files:" 65 | ls -lh "$LOGS_DIR"/test_*.log 66 | 67 | echo "" 68 | echo "To analyze logs:" 69 | echo " cat $LOGS_DIR/test_1_simple.log | grep -A 50 'MONITOR'" 70 | echo " cat $LOGS_DIR/test_2_file_read.log | grep -A 50 'tool_use'" 71 | -------------------------------------------------------------------------------- /ai_docs/GEMINI_NO_CONTENT_FIX.md: -------------------------------------------------------------------------------- 1 | # Gemini/Grok Empty Content Fix 2 | 3 | ## Problem 4 | Users reported receiving "(no content)" messages before the actual response when using Gemini 2.0 Flash or other reasoning models. 
5 | 6 | **Root Cause**: The proxy server was proactively creating an empty text block (`content_block_start` with type `text`) immediately after receiving the request, "for protocol compliance". When the first chunk from the model arrived containing reasoning (thinking) or other content, this empty text block was closed without any text being added to it. Claude Code renders this closed empty block as a "(no content)" message. 7 | 8 | ## Solution 9 | Removed the eager initialization of the empty text block. The code now lazily initializes the appropriate block type (text or thinking) based on the content of the first chunk received from the model. 10 | 11 | ### Changes in `src/proxy-server.ts` 12 | 13 | **Removed (Commented Out):** 14 | ```typescript 15 | // THINKING BLOCK SUPPORT: We still need to send content_block_start IMMEDIATELY 16 | // Protocol requires it right after message_start, before ping 17 | // But we'll close and reopen if reasoning arrives first 18 | textBlockIndex = currentBlockIndex++; 19 | sendSSE("content_block_start", { 20 | type: "content_block_start", 21 | index: textBlockIndex, 22 | content_block: { 23 | type: "text", 24 | text: "", 25 | }, 26 | }); 27 | textBlockStarted = true; 28 | ``` 29 | 30 | ### Logic Flow 31 | 32 | 1. **Start**: Send `message_start` and `ping`. 33 | 2. **Wait**: Wait for first chunk from OpenRouter. 34 | 3. **First Chunk**: 35 | - **If Reasoning**: Start `thinking` block (index 0). 36 | - **If Content**: Start `text` block (index 0). 37 | - **If Tool Call**: Start `tool_use` block (index 0). 38 | 39 | This ensures that no empty blocks are created and closed, preventing the "(no content)" rendering issue. 40 | 41 | ## Verification 42 | - Analyzed code flow for all 3 scenarios (reasoning, content, tool use). 43 | - Verified that `textBlockIndex` and `currentBlockIndex` are correctly managed without the eager initialization. 
44 | - Verified that existing lazy initialization logic handles the "not started" state correctly. 45 | 46 | **Date**: 2025-11-25 47 | **Status**: Fixed 48 | -------------------------------------------------------------------------------- /src/adapters/qwen-adapter.ts: -------------------------------------------------------------------------------- 1 | import { BaseModelAdapter, AdapterResult } from "./base-adapter"; 2 | import { log } from "../logger"; 3 | 4 | // Qwen special tokens that should be stripped from output 5 | const QWEN_SPECIAL_TOKENS = [ 6 | "<|im_start|>", 7 | "<|im_end|>", 8 | "<|endoftext|>", 9 | "<|end|>", 10 | "assistant\n", // Role marker that sometimes leaks 11 | ]; 12 | 13 | export class QwenAdapter extends BaseModelAdapter { 14 | processTextContent( 15 | textContent: string, 16 | accumulatedText: string 17 | ): AdapterResult { 18 | // Strip Qwen special tokens that may leak through 19 | // This can happen when the model gets confused and outputs its chat template 20 | let cleanedText = textContent; 21 | for (const token of QWEN_SPECIAL_TOKENS) { 22 | cleanedText = cleanedText.replaceAll(token, ""); 23 | } 24 | 25 | // Also handle partial tokens at chunk boundaries 26 | // e.g., "<|im_" at the end of one chunk and "start|>" at the beginning of next 27 | cleanedText = cleanedText.replace(/<\|[a-z_]*$/i, ""); // Partial at end 28 | cleanedText = cleanedText.replace(/^[a-z_]*\|>/i, ""); // Partial at start 29 | 30 | const wasTransformed = cleanedText !== textContent; 31 | if (wasTransformed && cleanedText.length === 0) { 32 | // Entire chunk was special tokens, skip it 33 | return { 34 | cleanedText: "", 35 | extractedToolCalls: [], 36 | wasTransformed: true, 37 | }; 38 | } 39 | 40 | return { 41 | cleanedText, 42 | extractedToolCalls: [], 43 | wasTransformed, 44 | }; 45 | } 46 | 47 | /** 48 | * Handle request preparation - specifically for mapping reasoning parameters 49 | */ 50 | override prepareRequest(request: any, originalRequest: any): any 
{ 51 | if (originalRequest.thinking) { 52 | const { budget_tokens } = originalRequest.thinking; 53 | 54 | // Qwen specific parameters 55 | request.enable_thinking = true; 56 | request.thinking_budget = budget_tokens; 57 | 58 | log(`[QwenAdapter] Mapped budget ${budget_tokens} -> enable_thinking: true, thinking_budget: ${budget_tokens}`); 59 | 60 | // Cleanup: Remove raw thinking object 61 | delete request.thinking; 62 | } 63 | 64 | return request; 65 | } 66 | 67 | shouldHandle(modelId: string): boolean { 68 | const lower = modelId.toLowerCase(); 69 | return lower.includes("qwen") || lower.includes("alibaba"); 70 | } 71 | 72 | getName(): string { 73 | return "QwenAdapter"; 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /tests/verify-user-models.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Test to verify the EXACT models the user specified 3 | */ 4 | 5 | import { join } from "node:path"; 6 | 7 | // Load .env 8 | const envPath = join(import.meta.dir, "..", ".env"); 9 | const envFile = await Bun.file(envPath).text(); 10 | for (const line of envFile.split("\n")) { 11 | if (line.startsWith("#") || !line.includes("=")) continue; 12 | const [key, ...values] = line.split("="); 13 | process.env[key.trim()] = values.join("=").trim(); 14 | } 15 | 16 | const OPENROUTER_API_KEY = process.env.OPENROUTER_API_KEY; 17 | if (!OPENROUTER_API_KEY) { 18 | throw new Error("OPENROUTER_API_KEY not found"); 19 | } 20 | 21 | // User's EXACT models from original request 22 | const USER_SPECIFIED_MODELS = [ 23 | "x-ai/grok-code-fast-1", 24 | "openai/gpt-5-codex", 25 | "minimax/minimax-m2", 26 | "zhipuai/glm-4", // User said "z-ai/glm-4.6" - trying correct prefix 27 | "qwen/qwen3-vl-235b-a22b-instruct", 28 | ]; 29 | 30 | console.log("\n🔍 VERIFYING USER-SPECIFIED MODELS"); 31 | console.log("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n"); 32 | 33 | for (const model of USER_SPECIFIED_MODELS) { 
34 | console.log(`Testing: ${model}`); 35 | 36 | try { 37 | const response = await fetch("https://openrouter.ai/api/v1/chat/completions", { 38 | method: "POST", 39 | headers: { 40 | "Content-Type": "application/json", 41 | Authorization: `Bearer ${OPENROUTER_API_KEY}`, 42 | "HTTP-Referer": "https://github.com/MadAppGang/claude-code", 43 | "X-Title": "Claudish Model Verification", 44 | }, 45 | body: JSON.stringify({ 46 | model, 47 | messages: [ 48 | { 49 | role: "user", 50 | content: "Say 'hi' in one word", 51 | }, 52 | ], 53 | max_tokens: 10, 54 | }), 55 | }); 56 | 57 | if (response.ok) { 58 | const data = await response.json(); 59 | const reply = data.choices?.[0]?.message?.content || "no response"; 60 | console.log(` ✅ VALID - Response: "${reply}"`); 61 | } else { 62 | const error = await response.text(); 63 | console.log(` ❌ INVALID - Error: ${error}`); 64 | 65 | // Try to suggest correct model ID 66 | if (error.includes("not a valid model")) { 67 | console.log(` 💡 Suggestion: This model ID may be outdated`); 68 | } 69 | } 70 | } catch (err) { 71 | console.log(` ❌ ERROR: ${err}`); 72 | } 73 | 74 | console.log(""); 75 | } 76 | 77 | console.log("\n━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n"); 78 | -------------------------------------------------------------------------------- /src/adapters/openai-adapter.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * OpenAI adapter for handling model-specific behaviors 3 | * 4 | * Handles: 5 | * - Mapping 'thinking.budget_tokens' to 'reasoning_effort' for o1/o3 models 6 | */ 7 | 8 | import { BaseModelAdapter, AdapterResult } from "./base-adapter.js"; 9 | import { log } from "../logger.js"; 10 | 11 | export class OpenAIAdapter extends BaseModelAdapter { 12 | processTextContent( 13 | textContent: string, 14 | accumulatedText: string 15 | ): AdapterResult { 16 | // OpenAI models return standard content, no XML parsing needed for tool calls 17 | // (OpenRouter handles standard 
tool_calls mapping for us) 18 | return { 19 | cleanedText: textContent, 20 | extractedToolCalls: [], 21 | wasTransformed: false, 22 | }; 23 | } 24 | 25 | /** 26 | * Handle request preparation - specifically for mapping reasoning parameters 27 | */ 28 | override prepareRequest(request: any, originalRequest: any): any { 29 | // Handle mapping of 'thinking' parameter from Claude (budget_tokens) to reasoning_effort 30 | if (originalRequest.thinking) { 31 | const { budget_tokens } = originalRequest.thinking; 32 | 33 | // Logic for mapping budget to effort 34 | // < 4000: minimal 35 | // 4000 - 15999: low 36 | // 16000 - 31999: medium 37 | // >= 32000: high 38 | let effort = "medium"; 39 | 40 | if (budget_tokens < 4000) effort = "minimal"; 41 | else if (budget_tokens < 16000) effort = "low"; 42 | else if (budget_tokens >= 32000) effort = "high"; 43 | 44 | // Special case: GPT-5-codex might not support minimal (per notes), but we'll try to follow budget 45 | // The API should degrade gracefully if minimal isn't supported, or we could add a model check here 46 | 47 | request.reasoning_effort = effort; 48 | 49 | // Cleanup: Remove raw thinking object as we've translated it 50 | // This prevents OpenRouter from having both params if it decides to pass thinking through 51 | delete request.thinking; 52 | 53 | log(`[OpenAIAdapter] Mapped budget ${budget_tokens} -> reasoning_effort: ${effort}`); 54 | } 55 | 56 | return request; 57 | } 58 | 59 | shouldHandle(modelId: string): boolean { 60 | // Handle explicit OpenAI models or OpenRouter prefixes for OpenAI reasoning models 61 | // Checking for o1/o3 specifically as they are the current reasoning models 62 | return ( 63 | modelId.startsWith("openai/") || 64 | modelId.includes("o1") || 65 | modelId.includes("o3") 66 | ); 67 | } 68 | 69 | getName(): string { 70 | return "OpenAIAdapter"; 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /scripts/generate-manifest.ts: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bun 2 | /** 3 | * Generate release manifest with checksums 4 | * 5 | * Usage: bun scripts/generate-manifest.ts 6 | * 7 | * Creates manifest.json with checksums and file sizes for all platforms 8 | */ 9 | 10 | import { createHash } from "node:crypto"; 11 | import { readFileSync, readdirSync, statSync, writeFileSync } from "node:fs"; 12 | import { join } from "node:path"; 13 | 14 | interface PlatformInfo { 15 | checksum: string; 16 | size: number; 17 | } 18 | 19 | interface Manifest { 20 | version: string; 21 | buildDate: string; 22 | platforms: Record; 23 | } 24 | 25 | const PLATFORM_MAP: Record = { 26 | "claudish-darwin-arm64": "darwin-arm64", 27 | "claudish-darwin-x64": "darwin-x64", 28 | "claudish-linux-x64": "linux-x64", 29 | "claudish-linux-arm64": "linux-arm64", 30 | }; 31 | 32 | function computeSha256(filePath: string): string { 33 | const content = readFileSync(filePath); 34 | return createHash("sha256").update(content).digest("hex"); 35 | } 36 | 37 | function generateManifest(version: string, releaseDir: string): Manifest { 38 | const platforms: Record = {}; 39 | 40 | const files = readdirSync(releaseDir); 41 | 42 | for (const file of files) { 43 | const platform = PLATFORM_MAP[file]; 44 | if (!platform) continue; 45 | 46 | const filePath = join(releaseDir, file); 47 | const stats = statSync(filePath); 48 | 49 | platforms[platform] = { 50 | checksum: computeSha256(filePath), 51 | size: stats.size, 52 | }; 53 | } 54 | 55 | return { 56 | version, 57 | buildDate: new Date().toISOString(), 58 | platforms, 59 | }; 60 | } 61 | 62 | // Main 63 | const args = process.argv.slice(2); 64 | 65 | if (args.length < 2) { 66 | console.error("Usage: bun scripts/generate-manifest.ts "); 67 | process.exit(1); 68 | } 69 | 70 | const [version, releaseDir] = args; 71 | 72 | const manifest = generateManifest(version, releaseDir); 73 | 74 | // Write manifest.json 75 | const 
manifestPath = join(releaseDir, "manifest.json"); 76 | writeFileSync(manifestPath, JSON.stringify(manifest, null, 2)); 77 | 78 | console.log("Generated manifest.json:"); 79 | console.log(JSON.stringify(manifest, null, 2)); 80 | 81 | // Also write checksums.txt for backwards compatibility 82 | const checksumsPath = join(releaseDir, "checksums.txt"); 83 | const checksums = Object.entries(PLATFORM_MAP) 84 | .filter(([file]) => manifest.platforms[PLATFORM_MAP[file]]) 85 | .map(([file, platform]) => `${manifest.platforms[platform].checksum} ${file}`) 86 | .join("\n"); 87 | 88 | writeFileSync(checksumsPath, checksums + "\n"); 89 | console.log("\nGenerated checksums.txt"); 90 | -------------------------------------------------------------------------------- /src/adapters/base-adapter.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Base adapter for model-specific transformations 3 | * 4 | * Different models have different quirks that need translation: 5 | * - Grok: XML function calls instead of JSON tool_calls 6 | * - Deepseek: May have its own format 7 | * - Others: Future model-specific behaviors 8 | */ 9 | 10 | export interface ToolCall { 11 | id: string; 12 | name: string; 13 | arguments: Record; 14 | } 15 | 16 | export interface AdapterResult { 17 | /** Cleaned text content (with XML/special formats removed) */ 18 | cleanedText: string; 19 | /** Extracted tool calls from special formats */ 20 | extractedToolCalls: ToolCall[]; 21 | /** Whether any transformation was done */ 22 | wasTransformed: boolean; 23 | } 24 | 25 | export abstract class BaseModelAdapter { 26 | protected modelId: string; 27 | 28 | constructor(modelId: string) { 29 | this.modelId = modelId; 30 | } 31 | 32 | /** 33 | * Process text content and extract any model-specific tool call formats 34 | * @param textContent - The raw text content from the model 35 | * @param accumulatedText - The accumulated text so far (for multi-chunk parsing) 36 | * @returns 
Cleaned text and any extracted tool calls 37 | */ 38 | abstract processTextContent( 39 | textContent: string, 40 | accumulatedText: string 41 | ): AdapterResult; 42 | 43 | /** 44 | * Check if this adapter should be used for the given model 45 | */ 46 | abstract shouldHandle(modelId: string): boolean; 47 | 48 | /** 49 | * Get adapter name for logging 50 | */ 51 | abstract getName(): string; 52 | 53 | /** 54 | * Handle any request preparation before sending to the model 55 | * Useful for mapping parameters like thinking budget -> reasoning_effort 56 | * @param request - The OpenRouter payload being prepared 57 | * @param originalRequest - The original Claude-format request 58 | * @returns The modified request payload 59 | */ 60 | prepareRequest(request: any, originalRequest: any): any { 61 | return request; 62 | } 63 | 64 | /** 65 | * Reset internal state between requests (prevents state contamination) 66 | */ 67 | reset(): void { 68 | // Default implementation does nothing 69 | // Subclasses can override if they maintain state 70 | } 71 | } 72 | 73 | /** 74 | * Default adapter that does no transformation 75 | */ 76 | export class DefaultAdapter extends BaseModelAdapter { 77 | processTextContent(textContent: string, accumulatedText: string): AdapterResult { 78 | return { 79 | cleanedText: textContent, 80 | extractedToolCalls: [], 81 | wasTransformed: false 82 | }; 83 | } 84 | 85 | shouldHandle(modelId: string): boolean { 86 | return false; // Default adapter is fallback 87 | } 88 | 89 | getName(): string { 90 | return "DefaultAdapter"; 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "claudish", 3 | "version": "3.0.0", 4 | "description": "Run Claude Code with any model - OpenRouter, Ollama, LM Studio & local models", 5 | "type": "module", 6 | "main": "./dist/index.js", 7 | "bin": { 8 | "claudish": 
"dist/index.js" 9 | }, 10 | "scripts": { 11 | "dev": "bun run src/index.ts", 12 | "dev:mcp": "bun run src/index.ts --mcp", 13 | "dev:grok": "bun run src/index.ts --interactive --model x-ai/grok-code-fast-1", 14 | "dev:grok:debug": "bun run src/index.ts --interactive --debug --log-level info --model x-ai/grok-code-fast-1", 15 | "dev:info": "bun run src/index.ts --interactive --monitor", 16 | "extract-models": "bun run scripts/extract-models.ts", 17 | "build": "bun run extract-models && bun build src/index.ts --outdir dist --target node && chmod +x dist/index.js", 18 | "build:ci": "bun build src/index.ts --outdir dist --target node && chmod +x dist/index.js", 19 | "build:binary": "bun run extract-models && bun build src/index.ts --compile --outfile claudish", 20 | "build:binary:linux": "bun run extract-models && bun build src/index.ts --compile --target=bun-linux-x64 --outfile claudish-linux-x64", 21 | "build:binary:mac": "bun run extract-models && bun build src/index.ts --compile --target=bun-darwin-arm64 --outfile claudish-darwin-arm64", 22 | "link": "npm link", 23 | "unlink": "npm unlink -g claudish", 24 | "install-global": "bun run build && npm link", 25 | "kill-all": "pkill -f 'bun.*claudish' || pkill -f 'claude.*claudish-settings' || echo 'No claudish processes found'", 26 | "test": "bun test ./tests/comprehensive-model-test.ts", 27 | "typecheck": "tsc --noEmit", 28 | "lint": "biome check .", 29 | "format": "biome format --write .", 30 | "postinstall": "node scripts/postinstall.cjs" 31 | }, 32 | "dependencies": { 33 | "@hono/node-server": "^1.19.6", 34 | "@inquirer/prompts": "^8.0.1", 35 | "@inquirer/search": "^4.0.1", 36 | "@modelcontextprotocol/sdk": "^1.22.0", 37 | "dotenv": "^17.2.3", 38 | "hono": "^4.10.6", 39 | "undici": "^7.16.0", 40 | "zod": "^4.1.13" 41 | }, 42 | "devDependencies": { 43 | "@biomejs/biome": "^1.9.4", 44 | "@types/bun": "latest", 45 | "@types/jest": "^30.0.0", 46 | "jest": "^30.2.0", 47 | "jest-environment-node": "^30.2.0", 48 | 
"typescript": "^5.9.3" 49 | }, 50 | "files": [ 51 | "dist/", 52 | "scripts/", 53 | "skills/", 54 | "AI_AGENT_GUIDE.md", 55 | "recommended-models.json" 56 | ], 57 | "engines": { 58 | "node": ">=18.0.0", 59 | "bun": ">=1.0.0" 60 | }, 61 | "preferGlobal": true, 62 | "keywords": [ 63 | "claude", 64 | "claude-code", 65 | "openrouter", 66 | "proxy", 67 | "cli", 68 | "mcp", 69 | "model-context-protocol", 70 | "ai" 71 | ], 72 | "author": "Jack Rudenko ", 73 | "license": "MIT", 74 | "repository": { 75 | "type": "git", 76 | "url": "https://github.com/MadAppGang/claudish" 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /src/validation/validator.ts: -------------------------------------------------------------------------------- 1 | import type { ValidationReport, ValidationIssue, IssueSeverity } from "./types.js"; 2 | 3 | export enum ValidationCategory { 4 | MANDATORY = "mandatory", 5 | RECOMMENDED = "recommended", 6 | OPTIONAL = "optional", 7 | } 8 | 9 | export interface Validator { 10 | /** 11 | * Validate the implementation and return issues 12 | */ 13 | validate(): ValidationIssue[]; 14 | } 15 | 16 | export abstract class BaseValidator implements Validator { 17 | protected projectPath: string; 18 | 19 | constructor(projectPath: string = ".") { 20 | this.projectPath = projectPath; 21 | } 22 | 23 | abstract validate(): ValidationIssue[]; 24 | 25 | public generateReport(): ValidationReport { 26 | const issues = this.validate(); 27 | const bySeverity = { 28 | critical: issues.filter((i) => i.severity === "critical"), 29 | high: issues.filter((i) => i.severity === "high"), 30 | medium: issues.filter((i) => i.severity === "medium"), 31 | low: issues.filter((i) => i.severity === "low"), 32 | }; 33 | 34 | return { 35 | issues, 36 | summary: { 37 | total: issues.length, 38 | bySeverity, 39 | byCategory: { 40 | mandatory: issues.filter((i) => i.category === "mandatory"), 41 | recommended: issues.filter((i) => i.category === 
"recommended"), 42 | optional: issues.filter((i) => i.category === "optional"), 43 | }, 44 | }, 45 | }; 46 | } 47 | 48 | /** 49 | * MOST LIKELY hypothesis generation - simplified and focused 50 | */ 51 | protected generateImprovementIssue( 52 | whatMightGoWrong: string, 53 | confidence: "high" | "medium" | "low", 54 | onlyIf?: (currentContext: string) => boolean 55 | ): ValidationIssue | null { 56 | if (onlyIf && !onlyIf("")) { 57 | return null; 58 | } 59 | 60 | return { 61 | path: "", 62 | severity: confidence === "high" ? "high" : "medium", 63 | category: "recommended", 64 | message: `Consider adding: ${whatMightGoWrong}`, 65 | hypothesis: whatMightGoWrong, 66 | improvement: `Add ${whatMightGoWrong}`, 67 | }; 68 | } 69 | 70 | /** 71 | * MOST LIKELY improvement scoring 72 | */ 73 | protected calculateImprovementScore(issue: ValidationIssue, impact: number, ease: number, urgency: number): number { 74 | return impact * 0.4 + ease * 0.3 + urgency * 0.3; 75 | } 76 | 77 | protected mostLikely(val: string | string[]): string[] { 78 | const likelyIssues = { 79 | permissions: [ 80 | "permission handling is needed", 81 | "file access permissions are incomplete", 82 | "read/write permissions need review", 83 | ], 84 | "error handling": [ 85 | "error boundaries are missing", 86 | "exception handling needs improvement", 87 | "edge cases should be considered", 88 | ], 89 | performance: [ 90 | "performance optimization is needed", 91 | "memory usage could be optimized", 92 | "speed might be impacted", 93 | ], 94 | }; 95 | 96 | const key = typeof val === "string" ? val : val[0]; 97 | 98 | for (const category in likelyIssues) { 99 | const issues = likelyIssues[category as keyof typeof likelyIssues]; 100 | for (const issue of issues) { 101 | if (issue.includes(key)) { 102 | return issues; 103 | } 104 | } 105 | } 106 | 107 | return typeof val === "string" ? 
[val] : val; 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /src/middleware/types.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Middleware System for Model-Specific Behavior 3 | * 4 | * This system allows clean separation of model-specific logic (Gemini thought signatures, 5 | * Grok XML handling, etc.) from the core proxy server. 6 | */ 7 | 8 | /** 9 | * Context passed to middleware before sending request to OpenRouter 10 | */ 11 | export interface RequestContext { 12 | /** Model ID being used (e.g., "google/gemini-3-pro-preview") */ 13 | modelId: string; 14 | 15 | /** Messages array (mutable - middlewares can modify in place) */ 16 | messages: any[]; 17 | 18 | /** Tools array (if any) */ 19 | tools?: any[]; 20 | 21 | /** Whether this is a streaming request */ 22 | stream: boolean; 23 | } 24 | 25 | /** 26 | * Context passed to middleware after receiving non-streaming response 27 | */ 28 | export interface NonStreamingResponseContext { 29 | /** Model ID being used */ 30 | modelId: string; 31 | 32 | /** OpenAI format response from OpenRouter */ 33 | response: any; 34 | } 35 | 36 | /** 37 | * Context passed to middleware for each streaming chunk 38 | */ 39 | export interface StreamChunkContext { 40 | /** Model ID being used */ 41 | modelId: string; 42 | 43 | /** Raw SSE chunk from OpenRouter */ 44 | chunk: any; 45 | 46 | /** Delta object (chunk.choices[0].delta) - mutable */ 47 | delta: any; 48 | 49 | /** 50 | * Shared metadata across all chunks in this streaming response 51 | * Useful for accumulating state (e.g., thought signatures) 52 | * Auto-cleaned after stream completes 53 | */ 54 | metadata: Map; 55 | } 56 | 57 | /** 58 | * Base middleware interface 59 | * 60 | * Middlewares handle model-specific behavior by hooking into the request/response lifecycle. 
61 | */ 62 | export interface ModelMiddleware { 63 | /** Unique name for this middleware (for logging) */ 64 | readonly name: string; 65 | 66 | /** 67 | * Determines if this middleware should handle the given model 68 | * Called once per request to filter active middlewares 69 | */ 70 | shouldHandle(modelId: string): boolean; 71 | 72 | /** 73 | * Called once when the proxy server starts (optional) 74 | * Use for initialization, loading config, etc. 75 | */ 76 | onInit?(): void | Promise; 77 | 78 | /** 79 | * Called before sending request to OpenRouter 80 | * Can modify messages, add extra_content, inject system messages, etc. 81 | * 82 | * @param context - Mutable context (can modify messages array) 83 | */ 84 | beforeRequest(context: RequestContext): void | Promise; 85 | 86 | /** 87 | * Called after receiving complete non-streaming response (optional) 88 | * Can extract data, transform response, update cache, etc. 89 | * 90 | * @param context - Response context (read-only) 91 | */ 92 | afterResponse?(context: NonStreamingResponseContext): void | Promise; 93 | 94 | /** 95 | * Called for each chunk in a streaming response (optional) 96 | * Can extract data from delta, transform content, etc. 97 | * 98 | * @param context - Chunk context (delta is mutable) 99 | */ 100 | afterStreamChunk?(context: StreamChunkContext): void | Promise; 101 | 102 | /** 103 | * Called once after a streaming response completes (optional) 104 | * Use for cleanup, final processing of accumulated metadata, etc. 105 | * 106 | * @param metadata - Metadata map that was shared across all chunks 107 | */ 108 | afterStreamComplete?(metadata: Map): void | Promise; 109 | } 110 | -------------------------------------------------------------------------------- /landingpage/components/SupportSection.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | 3 | const SupportSection: React.FC = () => { 4 | return ( 5 |
6 |
7 | 8 | {/* Terminal-style status card */} 9 |
10 | 11 | {/* Header bar */} 12 |
13 |
14 | 15 | Open Source Status 16 |
17 | MIT License 18 |
19 | 20 | {/* Content */} 21 |
22 |
23 | 24 | {/* Left: Message */} 25 |
26 |
27 | $ git status --community 28 |
29 |
30 | Claudish is free and open source.
31 | Stars on GitHub help us prioritize development
32 | and show that the community finds this useful. 33 |
34 |
35 | 36 | {/* Right: Action */} 37 | 53 |
54 |
55 |
56 | 57 |
58 |
59 | ); 60 | }; 61 | 62 | export default SupportSection; 63 | -------------------------------------------------------------------------------- /ai_docs/THINKING_ALIGNMENT_SUMMARY.md: -------------------------------------------------------------------------------- 1 | # Thinking Translation Model Alignment Summary 2 | 3 | **Last Updated:** 2025-11-25 4 | **Status:** Verification Complete ✅ 5 | 6 | ## Overview 7 | 8 | We have implemented a comprehensive **Thinking Translation Model** that aligns Claude Code's native `thinking.budget_tokens` parameter with the diverse reasoning configurations of 6 major AI providers. This ensures that when a user requests a specific thinking budget (e.g., "Think for 16k tokens"), it is correctly translated into the native control mechanism for the target model. 9 | 10 | ## Provider Alignment Matrix 11 | 12 | | Provider | Model | Claude Parameter | Translated Parameter | Logic | 13 | | :--- | :--- | :--- | :--- | :--- | 14 | | **OpenAI** | o1, o3 | `budget_tokens` | `reasoning_effort` | < 4k: `minimal`
4k-16k: `low`
16k-32k: `medium`
 >= 32k: `high` | 15 | **Google** | Gemini 3.0 | `budget_tokens` | `thinking_level` | < 16k: `low`<br>
>= 16k: `high` | 16 | | **Google** | Gemini 2.5/2.0 | `budget_tokens` | `thinking_config.thinking_budget` | Passes exact budget (capped at 24,576) | 17 | | **xAI** | Grok 3 Mini | `budget_tokens` | `reasoning_effort` | < 20k: `low`
>= 20k: `high` | 18 | | **Qwen** | Qwen 2.5/3 | `budget_tokens` | `enable_thinking`, `thinking_budget` | `enable_thinking: true`
`thinking_budget`: exact value | 19 | | **MiniMax** | M2 | `thinking` | `reasoning_split` | `reasoning_split: true` | 20 | | **DeepSeek** | R1 | `thinking` | *(Stripped)* | Parameter removed to prevent API error (400) | 21 | 22 | ## Implementation Details 23 | 24 | ### 1. OpenAI Adapter (`OpenAIAdapter`) 25 | - **File:** `src/adapters/openai-adapter.ts` 26 | - **Behavior:** Maps continuous token budget into discrete effort levels. 27 | - **New Feature:** Added support for `minimal` effort (typically < 4000 tokens) for faster, lighter reasoning tasks. 28 | 29 | ### 2. Gemini Adapter (`GeminiAdapter`) 30 | - **File:** `src/adapters/gemini-adapter.ts` 31 | - **Behavior:** 32 | - **Gemini 3 detection:** Checks `modelId` for "gemini-3". Uses `thinking_level`. 33 | - **Backward Compatibility:** Defaults to `thinking_config` for Gemini 2.0/2.5. 34 | - **Safety:** Caps budget at 24k tokens to maintain stability. 35 | 36 | ### 3. Grok Adapter (`GrokAdapter`) 37 | - **File:** `src/adapters/grok-adapter.ts` 38 | - **Behavior:** 39 | - **Validation:** Explicitly checks for "mini" models (Grok 3 Mini). 40 | - **Stripping:** Removes thinking parameters for standard Grok 3 models which do not support API-controlled reasoning (prevents errors). 41 | 42 | ### 4. Qwen Adapter (`QwenAdapter`) 43 | - **File:** `src/adapters/qwen-adapter.ts` 44 | - **Behavior:** 45 | - Enables the specific `enable_thinking` flag required by Alibaba Cloud / OpenRouter. 46 | - Passes the budget through directly. 47 | 48 | ### 5. MiniMax Adapter (`MiniMaxAdapter`) 49 | - **File:** `src/adapters/minimax-adapter.ts` 50 | - **Behavior:** 51 | - Sets `reasoning_split: true`. 52 | - Does not support budget control, but correctly enables the interleaved reasoning feature. 53 | 54 | ### 6. DeepSeek Adapter (`DeepSeekAdapter`) 55 | - **File:** `src/adapters/deepseek-adapter.ts` 56 | - **Behavior:** 57 | - **Defensive:** Detects DeepSeek models and *removes* the `thinking` object. 
58 | - **Reasoning:** Reasoning happens automatically (R1) or not at all; sending the parameter causes API rejection. 59 | 60 | ## Protocol Integration 61 | 62 | The translation happens during the `prepareRequest` phase of the `BaseModelAdapter`. 63 | 1. **Intercept:** The adapter intercepts the `ClaudeRequest`. 64 | 2. **Translate:** It reads `thinking.budget_tokens`. 65 | 3. **Mutate:** It modifies the `OpenRouterPayload` to add provider-specific fields. 66 | 4. **Clean:** It deletes the original `thinking` object to prevent OpenRouter from receiving conflicting or unrecognized parameters. 67 | -------------------------------------------------------------------------------- /tests/debug-snapshot.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Debug script to inspect actual SSE events from proxy 3 | */ 4 | 5 | import { readFileSync } from "fs"; 6 | import { join } from "path"; 7 | import { createProxyServer } from "../src/proxy-server"; 8 | import type { ProxyServer } from "../src/types"; 9 | 10 | const PORT = 8340; 11 | const OPENROUTER_API_KEY = process.env.OPENROUTER_API_KEY || ""; 12 | const TEST_MODEL = "anthropic/claude-sonnet-4.5"; 13 | 14 | async function parseSSE(response: Response) { 15 | const events: any[] = []; 16 | 17 | if (!response.body) throw new Error("No body"); 18 | 19 | const reader = response.body.getReader(); 20 | const decoder = new TextDecoder(); 21 | let buffer = ""; 22 | let currentEvent: string | null = null; 23 | 24 | while (true) { 25 | const { done, value } = await reader.read(); 26 | if (done) break; 27 | 28 | buffer += decoder.decode(value, { stream: true }); 29 | const lines = buffer.split("\n"); 30 | buffer = lines.pop() || ""; 31 | 32 | for (const line of lines) { 33 | if (!line.trim()) { 34 | currentEvent = null; 35 | continue; 36 | } 37 | 38 | if (line.startsWith("event:")) { 39 | currentEvent = line.substring(6).trim(); 40 | } else if (line.startsWith("data:")) { 41 | const 
dataStr = line.substring(5).trim(); 42 | if (dataStr === "[DONE]") continue; 43 | 44 | try { 45 | const data = JSON.parse(dataStr); 46 | const eventType = currentEvent || data.type || "unknown"; 47 | events.push({ event: eventType, data }); 48 | console.log(`[${events.length}] ${eventType}`, data.index !== undefined ? `(index: ${data.index})` : ""); 49 | } catch (e) { 50 | console.warn("Parse error:", dataStr); 51 | } 52 | } 53 | } 54 | } 55 | 56 | return events; 57 | } 58 | 59 | async function main() { 60 | console.log("Starting debug test...\n"); 61 | 62 | // Start proxy 63 | const server: ProxyServer = await createProxyServer(PORT, OPENROUTER_API_KEY, TEST_MODEL, false); 64 | console.log(`✅ Proxy started on port ${PORT}\n`); 65 | 66 | // Load fixture 67 | const fixturePath = join(import.meta.dir, "fixtures", "example_tool_use.json"); 68 | const fixture = JSON.parse(readFileSync(fixturePath, "utf-8")); 69 | 70 | console.log(`Testing fixture: ${fixture.name}`); 71 | console.log(`Expected blocks: ${fixture.assertions.contentBlocks.length}\n`); 72 | 73 | // Make request 74 | const response = await fetch(`http://127.0.0.1:${PORT}/v1/messages`, { 75 | method: "POST", 76 | headers: { 77 | "Content-Type": "application/json", 78 | ...fixture.request.headers, 79 | }, 80 | body: JSON.stringify(fixture.request.body), 81 | }); 82 | 83 | console.log("Response status:", response.status); 84 | console.log("\nSSE Events:\n"); 85 | 86 | const events = await parseSSE(response); 87 | 88 | console.log(`\nTotal events: ${events.length}\n`); 89 | 90 | // Analyze content blocks 91 | const starts = events.filter(e => e.event === "content_block_start"); 92 | const stops = events.filter(e => e.event === "content_block_stop"); 93 | 94 | console.log("Content Block Analysis:"); 95 | console.log(` Starts: ${starts.length}`); 96 | starts.forEach((e, i) => { 97 | console.log(` [${i}] index=${e.data.index}, type=${e.data.content_block?.type}, name=${e.data.content_block?.name || "n/a"}`); 98 | 
}); 99 | 100 | console.log(` Stops: ${stops.length}`); 101 | stops.forEach((e, i) => { 102 | console.log(` [${i}] index=${e.data.index}`); 103 | }); 104 | 105 | await server.shutdown(); 106 | console.log("\n✅ Test complete"); 107 | } 108 | 109 | main().catch(err => { 110 | console.error("Error:", err); 111 | process.exit(1); 112 | }); 113 | -------------------------------------------------------------------------------- /src/types.ts: -------------------------------------------------------------------------------- 1 | // AUTO-GENERATED from shared/recommended-models.md 2 | // DO NOT EDIT MANUALLY - Run 'bun run extract-models' to regenerate 3 | 4 | // OpenRouter Models - Top Recommended for Development (Priority Order) 5 | export const OPENROUTER_MODELS = [ 6 | "x-ai/grok-code-fast-1", 7 | "minimax/minimax-m2", 8 | "google/gemini-2.5-flash", 9 | "openai/gpt-5", 10 | "openai/gpt-5.1-codex", 11 | "qwen/qwen3-vl-235b-a22b-instruct", 12 | "openrouter/polaris-alpha", 13 | "custom", 14 | ] as const; 15 | 16 | export type OpenRouterModel = (typeof OPENROUTER_MODELS)[number]; 17 | 18 | // CLI Configuration 19 | export interface ClaudishConfig { 20 | model?: OpenRouterModel | string; // Optional - will prompt if not provided 21 | port?: number; 22 | autoApprove: boolean; 23 | dangerous: boolean; 24 | interactive: boolean; 25 | debug: boolean; 26 | logLevel: "debug" | "info" | "minimal"; // Log verbosity level (default: info) 27 | quiet: boolean; // Suppress [claudish] log messages (default true in single-shot mode) 28 | jsonOutput: boolean; // Output in JSON format for tool integration 29 | monitor: boolean; // Monitor mode - proxy to real Anthropic API and log everything 30 | stdin: boolean; // Read prompt from stdin instead of args 31 | openrouterApiKey?: string; // Optional in monitor mode 32 | anthropicApiKey?: string; // Required in monitor mode 33 | agent?: string; // Agent to use for execution (e.g., "frontend:developer") 34 | freeOnly?: boolean; // Show only free 
models in selector 35 | profile?: string; // Profile name to use for model mapping 36 | claudeArgs: string[]; 37 | 38 | // Model Mapping 39 | modelOpus?: string; 40 | modelSonnet?: string; 41 | modelHaiku?: string; 42 | modelSubagent?: string; 43 | 44 | // Cost tracking 45 | costTracking?: boolean; 46 | auditCosts?: boolean; 47 | resetCosts?: boolean; 48 | 49 | // Local model optimizations 50 | summarizeTools?: boolean; // Summarize tool descriptions to reduce prompt size for local models 51 | } 52 | 53 | // Anthropic API Types 54 | export interface AnthropicMessage { 55 | role: "user" | "assistant"; 56 | content: string | ContentBlock[]; 57 | } 58 | 59 | export interface ContentBlock { 60 | type: "text" | "image"; 61 | text?: string; 62 | source?: { 63 | type: "base64"; 64 | media_type: string; 65 | data: string; 66 | }; 67 | } 68 | 69 | export interface AnthropicRequest { 70 | model: string; 71 | messages: AnthropicMessage[]; 72 | max_tokens?: number; 73 | temperature?: number; 74 | top_p?: number; 75 | stream?: boolean; 76 | system?: string; 77 | } 78 | 79 | export interface AnthropicResponse { 80 | id: string; 81 | type: "message"; 82 | role: "assistant"; 83 | content: ContentBlock[]; 84 | model: string; 85 | stop_reason: string | null; 86 | usage: { 87 | input_tokens: number; 88 | output_tokens: number; 89 | }; 90 | } 91 | 92 | // OpenRouter API Types 93 | export interface OpenRouterMessage { 94 | role: "system" | "user" | "assistant"; 95 | content: string; 96 | } 97 | 98 | export interface OpenRouterRequest { 99 | model: string; 100 | messages: OpenRouterMessage[]; 101 | max_tokens?: number; 102 | temperature?: number; 103 | top_p?: number; 104 | stream?: boolean; 105 | } 106 | 107 | export interface OpenRouterResponse { 108 | id: string; 109 | model: string; 110 | choices: Array<{ 111 | message: { 112 | role: "assistant"; 113 | content: string; 114 | }; 115 | finish_reason: string | null; 116 | }>; 117 | usage: { 118 | prompt_tokens: number; 119 | 
completion_tokens: number; 120 | total_tokens: number; 121 | }; 122 | } 123 | 124 | // Proxy Server 125 | export interface ProxyServer { 126 | port: number; 127 | url: string; 128 | shutdown: () => Promise; 129 | } 130 | -------------------------------------------------------------------------------- /landingpage/components/BlockLogo.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | 3 | // Grid definition: 1 = filled block, 0 = empty space 4 | const LETTERS: Record = { 5 | C: [ 6 | [1, 1, 1, 1], 7 | [1, 0, 0, 0], 8 | [1, 0, 0, 0], 9 | [1, 0, 0, 0], 10 | [1, 1, 1, 1], 11 | ], 12 | L: [ 13 | [1, 0, 0, 0], 14 | [1, 0, 0, 0], 15 | [1, 0, 0, 0], 16 | [1, 0, 0, 0], 17 | [1, 1, 1, 1], 18 | ], 19 | A: [ 20 | [1, 1, 1, 1], 21 | [1, 0, 0, 1], 22 | [1, 1, 1, 1], 23 | [1, 0, 0, 1], 24 | [1, 0, 0, 1], 25 | ], 26 | U: [ 27 | [1, 0, 0, 1], 28 | [1, 0, 0, 1], 29 | [1, 0, 0, 1], 30 | [1, 0, 0, 1], 31 | [1, 1, 1, 1], 32 | ], 33 | D: [ 34 | [1, 1, 1, 0], 35 | [1, 0, 0, 1], 36 | [1, 0, 0, 1], 37 | [1, 0, 0, 1], 38 | [1, 1, 1, 0], 39 | ], 40 | I: [ // Fallback 41 | [1, 1, 1], 42 | [0, 1, 0], 43 | [0, 1, 0], 44 | [0, 1, 0], 45 | [1, 1, 1], 46 | ], 47 | }; 48 | 49 | const WORD = "CLAUD"; 50 | 51 | export const BlockLogo: React.FC = () => { 52 | return ( 53 |
54 | {/* Main Block Letters */} 55 |
56 | {WORD.split('').map((char, i) => ( 57 | 58 | ))} 59 |
60 | 61 | {/* Handwritten 'ish' suffix */} 62 |
63 | 64 | ish 65 | 66 |
67 |
68 |
69 | ); 70 | }; 71 | 72 | const Letter: React.FC<{ char: string }> = ({ char }) => { 73 | const grid = LETTERS[char] || LETTERS['I']; 74 | 75 | // Dimensions for blocks 76 | const blockSize = "w-2 h-2 md:w-[18px] md:h-[18px]"; 77 | const gapSize = "gap-[1px] md:gap-[2px]"; 78 | 79 | return ( 80 |
81 | {/* Shadow Layer (Offset Wireframe) */} 82 |