├── .gitignore ├── README.md ├── frontend ├── .gitignore ├── .npmrc ├── README.md ├── components.json ├── package-lock.json ├── package.json ├── postcss.config.cjs ├── src │ ├── app.d.ts │ ├── app.html │ ├── app.pcss │ ├── lib │ │ ├── components │ │ │ ├── ImportTasksModal.svelte │ │ │ ├── QuestionModal.svelte │ │ │ ├── TaskFormModal.svelte │ │ │ └── ui │ │ │ │ ├── badge │ │ │ │ ├── badge.svelte │ │ │ │ └── index.ts │ │ │ │ ├── button │ │ │ │ ├── button.svelte │ │ │ │ └── index.ts │ │ │ │ ├── card │ │ │ │ ├── card-content.svelte │ │ │ │ ├── card-description.svelte │ │ │ │ ├── card-footer.svelte │ │ │ │ ├── card-header.svelte │ │ │ │ ├── card-title.svelte │ │ │ │ ├── card.svelte │ │ │ │ └── index.ts │ │ │ │ ├── checkbox │ │ │ │ ├── checkbox.svelte │ │ │ │ └── index.ts │ │ │ │ ├── dialog │ │ │ │ ├── dialog-content.svelte │ │ │ │ ├── dialog-description.svelte │ │ │ │ ├── dialog-footer.svelte │ │ │ │ ├── dialog-header.svelte │ │ │ │ ├── dialog-overlay.svelte │ │ │ │ ├── dialog-portal.svelte │ │ │ │ ├── dialog-title.svelte │ │ │ │ └── index.ts │ │ │ │ ├── input │ │ │ │ ├── index.ts │ │ │ │ └── input.svelte │ │ │ │ ├── label │ │ │ │ ├── index.ts │ │ │ │ └── label.svelte │ │ │ │ ├── progress │ │ │ │ ├── index.ts │ │ │ │ └── progress.svelte │ │ │ │ ├── select │ │ │ │ ├── index.ts │ │ │ │ ├── select-content.svelte │ │ │ │ ├── select-group-heading.svelte │ │ │ │ ├── select-item.svelte │ │ │ │ ├── select-scroll-down-button.svelte │ │ │ │ ├── select-scroll-up-button.svelte │ │ │ │ ├── select-separator.svelte │ │ │ │ └── select-trigger.svelte │ │ │ │ ├── separator │ │ │ │ ├── index.ts │ │ │ │ └── separator.svelte │ │ │ │ └── textarea │ │ │ │ ├── index.ts │ │ │ │ └── textarea.svelte │ │ ├── index.ts │ │ ├── types.ts │ │ └── utils.ts │ └── routes │ │ ├── +layout.server.ts │ │ ├── +layout.svelte │ │ └── +page.svelte ├── static │ └── favicon.png ├── svelte.config.js ├── tailwind.config.js ├── tsconfig.json └── vite.config.ts ├── img └── ui.png ├── jest.config.js ├── package-lock.json ├── package.json ├── src ├── config │ ├── index.ts │ ├── migrations.sql │ └── schema.sql ├── index.ts ├── lib │ ├── dbUtils.ts │ ├── llmUtils.ts │ ├── logger.ts │ ├── repomixUtils.ts │ ├── utils.ts │ └── winstonLogger.ts ├── models │ └── types.ts ├── server.ts ├── services │ ├── aiService.ts │ ├── databaseService.ts │ ├── planningStateService.ts │ └── webSocketService.ts └── tools │ ├── adjustPlan.ts │ ├── markTaskComplete.ts │ ├── planFeature.ts │ └── reviewChanges.ts ├── tests ├── json-parser.test.ts ├── llmUtils.unit.test.ts ├── reviewChanges.integration.test.ts └── setupEnv.ts └── tsconfig.json /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | dist 3 | .env 4 | repomix-output.txt 5 | .mcp 6 | logs 7 | frontend/node_modules/ 8 | frontend/build/ 9 | frontend/.svelte-kit/ 10 | .DS_Store 11 | tsconfig.tsbuildinfo 12 | *.db 13 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Task Manager MCP Server 2 | 3 | This is an MCP server built to integrate with AI code editors like Cursor. The main goal here is to maximize Cursor's agentic capabilities and Gemini 2.5's excellent architecting capabilities while working around Cursor's extremely limited context window. This was inspired largely by Roo Code's Boomerang mode, but I found it extremely expensive as the only model that works with it's apply bot is Claude 3.7 Sonnet. 
With this server, you get the best of both worlds: unlimited context window and unlimited usage for the price of Cursor's $20/month subscription. 4 | 5 | In addition, it includes a Svelte UI that allows you to view the task list and progress, manually adjust the plan, and review the changes. 6 | 7 | ## Svelte UI 8 | 9 | ![Task List](./img/ui.png) 10 | 11 | ## Core Features 12 | 13 | - **Complex Feature Planning:** Give it a feature description, and it uses an LLM with project context via `repomix` to generate a step-by-step coding plan for the AI agent to follow, with recursive task breakdown for high-effort tasks. 14 | - **Integrated UI Server:** Runs an Express server to serve static frontend files and provides basic API endpoints for the UI. Opens the UI in the default browser when planning completes or when clarification is needed, and displays the task list and progress. 15 | - **Unlimited Context Window:** Uses Gemini 2.5's 1 million token context window with `repomix`'s truncation when needed. 16 | - **Conversation History:** Keeps track of the conversation history for each feature in a separate JSON file within `.mcp/features/`, allowing Gemini 2.5 to have context when the user asks for adjustments to the plan. 17 | - **Clarification Workflow:** Handles cases where the LLM needs more info, pausing planning and interacting with a connected UI via WebSockets. 18 | - **Task CRUD:** Allows for creating, reading, updating, and deleting tasks via the UI. 19 | - **Code Review:** Analyzes `git diff HEAD` output using an LLM and creates new tasks if needed. 20 | - **Automatic Review (Optional):** If configured (`AUTO_REVIEW_ON_COMPLETION=true`), automatically runs the code review process after the last original task for a feature is completed. 21 | - **Plan Adjustment:** Allows for adjusting the plan after it's created via the `adjust_plan` tool. 22 | 23 | ## Setup 24 | 25 | ### Prerequisites: 26 | 27 | - Node.js 28 | - npm 29 | - Git 30 | 31 | ### Installation & Build: 32 | 33 | 1. **Clone:** 34 | 35 | ```bash 36 | git clone https://github.com/jhawkins11/task-manager-mcp.git 37 | cd task-manager-mcp 38 | ``` 39 | 40 | 2. **Install Backend Deps:** 41 | 42 | ```bash 43 | npm install 44 | ``` 45 | 46 | 3. **Configure:** You'll configure API keys later directly in Cursor's MCP settings (see Usage section), but you might still want a local `.env` file for manual testing (see Configuration section). 47 | 48 | 4. **Build:** This command builds the backend and the Svelte frontend and copies the UI build to the `dist/frontend-ui/` directory. 49 | ```bash 50 | npm run build 51 | ``` 52 | 53 | ### Running the Server (Manually): 54 | 55 | For local testing _without_ Cursor, you can run the server using Node directly or the npm script. This method **will** use the `.env` file for configuration. 56 | 57 | **Using Node directly (use absolute path):** 58 | 59 | ```bash 60 | node /full/path/to/your/task-manager-mcp/dist/server.js 61 | ``` 62 | 63 | **Using npm start:** 64 | 65 | ```bash 66 | npm start 67 | ``` 68 | 69 | This starts the MCP server (stdio), the WebSocket server, and the HTTP server for the UI. The UI should be accessible at `http://localhost:<UI_PORT>` (default 4999). 70 | 71 | ### Configuration (.env file for Manual Running): 72 | 73 | If running manually (not via Cursor), create a .env file in the project root for API keys and ports. Note: When running via Cursor, these should be set in Cursor's mcp.json configuration instead (see Usage section).
74 | 75 | ```bash 76 | # .env - USED ONLY FOR MANUAL `npm start` or `node dist/server.js` 77 | # === OpenRouter (Recommended) === 78 | 79 | # Get key: https://openrouter.ai/keys 80 | OPENROUTER_API_KEY=sk-or-v1-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx 81 | OPENROUTER_MODEL=google/gemini-2.5-flash-preview:thinking 82 | FALLBACK_OPENROUTER_MODEL=google/gemini-2.5-flash-preview:thinking 83 | 84 | # === Google AI API (Alternative) === 85 | # GEMINI_API_KEY=your_google_ai_api_key 86 | # GEMINI_MODEL=gemini-1.5-flash-latest 87 | # FALLBACK_GEMINI_MODEL=gemini-1.5-flash-latest 88 | 89 | # === UI / WebSocket Ports === 90 | # Default is 4999 if not set. 91 | UI_PORT=4999 92 | WS_PORT=4999 93 | 94 | # === Auto Review === 95 | # If true, the agent will automatically run the 'review_changes' tool after the last task is completed. 96 | # Defaults to false. 97 | AUTO_REVIEW_ON_COMPLETION=false 98 | ``` 99 | 100 | ## Avoiding Costs 101 | 102 | **IMPORTANT:** It's highly recommended to connect your own Google AI API key to OpenRouter to avoid the free models' rate limits. See below. 103 | 104 | **Using OpenRouter's Free Tiers:** You can significantly minimize or eliminate costs by using models marked as "Free" on OpenRouter (like google/gemini-2.5-flash-preview:thinking at the time of writing) while connecting your own Google AI API key. Check out this reddit thread for more info: https://www.reddit.com/r/ChatGPTCoding/comments/1jrp1tj/a_simple_guide_to_setting_up_gemini_25_pro_free/ 105 | 106 | **Fallback Costs:** The server automatically retries with a fallback model if the primary hits a rate limit. The default fallback (FALLBACK_OPENROUTER_MODEL) is often a faster/cheaper model like Gemini Flash, which might still have associated costs depending on OpenRouter's current pricing/tiers. Check their site and adjust the fallback model in your configuration if needed. 107 | 108 | ## Usage with Cursor (Task Manager Mode) 109 | 110 | This is the primary way this server is intended to be used. I have not tested it with other AI code editors yet. If you try it, please let me know how it goes, and I'll update the README. 111 | 112 | ### 1. Configure the MCP Server in Cursor: 113 | 114 | After building the server (`npm run build`), you need to tell Cursor how to run it. 115 | 116 | Find Cursor's MCP configuration file. This can be: 117 | 118 | - **Project-specific:** Create/edit a file at `.cursor/mcp.json` inside your project's root directory. 119 | - **Global:** Create/edit a file at `~/.cursor/mcp.json` in your user home directory (for use across all projects).
120 | 121 | Add the following entry to the mcpServers object within that JSON file: 122 | 123 | ```json 124 | { 125 | "mcpServers": { 126 | "task-manager-mcp": { 127 | "command": "node", 128 | "args": ["/full/path/to/your/task-manager-mcp/dist/server.js"], 129 | "env": { 130 | "OPENROUTER_API_KEY": "sk-or-v1-xxxxxxxxxxxxxxxxxxxx" 131 | // optional: my recommended model for MCP is Gemini 2.5 Pro Free which is already set by default 132 | // "OPENROUTER_MODEL": "google/gemini-2.5-flash-preview:thinking", 133 | // also optional 134 | // "FALLBACK_OPENROUTER_MODEL": "google/gemini-2.5-flash-preview:thinking", 135 | // optional: the default port for the UI is 4999 if not set 136 | // "UI_PORT": "4999", 137 | // optional: the default port for the WebSocket server is 4999 if not set 138 | // "WS_PORT": "4999" 139 | // Add GEMINI_API_KEY here instead if using Google directly 140 | // Add any other necessary env vars here 141 | } 142 | } 143 | // Add other MCP servers here if you have them 144 | } 145 | } 146 | ``` 147 | 148 | **IMPORTANT:** 149 | 150 | - Replace `/full/path/to/your/task-manager-mcp/dist/server.js` with the absolute path to the compiled server script on your machine. 151 | - Replace `sk-or-v1-xxxxxxxxxxxxxxxxxxxx` with your actual OpenRouter API key (or set GEMINI_API_KEY if using Google AI directly). 152 | - These env variables defined here will be passed to the server process when Cursor starts it, overriding any `.env` file. 153 | 154 | ### 2. Create a Custom Cursor Mode: 155 | 156 | 1. Go to Cursor Settings -> Features -> Chat -> Enable Custom modes. 157 | 2. Go back to the chat view, click the mode selector (bottom left), and click Add custom mode. 158 | 3. Give it a name (e.g., "MCP Planner", "Task Dev"), choose an icon/shortcut. 159 | 4. Enable Tools: Make sure the tools exposed by this server (`plan_feature`, `mark_task_complete`, `get_next_task`, `review_changes`, `adjust_plan`) are available and enabled for this mode. You might want to enable other tools like Codebase, Terminal, etc., depending on your workflow. 160 | 5. Recommended Instructions for Agent: Paste these rules exactly into the "Custom Instructions" text box: 161 | 162 | ``` 163 | Always use plan_feature mcp tool when getting feature request before doing anything else. ALWAYS!!!!!!!! It will return the first step of the implementation. DO NOT IMPLEMENT MORE THAN WHAT THE TASK STATES. After you're done run mark_task_complete which will give you the next task. If the user says "review" use the review_changes tool. The review_changes tool will generate new tasks for you to follow, just like plan_feature. After a review, follow the same one-at-a-time task completion workflow: complete each review-generated task, mark it complete, and call get_next_task until all are done. 164 | 165 | If clarification is required at any step, you will not receive the next task and will have to run get_next_task manually after the user answers the clarification question through the UI. 166 | 167 | IMPORTANT: Your job is to complete the tasks one at a time. DO NOT DO ANY OTHER CHANGES, ONLY WHAT THE CURRENT TASK SAYS TO DO. 168 | ``` 169 | 170 | 6. Save the custom mode. 171 | 172 | ## Expected Workflow (Using the Custom Mode): 173 | 174 | 1. Select your new custom mode in Cursor. 175 | 2. Give Cursor a feature request (e.g., "add auth using JWT"). 176 | 3. Cursor, following the instructions, will call the `plan_feature` tool. 177 | 4. The server plans, saves data, and returns a JSON response (inside the text content) to Cursor. 
178 | - If successful: The response includes `status: "completed"` and the description of the first task in the `message` field. The UI (if running) is launched/updated. 179 | - If clarification needed: The response includes `status: "awaiting_clarification"`, the `featureId`, the `uiUrl`, and instructions for the agent to wait and call `get_next_task` later. The UI is launched/updated with the question. 180 | 5. Cursor implements only the task described (if provided). 181 | 6. If clarification was needed, the user answers in the UI, the server resumes planning, and updates the UI via WebSocket. The agent, following instructions, then calls `get_next_task` with the `featureId`. 182 | 7. If a task was completed, Cursor calls `mark_task_complete` (with `taskId` and `featureId`). 183 | 8. The server marks the task done and returns the next pending task in the response message. 184 | 9. Cursor repeats steps 4-8. 185 | 10. If the user asks Cursor to "review", it calls `review_changes`. 186 | 187 | ## API Endpoints (for UI) 188 | 189 | The integrated Express server provides these basic endpoints for the frontend: 190 | 191 | - `GET /api/features`: Returns a list of existing feature IDs. 192 | - `GET /api/tasks/:featureId`: Returns the list of tasks for a specific feature. 193 | - `GET /api/tasks`: Returns tasks for the most recently created/modified feature. 194 | - `GET /api/features/:featureId/pending-question`: Checks if there's an active clarification question for the feature. 195 | - `POST /api/tasks`: Creates a new task for a feature. 196 | - `PUT /api/tasks/:taskId`: Updates an existing task. 197 | - `DELETE /api/tasks/:taskId`: Deletes a task. 198 | - _(Static Files)_: Serves files from `dist/frontend-ui/` (e.g., `index.html`). 199 | -------------------------------------------------------------------------------- /frontend/.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | 3 | # Output 4 | .output 5 | .vercel 6 | .netlify 7 | .wrangler 8 | /.svelte-kit 9 | /build 10 | 11 | # OS 12 | .DS_Store 13 | Thumbs.db 14 | 15 | # Env 16 | .env 17 | .env.* 18 | !.env.example 19 | !.env.test 20 | 21 | # Vite 22 | vite.config.js.timestamp-* 23 | vite.config.ts.timestamp-* 24 | -------------------------------------------------------------------------------- /frontend/.npmrc: -------------------------------------------------------------------------------- 1 | engine-strict=true 2 | -------------------------------------------------------------------------------- /frontend/README.md: -------------------------------------------------------------------------------- 1 | # sv 2 | 3 | Everything you need to build a Svelte project, powered by [`sv`](https://github.com/sveltejs/cli). 4 | 5 | ## Creating a project 6 | 7 | If you're seeing this, you've probably already done this step. Congrats! 8 | 9 | ```bash 10 | # create a new project in the current directory 11 | npx sv create 12 | 13 | # create a new project in my-app 14 | npx sv create my-app 15 | ``` 16 | 17 | ## Developing 18 | 19 | Once you've created a project and installed dependencies with `npm install` (or `pnpm install` or `yarn`), start a development server: 20 | 21 | ```bash 22 | npm run dev 23 | 24 | # or start the server and open the app in a new browser tab 25 | npm run dev -- --open 26 | ``` 27 | 28 | ## Building 29 | 30 | To create a production version of your app: 31 | 32 | ```bash 33 | npm run build 34 | ``` 35 | 36 | You can preview the production build with `npm run preview`. 
37 | 38 | > To deploy your app, you may need to install an [adapter](https://svelte.dev/docs/kit/adapters) for your target environment. 39 | -------------------------------------------------------------------------------- /frontend/components.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://shadcn-svelte.com/schema.json", 3 | "style": "default", 4 | "tailwind": { 5 | "config": "tailwind.config.js", 6 | "css": "src/app.pcss", 7 | "baseColor": "slate" 8 | }, 9 | "aliases": { 10 | "components": "$lib/components", 11 | "utils": "$lib/utils", 12 | "ui": "$lib/components/ui", 13 | "hooks": "$lib/hooks" 14 | }, 15 | "typescript": true, 16 | "registry": "https://next.shadcn-svelte.com/registry" 17 | } 18 | -------------------------------------------------------------------------------- /frontend/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "frontend", 3 | "private": true, 4 | "version": "0.0.1", 5 | "type": "module", 6 | "scripts": { 7 | "dev": "vite dev", 8 | "build": "vite build", 9 | "preview": "vite preview", 10 | "prepare": "svelte-kit sync || echo ''", 11 | "check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json", 12 | "check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch" 13 | }, 14 | "devDependencies": { 15 | "@lucide/svelte": "^0.488.0", 16 | "@sveltejs/adapter-auto": "^4.0.0", 17 | "@sveltejs/adapter-static": "^3.0.8", 18 | "@sveltejs/kit": "^2.16.0", 19 | "@sveltejs/vite-plugin-svelte": "^5.0.0", 20 | "autoprefixer": "^10.4.21", 21 | "bits-ui": "^0.22.0", 22 | "clsx": "^2.1.1", 23 | "lucide-svelte": "^0.488.0", 24 | "postcss": "^8.5.3", 25 | "svelte": "^5.0.0", 26 | "svelte-check": "^4.0.0", 27 | "tailwind-merge": "^3.2.0", 28 | "tailwind-variants": "^1.0.0", 29 | "tailwindcss": "^3.4.17", 30 | "typescript": "^5.0.0", 31 | "vite": "^6.2.5" 32 | }, 33 | "dependencies": { 34 | "shadcn-svelte": "^1.0.0-next.9" 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /frontend/postcss.config.cjs: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | plugins: { 3 | tailwindcss: {}, 4 | autoprefixer: {}, 5 | }, 6 | } 7 | -------------------------------------------------------------------------------- /frontend/src/app.d.ts: -------------------------------------------------------------------------------- 1 | // See https://svelte.dev/docs/kit/types#app.d.ts 2 | // for information about these interfaces 3 | declare global { 4 | namespace App { 5 | // interface Error {} 6 | // interface Locals {} 7 | // interface PageData {} 8 | // interface PageState {} 9 | // interface Platform {} 10 | } 11 | } 12 | 13 | export {}; 14 | -------------------------------------------------------------------------------- /frontend/src/app.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | %sveltekit.head% 8 | 9 | 10 |
%sveltekit.body%
11 | 12 | 13 | -------------------------------------------------------------------------------- /frontend/src/app.pcss: -------------------------------------------------------------------------------- 1 | @import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&display=swap'); 2 | 3 | @tailwind base; 4 | @tailwind components; 5 | @tailwind utilities; 6 | 7 | @layer base { 8 | :root { 9 | --background: 0 0% 100%; 10 | --foreground: 222.2 84% 4.9%; 11 | 12 | --muted: 210 40% 96.1%; 13 | --muted-foreground: 215.4 16.3% 46.9%; 14 | 15 | --popover: 0 0% 100%; 16 | --popover-foreground: 222.2 84% 4.9%; 17 | 18 | --card: 0 0% 100%; 19 | --card-foreground: 222.2 84% 4.9%; 20 | 21 | --border: 214.3 31.8% 91.4%; 22 | --input: 214.3 31.8% 91.4%; 23 | 24 | --primary: 222.2 47.4% 11.2%; 25 | --primary-foreground: 210 40% 98%; 26 | 27 | --secondary: 210 40% 96.1%; 28 | --secondary-foreground: 222.2 47.4% 11.2%; 29 | 30 | --accent: 210 40% 96.1%; 31 | --accent-foreground: 222.2 47.4% 11.2%; 32 | 33 | --destructive: 0 72.2% 50.6%; 34 | --destructive-foreground: 210 40% 98%; 35 | 36 | --ring: 222.2 84% 4.9%; 37 | 38 | --radius: 0.5rem; 39 | } 40 | 41 | .dark { 42 | --background: 222.2 84% 4.9%; 43 | --foreground: 210 40% 98%; 44 | 45 | --muted: 217.2 32.6% 17.5%; 46 | --muted-foreground: 215 20.2% 65.1%; 47 | 48 | --popover: 222.2 84% 4.9%; 49 | --popover-foreground: 210 40% 98%; 50 | 51 | --card: 222.2 84% 4.9%; 52 | --card-foreground: 210 40% 98%; 53 | 54 | --border: 217.2 32.6% 17.5%; 55 | --input: 217.2 32.6% 17.5%; 56 | 57 | --primary: 210 40% 98%; 58 | --primary-foreground: 222.2 47.4% 11.2%; 59 | 60 | --secondary: 217.2 32.6% 17.5%; 61 | --secondary-foreground: 210 40% 98%; 62 | 63 | --accent: 217.2 32.6% 17.5%; 64 | --accent-foreground: 210 40% 98%; 65 | 66 | --destructive: 0 62.8% 30.6%; 67 | --destructive-foreground: 210 40% 98%; 68 | 69 | --ring: 212.7 26.8% 83.9%; 70 | } 71 | } 72 | 73 | @layer base { 74 | * { 75 | @apply border-border; 76 | } 77 | body { 78 | @apply bg-background text-foreground; 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ImportTasksModal.svelte: -------------------------------------------------------------------------------- 1 | 51 | 52 | 53 | 54 | 55 | Import Tasks 56 | 57 | Paste a list of tasks generated by a chatbot, following the format below. 58 | 59 | 60 |
61 |
62 | 63 |
64 | 86 |
87 | {/if} 88 | 89 | 90 | 91 | 98 | 99 | 106 | 107 | 108 |
109 | 110 | -------------------------------------------------------------------------------- /frontend/src/lib/components/TaskFormModal.svelte: -------------------------------------------------------------------------------- 1 | 56 | 57 | 58 | 59 | 60 | {isEditing ? 'Edit Task' : 'Add New Task'} 61 | 62 | {isEditing ? 'Update task title and effort.' : 'Create a new task for this feature.'} 63 | 64 | 65 | 66 |
67 |
68 |
69 |
70 | 71 | 72 |
73 | 74 |
75 | 76 | 85 |
86 |
87 | 88 | 89 | 90 | 97 | 98 | 105 | 106 |
107 |
108 |
109 |
-------------------------------------------------------------------------------- /frontend/src/lib/components/ui/badge/badge.svelte: -------------------------------------------------------------------------------- 1 | 10 | 11 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/badge/index.ts: -------------------------------------------------------------------------------- 1 | import { type VariantProps, tv } from "tailwind-variants"; 2 | export { default as Badge } from "./badge.svelte"; 3 | 4 | export const badgeVariants = tv({ 5 | base: "focus:ring-ring inline-flex select-none items-center rounded-full border px-2.5 py-0.5 text-xs font-semibold transition-colors focus:outline-none focus:ring-2 focus:ring-offset-2", 6 | variants: { 7 | variant: { 8 | default: "bg-primary text-primary-foreground hover:bg-primary/80 border-transparent", 9 | secondary: 10 | "bg-secondary text-secondary-foreground hover:bg-secondary/80 border-transparent", 11 | destructive: 12 | "bg-destructive text-destructive-foreground hover:bg-destructive/80 border-transparent", 13 | outline: "text-foreground", 14 | }, 15 | }, 16 | defaultVariants: { 17 | variant: "default", 18 | }, 19 | }); 20 | 21 | export type Variant = VariantProps["variant"]; 22 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/button/button.svelte: -------------------------------------------------------------------------------- 1 | 15 | 16 | 24 | 25 | 26 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/button/index.ts: -------------------------------------------------------------------------------- 1 | import { type VariantProps, tv } from "tailwind-variants"; 2 | import type { Button as ButtonPrimitive } from "bits-ui"; 3 | import Root from "./button.svelte"; 4 | 5 | const buttonVariants = tv({ 6 | base: "ring-offset-background focus-visible:ring-ring inline-flex items-center justify-center whitespace-nowrap rounded-md text-sm font-medium transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-offset-2 disabled:pointer-events-none disabled:opacity-50", 7 | variants: { 8 | variant: { 9 | default: "bg-primary text-primary-foreground hover:bg-primary/90", 10 | destructive: "bg-destructive text-destructive-foreground hover:bg-destructive/90", 11 | outline: 12 | "border-input bg-background hover:bg-accent hover:text-accent-foreground border", 13 | secondary: "bg-secondary text-secondary-foreground hover:bg-secondary/80", 14 | ghost: "hover:bg-accent hover:text-accent-foreground", 15 | link: "text-primary underline-offset-4 hover:underline", 16 | }, 17 | size: { 18 | default: "h-10 px-4 py-2", 19 | sm: "h-9 rounded-md px-3", 20 | lg: "h-11 rounded-md px-8", 21 | icon: "h-10 w-10", 22 | }, 23 | }, 24 | defaultVariants: { 25 | variant: "default", 26 | size: "default", 27 | }, 28 | }); 29 | 30 | type Variant = VariantProps["variant"]; 31 | type Size = VariantProps["size"]; 32 | 33 | type Props = ButtonPrimitive.Props & { 34 | variant?: Variant; 35 | size?: Size; 36 | }; 37 | 38 | type Events = ButtonPrimitive.Events; 39 | 40 | export { 41 | Root, 42 | type Props, 43 | type Events, 44 | // 45 | Root as Button, 46 | type Props as ButtonProps, 47 | type Events as ButtonEvents, 48 | buttonVariants, 49 | }; 50 | -------------------------------------------------------------------------------- 
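The `badge` and `button` modules above follow the shadcn-svelte barrel pattern: each `index.ts` re-exports its Svelte component under a public alias (`Root as Badge`, `Root as Button`) together with a `tailwind-variants` helper (`badgeVariants`, `buttonVariants`). A minimal consumer sketch, assuming the standard `$lib` alias and the prop names implied by the variant definitions above (this snippet is illustrative, not a file from the repository, and event syntax may differ slightly across the bits-ui versions in use):

```svelte
<script lang="ts">
  // Hypothetical usage example, not part of the repository.
  import { Button, buttonVariants } from '$lib/components/ui/button'
  import { Badge } from '$lib/components/ui/badge'
</script>

<!-- A small outline button that forwards its click event -->
<Button variant="outline" size="sm" on:click={() => console.log('clicked')}>
  Review changes
</Button>

<!-- buttonVariants() can also style a plain element to look like a button -->
<a href="/tasks" class={buttonVariants({ variant: 'ghost' })}>Open task list</a>

<!-- Badge variants mirror the badgeVariants definition above -->
<Badge variant="secondary">pending</Badge>
```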
/frontend/src/lib/components/ui/card/card-content.svelte: -------------------------------------------------------------------------------- 1 | 10 | 11 |
12 | 13 |
14 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/card/card-description.svelte: -------------------------------------------------------------------------------- 1 | 10 | 11 |

12 | 13 |

14 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/card/card-footer.svelte: -------------------------------------------------------------------------------- 1 | 10 | 11 |
12 | 13 |
14 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/card/card-header.svelte: -------------------------------------------------------------------------------- 1 | 10 | 11 |
12 | 13 |
14 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/card/card-title.svelte: -------------------------------------------------------------------------------- 1 | 14 | 15 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/card/card.svelte: -------------------------------------------------------------------------------- 1 | 10 | 11 |
15 | 16 |
17 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/card/index.ts: -------------------------------------------------------------------------------- 1 | import Root from "./card.svelte"; 2 | import Content from "./card-content.svelte"; 3 | import Description from "./card-description.svelte"; 4 | import Footer from "./card-footer.svelte"; 5 | import Header from "./card-header.svelte"; 6 | import Title from "./card-title.svelte"; 7 | 8 | export { 9 | Root, 10 | Content, 11 | Description, 12 | Footer, 13 | Header, 14 | Title, 15 | // 16 | Root as Card, 17 | Content as CardContent, 18 | Description as CardDescription, 19 | Footer as CardFooter, 20 | Header as CardHeader, 21 | Title as CardTitle, 22 | }; 23 | 24 | export type HeadingLevel = "h1" | "h2" | "h3" | "h4" | "h5" | "h6"; 25 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/checkbox/checkbox.svelte: -------------------------------------------------------------------------------- 1 | 15 | 16 | 26 | {#snippet children({ checked, indeterminate })} 27 |
28 | {#if indeterminate} 29 | 30 | {:else} 31 | 32 | {/if} 33 |
34 | {/snippet} 35 |
36 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/checkbox/index.ts: -------------------------------------------------------------------------------- 1 | import Root from "./checkbox.svelte"; 2 | export { 3 | Root, 4 | // 5 | Root as Checkbox, 6 | }; 7 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/dialog/dialog-content.svelte: -------------------------------------------------------------------------------- 1 | 16 | 17 | 18 | 19 | 28 | 29 | 32 | 33 | Close 34 | 35 | 36 | 37 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/dialog/dialog-description.svelte: -------------------------------------------------------------------------------- 1 | 10 | 11 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/dialog/dialog-footer.svelte: -------------------------------------------------------------------------------- 1 | 10 | 11 |
15 | 16 |
17 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/dialog/dialog-header.svelte: -------------------------------------------------------------------------------- 1 | 10 | 11 |
12 | 13 |
14 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/dialog/dialog-overlay.svelte: -------------------------------------------------------------------------------- 1 | 15 | 16 | 22 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/dialog/dialog-portal.svelte: -------------------------------------------------------------------------------- 1 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/dialog/dialog-title.svelte: -------------------------------------------------------------------------------- 1 | 10 | 11 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/dialog/index.ts: -------------------------------------------------------------------------------- 1 | import { Dialog as DialogPrimitive } from "bits-ui"; 2 | 3 | import Title from "./dialog-title.svelte"; 4 | import Portal from "./dialog-portal.svelte"; 5 | import Footer from "./dialog-footer.svelte"; 6 | import Header from "./dialog-header.svelte"; 7 | import Overlay from "./dialog-overlay.svelte"; 8 | import Content from "./dialog-content.svelte"; 9 | import Description from "./dialog-description.svelte"; 10 | 11 | const Root = DialogPrimitive.Root; 12 | const Trigger = DialogPrimitive.Trigger; 13 | const Close = DialogPrimitive.Close; 14 | 15 | export { 16 | Root, 17 | Title, 18 | Portal, 19 | Footer, 20 | Header, 21 | Trigger, 22 | Overlay, 23 | Content, 24 | Description, 25 | Close, 26 | // 27 | Root as Dialog, 28 | Title as DialogTitle, 29 | Portal as DialogPortal, 30 | Footer as DialogFooter, 31 | Header as DialogHeader, 32 | Trigger as DialogTrigger, 33 | Overlay as DialogOverlay, 34 | Content as DialogContent, 35 | Description as DialogDescription, 36 | Close as DialogClose, 37 | }; 38 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/input/index.ts: -------------------------------------------------------------------------------- 1 | import Root from "./input.svelte"; 2 | 3 | export type FormInputEvent = T & { 4 | currentTarget: EventTarget & HTMLInputElement; 5 | }; 6 | export type InputEvents = { 7 | blur: FormInputEvent; 8 | change: FormInputEvent; 9 | click: FormInputEvent; 10 | focus: FormInputEvent; 11 | focusin: FormInputEvent; 12 | focusout: FormInputEvent; 13 | keydown: FormInputEvent; 14 | keypress: FormInputEvent; 15 | keyup: FormInputEvent; 16 | mouseover: FormInputEvent; 17 | mouseenter: FormInputEvent; 18 | mouseleave: FormInputEvent; 19 | mousemove: FormInputEvent; 20 | paste: FormInputEvent; 21 | input: FormInputEvent; 22 | wheel: FormInputEvent; 23 | }; 24 | 25 | export { 26 | Root, 27 | // 28 | Root as Input, 29 | }; 30 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/input/input.svelte: -------------------------------------------------------------------------------- 1 | 17 | 18 | 43 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/label/index.ts: -------------------------------------------------------------------------------- 1 | import Root from "./label.svelte"; 2 | 3 | export { 4 | Root, 5 | // 6 | Root as Label, 7 | }; 8 | -------------------------------------------------------------------------------- 
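The `dialog`, `input`, and `label` barrels above are combined by the modal components earlier in this repo (for example `TaskFormModal.svelte`). A rough sketch of the typical composition, assuming standard shadcn-svelte props such as `bind:open` and `bind:value` (illustrative only, with assumed structure; it is not the original modal markup):

```svelte
<script lang="ts">
  // Illustrative sketch with assumed structure, not copied from the repository.
  import * as Dialog from '$lib/components/ui/dialog'
  import { Input } from '$lib/components/ui/input'
  import { Label } from '$lib/components/ui/label'
  import { Button } from '$lib/components/ui/button'

  export let open = false
  let title = ''
</script>

<Dialog.Root bind:open>
  <Dialog.Content>
    <Dialog.Header>
      <Dialog.Title>Add New Task</Dialog.Title>
      <Dialog.Description>Create a new task for this feature.</Dialog.Description>
    </Dialog.Header>

    <Label for="task-title">Title</Label>
    <Input id="task-title" bind:value={title} placeholder="Task title" />

    <Dialog.Footer>
      <Button variant="outline" on:click={() => (open = false)}>Cancel</Button>
      <Button on:click={() => console.log('save', title)}>Save</Button>
    </Dialog.Footer>
  </Dialog.Content>
</Dialog.Root>
```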
/frontend/src/lib/components/ui/label/label.svelte: -------------------------------------------------------------------------------- 1 | 11 | 12 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/progress/index.ts: -------------------------------------------------------------------------------- 1 | import Root from "./progress.svelte"; 2 | 3 | export { 4 | Root, 5 | // 6 | Root as Progress, 7 | }; 8 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/progress/progress.svelte: -------------------------------------------------------------------------------- 1 | 13 | 14 | 21 |
25 |
26 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/select/index.ts: -------------------------------------------------------------------------------- 1 | import { Select as SelectPrimitive } from "bits-ui"; 2 | 3 | import GroupHeading from "./select-group-heading.svelte"; 4 | import Item from "./select-item.svelte"; 5 | import Content from "./select-content.svelte"; 6 | import Trigger from "./select-trigger.svelte"; 7 | import Separator from "./select-separator.svelte"; 8 | import ScrollDownButton from "./select-scroll-down-button.svelte"; 9 | import ScrollUpButton from "./select-scroll-up-button.svelte"; 10 | 11 | const Root = SelectPrimitive.Root; 12 | const Group = SelectPrimitive.Group; 13 | 14 | export { 15 | Root, 16 | Group, 17 | GroupHeading, 18 | Item, 19 | Content, 20 | Trigger, 21 | Separator, 22 | ScrollDownButton, 23 | ScrollUpButton, 24 | // 25 | Root as Select, 26 | Group as SelectGroup, 27 | GroupHeading as SelectGroupHeading, 28 | Item as SelectItem, 29 | Content as SelectContent, 30 | Trigger as SelectTrigger, 31 | Separator as SelectSeparator, 32 | ScrollDownButton as SelectScrollDownButton, 33 | ScrollUpButton as SelectScrollUpButton, 34 | }; 35 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/select/select-content.svelte: -------------------------------------------------------------------------------- 1 | 18 | 19 | 20 | 29 | 30 | 35 | {@render children?.()} 36 | 37 | 38 | 39 | 40 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/select/select-group-heading.svelte: -------------------------------------------------------------------------------- 1 | 11 | 12 | 17 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/select/select-item.svelte: -------------------------------------------------------------------------------- 1 | 15 | 16 | 25 | {#snippet children({ selected, highlighted })} 26 | 27 | {#if selected} 28 | 29 | {/if} 30 | 31 | {#if childrenProp} 32 | {@render childrenProp({ selected, highlighted })} 33 | {:else} 34 | {label || value} 35 | {/if} 36 | {/snippet} 37 | 38 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/select/select-scroll-down-button.svelte: -------------------------------------------------------------------------------- 1 | 12 | 13 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/select/select-scroll-up-button.svelte: -------------------------------------------------------------------------------- 1 | 12 | 13 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/select/select-separator.svelte: -------------------------------------------------------------------------------- 1 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/select/select-trigger.svelte: -------------------------------------------------------------------------------- 1 | 13 | 14 | span]:line-clamp-1", 18 | className 19 | )} 20 | {...restProps} 21 | > 22 | {@render children?.()} 23 | 24 | 25 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/separator/index.ts: 
-------------------------------------------------------------------------------- 1 | import Root from "./separator.svelte"; 2 | 3 | export { 4 | Root, 5 | // 6 | Root as Separator, 7 | }; 8 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/separator/separator.svelte: -------------------------------------------------------------------------------- 1 | 12 | 13 | 23 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/textarea/index.ts: -------------------------------------------------------------------------------- 1 | import Root from './textarea.svelte' 2 | 3 | type FormTextareaEvent = T & { 4 | currentTarget: EventTarget & HTMLTextAreaElement 5 | } 6 | 7 | type TextareaEvents = { 8 | blur: FormTextareaEvent 9 | change: FormTextareaEvent 10 | click: FormTextareaEvent 11 | focus: FormTextareaEvent 12 | keydown: FormTextareaEvent 13 | keypress: FormTextareaEvent 14 | keyup: FormTextareaEvent 15 | mouseover: FormTextareaEvent 16 | mouseenter: FormTextareaEvent 17 | mouseleave: FormTextareaEvent 18 | paste: FormTextareaEvent 19 | input: FormTextareaEvent 20 | } 21 | 22 | export { 23 | Root, 24 | // 25 | Root as Textarea, 26 | type TextareaEvents, 27 | type FormTextareaEvent, 28 | } 29 | 30 | export default Root 31 | -------------------------------------------------------------------------------- /frontend/src/lib/components/ui/textarea/textarea.svelte: -------------------------------------------------------------------------------- 1 | 17 | 18 | 39 | -------------------------------------------------------------------------------- /frontend/src/lib/index.ts: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhawkins11/task-manager-mcp/7e1c44136b13852d8dc62ddc37ab0e32a934b3ff/frontend/src/lib/index.ts -------------------------------------------------------------------------------- /frontend/src/lib/types.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Task interface mirroring the backend structure 3 | */ 4 | export interface Task { 5 | id: string 6 | title: string 7 | description?: string 8 | status: TaskStatus 9 | completed: boolean 10 | effort: 'low' | 'medium' | 'high' 11 | feature_id?: string 12 | parentTaskId?: string 13 | createdAt?: string 14 | updatedAt?: string 15 | children?: Task[] 16 | fromReview?: boolean 17 | } 18 | 19 | /** 20 | * Feature interface for grouping tasks 21 | */ 22 | export interface Feature { 23 | id: string 24 | title: string 25 | description: string 26 | tasks?: Task[] 27 | createdAt?: string 28 | updatedAt?: string 29 | } 30 | 31 | /** 32 | * Task status enum for type safety 33 | */ 34 | export enum TaskStatus { 35 | PENDING = 'pending', 36 | IN_PROGRESS = 'in_progress', 37 | COMPLETED = 'completed', 38 | DECOMPOSED = 'decomposed', 39 | } 40 | 41 | /** 42 | * Task effort enum for type safety 43 | */ 44 | export enum TaskEffort { 45 | LOW = 'low', 46 | MEDIUM = 'medium', 47 | HIGH = 'high', 48 | } 49 | 50 | // --- Frontend Specific Types --- 51 | 52 | // Mirror the backend WebSocket message structure 53 | export type WebSocketMessageType = 54 | | 'tasks_updated' 55 | | 'status_changed' 56 | | 'show_question' 57 | | 'question_response' 58 | | 'request_screenshot' 59 | | 'request_screenshot_ack' 60 | | 'error' 61 | | 'connection_established' 62 | | 'client_registration' 63 | | 'task_created' 64 | | 'task_updated' 65 | | 
'task_deleted' 66 | 67 | export interface WebSocketMessage { 68 | type: WebSocketMessageType 69 | featureId?: string 70 | payload?: any // Keep payload generic for now, specific handlers will parse 71 | } 72 | 73 | // Interface for clarification question payload 74 | export interface ShowQuestionPayload { 75 | questionId: string 76 | question: string 77 | options?: string[] 78 | allowsText?: boolean 79 | } 80 | 81 | // Interface for user's response to a clarification question 82 | export interface QuestionResponsePayload { 83 | questionId: string 84 | response: string 85 | } 86 | 87 | // Interface for task created event 88 | export interface TaskCreatedPayload { 89 | task: Task 90 | featureId: string 91 | createdAt: string 92 | } 93 | 94 | // Interface for task updated event 95 | export interface TaskUpdatedPayload { 96 | task: Task 97 | featureId: string 98 | updatedAt: string 99 | } 100 | 101 | // Interface for task deleted event 102 | export interface TaskDeletedPayload { 103 | taskId: string 104 | featureId: string 105 | deletedAt: string 106 | } 107 | -------------------------------------------------------------------------------- /frontend/src/lib/utils.ts: -------------------------------------------------------------------------------- 1 | import { type ClassValue, clsx } from "clsx"; 2 | import { twMerge } from "tailwind-merge"; 3 | import { cubicOut } from "svelte/easing"; 4 | import type { TransitionConfig } from "svelte/transition"; 5 | 6 | export function cn(...inputs: ClassValue[]) { 7 | return twMerge(clsx(inputs)); 8 | } 9 | 10 | type FlyAndScaleParams = { 11 | y?: number; 12 | x?: number; 13 | start?: number; 14 | duration?: number; 15 | }; 16 | 17 | export const flyAndScale = ( 18 | node: Element, 19 | params: FlyAndScaleParams = { y: -8, x: 0, start: 0.95, duration: 150 } 20 | ): TransitionConfig => { 21 | const style = getComputedStyle(node); 22 | const transform = style.transform === "none" ? "" : style.transform; 23 | 24 | const scaleConversion = ( 25 | valueA: number, 26 | scaleA: [number, number], 27 | scaleB: [number, number] 28 | ) => { 29 | const [minA, maxA] = scaleA; 30 | const [minB, maxB] = scaleB; 31 | 32 | const percentage = (valueA - minA) / (maxA - minA); 33 | const valueB = percentage * (maxB - minB) + minB; 34 | 35 | return valueB; 36 | }; 37 | 38 | const styleToString = ( 39 | style: Record 40 | ): string => { 41 | return Object.keys(style).reduce((str, key) => { 42 | if (style[key] === undefined) return str; 43 | return str + `${key}:${style[key]};`; 44 | }, ""); 45 | }; 46 | 47 | return { 48 | duration: params.duration ?? 200, 49 | delay: 0, 50 | css: (t) => { 51 | const y = scaleConversion(t, [0, 1], [params.y ?? 5, 0]); 52 | const x = scaleConversion(t, [0, 1], [params.x ?? 0, 0]); 53 | const scale = scaleConversion(t, [0, 1], [params.start ?? 0.95, 1]); 54 | 55 | return styleToString({ 56 | transform: `${transform} translate3d(${x}px, ${y}px, 0) scale(${scale})`, 57 | opacity: t 58 | }); 59 | }, 60 | easing: cubicOut 61 | }; 62 | }; -------------------------------------------------------------------------------- /frontend/src/routes/+layout.server.ts: -------------------------------------------------------------------------------- 1 | // Enforces static prerendering for the entire site 2 | export const prerender = true 3 | -------------------------------------------------------------------------------- /frontend/src/routes/+layout.svelte: -------------------------------------------------------------------------------- 1 | 34 | 35 |
36 |
37 | 38 |
39 |
-------------------------------------------------------------------------------- /frontend/static/favicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhawkins11/task-manager-mcp/7e1c44136b13852d8dc62ddc37ab0e32a934b3ff/frontend/static/favicon.png -------------------------------------------------------------------------------- /frontend/svelte.config.js: -------------------------------------------------------------------------------- 1 | import adapter from '@sveltejs/adapter-static' 2 | import { vitePreprocess } from '@sveltejs/vite-plugin-svelte' 3 | 4 | /** @type {import('@sveltejs/kit').Config} */ 5 | const config = { 6 | // Consult https://svelte.dev/docs/kit/integrations 7 | // for more information about preprocessors 8 | preprocess: vitePreprocess(), 9 | 10 | kit: { 11 | // Using adapter-static to output a static site build 12 | adapter: adapter({ 13 | // Output to the default build folder 14 | pages: 'build', 15 | assets: 'build', 16 | precompress: false, 17 | }), 18 | }, 19 | } 20 | 21 | export default config 22 | -------------------------------------------------------------------------------- /frontend/tailwind.config.js: -------------------------------------------------------------------------------- 1 | import { fontFamily } from 'tailwindcss/defaultTheme' 2 | 3 | /** @type {import('tailwindcss').Config} */ 4 | const config = { 5 | darkMode: ['class'], 6 | content: ['./src/**/*.{html,js,svelte,ts}'], 7 | safelist: ['dark'], 8 | theme: { 9 | container: { 10 | center: true, 11 | padding: '2rem', 12 | screens: { 13 | '2xl': '1400px', 14 | }, 15 | }, 16 | extend: { 17 | colors: { 18 | border: 'hsl(var(--border) / )', 19 | input: 'hsl(var(--input) / )', 20 | ring: 'hsl(var(--ring) / )', 21 | background: 'hsl(var(--background) / )', 22 | foreground: 'hsl(var(--foreground) / )', 23 | primary: { 24 | DEFAULT: 'hsl(var(--primary) / )', 25 | foreground: 'hsl(var(--primary-foreground) / )', 26 | }, 27 | secondary: { 28 | DEFAULT: 'hsl(var(--secondary) / )', 29 | foreground: 'hsl(var(--secondary-foreground) / )', 30 | }, 31 | destructive: { 32 | DEFAULT: 'hsl(var(--destructive) / )', 33 | foreground: 'hsl(var(--destructive-foreground) / )', 34 | }, 35 | muted: { 36 | DEFAULT: 'hsl(var(--muted) / )', 37 | foreground: 'hsl(var(--muted-foreground) / )', 38 | }, 39 | accent: { 40 | DEFAULT: 'hsl(var(--accent) / )', 41 | foreground: 'hsl(var(--accent-foreground) / )', 42 | }, 43 | popover: { 44 | DEFAULT: 'hsl(var(--popover) / )', 45 | foreground: 'hsl(var(--popover-foreground) / )', 46 | }, 47 | card: { 48 | DEFAULT: 'hsl(var(--card) / )', 49 | foreground: 'hsl(var(--card-foreground) / )', 50 | }, 51 | }, 52 | borderRadius: { 53 | lg: 'var(--radius)', 54 | md: 'calc(var(--radius) - 2px)', 55 | sm: 'calc(var(--radius) - 4px)', 56 | }, 57 | fontFamily: { 58 | sans: [...fontFamily.sans], 59 | }, 60 | }, 61 | }, 62 | } 63 | 64 | export default config 65 | -------------------------------------------------------------------------------- /frontend/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "./.svelte-kit/tsconfig.json", 3 | "compilerOptions": { 4 | "allowJs": true, 5 | "checkJs": true, 6 | "esModuleInterop": true, 7 | "forceConsistentCasingInFileNames": true, 8 | "resolveJsonModule": true, 9 | "skipLibCheck": true, 10 | "sourceMap": true, 11 | "strict": true, 12 | "moduleResolution": "bundler" 13 | } 14 | // Path aliases are handled by 
https://svelte.dev/docs/kit/configuration#alias 15 | // except $lib which is handled by https://svelte.dev/docs/kit/configuration#files 16 | // 17 | // If you want to overwrite includes/excludes, make sure to copy over the relevant includes/excludes 18 | // from the referenced tsconfig.json - TypeScript does not merge them in 19 | } 20 | -------------------------------------------------------------------------------- /frontend/vite.config.ts: -------------------------------------------------------------------------------- 1 | import { sveltekit } from '@sveltejs/kit/vite'; 2 | import { defineConfig } from 'vite'; 3 | 4 | export default defineConfig({ 5 | plugins: [sveltekit()] 6 | }); 7 | -------------------------------------------------------------------------------- /img/ui.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhawkins11/task-manager-mcp/7e1c44136b13852d8dc62ddc37ab0e32a934b3ff/img/ui.png -------------------------------------------------------------------------------- /jest.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | preset: 'ts-jest', 3 | testEnvironment: 'node', 4 | moduleNameMapper: { 5 | '^../services/aiService$': '/src/services/aiService', 6 | '^../models/types$': '/src/models/types', 7 | '^../lib/logger$': '/src/lib/logger', 8 | '^../lib/llmUtils$': '/src/lib/llmUtils', 9 | '^../lib/repomixUtils$': '/src/lib/repomixUtils', 10 | '^../lib/dbUtils$': '/src/lib/dbUtils', 11 | '^../config$': '/src/config', 12 | '^../services/databaseService$': '/src/services/databaseService', 13 | }, 14 | setupFiles: ['/tests/setupEnv.ts'], 15 | } 16 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "task-manager-mcp", 3 | "version": "1.1.0", 4 | "main": "dist/server.js", 5 | "scripts": { 6 | "build": "npm run build:frontend && npm run build:server", 7 | "build:server": "tsc && mkdir -p dist/config && cp src/config/*.sql dist/config/", 8 | "build:frontend": "cd frontend && npm run build && cd .. 
&& mkdir -p dist/frontend-ui && cp -r frontend/build/* dist/frontend-ui/", 9 | "start": "node dist/server.js", 10 | "dev": "nodemon --watch src --ext ts --exec ts-node src/server.ts", 11 | "test": "jest" 12 | }, 13 | "keywords": [], 14 | "author": "", 15 | "license": "ISC", 16 | "description": "", 17 | "dependencies": { 18 | "@google/generative-ai": "^0.24.0", 19 | "@modelcontextprotocol/sdk": "^1.9.0", 20 | "@openrouter/ai-sdk-provider": "^0.4.5", 21 | "@types/express": "^4.17.21", 22 | "dotenv": "^16.5.0", 23 | "express": "^4.21.2", 24 | "open": "^8.4.2", 25 | "openai": "^4.94.0", 26 | "sqlite3": "^5.1.7", 27 | "svelte": "^5.27.1", 28 | "tiktoken": "^1.0.20", 29 | "winston": "^3.17.0", 30 | "winston-daily-rotate-file": "^5.0.0", 31 | "ws": "^8.16.0", 32 | "zod": "^3.24.2" 33 | }, 34 | "devDependencies": { 35 | "@sveltejs/adapter-auto": "^6.0.0", 36 | "@sveltejs/kit": "^2.20.7", 37 | "@types/jest": "^29.5.14", 38 | "@types/node": "^22.14.1", 39 | "@types/ws": "^8.5.10", 40 | "jest": "^29.7.0", 41 | "nodemon": "^3.1.9", 42 | "ts-jest": "^29.3.2", 43 | "ts-node": "^10.9.2", 44 | "typescript": "^5.8.3" 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /src/config/index.ts: -------------------------------------------------------------------------------- 1 | // Load environment variables from .env file 2 | import * as dotenv from 'dotenv' 3 | import path from 'path' 4 | 5 | // Load env vars as early as possible 6 | dotenv.config() 7 | 8 | // --- Configuration --- 9 | const FEATURE_TASKS_DIR = path.resolve(__dirname, '../../.mcp', 'features') // Directory for feature-specific task files 10 | const SQLITE_DB_PATH = 11 | process.env.SQLITE_DB_PATH || 12 | path.resolve(__dirname, '../../data/taskmanager.db') // Path to SQLite database file 13 | const GEMINI_API_KEY = process.env.GEMINI_API_KEY 14 | const OPENROUTER_API_KEY = process.env.OPENROUTER_API_KEY 15 | const GEMINI_MODEL = 16 | process.env.GEMINI_MODEL || 'gemini-2.5-flash-preview-04-17' // Default model 17 | const OPENROUTER_MODEL = 18 | process.env.OPENROUTER_MODEL || 'google/gemini-2.5-flash-preview:thinking' 19 | const FALLBACK_OPENROUTER_MODEL = 20 | process.env.FALLBACK_OPENROUTER_MODEL || 'google/gemini-2.0-flash-001' 21 | const FALLBACK_GEMINI_MODEL = 22 | process.env.FALLBACK_GEMINI_MODEL || 'gemini-2.0-flash-001' 23 | const REVIEW_LLM_API_KEY = process.env.REVIEW_LLM_API_KEY || GEMINI_API_KEY 24 | 25 | // Logging configuration 26 | type LogLevel = 'debug' | 'info' | 'warn' | 'error' 27 | const LOG_LEVEL = (process.env.LOG_LEVEL?.toLowerCase() as LogLevel) || 'info' // Default to INFO 28 | 29 | // WebSocket server configuration 30 | const WS_PORT = parseInt(process.env.WS_PORT || '4999', 10) 31 | const WS_HOST = process.env.WS_HOST || 'localhost' 32 | // UI server uses the same port as WebSocket 33 | const UI_PORT = WS_PORT 34 | 35 | // Add config for git diff max buffer (in MB) 36 | const GIT_DIFF_MAX_BUFFER_MB = parseInt( 37 | process.env.GIT_DIFF_MAX_BUFFER_MB || '10', 38 | 10 39 | ) 40 | 41 | // Add config for auto-review on completion 42 | const AUTO_REVIEW_ON_COMPLETION = 43 | process.env.AUTO_REVIEW_ON_COMPLETION === 'true' 44 | 45 | // Define safety settings for content generation 46 | import { HarmCategory, HarmBlockThreshold } from '@google/generative-ai' 47 | const safetySettings = [ 48 | { 49 | category: HarmCategory.HARM_CATEGORY_HARASSMENT, 50 | threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE, 51 | }, 52 | { 53 | category: HarmCategory.HARM_CATEGORY_HATE_SPEECH, 
54 | threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE, 55 | }, 56 | { 57 | category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, 58 | threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE, 59 | }, 60 | { 61 | category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, 62 | threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE, 63 | }, 64 | ] 65 | 66 | export { 67 | FEATURE_TASKS_DIR, 68 | SQLITE_DB_PATH, 69 | GEMINI_API_KEY, 70 | OPENROUTER_API_KEY, 71 | GEMINI_MODEL, 72 | OPENROUTER_MODEL, 73 | FALLBACK_OPENROUTER_MODEL, 74 | FALLBACK_GEMINI_MODEL, 75 | REVIEW_LLM_API_KEY, 76 | LOG_LEVEL, 77 | LogLevel, 78 | safetySettings, 79 | WS_PORT, 80 | WS_HOST, 81 | UI_PORT, 82 | GIT_DIFF_MAX_BUFFER_MB, 83 | AUTO_REVIEW_ON_COMPLETION, 84 | } 85 | -------------------------------------------------------------------------------- /src/config/migrations.sql: -------------------------------------------------------------------------------- 1 | -- Add from_review column to tasks table if it doesn't exist 2 | ALTER TABLE tasks ADD COLUMN from_review INTEGER DEFAULT 0; 3 | 4 | -- Add task_id column to history_entries table if it doesn't exist 5 | ALTER TABLE history_entries ADD COLUMN task_id TEXT; 6 | 7 | -- Add action and details columns to history_entries table if they don't exist 8 | ALTER TABLE history_entries ADD COLUMN action TEXT; 9 | ALTER TABLE history_entries ADD COLUMN details TEXT; 10 | 11 | -- Add project_path column to features table if it doesn't exist 12 | ALTER TABLE features ADD COLUMN project_path TEXT; -------------------------------------------------------------------------------- /src/config/schema.sql: -------------------------------------------------------------------------------- 1 | -- Tasks Table 2 | CREATE TABLE IF NOT EXISTS tasks ( 3 | id TEXT PRIMARY KEY, 4 | title TEXT, 5 | description TEXT, 6 | status TEXT NOT NULL CHECK (status IN ('pending', 'in_progress', 'completed', 'decomposed')), 7 | completed INTEGER NOT NULL DEFAULT 0, -- SQLite uses INTEGER for boolean (0=false, 1=true) 8 | effort TEXT CHECK (effort IN ('low', 'medium', 'high')), 9 | feature_id TEXT, 10 | parent_task_id TEXT, 11 | created_at INTEGER NOT NULL, -- Unix timestamp 12 | updated_at INTEGER NOT NULL, -- Unix timestamp 13 | from_review INTEGER DEFAULT 0, -- Track if task was generated from a review 14 | FOREIGN KEY (parent_task_id) REFERENCES tasks(id) ON DELETE CASCADE 15 | ); 16 | 17 | -- History Entries Table 18 | CREATE TABLE IF NOT EXISTS history_entries ( 19 | id INTEGER PRIMARY KEY AUTOINCREMENT, 20 | timestamp INTEGER NOT NULL, -- Unix timestamp 21 | role TEXT NOT NULL CHECK (role IN ('user', 'model', 'tool_call', 'tool_response')), 22 | content TEXT NOT NULL, 23 | feature_id TEXT NOT NULL, 24 | task_id TEXT, 25 | action TEXT, 26 | details TEXT 27 | ); 28 | 29 | -- Features Table 30 | CREATE TABLE IF NOT EXISTS features ( 31 | id TEXT PRIMARY KEY, 32 | description TEXT NOT NULL, 33 | status TEXT NOT NULL DEFAULT 'in_progress' CHECK (status IN ('in_progress', 'completed', 'abandoned')), 34 | project_path TEXT, 35 | created_at INTEGER NOT NULL, -- Unix timestamp 36 | updated_at INTEGER NOT NULL -- Unix timestamp 37 | ); 38 | 39 | -- Task Relationships Table 40 | CREATE TABLE IF NOT EXISTS task_relationships ( 41 | id INTEGER PRIMARY KEY AUTOINCREMENT, 42 | parent_id TEXT NOT NULL, 43 | child_id TEXT NOT NULL, 44 | FOREIGN KEY (parent_id) REFERENCES tasks(id) ON DELETE CASCADE, 45 | FOREIGN KEY (child_id) REFERENCES tasks(id) ON DELETE CASCADE, 46 | UNIQUE (parent_id, child_id) 47 | ); 48 | 49 | -- 
Planning States Table 50 | CREATE TABLE IF NOT EXISTS planning_states ( 51 | question_id TEXT PRIMARY KEY, -- UUID as the primary key 52 | feature_id TEXT NOT NULL, 53 | prompt TEXT NOT NULL, 54 | partial_response TEXT NOT NULL, 55 | planning_type TEXT NOT NULL CHECK (planning_type IN ('feature_planning', 'plan_adjustment')), 56 | created_at INTEGER NOT NULL, -- Unix timestamp 57 | FOREIGN KEY (feature_id) REFERENCES features(id) ON DELETE CASCADE 58 | ); 59 | 60 | -- Indexes for performance 61 | CREATE INDEX IF NOT EXISTS idx_tasks_feature_id ON tasks(feature_id); 62 | CREATE INDEX IF NOT EXISTS idx_tasks_parent_task_id ON tasks(parent_task_id); 63 | CREATE INDEX IF NOT EXISTS idx_history_entries_feature_id ON history_entries(feature_id); 64 | CREATE INDEX IF NOT EXISTS idx_task_relationships_parent_id ON task_relationships(parent_id); 65 | CREATE INDEX IF NOT EXISTS idx_task_relationships_child_id ON task_relationships(child_id); 66 | CREATE INDEX IF NOT EXISTS idx_planning_states_feature_id ON planning_states(feature_id); -------------------------------------------------------------------------------- /src/index.ts: -------------------------------------------------------------------------------- 1 | // Re-export server for backwards compatibility 2 | export * from './server' 3 | -------------------------------------------------------------------------------- /src/lib/dbUtils.ts: -------------------------------------------------------------------------------- 1 | import { databaseService, HistoryEntry } from '../services/databaseService' 2 | import crypto from 'crypto' 3 | 4 | // Types 5 | interface Task { 6 | id: string 7 | title?: string 8 | description?: string 9 | status: 'pending' | 'in_progress' | 'completed' | 'decomposed' 10 | completed: boolean 11 | effort?: 'low' | 'medium' | 'high' 12 | feature_id?: string 13 | parent_task_id?: string 14 | created_at: number 15 | updated_at: number 16 | fromReview?: boolean 17 | } 18 | 19 | interface TaskUpdate { 20 | title?: string 21 | description?: string 22 | effort?: 'low' | 'medium' | 'high' 23 | parent_task_id?: string 24 | fromReview?: boolean 25 | } 26 | 27 | interface PlanningState { 28 | questionId: string 29 | featureId: string 30 | prompt: string 31 | partialResponse: string 32 | planningType: 'feature_planning' | 'plan_adjustment' 33 | } 34 | 35 | /** 36 | * Adds a new entry to the feature history 37 | * @param featureId The unique ID of the feature 38 | * @param role The role of the entry ('user', 'model', 'tool_call', 'tool_response') 39 | * @param content The content of the entry 40 | */ 41 | export async function addHistoryEntry( 42 | featureId: string, 43 | role: 'user' | 'model' | 'tool_call' | 'tool_response', 44 | content: any 45 | ): Promise { 46 | try { 47 | // Convert timestamp to number if not already 48 | const timestamp = Math.floor(Date.now() / 1000) 49 | 50 | // Prepare history entry 51 | const entry = { 52 | timestamp, 53 | role, 54 | content, 55 | feature_id: featureId, 56 | } 57 | 58 | // Connect to database 59 | await databaseService.connect() 60 | 61 | // Add entry 62 | await databaseService.addHistoryEntry(entry) 63 | 64 | // Close connection 65 | await databaseService.close() 66 | } catch (error) { 67 | console.error( 68 | `[TaskServer] Error adding history entry to database: ${error}` 69 | ) 70 | // Re-throw the error so the caller is aware 71 | throw error 72 | } 73 | } 74 | 75 | /** 76 | * Gets all tasks for a feature 77 | * @param featureId The unique ID of the feature 78 | * @returns Array of tasks 79 
| */ 80 | export async function getAllTasksForFeature( 81 | featureId: string 82 | ): Promise<Task[]> { 83 | try { 84 | await databaseService.connect() 85 | const tasks = await databaseService.getTasksByFeatureId(featureId) 86 | await databaseService.close() 87 | return tasks 88 | } catch (error) { 89 | console.error( 90 | `[TaskServer] Error getting tasks for feature ${featureId}: ${error}` 91 | ) 92 | throw error 93 | } 94 | } 95 | 96 | /** 97 | * Gets a task by ID 98 | * @param taskId The unique ID of the task 99 | * @returns The task or null if not found 100 | */ 101 | export async function getTaskById(taskId: string): Promise<Task | null> { 102 | try { 103 | await databaseService.connect() 104 | const task = await databaseService.getTaskById(taskId) 105 | await databaseService.close() 106 | return task 107 | } catch (error) { 108 | console.error(`[TaskServer] Error getting task ${taskId}: ${error}`) 109 | throw error 110 | } 111 | } 112 | 113 | /** 114 | * Creates a new task 115 | * @param featureId The feature ID the task belongs to 116 | * @param description The task description 117 | * @param options Optional task properties (title, effort, parentTaskId) 118 | * @returns The created task 119 | */ 120 | export async function createTask( 121 | featureId: string, 122 | description: string, 123 | options: { 124 | title?: string 125 | effort?: 'low' | 'medium' | 'high' 126 | parentTaskId?: string 127 | fromReview?: boolean 128 | } = {} 129 | ): Promise<Task> { 130 | try { 131 | const now = Math.floor(Date.now() / 1000) 132 | const newTask: Task = { 133 | id: crypto.randomUUID(), 134 | description, 135 | title: options.title || description, 136 | status: 'pending', 137 | completed: false, 138 | effort: options.effort, 139 | feature_id: featureId, 140 | parent_task_id: options.parentTaskId, 141 | created_at: now, 142 | updated_at: now, 143 | fromReview: options.fromReview, 144 | } 145 | 146 | await databaseService.connect() 147 | await databaseService.addTask(newTask) 148 | await databaseService.close() 149 | 150 | return newTask 151 | } catch (error) { 152 | console.error( 153 | `[TaskServer] Error creating task for feature ${featureId}: ${error}` 154 | ) 155 | throw error 156 | } 157 | } 158 | 159 | /** 160 | * Updates a task's status 161 | * @param taskId The unique ID of the task 162 | * @param status The new status 163 | * @param completed Optional completed flag 164 | * @returns True if successful, false otherwise 165 | */ 166 | export async function updateTaskStatus( 167 | taskId: string, 168 | status: 'pending' | 'in_progress' | 'completed' | 'decomposed', 169 | completed?: boolean 170 | ): Promise<boolean> { 171 | try { 172 | await databaseService.connect() 173 | const result = await databaseService.updateTaskStatus( 174 | taskId, 175 | status, 176 | completed 177 | ) 178 | await databaseService.close() 179 | return result 180 | } catch (error) { 181 | console.error( 182 | `[TaskServer] Error updating task status for ${taskId}: ${error}` 183 | ) 184 | throw error 185 | } 186 | } 187 | 188 | /** 189 | * Updates a task's details 190 | * @param taskId The unique ID of the task 191 | * @param updates The properties to update 192 | * @returns True if successful, false otherwise 193 | */ 194 | export async function updateTaskDetails( 195 | taskId: string, 196 | updates: TaskUpdate 197 | ): Promise<boolean> { 198 | try { 199 | await databaseService.connect() 200 | const result = await databaseService.updateTaskDetails(taskId, updates) 201 | await databaseService.close() 202 | return result 203 | } catch (error) { 204 |
console.error( 205 | `[TaskServer] Error updating task details for ${taskId}: ${error}` 206 | ) 207 | throw error 208 | } 209 | } 210 | 211 | /** 212 | * Deletes a task 213 | * @param taskId The unique ID of the task 214 | * @returns True if successful, false otherwise 215 | */ 216 | export async function deleteTask(taskId: string): Promise<boolean> { 217 | try { 218 | await databaseService.connect() 219 | const result = await databaseService.deleteTask(taskId) 220 | await databaseService.close() 221 | return result 222 | } catch (error) { 223 | console.error(`[TaskServer] Error deleting task ${taskId}: ${error}`) 224 | throw error 225 | } 226 | } 227 | 228 | /** 229 | * Gets history entries for a feature 230 | * @param featureId The unique ID of the feature 231 | * @param limit Maximum number of entries to retrieve 232 | * @returns Array of history entries 233 | */ 234 | export async function getHistoryForFeature( 235 | featureId: string, 236 | limit: number = 100 237 | ): Promise<HistoryEntry[]> { 238 | try { 239 | await databaseService.connect() 240 | const history = await databaseService.getHistoryByFeatureId( 241 | featureId, 242 | limit 243 | ) 244 | await databaseService.close() 245 | return history 246 | } catch (error) { 247 | console.error( 248 | `[TaskServer] Error getting history for feature ${featureId}: ${error}` 249 | ) 250 | throw error 251 | } 252 | } 253 | 254 | /** 255 | * Stores intermediate planning state 256 | * @param featureId The feature ID being planned 257 | * @param prompt The original prompt 258 | * @param partialResponse The LLM's partial response 259 | * @param planningType The type of planning operation 260 | * @returns The generated question ID 261 | */ 262 | export async function addPlanningState( 263 | featureId: string, 264 | prompt: string, 265 | partialResponse: string, 266 | planningType: 'feature_planning' | 'plan_adjustment' 267 | ): Promise<string> { 268 | try { 269 | const questionId = crypto.randomUUID() 270 | const now = Math.floor(Date.now() / 1000) 271 | 272 | await databaseService.connect() 273 | 274 | await databaseService.runAsync( 275 | `INSERT INTO planning_states ( 276 | question_id, feature_id, prompt, partial_response, planning_type, created_at 277 | ) VALUES (?, ?, ?, ?, ?, ?)`, 278 | [questionId, featureId, prompt, partialResponse, planningType, now] 279 | ) 280 | 281 | await databaseService.close() 282 | 283 | return questionId 284 | } catch (error) { 285 | console.error(`[TaskServer] Error storing planning state: ${error}`) 286 | // Generate a questionId even in error case to avoid breaking the flow 287 | return crypto.randomUUID() 288 | } 289 | } 290 | 291 | /** 292 | * Gets planning state by question ID 293 | * @param questionId The question ID 294 | * @returns The planning state or null if not found 295 | */ 296 | export async function getPlanningStateByQuestionId( 297 | questionId: string 298 | ): Promise<PlanningState | null> { 299 | try { 300 | if (!questionId) { 301 | return null 302 | } 303 | 304 | await databaseService.connect() 305 | 306 | const row = await databaseService.get( 307 | `SELECT question_id, feature_id, prompt, partial_response, planning_type 308 | FROM planning_states 309 | WHERE question_id = ?`, 310 | [questionId] 311 | ) 312 | 313 | await databaseService.close() 314 | 315 | if (!row) { 316 | return null 317 | } 318 | 319 | return { 320 | questionId: row.question_id, 321 | featureId: row.feature_id, 322 | prompt: row.prompt, 323 | partialResponse: row.partial_response, 324 | planningType: row.planning_type, 325 | } 326 | } catch (error) { 327 | console.error(
328 | `[TaskServer] Error getting planning state for question ${questionId}: ${error}` 329 | ) 330 | // Re-throw error to distinguish DB errors from 'not found' 331 | throw error 332 | } 333 | } 334 | 335 | /** 336 | * Gets planning state by feature ID 337 | * @param featureId The feature ID 338 | * @returns The most recent planning state for the feature or null if not found 339 | */ 340 | export async function getPlanningStateByFeatureId( 341 | featureId: string 342 | ): Promise<PlanningState | null> { 343 | try { 344 | if (!featureId) { 345 | return null 346 | } 347 | 348 | await databaseService.connect() 349 | 350 | const row = await databaseService.get( 351 | `SELECT question_id, feature_id, prompt, partial_response, planning_type 352 | FROM planning_states 353 | WHERE feature_id = ? 354 | ORDER BY created_at DESC 355 | LIMIT 1`, 356 | [featureId] 357 | ) 358 | 359 | await databaseService.close() 360 | 361 | if (!row) { 362 | return null 363 | } 364 | 365 | return { 366 | questionId: row.question_id, 367 | featureId: row.feature_id, 368 | prompt: row.prompt, 369 | partialResponse: row.partial_response, 370 | planningType: row.planning_type, 371 | } 372 | } catch (error) { 373 | console.error( 374 | `[TaskServer] Error getting planning state for feature ${featureId}: ${error}` 375 | ) 376 | // Re-throw error to distinguish DB errors from 'not found' 377 | throw error 378 | } 379 | } 380 | 381 | /** 382 | * Clears planning state 383 | * @param questionId The question ID 384 | * @returns True if successful, false otherwise 385 | */ 386 | export async function clearPlanningState(questionId: string): Promise<boolean> { 387 | try { 388 | if (!questionId) { 389 | return false 390 | } 391 | 392 | await databaseService.connect() 393 | 394 | const result = await databaseService.runAsync( 395 | `DELETE FROM planning_states WHERE question_id = ?`, 396 | [questionId] 397 | ) 398 | 399 | await databaseService.close() 400 | 401 | return result.changes > 0 402 | } catch (error) { 403 | console.error( 404 | `[TaskServer] Error clearing planning state for question ${questionId}: ${error}` 405 | ) 406 | return false 407 | } 408 | } 409 | 410 | /** 411 | * Clears all planning states for a feature 412 | * @param featureId The feature ID 413 | * @returns Number of states cleared 414 | */ 415 | export async function clearPlanningStatesForFeature( 416 | featureId: string 417 | ): Promise<number> { 418 | try { 419 | if (!featureId) { 420 | return 0 421 | } 422 | 423 | await databaseService.connect() 424 | 425 | const result = await databaseService.runAsync( 426 | `DELETE FROM planning_states WHERE feature_id = ?`, 427 | [featureId] 428 | ) 429 | 430 | await databaseService.close() 431 | 432 | return result.changes || 0 433 | } catch (error) { 434 | console.error( 435 | `[TaskServer] Error clearing planning states for feature ${featureId}: ${error}` 436 | ) 437 | return 0 438 | } 439 | } 440 | 441 | // Utility to get project_path from feature record 442 | export async function getProjectPathForFeature( 443 | featureId: string 444 | ): Promise<string | undefined> { 445 | try { 446 | await databaseService.connect() 447 | 448 | // First try to get it from the feature record 449 | const feature = await databaseService.getFeatureById(featureId) 450 | if (feature && feature.project_path) { 451 | await databaseService.close() 452 | return feature.project_path 453 | } 454 | 455 | // Fallback to the old method if needed 456 | const history = await getHistoryForFeature(featureId, 50) // limit to 50 for efficiency 457 | const firstToolCall = history.find( 458 | (entry: any) =>
459 | entry.role === 'tool_call' && 460 | entry.content && 461 | entry.content.tool === 'plan_feature' && 462 | entry.content.params && 463 | entry.content.params.project_path 464 | ) 465 | 466 | // If we found it in history but not in the feature record, update the feature record for next time 467 | const projectPath = JSON.parse(firstToolCall?.content || '{}')?.params 468 | ?.project_path 469 | if (projectPath && feature) { 470 | try { 471 | await databaseService.runAsync( 472 | 'UPDATE features SET project_path = ? WHERE id = ?', 473 | [projectPath, featureId] 474 | ) 475 | } catch (updateError) { 476 | console.error( 477 | `[getProjectPathForFeature] Error updating project_path: ${updateError}` 478 | ) 479 | } 480 | } 481 | 482 | await databaseService.close() 483 | return projectPath 484 | } catch (e) { 485 | await databaseService.close() 486 | return undefined 487 | } 488 | } 489 | -------------------------------------------------------------------------------- /src/lib/logger.ts: -------------------------------------------------------------------------------- 1 | import logger from './winstonLogger' 2 | import { LOG_LEVEL, LogLevel } from '../config' // Import LOG_LEVEL and LogLevel type 3 | 4 | // Define log level hierarchy (lower number = higher priority) 5 | const levelHierarchy: Record<LogLevel, number> = { 6 | error: 0, 7 | warn: 1, 8 | info: 2, 9 | debug: 3, 10 | } 11 | 12 | const configuredLevel = levelHierarchy[LOG_LEVEL] ?? levelHierarchy.info // Default to INFO if invalid (?? so the 0-valued 'error' level is not treated as falsy) 13 | 14 | /** 15 | * Logs a message to the debug log file if the provided level meets the configured threshold. 16 | * @param message The message to log 17 | * @param level The level of the message (default: 'info') 18 | */ 19 | export async function logToFile( 20 | message: string, 21 | level: LogLevel = 'info' 22 | ): Promise<void> { 23 | try { 24 | const messageLevel = levelHierarchy[level] ?? levelHierarchy.info 25 | 26 | // Only log if the message level is less than or equal to the configured level 27 | if (messageLevel <= configuredLevel) { 28 | switch (level) { 29 | case 'error': 30 | logger.error(message) 31 | break 32 | case 'warn': 33 | logger.warn(message) 34 | break 35 | case 'info': 36 | logger.info(message) 37 | break 38 | case 'debug': 39 | default: 40 | logger.debug(message) // Default to debug if level not specified or recognized 41 | break 42 | } 43 | } 44 | } catch (error) { 45 | // Fallback to console if logger fails 46 | console.error(`[TaskServer] Error using logger:`, error) 47 | console.error( 48 | `[TaskServer] Original log message (Level: ${level}): ${message}` 49 | ) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /src/lib/repomixUtils.ts: -------------------------------------------------------------------------------- 1 | import path from 'path' 2 | import fs from 'fs/promises' 3 | import { promisify } from 'util' 4 | import { exec } from 'child_process' 5 | import { logToFile } from './logger' 6 | // Potentially import encoding_for_model and config if including token counting/compression 7 | 8 | const execPromise = promisify(exec) 9 | 10 | /** 11 | * Executes repomix in the target directory and returns the codebase context. 12 | * Handles errors and ensures an empty string is returned if context cannot be gathered. 13 | * 14 | * @param targetDir The directory to run repomix in. 15 | * @param logContext An identifier (like featureId or reviewId) for logging. 16 | * @returns The codebase context string, or an empty string on failure or no context.
17 | * @throws Error if repomix command is not found. 18 | */ 19 | export async function getCodebaseContext( 20 | targetDir: string, 21 | logContext: string 22 | ): Promise<{ context: string; error?: string }> { 23 | let codebaseContext = '' 24 | let userFriendlyError: string | undefined 25 | 26 | try { 27 | const repomixOutputPath = path.join(targetDir, 'repomix-output.txt') 28 | // Ensure the output directory exists 29 | await fs.mkdir(path.dirname(repomixOutputPath), { recursive: true }) 30 | 31 | const repomixCommand = `npx repomix ${targetDir} --style plain --output ${repomixOutputPath}` 32 | await logToFile( 33 | `[RepomixUtil/${logContext}] Running command: ${repomixCommand}`, 34 | 'debug' 35 | ) 36 | 37 | // Execute repomix in the target directory 38 | let { stdout: repomixStdout, stderr: repomixStderr } = await execPromise( 39 | repomixCommand, 40 | { cwd: targetDir, maxBuffer: 10 * 1024 * 1024 } // 10MB buffer 41 | ) 42 | 43 | if (repomixStderr) { 44 | await logToFile( 45 | `[RepomixUtil/${logContext}] repomix stderr: ${repomixStderr}`, 46 | 'warn' 47 | ) 48 | if (repomixStderr.includes('Permission denied')) { 49 | userFriendlyError = `Error running repomix: Permission denied scanning directory '${targetDir}'. Check folder permissions.` 50 | await logToFile( 51 | `[RepomixUtil/${logContext}] ${userFriendlyError}`, 52 | 'error' 53 | ) 54 | } 55 | } 56 | if ( 57 | !repomixStdout && 58 | !(await fs.stat(repomixOutputPath).catch(() => null)) 59 | ) { 60 | await logToFile( 61 | `[RepomixUtil/${logContext}] repomix stdout was empty and output file does not exist.`, 62 | 'warn' 63 | ) 64 | } 65 | 66 | // Read output file 67 | try { 68 | codebaseContext = await fs.readFile(repomixOutputPath, 'utf-8') 69 | } catch (readError: any) { 70 | if (readError.code === 'ENOENT') { 71 | await logToFile( 72 | `[RepomixUtil/${logContext}] repomix-output.txt not found at ${repomixOutputPath}. Proceeding without context.`, 73 | 'warn' 74 | ) 75 | codebaseContext = '' // Expected case if repomix finds nothing 76 | } else { 77 | // Rethrow unexpected read errors 78 | throw readError 79 | } 80 | } 81 | 82 | if (!codebaseContext.trim()) { 83 | await logToFile( 84 | `[RepomixUtil/${logContext}] repomix output file (${repomixOutputPath}) was empty or missing.`, 85 | 'info' // Info level might be sufficient here 86 | ) 87 | codebaseContext = '' 88 | } else { 89 | await logToFile( 90 | `[RepomixUtil/${logContext}] repomix context gathered (${codebaseContext.length} chars).`, 91 | 'debug' 92 | ) 93 | // TODO: Add token counting/compression logic here if desired, similar to planFeature 94 | } 95 | } catch (error: any) { 96 | await logToFile( 97 | `[RepomixUtil/${logContext}] Error running repomix: ${error}`, 98 | 'error' 99 | ) 100 | if (error.message?.includes('command not found')) { 101 | userFriendlyError = 102 | "Error: 'npx' or 'repomix' command not found. Make sure Node.js and repomix are installed and in the PATH." 103 | } else if (userFriendlyError) { 104 | // Use the permission denied error if already set 105 | } else { 106 | userFriendlyError = 'Error running repomix to gather codebase context.' 
107 | } 108 | codebaseContext = '' // Ensure context is empty on error 109 | } 110 | 111 | return { context: codebaseContext, error: userFriendlyError } 112 | } 113 | -------------------------------------------------------------------------------- /src/lib/utils.ts: -------------------------------------------------------------------------------- 1 | import { logToFile } from './logger' 2 | 3 | /** 4 | * Dynamically imports an ES Module from a CommonJS module. 5 | * Handles default exports correctly. 6 | * @param modulePath The path or name of the module to import. 7 | * @returns The default export of the module. 8 | * @throws If the import fails. 9 | */ 10 | export async function dynamicImportDefault( 11 | modulePath: string 12 | ): Promise { 13 | try { 14 | // Perform the dynamic import 15 | const module = await import(modulePath) 16 | 17 | // Check for and return the default export 18 | if (module.default) { 19 | return module.default as T 20 | } 21 | 22 | // If no default export, return the module namespace object itself 23 | // (less likely needed for 'open', but good fallback) 24 | return module as T 25 | } catch (error: any) { 26 | await logToFile( 27 | `[Utils] Failed to dynamically import '${modulePath}': ${error.message}` 28 | ) 29 | console.error(`[Utils] Dynamic import error for '${modulePath}':`, error) 30 | // Re-throw the error so the calling function knows it failed 31 | throw error 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /src/lib/winstonLogger.ts: -------------------------------------------------------------------------------- 1 | import path from 'path' 2 | import winston from 'winston' 3 | import 'winston-daily-rotate-file' 4 | 5 | const logDir = path.join(__dirname, '../../logs') 6 | 7 | // Define log formats 8 | const formats = { 9 | console: winston.format.combine( 10 | winston.format.colorize(), 11 | winston.format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }), 12 | winston.format.printf( 13 | (info) => `${info.timestamp} ${info.level}: ${info.message}` 14 | ) 15 | ), 16 | file: winston.format.combine( 17 | winston.format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }), 18 | winston.format.json() 19 | ), 20 | } 21 | 22 | // Configure file transport with rotation 23 | const fileRotateTransport = new winston.transports.DailyRotateFile({ 24 | dirname: logDir, 25 | filename: 'application-%DATE%.log', 26 | datePattern: 'YYYY-MM-DD', 27 | maxSize: '20m', 28 | maxFiles: '14d', 29 | zippedArchive: true, 30 | }) 31 | 32 | // Error log transport with rotation 33 | const errorFileRotateTransport = new winston.transports.DailyRotateFile({ 34 | dirname: logDir, 35 | filename: 'error-%DATE%.log', 36 | datePattern: 'YYYY-MM-DD', 37 | maxSize: '20m', 38 | maxFiles: '14d', 39 | level: 'error', 40 | zippedArchive: true, 41 | }) 42 | 43 | // Create the logger 44 | const logger = winston.createLogger({ 45 | level: process.env.NODE_ENV === 'production' ? 
'info' : 'debug', 46 | format: formats.file, 47 | transports: [ 48 | // Console transport for development 49 | new winston.transports.Console({ 50 | format: formats.console, 51 | }), 52 | // File transports with rotation 53 | fileRotateTransport, 54 | errorFileRotateTransport, 55 | ], 56 | exceptionHandlers: [ 57 | new winston.transports.DailyRotateFile({ 58 | dirname: logDir, 59 | filename: 'exceptions-%DATE%.log', 60 | datePattern: 'YYYY-MM-DD', 61 | maxSize: '20m', 62 | maxFiles: '14d', 63 | zippedArchive: true, 64 | }), 65 | ], 66 | rejectionHandlers: [ 67 | new winston.transports.DailyRotateFile({ 68 | dirname: logDir, 69 | filename: 'rejections-%DATE%.log', 70 | datePattern: 'YYYY-MM-DD', 71 | maxSize: '20m', 72 | maxFiles: '14d', 73 | zippedArchive: true, 74 | }), 75 | ], 76 | }) 77 | 78 | // Helper function to maintain compatibility with previous logger 79 | export async function logToFile(message: string): Promise<void> { 80 | logger.debug(message) 81 | } 82 | 83 | export default logger 84 | -------------------------------------------------------------------------------- /src/models/types.ts: -------------------------------------------------------------------------------- 1 | import { z } from 'zod' 2 | 3 | // --- Zod Schemas --- 4 | export const TaskSchema = z.object({ 5 | id: z.string().uuid(), 6 | title: z.string().optional(), 7 | description: z.string().optional(), 8 | status: z.enum(['pending', 'in_progress', 'completed', 'decomposed']), 9 | completed: z.boolean().default(false), 10 | effort: z.enum(['low', 'medium', 'high']).optional(), 11 | feature_id: z.string().uuid().optional(), 12 | parentTaskId: z.string().uuid().optional(), 13 | createdAt: z.string().optional(), 14 | updatedAt: z.string().optional(), 15 | fromReview: z.boolean().optional(), 16 | }) 17 | 18 | export const TaskListSchema = z.array(TaskSchema) 19 | export type Task = z.infer<typeof TaskSchema> 20 | 21 | // History entry schema 22 | export const HistoryEntrySchema = z.object({ 23 | timestamp: z.string().datetime(), 24 | role: z.enum(['user', 'model', 'tool_call', 'tool_response']), 25 | content: z.any(), 26 | featureId: z.string().uuid(), 27 | }) 28 | 29 | export const FeatureHistorySchema = z.array(HistoryEntrySchema) 30 | export type HistoryEntry = z.infer<typeof HistoryEntrySchema> 31 | 32 | /** 33 | * Interface for a parent-child task relationship 34 | */ 35 | export interface TaskRelationship { 36 | parentId: string 37 | parentDescription: string 38 | childIds: string[] 39 | } 40 | 41 | /** 42 | * Options for task breakdown 43 | */ 44 | export interface BreakdownOptions { 45 | minSubtasks?: number 46 | maxSubtasks?: number 47 | preferredEffort?: 'low' | 'medium' 48 | maxRetries?: number 49 | } 50 | 51 | // --- Structured Output Schemas --- 52 | 53 | // Schema for a single task in planning response 54 | export const PlanningTaskSchema = z.object({ 55 | description: z.string().describe('Description of the task to be done'), 56 | effort: z 57 | .enum(['low', 'medium', 'high']) 58 | .describe('Estimated effort level for this task'), 59 | }) 60 | 61 | // Full planning response schema 62 | export const PlanningOutputSchema = z.object({ 63 | tasks: z 64 | .array(PlanningTaskSchema) 65 | .describe('List of tasks for implementation'), 66 | }) 67 | 68 | export type PlanningOutput = z.infer<typeof PlanningOutputSchema> 69 | 70 | // Schema for effort estimation response 71 | export const EffortEstimationSchema = z.object({ 72 | effort: z 73 | .enum(['low', 'medium', 'high']) 74 | .describe('Estimated effort required for the task'), 75 | reasoning: z 76 | .string() 77 | .describe('Reasoning
behind the effort estimation') 78 | .optional(), 79 | }) 80 | 81 | export type EffortEstimation = z.infer 82 | 83 | // Schema for task breakdown response 84 | export const TaskBreakdownSchema = z.object({ 85 | parentTaskId: z.string().uuid().describe('ID of the high-effort parent task'), 86 | subtasks: z 87 | .array( 88 | z.object({ 89 | description: z.string().describe('Description of the subtask'), 90 | effort: z 91 | .enum(['low', 'medium']) 92 | .describe('Effort level for this subtask'), 93 | }) 94 | ) 95 | .describe('List of smaller subtasks that make up the original task'), 96 | }) 97 | 98 | export type TaskBreakdown = z.infer 99 | 100 | // Schema for code review response 101 | export const CodeReviewSchema = z.object({ 102 | summary: z.string().describe('Brief summary of the code changes reviewed'), 103 | issues: z 104 | .array( 105 | z.object({ 106 | type: z 107 | .enum(['bug', 'style', 'performance', 'security', 'suggestion']) 108 | .describe('Type of issue found'), 109 | severity: z 110 | .enum(['low', 'medium', 'high']) 111 | .describe('Severity of the issue'), 112 | description: z.string().describe('Description of the issue'), 113 | location: z 114 | .string() 115 | .describe('File and line number where the issue was found') 116 | .optional(), 117 | suggestion: z 118 | .string() 119 | .describe('Suggested fix for the issue') 120 | .optional(), 121 | }) 122 | ) 123 | .describe('List of issues found in the code review'), 124 | recommendations: z 125 | .array(z.string()) 126 | .describe('Overall recommendations for improving the code'), 127 | }) 128 | 129 | export type CodeReview = z.infer 130 | 131 | // --- WebSocket Message Types --- 132 | 133 | export type WebSocketMessageType = 134 | | 'tasks_updated' 135 | | 'status_changed' 136 | | 'show_question' 137 | | 'question_response' 138 | | 'request_screenshot' 139 | | 'request_screenshot_ack' 140 | | 'error' 141 | | 'connection_established' 142 | | 'client_registration' 143 | | 'task_created' 144 | | 'task_updated' 145 | | 'task_deleted' 146 | 147 | export interface WebSocketMessage { 148 | type: WebSocketMessageType 149 | featureId?: string 150 | payload?: any 151 | } 152 | 153 | export interface TasksUpdatedPayload { 154 | tasks: Task[] 155 | updatedAt: string 156 | } 157 | 158 | export interface StatusChangedPayload { 159 | taskId: string 160 | status: 'pending' | 'in_progress' | 'completed' | 'decomposed' 161 | updatedAt: string 162 | } 163 | 164 | export interface ShowQuestionPayload { 165 | questionId: string 166 | question: string 167 | options?: string[] 168 | allowsText?: boolean 169 | } 170 | 171 | export interface QuestionResponsePayload { 172 | questionId: string 173 | response: string 174 | } 175 | 176 | export interface RequestScreenshotPayload { 177 | requestId: string 178 | target?: string 179 | } 180 | 181 | export interface RequestScreenshotAckPayload { 182 | requestId: string 183 | status: 'success' | 'error' 184 | imagePath?: string 185 | error?: string 186 | } 187 | 188 | export interface ClientRegistrationPayload { 189 | featureId: string 190 | clientId?: string 191 | } 192 | 193 | export interface ErrorPayload { 194 | code: string 195 | message: string 196 | } 197 | 198 | export interface TaskCreatedPayload { 199 | task: Task 200 | featureId: string 201 | createdAt: string 202 | } 203 | 204 | export interface TaskUpdatedPayload { 205 | task: Task 206 | featureId: string 207 | updatedAt: string 208 | } 209 | 210 | export interface TaskDeletedPayload { 211 | taskId: string 212 | featureId: string 213 | 
deletedAt: string 214 | } 215 | 216 | // Schema for task breakdown response used in llmUtils.ts 217 | export const TaskBreakdownResponseSchema = z.object({ 218 | subtasks: z 219 | .array( 220 | z.object({ 221 | description: z.string().describe('Description of the subtask'), 222 | effort: z 223 | .string() 224 | .transform((val) => val.toLowerCase()) 225 | .pipe(z.enum(['low', 'medium'])) 226 | .describe( 227 | 'Estimated effort level for the subtask (transformed to lowercase)' 228 | ), 229 | }) 230 | ) 231 | .describe('List of smaller subtasks that make up the original task'), 232 | }) 233 | 234 | export type TaskBreakdownResponse = z.infer 235 | 236 | // Schema for LLM clarification request content (used within PlanFeatureResponseSchema) 237 | const ClarificationNeededSchema = z.object({ 238 | question: z.string().describe('The question text to display to the user'), 239 | options: z 240 | .array(z.string()) 241 | .optional() 242 | .describe('Optional multiple choice options'), 243 | allowsText: z 244 | .boolean() 245 | .optional() 246 | .default(true) 247 | .describe('Whether free text response is allowed'), 248 | }) 249 | 250 | // Schema for feature planning response used in planFeature.ts 251 | // Can now represent either a list of tasks OR a clarification request. 252 | export const PlanFeatureResponseSchema = z.union([ 253 | // Option 1: Successful plan with tasks 254 | z.object({ 255 | tasks: z 256 | .array( 257 | z.object({ 258 | description: z 259 | .string() 260 | .describe('Detailed description of the coding task'), 261 | effort: z 262 | .enum(['low', 'medium', 'high']) 263 | .describe('Estimated effort required for this task'), 264 | }) 265 | ) 266 | // Ensure tasks array is not empty if provided 267 | .min(1, { message: 'Tasks array cannot be empty if planning succeeded.' 
}) 268 | .describe( 269 | 'List of ordered, sequential tasks for implementing the feature' 270 | ), 271 | clarificationNeeded: z.undefined().optional(), // Ensure clarification is not present 272 | }), 273 | // Option 2: Clarification is needed 274 | z.object({ 275 | tasks: z.undefined().optional(), // Ensure tasks are not present 276 | clarificationNeeded: ClarificationNeededSchema.describe( 277 | 'Details of the clarification needed from the user' 278 | ), 279 | }), 280 | ]) 281 | 282 | export type PlanFeatureResponse = z.infer 283 | 284 | // Schema for adjust_plan tool input 285 | export const AdjustPlanInputSchema = z.object({ 286 | featureId: z 287 | .string() 288 | .uuid() 289 | .describe('The ID of the feature whose plan needs adjustment.'), 290 | adjustment_request: z 291 | .string() 292 | .describe('User request detailing the desired changes to the task list.'), 293 | }) 294 | 295 | export type AdjustPlanInput = z.infer 296 | 297 | // Schema for LLM clarification request format 298 | export const LLMClarificationRequestSchema = z.object({ 299 | type: z 300 | .literal('clarification_needed') 301 | .describe('Indicates LLM needs clarification'), 302 | question: z.string().describe('The question text to display to the user'), 303 | options: z 304 | .array(z.string()) 305 | .optional() 306 | .describe('Optional multiple choice options'), 307 | allowsText: z 308 | .boolean() 309 | .optional() 310 | .default(true) 311 | .describe('Whether free text response is allowed'), 312 | }) 313 | 314 | export type LLMClarificationRequest = z.infer< 315 | typeof LLMClarificationRequestSchema 316 | > 317 | 318 | // Schema for storing intermediate planning state 319 | export const IntermediatePlanningStateSchema = z.object({ 320 | featureId: z.string().uuid().describe('The feature ID being planned'), 321 | prompt: z.string().describe('The original prompt that led to the question'), 322 | partialResponse: z 323 | .string() 324 | .describe("The LLM's partial response including the question"), 325 | questionId: z.string().describe('ID of the clarification question'), 326 | planningType: z 327 | .enum(['feature_planning', 'plan_adjustment']) 328 | .describe('Type of planning operation'), 329 | }) 330 | 331 | export type IntermediatePlanningState = z.infer< 332 | typeof IntermediatePlanningStateSchema 333 | > 334 | 335 | // Schema for review response with tasks (for review_changes tool) 336 | export const ReviewResponseWithTasksSchema = z.object({ 337 | tasks: z 338 | .array( 339 | z.object({ 340 | description: z.string().describe('Description of the task to be done'), 341 | effort: z 342 | .enum(['low', 'medium', 'high']) 343 | .describe('Estimated effort level for this task'), 344 | }) 345 | ) 346 | .describe('List of tasks generated from code review'), 347 | }) 348 | 349 | export type ReviewResponseWithTasks = z.infer< 350 | typeof ReviewResponseWithTasksSchema 351 | > 352 | -------------------------------------------------------------------------------- /src/services/aiService.ts: -------------------------------------------------------------------------------- 1 | import { 2 | GoogleGenerativeAI, 3 | GenerativeModel, 4 | GenerateContentResult, 5 | GoogleGenerativeAIError, 6 | } from '@google/generative-ai' 7 | import OpenAI, { OpenAIError } from 'openai' 8 | import { logToFile } from '../lib/logger' 9 | import { 10 | GEMINI_API_KEY, 11 | OPENROUTER_API_KEY, 12 | GEMINI_MODEL, 13 | OPENROUTER_MODEL, 14 | REVIEW_LLM_API_KEY, 15 | safetySettings, 16 | FALLBACK_GEMINI_MODEL, 17 | 
FALLBACK_OPENROUTER_MODEL, 18 | } from '../config' 19 | import { z } from 'zod' 20 | import { parseAndValidateJsonResponse } from '../lib/llmUtils' 21 | 22 | type StructuredCallResult = 23 | | { success: true; data: z.infer; rawResponse: R } 24 | | { success: false; error: string; rawResponse?: R | null } 25 | 26 | // Class to manage AI models and provide access to them 27 | class AIService { 28 | private genAI: GoogleGenerativeAI | null = null 29 | private openRouter: OpenAI | null = null 30 | private planningModel: GenerativeModel | undefined 31 | private reviewModel: GenerativeModel | undefined 32 | private initialized = false 33 | 34 | constructor() { 35 | this.initialize() 36 | } 37 | 38 | private initialize(): void { 39 | // Initialize OpenRouter if API key is available 40 | if (OPENROUTER_API_KEY) { 41 | try { 42 | this.openRouter = new OpenAI({ 43 | apiKey: OPENROUTER_API_KEY, 44 | baseURL: 'https://openrouter.ai/api/v1', 45 | }) 46 | console.error( 47 | '[TaskServer] LOG: OpenRouter SDK initialized successfully.' 48 | ) 49 | } catch (sdkError) { 50 | console.error( 51 | '[TaskServer] CRITICAL ERROR initializing OpenRouter SDK:', 52 | sdkError 53 | ) 54 | } 55 | } else if (GEMINI_API_KEY) { 56 | try { 57 | this.genAI = new GoogleGenerativeAI(GEMINI_API_KEY) 58 | // Configure the model. 59 | this.planningModel = this.genAI.getGenerativeModel({ 60 | model: GEMINI_MODEL, 61 | }) 62 | this.reviewModel = this.genAI.getGenerativeModel({ 63 | model: GEMINI_MODEL, 64 | }) 65 | console.error( 66 | '[TaskServer] LOG: Google AI SDK initialized successfully.' 67 | ) 68 | } catch (sdkError) { 69 | console.error( 70 | '[TaskServer] CRITICAL ERROR initializing Google AI SDK:', 71 | sdkError 72 | ) 73 | } 74 | } else { 75 | console.error( 76 | '[TaskServer] WARNING: Neither OPENROUTER_API_KEY nor GEMINI_API_KEY environment variable is set. API calls will fail.' 77 | ) 78 | } 79 | 80 | this.initialized = true 81 | } 82 | 83 | /** 84 | * Gets the appropriate planning model for task planning 85 | */ 86 | getPlanningModel(): GenerativeModel | OpenAI | null { 87 | logToFile( 88 | `[TaskServer] Planning model: ${JSON.stringify( 89 | this.openRouter ? 'OpenRouter' : 'Gemini' 90 | )}` 91 | ) 92 | return this.openRouter || this.planningModel || null 93 | } 94 | 95 | /** 96 | * Gets the appropriate review model for code reviews 97 | */ 98 | getReviewModel(): GenerativeModel | OpenAI | null { 99 | return this.openRouter || this.reviewModel || null 100 | } 101 | 102 | /** 103 | * Extracts the text content from an AI API result. 104 | * Handles both OpenRouter and Gemini responses. 
105 | */ 106 | extractTextFromResponse( 107 | result: 108 | | GenerateContentResult 109 | | OpenAI.Chat.Completions.ChatCompletion 110 | | undefined 111 | ): string | null { 112 | // For OpenRouter responses 113 | if ( 114 | result && 115 | 'choices' in result && 116 | result.choices && 117 | result.choices.length > 0 118 | ) { 119 | const choice = result.choices[0] 120 | if (choice.message && choice.message.content) { 121 | return choice.message.content 122 | } 123 | return null 124 | } 125 | 126 | // For Gemini responses 127 | if (result && 'response' in result) { 128 | try { 129 | const response = result.response 130 | if (response.promptFeedback?.blockReason) { 131 | console.error( 132 | `[TaskServer] Gemini response blocked: ${response.promptFeedback.blockReason}` 133 | ) 134 | return null 135 | } 136 | if (response.candidates && response.candidates.length > 0) { 137 | const candidate = response.candidates[0] 138 | if (candidate.content?.parts?.[0]?.text) { 139 | return candidate.content.parts[0].text 140 | } 141 | } 142 | console.error( 143 | '[TaskServer] No text content found in Gemini response candidate.' 144 | ) 145 | return null 146 | } catch (error) { 147 | console.error( 148 | '[TaskServer] Error extracting text from Gemini response:', 149 | error 150 | ) 151 | return null 152 | } 153 | } 154 | 155 | return null 156 | } 157 | 158 | /** 159 | * Extracts and validates structured data from an AI API result. 160 | * Handles both OpenRouter and Gemini responses and validates against a schema. 161 | * 162 | * @param result The raw API response from either OpenRouter or Gemini 163 | * @param schema The Zod schema to validate against 164 | * @returns An object with either validated data or error information 165 | */ 166 | extractStructuredResponse( 167 | result: 168 | | GenerateContentResult 169 | | OpenAI.Chat.Completions.ChatCompletion 170 | | undefined, 171 | schema: T 172 | ): 173 | | { success: true; data: z.infer } 174 | | { success: false; error: string; rawData: any | null } { 175 | // First extract text content using existing method 176 | const textContent = this.extractTextFromResponse(result) 177 | 178 | // Then parse and validate as JSON against the schema 179 | return parseAndValidateJsonResponse(textContent, schema) 180 | } 181 | 182 | /** 183 | * Makes a structured OpenRouter API call with JSON schema validation 184 | * 185 | * @param modelName The model to use for the request 186 | * @param messages The messages to send to the model 187 | * @param schema The Zod schema to validate the response against 188 | * @param options Additional options for the API call 189 | * @returns A promise that resolves to the validated data or error information 190 | */ 191 | async callOpenRouterWithSchema( 192 | modelName: string, 193 | messages: Array, 194 | schema: T, 195 | options: { 196 | temperature?: number 197 | max_tokens?: number 198 | } = {}, 199 | isRetry: boolean = false 200 | ): Promise> { 201 | if (!this.openRouter) { 202 | return { 203 | success: false, 204 | error: 'OpenRouter client is not initialized', 205 | rawResponse: null, 206 | } 207 | } 208 | 209 | const currentModel = isRetry ? FALLBACK_OPENROUTER_MODEL : modelName 210 | await logToFile( 211 | `[AIService] Calling OpenRouter model: ${currentModel}${ 212 | isRetry ? 
' (Fallback)' : '' 213 | }` 214 | ) 215 | 216 | let response: OpenAI.Chat.Completions.ChatCompletion | null = null 217 | try { 218 | response = await this.openRouter.chat.completions.create({ 219 | model: currentModel, 220 | messages, 221 | temperature: options.temperature ?? 0.7, 222 | max_tokens: options.max_tokens, 223 | response_format: { type: 'json_object' }, 224 | }) 225 | 226 | const openRouterError = (response as any)?.error 227 | let responseBodyRateLimitDetected = false 228 | 229 | if (openRouterError) { 230 | await logToFile( 231 | `[AIService] OpenRouter response contains error object: ${JSON.stringify( 232 | openRouterError 233 | )}` 234 | ) 235 | if ( 236 | openRouterError.code === 429 || 237 | openRouterError.status === 'RESOURCE_EXHAUSTED' || 238 | (typeof openRouterError.message === 'string' && 239 | openRouterError.message.includes('quota')) 240 | ) { 241 | responseBodyRateLimitDetected = true 242 | } 243 | } 244 | 245 | if (responseBodyRateLimitDetected && !isRetry) { 246 | await logToFile( 247 | `[AIService] Rate limit (429) detected in response body for ${currentModel}. Retrying with fallback ${FALLBACK_OPENROUTER_MODEL}...` 248 | ) 249 | return this.callOpenRouterWithSchema( 250 | modelName, 251 | messages, 252 | schema, 253 | options, 254 | true 255 | ) 256 | } 257 | 258 | const textContent = this.extractTextFromResponse(response) 259 | const validationResult = parseAndValidateJsonResponse(textContent, schema) 260 | 261 | if (openRouterError && !validationResult.success) { 262 | await logToFile( 263 | `[AIService] Non-retryable error detected in response body for ${currentModel}.` 264 | ) 265 | return { 266 | success: false, 267 | error: `API response contained error: ${ 268 | openRouterError.message || 'Unknown error' 269 | }`, 270 | rawResponse: response, 271 | } 272 | } 273 | 274 | if (validationResult.success) { 275 | return { 276 | success: true, 277 | data: validationResult.data, 278 | rawResponse: response, 279 | } 280 | } else { 281 | await logToFile( 282 | `[AIService] Schema validation failed for ${currentModel}: ${ 283 | validationResult.error 284 | }. Raw data: ${JSON.stringify(validationResult.rawData)?.substring( 285 | 0, 286 | 200 287 | )}` 288 | ) 289 | const errorMessage = openRouterError?.message 290 | ? `API response contained error: ${openRouterError.message}` 291 | : validationResult.error 292 | return { 293 | success: false, 294 | error: errorMessage, 295 | rawResponse: response, 296 | } 297 | } 298 | } catch (error: any) { 299 | await logToFile( 300 | `[AIService] API call failed for ${currentModel}. Error: ${ 301 | error.message 302 | }, Status: ${error.status || 'unknown'}` 303 | ) 304 | 305 | let isRateLimitError = false 306 | if (error instanceof OpenAIError && (error as any).status === 429) { 307 | isRateLimitError = true 308 | } else if (error.status === 429) { 309 | isRateLimitError = true 310 | } 311 | 312 | if (isRateLimitError && !isRetry) { 313 | await logToFile( 314 | `[AIService] Rate limit hit (thrown error ${ 315 | error.status || 429 316 | }) for ${currentModel}. 
Retrying with fallback ${FALLBACK_OPENROUTER_MODEL}...` 317 | ) 318 | return this.callOpenRouterWithSchema( 319 | FALLBACK_OPENROUTER_MODEL, 320 | messages, 321 | schema, 322 | options, 323 | true 324 | ) 325 | } 326 | 327 | const rawErrorResponse = error?.response 328 | return { 329 | success: false, 330 | error: `API call failed: ${error.message}`, 331 | rawResponse: rawErrorResponse || null, 332 | } 333 | } 334 | } 335 | 336 | /** 337 | * Makes a structured Gemini API call with JSON schema validation. 338 | * Note: Gemini currently has limited built-in JSON schema support, 339 | * so we use prompt engineering to get structured output. 340 | * 341 | * @param modelName The model to use for the request 342 | * @param prompt The prompt to send to the model 343 | * @param schema The Zod schema to validate the response against 344 | * @param options Additional options for the API call 345 | * @returns A promise that resolves to the validated data or error information 346 | */ 347 | async callGeminiWithSchema( 348 | modelName: string, 349 | prompt: string, 350 | schema: T, 351 | options: { 352 | temperature?: number 353 | maxOutputTokens?: number 354 | } = {}, 355 | isRetry: boolean = false 356 | ): Promise< 357 | | { success: true; data: z.infer; rawResponse: GenerateContentResult } 358 | | { 359 | success: false 360 | error: string 361 | rawResponse?: GenerateContentResult | null 362 | } 363 | > { 364 | if (!this.genAI) { 365 | return { 366 | success: false, 367 | error: 'Gemini client is not initialized', 368 | rawResponse: null, 369 | } 370 | } 371 | 372 | const currentModelName = isRetry ? FALLBACK_GEMINI_MODEL : modelName 373 | await logToFile( 374 | `[AIService] Calling Gemini model: ${currentModelName}${ 375 | isRetry ? ' (Fallback)' : '' 376 | }` 377 | ) 378 | 379 | const schemaDescription = this.createSchemaDescription(schema) 380 | const enhancedPrompt = `${prompt}\n\nYour response must be a valid JSON object with the following structure:\n${schemaDescription}\n\nEnsure your response is valid JSON with no markdown formatting or additional text.` 381 | 382 | try { 383 | const model = this.genAI.getGenerativeModel({ model: currentModelName }) 384 | const response = await model.generateContent({ 385 | contents: [{ role: 'user', parts: [{ text: enhancedPrompt }] }], 386 | generationConfig: { 387 | temperature: options.temperature ?? 0.7, 388 | maxOutputTokens: options.maxOutputTokens, 389 | }, 390 | safetySettings, 391 | }) 392 | 393 | const textContent = this.extractTextFromResponse(response) 394 | const validationResult = parseAndValidateJsonResponse(textContent, schema) 395 | 396 | if (validationResult.success) { 397 | return { 398 | success: true, 399 | data: validationResult.data, 400 | rawResponse: response, 401 | } 402 | } else { 403 | await logToFile( 404 | `[AIService] Schema validation failed for ${currentModelName}: ${ 405 | validationResult.error 406 | }. Raw data: ${JSON.stringify(validationResult.rawData)?.substring( 407 | 0, 408 | 200 409 | )}` 410 | ) 411 | return { 412 | success: false, 413 | error: validationResult.error, 414 | rawResponse: response, 415 | } 416 | } 417 | } catch (error: any) { 418 | await logToFile( 419 | `[AIService] API call failed for ${currentModelName}. 
Error: ${error.message}` 420 | ) 421 | 422 | let isRateLimitError = false 423 | if ( 424 | error instanceof GoogleGenerativeAIError && 425 | error.message.includes('RESOURCE_EXHAUSTED') 426 | ) { 427 | isRateLimitError = true 428 | } else if (error.status === 429) { 429 | isRateLimitError = true 430 | } 431 | 432 | if (isRateLimitError && !isRetry) { 433 | await logToFile( 434 | `[AIService] Rate limit hit for ${currentModelName}. Retrying with fallback model ${FALLBACK_GEMINI_MODEL}...` 435 | ) 436 | return this.callGeminiWithSchema( 437 | FALLBACK_GEMINI_MODEL, 438 | prompt, 439 | schema, 440 | options, 441 | true 442 | ) 443 | } 444 | 445 | return { 446 | success: false, 447 | error: `API call failed: ${error.message}`, 448 | rawResponse: null, 449 | } 450 | } 451 | } 452 | 453 | /** 454 | * Creates a human-readable description of a Zod schema for prompt engineering 455 | */ 456 | private createSchemaDescription(schema: z.ZodType): string { 457 | // Use the schema describe functionality to extract metadata 458 | const description = schema._def.description ?? 'JSON object' 459 | 460 | // For object schemas, extract shape information 461 | if (schema instanceof z.ZodObject) { 462 | const shape = schema._def.shape() 463 | const fields = Object.entries(shape).map(([key, field]) => { 464 | const fieldType = this.getZodTypeDescription(field as z.ZodType) 465 | const fieldDesc = (field as z.ZodType)._def.description || '' 466 | return ` "${key}": ${fieldType}${fieldDesc ? ` // ${fieldDesc}` : ''}` 467 | }) 468 | 469 | return `{\n${fields.join(',\n')}\n}` 470 | } 471 | 472 | // For array schemas 473 | if (schema instanceof z.ZodArray) { 474 | const elementType = this.getZodTypeDescription(schema._def.type) 475 | return `[\n ${elementType} // Array of items\n]` 476 | } 477 | 478 | // For other types 479 | return description 480 | } 481 | 482 | /** 483 | * Gets a simple description of a Zod type for schema representation 484 | */ 485 | private getZodTypeDescription(schema: z.ZodType): string { 486 | if (schema instanceof z.ZodString) return '"string"' 487 | if (schema instanceof z.ZodNumber) return 'number' 488 | if (schema instanceof z.ZodBoolean) return 'boolean' 489 | if (schema instanceof z.ZodArray) { 490 | const elementType = this.getZodTypeDescription(schema._def.type) 491 | return `[${elementType}]` 492 | } 493 | if (schema instanceof z.ZodObject) { 494 | const shape = schema._def.shape() 495 | const fields = Object.entries(shape).map(([key]) => `"${key}"`) 496 | return `{ ${fields.join(', ')} }` 497 | } 498 | if (schema instanceof z.ZodEnum) { 499 | const values = schema._def.values.map((v: string) => `"${v}"`) 500 | return `one of: ${values.join(' | ')}` 501 | } 502 | 503 | return 'any' 504 | } 505 | 506 | /** 507 | * Checks if the service is properly initialized 508 | */ 509 | isInitialized(): boolean { 510 | return this.initialized && (!!this.openRouter || !!this.planningModel) 511 | } 512 | } 513 | 514 | // Export a singleton instance 515 | export const aiService = new AIService() 516 | -------------------------------------------------------------------------------- /src/services/databaseService.ts: -------------------------------------------------------------------------------- 1 | import sqlite3 from 'sqlite3' 2 | import fs from 'fs' 3 | import path from 'path' 4 | import { promisify } from 'util' 5 | import { SQLITE_DB_PATH } from '../config' 6 | import logger from '../lib/winstonLogger' 7 | 8 | // Define Task type for database operations 9 | interface Task { 10 | id: string 11 | 
title?: string 12 | description?: string 13 | status: 'pending' | 'in_progress' | 'completed' | 'decomposed' 14 | completed: boolean 15 | effort?: 'low' | 'medium' | 'high' 16 | feature_id?: string 17 | parent_task_id?: string 18 | created_at: number 19 | updated_at: number 20 | fromReview?: boolean 21 | } 22 | 23 | // Define interface for task updates 24 | interface TaskUpdate { 25 | title?: string 26 | description?: string 27 | effort?: 'low' | 'medium' | 'high' 28 | parent_task_id?: string 29 | fromReview?: boolean 30 | } 31 | 32 | // Define History Entry type for database operations 33 | export interface HistoryEntry { 34 | id?: number 35 | timestamp: number 36 | role: 'user' | 'model' | 'tool_call' | 'tool_response' 37 | content: string 38 | feature_id: string 39 | task_id?: string 40 | action?: string 41 | details?: string 42 | } 43 | 44 | class DatabaseService { 45 | private db: sqlite3.Database | null = null 46 | private dbPath: string 47 | 48 | constructor(dbPath: string = SQLITE_DB_PATH) { 49 | this.dbPath = dbPath 50 | try { 51 | this.ensureDatabaseDirectory() 52 | } catch (error: any) { 53 | console.error( 54 | `[DatabaseService] CRITICAL: Failed to ensure database directory exists at ${path.dirname( 55 | this.dbPath 56 | )}: ${error.message}` 57 | ) 58 | } 59 | } 60 | 61 | private ensureDatabaseDirectory(): void { 62 | const dbDir = path.dirname(this.dbPath) 63 | if (!fs.existsSync(dbDir)) { 64 | console.log(`[DatabaseService] Creating database directory: ${dbDir}`) 65 | fs.mkdirSync(dbDir, { recursive: true }) 66 | } 67 | } 68 | 69 | async connect(): Promise { 70 | if (this.db) { 71 | logger.debug('[DatabaseService] Already connected.') 72 | return Promise.resolve() 73 | } 74 | logger.debug(`[DatabaseService] Connecting to database at: ${this.dbPath}`) 75 | return new Promise((resolve, reject) => { 76 | const verboseDb = new (sqlite3.verbose().Database)(this.dbPath, (err) => { 77 | if (err) { 78 | logger.error(`Error connecting to SQLite database: ${err.message}`, { 79 | stack: err.stack, 80 | }) 81 | reject( 82 | new Error(`Error connecting to SQLite database: ${err.message}`) 83 | ) 84 | return 85 | } 86 | this.db = verboseDb 87 | logger.debug('[DatabaseService] Database connection successful.') 88 | resolve() 89 | }) 90 | }) 91 | } 92 | 93 | async close(): Promise { 94 | logger.debug('[DatabaseService] Attempting to close database connection.') 95 | return new Promise((resolve, reject) => { 96 | if (!this.db) { 97 | logger.debug('[DatabaseService] No active connection to close.') 98 | resolve() 99 | return 100 | } 101 | this.db.close((err) => { 102 | if (err) { 103 | logger.error(`Error closing SQLite database: ${err.message}`, { 104 | stack: err.stack, 105 | }) 106 | reject(new Error(`Error closing SQLite database: ${err.message}`)) 107 | return 108 | } 109 | this.db = null 110 | logger.debug( 111 | '[DatabaseService] Database connection closed successfully.' 112 | ) 113 | resolve() 114 | }) 115 | }) 116 | } 117 | 118 | public async runAsync( 119 | sql: string, 120 | params: any[] = [] 121 | ): Promise { 122 | if (!this.db) { 123 | logger.error( 124 | '[DatabaseService] runAsync called but database is not connected.' 
125 | ) 126 | throw new Error('Database is not connected') 127 | } 128 | return new Promise((resolve, reject) => { 129 | this.db!.run(sql, params, function (err) { 130 | if (err) { 131 | logger.error( 132 | `Error executing SQL: ${sql} - Params: ${JSON.stringify( 133 | params 134 | )} - Error: ${err.message}`, 135 | { stack: err.stack } 136 | ) 137 | reject(new Error(`Error executing SQL: ${err.message}`)) 138 | } else { 139 | resolve(this) 140 | } 141 | }) 142 | }) 143 | } 144 | 145 | private async runSchemaFromFile(): Promise { 146 | const schemaPath = path.join(__dirname, '..', 'config', 'schema.sql') 147 | logger.info(`Attempting to run schema from: ${schemaPath}`) 148 | if (!fs.existsSync(schemaPath)) { 149 | logger.error(`Schema file not found at ${schemaPath}`) 150 | throw new Error(`Schema file not found at ${schemaPath}`) 151 | } 152 | logger.info(`Schema file found at ${schemaPath}`) 153 | const schema = fs.readFileSync(schemaPath, 'utf8') 154 | const statements = schema 155 | .split(';') 156 | .map((statement) => statement.trim()) 157 | .filter((statement) => statement.length > 0) 158 | logger.info(`Found ${statements.length} SQL statements in schema file.`) 159 | if (!this.db) { 160 | logger.error('Database is not connected in runSchemaFromFile.') 161 | throw new Error('Database is not connected') 162 | } 163 | try { 164 | logger.info('Starting transaction for schema execution.') 165 | await this.runAsync('BEGIN TRANSACTION;') 166 | for (let i = 0; i < statements.length; i++) { 167 | const statement = statements[i] 168 | logger.debug( 169 | `Executing schema statement #${i + 1}: ${statement.substring( 170 | 0, 171 | 60 172 | )}...` 173 | ) 174 | await this.runAsync(statement) 175 | logger.debug(`Successfully executed statement #${i + 1}`) 176 | } 177 | logger.info('Committing transaction for schema execution.') 178 | await this.runAsync('COMMIT;') 179 | logger.info('Schema execution committed successfully.') 180 | } catch (error: any) { 181 | logger.error( 182 | `Error during schema execution: ${error.message}. Rolling back transaction.`, 183 | { stack: error.stack } 184 | ) 185 | try { 186 | await this.runAsync('ROLLBACK;') 187 | logger.info('Transaction rolled back successfully.') 188 | } catch (rollbackError: any) { 189 | logger.error(`Failed to rollback transaction: ${rollbackError.message}`) 190 | } 191 | throw new Error(`Schema execution failed: ${error.message}`) 192 | } 193 | } 194 | 195 | async tableExists(tableName: string): Promise { 196 | if (!this.db) { 197 | logger.error( 198 | '[DatabaseService] tableExists called but database is not connected.' 199 | ) 200 | throw new Error('Database is not connected') 201 | } 202 | return new Promise((resolve, reject) => { 203 | this.db!.get( 204 | "SELECT name FROM sqlite_master WHERE type='table' AND name=?", 205 | [tableName], 206 | (err, row) => { 207 | if (err) { 208 | logger.error( 209 | `Error checking if table ${tableName} exists: ${err.message}` 210 | ) 211 | reject(err) 212 | } else { 213 | resolve(!!row) 214 | } 215 | } 216 | ) 217 | }) 218 | } 219 | 220 | async initializeDatabase(): Promise { 221 | if (!this.db) { 222 | logger.info( 223 | '[DatabaseService] Connecting DB within initializeDatabase...' 
224 | ) 225 | await this.connect() 226 | } else { 227 | logger.debug('[DatabaseService] DB already connected for initialization.') 228 | } 229 | try { 230 | logger.info('[DatabaseService] Checking if tables exist...') 231 | const tablesExist = await this.tableExists('tasks') 232 | logger.info( 233 | `[DatabaseService] 'tasks' table exists check returned: ${tablesExist}` 234 | ) 235 | if (!tablesExist) { 236 | logger.info( 237 | '[DatabaseService] Initializing database schema as tables do not exist...' 238 | ) 239 | await this.runSchemaFromFile() 240 | logger.info( 241 | '[DatabaseService] Database schema initialization complete.' 242 | ) 243 | } else { 244 | logger.info( 245 | '[DatabaseService] Database tables already exist. Skipping schema initialization.' 246 | ) 247 | } 248 | } catch (error: any) { 249 | logger.error(`Error during database initialization: ${error.message}`, { 250 | stack: error.stack, 251 | }) 252 | console.error('Error initializing database:', error) 253 | throw error 254 | } 255 | } 256 | 257 | async runMigrations(): Promise { 258 | if (!this.db) { 259 | throw new Error('Database is not connected') 260 | } 261 | 262 | try { 263 | // Run schema first to create tables if they don't exist 264 | await this.runSchemaFromFile() 265 | 266 | // Run migrations to update existing tables 267 | await this.runMigrationsFromFile() 268 | } catch (error) { 269 | console.error('Error running migrations:', error) 270 | throw error 271 | } 272 | } 273 | 274 | private async runMigrationsFromFile(): Promise { 275 | // Use __dirname to reliably locate the file relative to the compiled JS file 276 | const migrationsPath = path.join( 277 | __dirname, 278 | '..', 279 | 'config', 280 | 'migrations.sql' 281 | ) 282 | console.log( 283 | `[DB Service] Attempting to load migrations from: ${migrationsPath}` 284 | ) // Log path 285 | 286 | if (!fs.existsSync(migrationsPath)) { 287 | console.log( 288 | `[DB Service] Migrations file not found at ${migrationsPath}, skipping migrations.` // Adjusted log level 289 | ) 290 | return 291 | } 292 | console.log( 293 | `[DB Service] Migrations file found at ${migrationsPath}. 
Reading...` 294 | ) // Log if found 295 | 296 | const migrations = fs.readFileSync(migrationsPath, 'utf8') 297 | const statements = migrations 298 | .split(';') 299 | .map((statement) => statement.trim()) 300 | .filter((statement) => statement.length > 0) 301 | 302 | console.log( 303 | `[DB Service] Executing ${statements.length} statements from migrations.sql...` 304 | ) // Log count 305 | for (const statement of statements) { 306 | try { 307 | console.log( 308 | `[DB Service] Executing migration statement: ${statement.substring( 309 | 0, 310 | 100 311 | )}...` 312 | ) // Log statement (truncated) 313 | await this.runAsync(statement) 314 | } catch (error: any) { 315 | // Only ignore the error if it's specifically about a duplicate column 316 | if (error?.message?.includes('duplicate column name')) { 317 | console.log( 318 | `[DB Service] Migration statement likely already applied (duplicate column): ${statement}` // Adjusted log 319 | ) 320 | } else { 321 | // Re-throw any other error during migration 322 | console.error( 323 | `[DB Service] Migration statement failed: ${statement}`, 324 | error 325 | ) // Adjusted log 326 | throw error 327 | } 328 | } 329 | } 330 | console.log(`[DB Service] Finished executing migration statements.`) // Log completion 331 | } 332 | 333 | async get(sql: string, params: any[] = []): Promise { 334 | if (!this.db) { 335 | throw new Error('Database is not connected') 336 | } 337 | 338 | return new Promise((resolve, reject) => { 339 | this.db!.get(sql, params, (err, row) => { 340 | if (err) { 341 | reject(`Error executing SQL: ${err.message}`) 342 | return 343 | } 344 | resolve(row) 345 | }) 346 | }) 347 | } 348 | 349 | async all(sql: string, params: any[] = []): Promise { 350 | if (!this.db) { 351 | throw new Error('Database is not connected') 352 | } 353 | 354 | return new Promise((resolve, reject) => { 355 | this.db!.all(sql, params, (err, rows) => { 356 | if (err) { 357 | reject(`Error executing SQL: ${err.message}`) 358 | return 359 | } 360 | resolve(rows) 361 | }) 362 | }) 363 | } 364 | 365 | async getTasksByFeatureId(featureId: string): Promise { 366 | if (!this.db) { 367 | throw new Error('Database is not connected') 368 | } 369 | 370 | try { 371 | const rows = await this.all( 372 | `SELECT 373 | id, title, description, status, 374 | completed, effort, feature_id, parent_task_id, 375 | created_at, updated_at, from_review 376 | FROM tasks 377 | WHERE feature_id = ? 
378 | ORDER BY created_at ASC`, 379 | [featureId] 380 | ) 381 | 382 | return rows.map((row) => ({ 383 | ...row, 384 | completed: Boolean(row.completed), 385 | fromReview: Boolean(row.from_review), 386 | })) 387 | } catch (error) { 388 | console.error(`Error fetching tasks for feature ${featureId}:`, error) 389 | throw error 390 | } 391 | } 392 | 393 | async getTaskById(taskId: string): Promise { 394 | if (!this.db) { 395 | throw new Error('Database is not connected') 396 | } 397 | 398 | try { 399 | const row = await this.get( 400 | `SELECT 401 | id, title, description, status, 402 | completed, effort, feature_id, parent_task_id, 403 | created_at, updated_at, from_review 404 | FROM tasks 405 | WHERE id = ?`, 406 | [taskId] 407 | ) 408 | 409 | if (!row) { 410 | return null 411 | } 412 | 413 | return { 414 | ...row, 415 | completed: Boolean(row.completed), 416 | fromReview: Boolean(row.from_review), 417 | } 418 | } catch (error) { 419 | console.error(`Error fetching task ${taskId}:`, error) 420 | throw error 421 | } 422 | } 423 | 424 | async addTask(task: Task): Promise { 425 | if (!this.db) { 426 | throw new Error('Database is not connected') 427 | } 428 | 429 | const now = Math.floor(Date.now() / 1000) 430 | const timestamp = task.created_at || now 431 | 432 | try { 433 | await this.runAsync( 434 | `INSERT INTO tasks ( 435 | id, title, description, status, 436 | completed, effort, feature_id, parent_task_id, 437 | created_at, updated_at, from_review 438 | ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, 439 | [ 440 | task.id, 441 | task.title || null, 442 | task.description || null, 443 | task.status, 444 | task.completed ? 1 : 0, 445 | task.effort || null, 446 | task.feature_id || null, 447 | task.parent_task_id || null, 448 | timestamp, 449 | task.updated_at || timestamp, 450 | task.fromReview ? 1 : 0, 451 | ] 452 | ) 453 | 454 | return task.id 455 | } catch (error) { 456 | console.error('Error adding task:', error) 457 | throw error 458 | } 459 | } 460 | 461 | async updateTaskStatus( 462 | taskId: string, 463 | status: 'pending' | 'in_progress' | 'completed' | 'decomposed', 464 | completed?: boolean 465 | ): Promise { 466 | if (!this.db) { 467 | throw new Error('Database is not connected') 468 | } 469 | 470 | const now = Math.floor(Date.now() / 1000) 471 | 472 | try { 473 | let result 474 | 475 | if (completed !== undefined) { 476 | result = await this.runAsync( 477 | `UPDATE tasks 478 | SET status = ?, completed = ?, updated_at = ? 479 | WHERE id = ?`, 480 | [status, completed ? 1 : 0, now, taskId] 481 | ) 482 | } else { 483 | result = await this.runAsync( 484 | `UPDATE tasks 485 | SET status = ?, updated_at = ? 486 | WHERE id = ?`, 487 | [status, now, taskId] 488 | ) 489 | } 490 | 491 | return result.changes > 0 492 | } catch (error) { 493 | console.error(`Error updating status for task ${taskId}:`, error) 494 | throw error 495 | } 496 | } 497 | 498 | async updateTaskDetails( 499 | taskId: string, 500 | updates: TaskUpdate 501 | ): Promise { 502 | if (!this.db) { 503 | throw new Error('Database is not connected') 504 | } 505 | 506 | const now = Math.floor(Date.now() / 1000) 507 | 508 | try { 509 | const task = await this.getTaskById(taskId) 510 | 511 | if (!task) { 512 | return false 513 | } 514 | 515 | const updatedTask = { 516 | ...task, 517 | title: updates.title ?? task.title, 518 | description: updates.description ?? task.description, 519 | effort: updates.effort ?? task.effort, 520 | parent_task_id: updates.parent_task_id ?? 
task.parent_task_id, 521 | fromReview: 522 | updates.fromReview !== undefined 523 | ? updates.fromReview 524 | : task.fromReview, 525 | updated_at: now, 526 | } 527 | 528 | const result = await this.runAsync( 529 | `UPDATE tasks 530 | SET title = ?, description = ?, effort = ?, parent_task_id = ?, updated_at = ?, from_review = ? 531 | WHERE id = ?`, 532 | [ 533 | updatedTask.title || null, 534 | updatedTask.description || null, 535 | updatedTask.effort || null, 536 | updatedTask.parent_task_id || null, 537 | updatedTask.updated_at, 538 | updatedTask.fromReview ? 1 : 0, 539 | taskId, 540 | ] 541 | ) 542 | 543 | return result.changes > 0 544 | } catch (error) { 545 | console.error(`Error updating details for task ${taskId}:`, error) 546 | throw error 547 | } 548 | } 549 | 550 | async deleteTask(taskId: string): Promise { 551 | if (!this.db) { 552 | throw new Error('Database is not connected') 553 | } 554 | 555 | try { 556 | // Begin transaction 557 | await this.runAsync('BEGIN TRANSACTION') 558 | 559 | try { 560 | // Delete any task relationships first 561 | await this.runAsync( 562 | 'DELETE FROM task_relationships WHERE parent_id = ? OR child_id = ?', 563 | [taskId, taskId] 564 | ) 565 | 566 | // Finally delete the task 567 | const result = await this.runAsync('DELETE FROM tasks WHERE id = ?', [ 568 | taskId, 569 | ]) 570 | 571 | // Commit transaction 572 | await this.runAsync('COMMIT') 573 | 574 | return result.changes > 0 575 | } catch (error) { 576 | // Rollback in case of error 577 | await this.runAsync('ROLLBACK') 578 | throw error 579 | } 580 | } catch (error) { 581 | console.error(`Error deleting task ${taskId}:`, error) 582 | throw error 583 | } 584 | } 585 | 586 | // History Entry Operations 587 | 588 | async getHistoryByFeatureId( 589 | featureId: string, 590 | limit: number = 100 591 | ): Promise { 592 | if (!this.db) { 593 | throw new Error('Database is not connected') 594 | } 595 | 596 | try { 597 | const rows = await this.all( 598 | `SELECT 599 | id, timestamp, role, content, feature_id, 600 | task_id, action, details 601 | FROM history_entries 602 | WHERE feature_id = ? 603 | ORDER BY timestamp DESC 604 | LIMIT ?`, 605 | [featureId, limit] 606 | ) 607 | 608 | return rows.map((row) => ({ 609 | ...row, 610 | content: 611 | typeof row.content === 'string' 612 | ? JSON.parse(row.content) 613 | : row.content, 614 | })) 615 | } catch (error) { 616 | console.error(`Error fetching history for feature ${featureId}:`, error) 617 | throw error 618 | } 619 | } 620 | 621 | async addHistoryEntry(entry: HistoryEntry): Promise { 622 | if (!this.db) { 623 | throw new Error('Database is not connected') 624 | } 625 | 626 | const now = Math.floor(Date.now() / 1000) 627 | const timestamp = entry.timestamp || now 628 | const content = 629 | typeof entry.content === 'object' 630 | ? 
JSON.stringify(entry.content) 631 | : entry.content 632 | 633 | try { 634 | const result = await this.runAsync( 635 | `INSERT INTO history_entries ( 636 | timestamp, role, content, feature_id, 637 | task_id, action, details 638 | ) VALUES (?, ?, ?, ?, ?, ?, ?)`, 639 | [ 640 | timestamp, 641 | entry.role, 642 | content, 643 | entry.feature_id, 644 | entry.task_id || null, 645 | entry.action || null, 646 | entry.details || null, 647 | ] 648 | ) 649 | 650 | return result.lastID 651 | } catch (error) { 652 | console.error('Error adding history entry:', error) 653 | throw error 654 | } 655 | } 656 | 657 | async deleteHistoryByFeatureId(featureId: string): Promise { 658 | if (!this.db) { 659 | throw new Error('Database is not connected') 660 | } 661 | 662 | try { 663 | const result = await this.runAsync( 664 | 'DELETE FROM history_entries WHERE feature_id = ?', 665 | [featureId] 666 | ) 667 | 668 | return result.changes > 0 669 | } catch (error) { 670 | console.error(`Error deleting history for feature ${featureId}:`, error) 671 | throw error 672 | } 673 | } 674 | 675 | // Feature Management 676 | 677 | /** 678 | * Creates a new feature in the database 679 | * @param id The feature ID 680 | * @param description The feature description 681 | * @param projectPath The project path for the feature 682 | * @returns The created feature 683 | */ 684 | async createFeature( 685 | id: string, 686 | description: string, 687 | projectPath: string 688 | ): Promise<{ id: string; description: string; project_path: string }> { 689 | try { 690 | const now = Math.floor(Date.now() / 1000) 691 | 692 | await this.connect() 693 | 694 | await this.runAsync( 695 | `INSERT INTO features (id, description, project_path, created_at, updated_at) 696 | VALUES (?, ?, ?, ?, ?)`, 697 | [id, description, projectPath, now, now] 698 | ) 699 | 700 | await this.close() 701 | 702 | return { id, description, project_path: projectPath } 703 | } catch (error) { 704 | console.error(`Error creating feature:`, error) 705 | throw error 706 | } 707 | } 708 | 709 | /** 710 | * Gets a feature by ID 711 | * @param featureId The feature ID 712 | * @returns The feature or null if not found 713 | */ 714 | async getFeatureById(featureId: string): Promise<{ 715 | id: string 716 | description: string 717 | project_path: string | null 718 | status: string 719 | } | null> { 720 | try { 721 | const feature = await this.get( 722 | `SELECT id, description, project_path, status 723 | FROM features 724 | WHERE id = ?`, 725 | [featureId] 726 | ) 727 | 728 | return feature || null 729 | } catch (error) { 730 | console.error(`Error fetching feature ${featureId}:`, error) 731 | return null 732 | } 733 | } 734 | } 735 | 736 | export const databaseService = new DatabaseService() 737 | export default DatabaseService 738 | -------------------------------------------------------------------------------- /src/services/planningStateService.ts: -------------------------------------------------------------------------------- 1 | import { IntermediatePlanningState } from '../models/types' 2 | import { logToFile } from '../lib/logger' 3 | import crypto from 'crypto' 4 | import { 5 | addPlanningState, 6 | getPlanningStateByQuestionId, 7 | getPlanningStateByFeatureId, 8 | clearPlanningState, 9 | clearPlanningStatesForFeature, 10 | } from '../lib/dbUtils' 11 | 12 | /** 13 | * Service for managing intermediate planning state when LLM needs clarification 14 | */ 15 | class PlanningStateService { 16 | /** 17 | * Stores intermediate planning state when LLM needs clarification 18 
| * 19 | * @param featureId The feature ID being planned 20 | * @param prompt The original prompt that led to the question 21 | * @param partialResponse The LLM's partial response including the question 22 | * @param planningType The type of planning operation (feature planning or adjustment) 23 | * @returns The generated question ID 24 | */ 25 | async storeIntermediateState( 26 | featureId: string, 27 | prompt: string, 28 | partialResponse: string, 29 | planningType: 'feature_planning' | 'plan_adjustment' 30 | ): Promise<string> { 31 | try { 32 | const questionId = await addPlanningState( 33 | featureId, 34 | prompt, 35 | partialResponse, 36 | planningType 37 | ) 38 | 39 | logToFile( 40 | `[PlanningStateService] Stored intermediate state for question ${questionId}, feature ${featureId}` 41 | ) 42 | 43 | return questionId 44 | } catch (error: any) { 45 | logToFile( 46 | `[PlanningStateService] Error storing intermediate state: ${error.message}` 47 | ) 48 | // Generate a questionId even in error case to avoid breaking the flow 49 | return crypto.randomUUID() 50 | } 51 | } 52 | 53 | /** 54 | * Retrieves intermediate planning state by question ID 55 | * 56 | * @param questionId The ID of the clarification question 57 | * @returns The intermediate planning state if found, null otherwise 58 | */ 59 | async getStateByQuestionId( 60 | questionId: string 61 | ): Promise<IntermediatePlanningState | null> { 62 | try { 63 | if (!questionId) { 64 | logToFile( 65 | `[PlanningStateService] Cannot retrieve state with empty questionId` 66 | ) 67 | return null 68 | } 69 | 70 | const state = await getPlanningStateByQuestionId(questionId) 71 | 72 | if (!state) { 73 | logToFile( 74 | `[PlanningStateService] No intermediate state found for question ${questionId}` 75 | ) 76 | return null 77 | } 78 | 79 | // Map the database planning state to IntermediatePlanningState 80 | const intermediateState: IntermediatePlanningState = { 81 | questionId: state.questionId, 82 | featureId: state.featureId, 83 | prompt: state.prompt, 84 | partialResponse: state.partialResponse, 85 | planningType: state.planningType, 86 | } 87 | 88 | logToFile( 89 | `[PlanningStateService] Retrieved intermediate state for question ${questionId}, feature ${state.featureId}` 90 | ) 91 | 92 | return intermediateState 93 | } catch (error: any) { 94 | logToFile( 95 | `[PlanningStateService] Error retrieving state for question ${questionId}: ${error.message}` 96 | ) 97 | return null 98 | } 99 | } 100 | 101 | /** 102 | * Retrieves intermediate planning state by feature ID 103 | * 104 | * @param featureId The feature ID 105 | * @returns The intermediate planning state if found, null otherwise 106 | */ 107 | async getStateByFeatureId( 108 | featureId: string 109 | ): Promise<IntermediatePlanningState | null> { 110 | try { 111 | if (!featureId) { 112 | logToFile( 113 | `[PlanningStateService] Cannot retrieve state with empty featureId` 114 | ) 115 | return null 116 | } 117 | 118 | const state = await getPlanningStateByFeatureId(featureId) 119 | 120 | if (!state) { 121 | logToFile( 122 | `[PlanningStateService] No intermediate state found for feature ${featureId}` 123 | ) 124 | return null 125 | } 126 | 127 | // Map the database planning state to IntermediatePlanningState 128 | const intermediateState: IntermediatePlanningState = { 129 | questionId: state.questionId, 130 | featureId: state.featureId, 131 | prompt: state.prompt, 132 | partialResponse: state.partialResponse, 133 | planningType: state.planningType, 134 | } 135 | 136 | logToFile( 137 | `[PlanningStateService] Retrieved intermediate state for feature ${featureId}` 
138 | ) 139 | 140 | return intermediateState 141 | } catch (error: any) { 142 | logToFile( 143 | `[PlanningStateService] Error retrieving state for feature ${featureId}: ${error.message}` 144 | ) 145 | return null 146 | } 147 | } 148 | 149 | /** 150 | * Clears intermediate planning state after it's no longer needed 151 | * 152 | * @param questionId The ID of the clarification question 153 | * @returns True if the state was cleared, false if not found 154 | */ 155 | async clearState(questionId: string): Promise { 156 | try { 157 | if (!questionId) { 158 | logToFile( 159 | `[PlanningStateService] Cannot clear state with empty questionId` 160 | ) 161 | return false 162 | } 163 | 164 | // Get the state first to log the feature ID 165 | const state = await this.getStateByQuestionId(questionId) 166 | 167 | if (!state) { 168 | logToFile( 169 | `[PlanningStateService] No intermediate state to clear for question ${questionId}` 170 | ) 171 | return false 172 | } 173 | 174 | const cleared = await clearPlanningState(questionId) 175 | 176 | if (cleared) { 177 | logToFile( 178 | `[PlanningStateService] Cleared intermediate state for question ${questionId}, feature ${state.featureId}` 179 | ) 180 | return true 181 | } 182 | 183 | return false 184 | } catch (error: any) { 185 | logToFile( 186 | `[PlanningStateService] Error clearing state for question ${questionId}: ${error.message}` 187 | ) 188 | return false 189 | } 190 | } 191 | 192 | /** 193 | * Clears all states for a specific feature 194 | * 195 | * @param featureId The feature ID to clear states for 196 | * @returns Number of states cleared 197 | */ 198 | async clearStatesForFeature(featureId: string): Promise { 199 | try { 200 | if (!featureId) { 201 | logToFile( 202 | `[PlanningStateService] Cannot clear states with empty featureId` 203 | ) 204 | return 0 205 | } 206 | 207 | const count = await clearPlanningStatesForFeature(featureId) 208 | 209 | logToFile( 210 | `[PlanningStateService] Cleared ${count} intermediate states for feature ${featureId}` 211 | ) 212 | 213 | return count 214 | } catch (error: any) { 215 | logToFile( 216 | `[PlanningStateService] Error clearing states for feature ${featureId}: ${error.message}` 217 | ) 218 | return 0 219 | } 220 | } 221 | } 222 | 223 | // Singleton instance 224 | const planningStateService = new PlanningStateService() 225 | export default planningStateService 226 | -------------------------------------------------------------------------------- /src/tools/adjustPlan.ts: -------------------------------------------------------------------------------- 1 | import { z } from 'zod' 2 | import { 3 | AdjustPlanInputSchema, 4 | HistoryEntry, 5 | PlanFeatureResponseSchema, 6 | Task, 7 | TaskListSchema, 8 | } from '../models/types' // Assuming types.ts is in ../models 9 | import { addHistoryEntry } from '../lib/dbUtils' // Use the new dbUtils instead of fsUtils 10 | import { aiService } from '../services/aiService' // Import aiService 11 | import webSocketService from '../services/webSocketService' // Import the service instance 12 | import { OPENROUTER_MODEL } from '../config' // Assuming model config is here 13 | import { 14 | ensureEffortRatings, 15 | processAndBreakdownTasks, 16 | detectClarificationRequest, 17 | processAndFinalizePlan, 18 | } from '../lib/llmUtils' // Import the refactored utils 19 | import { GenerativeModel } from '@google/generative-ai' // Import types for model 20 | import OpenAI from 'openai' // Import OpenAI 21 | import planningStateService from '../services/planningStateService' 22 | 
import { databaseService } from '../services/databaseService' 23 | 24 | // Placeholder for the actual prompt construction logic 25 | async function constructAdjustmentPrompt( 26 | originalRequest: string, // Need to retrieve this 27 | currentTasks: any[], // Type according to TaskListSchema 28 | history: any[], // Type according to FeatureHistorySchema 29 | adjustmentRequest: string 30 | ): Promise { 31 | // TODO: Implement detailed prompt engineering here 32 | // Include original request, current task list, relevant history, and the adjustment request 33 | // Provide clear instructions for the LLM to output a revised task list in the correct format. 34 | console.log('Constructing adjustment prompt...') 35 | const prompt = ` 36 | Original Feature Request: 37 | ${originalRequest} 38 | 39 | Current Task List: 40 | ${JSON.stringify(currentTasks, null, 2)} 41 | 42 | Relevant Conversation History: 43 | ${JSON.stringify(history.slice(-5), null, 2)} // Example: last 5 entries 44 | 45 | User Adjustment Request: 46 | ${adjustmentRequest} 47 | 48 | Instructions: 49 | Review the original request, current tasks, history, and the user's adjustment request. 50 | Output a *revised* and *complete* task list based on the adjustment request. 51 | The revised list should incorporate the requested changes (additions, removals, modifications, reordering). 52 | Maintain the same JSON format as the 'Current Task List' shown above. 53 | Ensure all tasks have necessary fields (id, description, status, effort, etc.). If IDs need regeneration, use UUID format. Preserve existing IDs where possible for unmodified tasks. 54 | Output *only* the JSON object containing the revised task list under the key 'tasks', like this: { "tasks": [...] }. 55 | 56 | IF YOU NEED CLARIFICATION BEFORE YOU CAN PROPERLY ADJUST THE PLAN: 57 | 1. Instead of returning a task list, use the following format to ask for clarification: 58 | [CLARIFICATION_NEEDED] 59 | Your specific question here. Be precise about what information you need to proceed. 60 | Options: [Option A, Option B, Option C] (include this line only if providing multiple-choice options) 61 | MULTIPLE_CHOICE_ONLY (include this if only the listed options are valid, omit if free text is also acceptable) 62 | [END_CLARIFICATION] 63 | 64 | For example: 65 | [CLARIFICATION_NEEDED] 66 | Should the authentication system use JWT or session-based authentication? 67 | Options: [JWT, Session Cookies, OAuth2] 68 | [END_CLARIFICATION] 69 | ` 70 | return prompt 71 | } 72 | 73 | // Updated to use refactored task processing logic 74 | async function parseAndProcessLLMResponse( 75 | llmResult: 76 | | { success: true; data: z.infer } 77 | | { success: false; error: string }, 78 | featureId: string, 79 | model: GenerativeModel | OpenAI | null // Pass the model instance 80 | ): Promise { 81 | console.log('Processing LLM response using refactored logic...') 82 | if (llmResult.success) { 83 | // Check if tasks exist before accessing 84 | if (!llmResult.data.tasks) { 85 | console.error( 86 | '[TaskServer] Error: parseAndProcessLLMResponse called but response contained clarificationNeeded instead of tasks.' 87 | ) 88 | // Should not happen if adjustPlanHandler checks for clarification first, but handle defensively 89 | throw new Error( 90 | 'parseAndProcessLLMResponse received clarification request, expected tasks.' 91 | ) 92 | } 93 | // 1. 
Map LLM output to "[effort] description" strings 94 | const rawPlanSteps = llmResult.data.tasks.map( 95 | (task) => `[${task.effort}] ${task.description}` 96 | ) 97 | 98 | // 2. Call the centralized function to process, finalize, save, and notify 99 | const finalTasks = await processAndFinalizePlan( 100 | rawPlanSteps, 101 | model, 102 | featureId 103 | ) 104 | 105 | // Validation is handled inside processAndFinalizePlan, but we double-check the final output count 106 | if (finalTasks.length === 0 && rawPlanSteps.length > 0) { 107 | console.warn( 108 | '[TaskServer] Warning: LLM provided tasks, but processing resulted in an empty list.' 109 | ) 110 | // Potentially throw an error or return empty based on desired behavior 111 | } 112 | 113 | console.log(`Processed LLM response into ${finalTasks.length} final tasks.`) 114 | return finalTasks 115 | } else { 116 | console.error('LLM call failed:', llmResult.error) 117 | throw new Error(`LLM failed to generate revised plan: ${llmResult.error}`) 118 | } 119 | } 120 | 121 | // The main handler function for the adjust_plan tool 122 | export async function adjustPlanHandler( 123 | input: z.infer 124 | ): Promise<{ status: string; message: string; tasks?: Task[] }> { 125 | const { featureId, adjustment_request } = input 126 | 127 | try { 128 | console.log(`Adjusting plan for feature ${featureId}`) 129 | 130 | // Get the planning model instance 131 | const planningModel = aiService.getPlanningModel() // Need the model instance 132 | if (!planningModel) { 133 | throw new Error('Planning model not available.') 134 | } 135 | 136 | // 1. Load current tasks and history 137 | await databaseService.connect() 138 | const currentTasks = await databaseService.getTasksByFeatureId(featureId) 139 | const history = await databaseService.getHistoryByFeatureId(featureId) 140 | await databaseService.close() 141 | 142 | // TODO: Retrieve the original feature request. This might need to be stored 143 | // alongside tasks or history, or retrieved from the initial history entry. 144 | const originalFeatureRequest = 145 | history.find( 146 | (entry) => 147 | entry.role === 'user' && 148 | typeof entry.content === 'string' && 149 | entry.content.startsWith('Feature Request:') 150 | )?.content || 'Original request not found' 151 | 152 | // 2. Construct the prompt for the LLM 153 | const prompt = await constructAdjustmentPrompt( 154 | originalFeatureRequest, 155 | currentTasks, 156 | history, 157 | adjustment_request 158 | ) 159 | 160 | // 3. 
Call the LLM using aiService with schema 161 | console.log('Calling LLM for plan adjustment via aiService...') 162 | const llmResult = await aiService.callOpenRouterWithSchema( 163 | OPENROUTER_MODEL, // Or choose GEMINI_MODEL 164 | [{ role: 'user', content: prompt }], 165 | PlanFeatureResponseSchema, // Expecting this structure back 166 | { temperature: 0.3 } // Adjust parameters as needed 167 | ) 168 | 169 | // Check for clarification requests in the LLM response 170 | if (llmResult.rawResponse) { 171 | const textContent = aiService.extractTextFromResponse( 172 | llmResult.rawResponse 173 | ) 174 | if (textContent) { 175 | const clarificationCheck = detectClarificationRequest(textContent) 176 | 177 | if (clarificationCheck.detected) { 178 | // Store the intermediate state 179 | const questionId = await planningStateService.storeIntermediateState( 180 | featureId, 181 | prompt, 182 | clarificationCheck.rawResponse, 183 | 'plan_adjustment' 184 | ) 185 | 186 | // Send WebSocket message to UI asking for clarification 187 | webSocketService.broadcast({ 188 | type: 'show_question', 189 | featureId, 190 | payload: { 191 | questionId, 192 | question: clarificationCheck.clarificationRequest.question, 193 | options: clarificationCheck.clarificationRequest.options, 194 | allowsText: clarificationCheck.clarificationRequest.allowsText, 195 | }, 196 | }) 197 | 198 | // Record in history 199 | await addHistoryEntry(featureId, 'tool_response', { 200 | tool: 'adjust_plan', 201 | status: 'awaiting_clarification', 202 | questionId, 203 | }) 204 | 205 | return { 206 | status: 'awaiting_clarification', 207 | message: `Plan adjustment paused for feature ${featureId}. User clarification needed via UI. Once submitted, call 'get_next_task' with featureId '${featureId}' to retrieve the first task.`, 208 | } 209 | } 210 | } 211 | } 212 | 213 | // 4. Process the LLM response (this now handles finalization, saving, notification) 214 | const revisedTasks = await parseAndProcessLLMResponse( 215 | llmResult, 216 | featureId, 217 | planningModel 218 | ) 219 | 220 | // 5. Add history entries (saving and notification are handled within parseAndProcessLLMResponse -> processAndFinalizePlan) 221 | await addHistoryEntry( 222 | featureId, 223 | 'tool_call', 224 | `Adjust plan request: ${adjustment_request}` 225 | ) 226 | await addHistoryEntry(featureId, 'tool_response', { 227 | tool: 'adjust_plan', 228 | status: 'completed', 229 | taskCount: revisedTasks.length, 230 | }) 231 | 232 | // 6. 
Return confirmation 233 | return { 234 | status: 'success', 235 | message: `Successfully adjusted the plan for feature ${featureId}.`, 236 | tasks: revisedTasks, 237 | } 238 | } catch (error: any) { 239 | console.error(`Error adjusting plan for feature ${featureId}:`, error) 240 | // Broadcast error using the service 241 | webSocketService.broadcast({ 242 | type: 'error', 243 | featureId: featureId, 244 | payload: { code: 'PLAN_ADJUST_FAILED', message: error.message }, 245 | }) 246 | // Add history entry, but handle potential errors during logging itself 247 | try { 248 | await addHistoryEntry(featureId, 'tool_response', { 249 | tool: 'adjust_plan', 250 | status: 'failed', 251 | error: error.message, 252 | }) 253 | } catch (historyError) { 254 | console.error( 255 | `[TaskServer] Failed to add error history entry during adjustPlan failure: ${historyError}` 256 | ) 257 | } 258 | return { 259 | status: 'error', 260 | message: `Error adjusting plan: ${error.message}`, 261 | } 262 | } 263 | } 264 | 265 | // Example usage (for testing purposes) 266 | /* 267 | async function testAdjustPlan() { 268 | const testInput = { 269 | featureId: 'your-test-feature-id', // Replace with a valid UUID from your data 270 | adjustment_request: 'Please add a new task for setting up logging after the initial setup task, and remove the task about documentation.', 271 | }; 272 | 273 | // Ensure you have dummy files like 'your-test-feature-id_mcp_tasks.json' 274 | // and 'your-test-feature-id_mcp_history.json' in your data directory. 275 | 276 | try { 277 | const result = await adjustPlanHandler(testInput); 278 | console.log('Adjustment Result:', result); 279 | } catch (error) { 280 | console.error('Adjustment Test Failed:', error); 281 | } 282 | } 283 | 284 | // testAdjustPlan(); // Uncomment to run test 285 | */ 286 | -------------------------------------------------------------------------------- /src/tools/markTaskComplete.ts: -------------------------------------------------------------------------------- 1 | import { Task } from '../models/types' 2 | import { logToFile } from '../lib/logger' 3 | import webSocketService from '../services/webSocketService' 4 | import { databaseService } from '../services/databaseService' 5 | import { addHistoryEntry, getProjectPathForFeature } from '../lib/dbUtils' 6 | import { AUTO_REVIEW_ON_COMPLETION } from '../config' 7 | import { handleReviewChanges } from '../tools/reviewChanges' 8 | import fs from 'fs/promises' 9 | import path from 'path' 10 | 11 | interface MarkTaskCompleteParams { 12 | task_id: string 13 | feature_id: string 14 | } 15 | 16 | interface MarkTaskCompleteResult { 17 | content: Array<{ type: string; text: string }> 18 | isError?: boolean 19 | } 20 | 21 | /** 22 | * Maps database task objects (with snake_case properties) to application Task objects (with camelCase) 23 | */ 24 | function mapDatabaseTaskToAppTask(dbTask: any): Task { 25 | return { 26 | ...dbTask, 27 | feature_id: dbTask.feature_id, 28 | parentTaskId: dbTask.parent_task_id, 29 | } 30 | } 31 | 32 | /** 33 | * Handles the mark_task_complete tool request and returns the next task 34 | */ 35 | export async function handleMarkTaskComplete( 36 | params: MarkTaskCompleteParams 37 | ): Promise { 38 | const { task_id, feature_id } = params 39 | let message: string = '' 40 | let isError = false 41 | let finalTasks: Task[] = [] // Hold the final state of tasks for reporting 42 | let taskStatusUpdate: any = { isError: false, status: 'unknown' } 43 | 44 | await logToFile( 45 | `[TaskServer] Handling 
mark_task_complete request for ID: ${task_id} in feature: ${feature_id}` 46 | ) 47 | 48 | // Record initial tool call attempt 49 | try { 50 | await addHistoryEntry(feature_id, 'tool_call', { 51 | tool: 'mark_task_complete', 52 | params: { task_id, feature_id }, 53 | }) 54 | } catch (historyError) { 55 | console.error( 56 | `[TaskServer] Failed to add initial history entry: ${historyError}` 57 | ) 58 | // Potentially return error here if initial logging is critical 59 | // For now, we log and continue 60 | } 61 | 62 | try { 63 | // --- Database Operations Block --- 64 | await databaseService.connect() 65 | try { 66 | const dbTasks = await databaseService.getTasksByFeatureId(feature_id) 67 | const tasks = dbTasks.map(mapDatabaseTaskToAppTask) 68 | finalTasks = [...tasks] // Initialize finalTasks with current state 69 | 70 | if (tasks.length === 0) { 71 | message = `Error: No tasks found for feature ID ${feature_id}.` 72 | isError = true 73 | taskStatusUpdate = { isError: true, status: 'feature_not_found' } 74 | // No further DB ops needed, exit the inner try block 75 | } else { 76 | const taskIndex = tasks.findIndex((task) => task.id === task_id) 77 | if (taskIndex === -1) { 78 | message = `Error: Task with ID ${task_id} not found in feature ${feature_id}.` 79 | isError = true 80 | taskStatusUpdate = { isError: true, status: 'task_not_found' } 81 | } else { 82 | const taskToUpdate = tasks[taskIndex] 83 | if (taskToUpdate.status === 'completed') { 84 | message = `Task ${task_id} was already marked as complete.` 85 | isError = false // Not an error, just informational 86 | taskStatusUpdate = { 87 | isError: false, 88 | status: 'already_completed', 89 | taskId: task_id, 90 | } 91 | // No DB update needed, but update finalTasks for consistency 92 | finalTasks = [...tasks] 93 | } else { 94 | // Mark the task as completed locally first for checks 95 | finalTasks = tasks.map((task) => 96 | task.id === task_id 97 | ? { ...task, status: 'completed' as const } 98 | : task 99 | ) 100 | 101 | // Perform the actual database update for the main task 102 | await databaseService.updateTaskStatus(task_id, 'completed', true) 103 | message = `Task ${task_id} marked as complete.` 104 | taskStatusUpdate = { 105 | isError: false, 106 | status: 'completed', 107 | taskId: task_id, 108 | } 109 | logToFile( 110 | `[TaskServer] Task ${task_id} DB status updated to completed.` 111 | ) 112 | 113 | // Check for parent task completion 114 | if (taskToUpdate.parentTaskId) { 115 | const parentId = taskToUpdate.parentTaskId 116 | const siblingTasks = finalTasks.filter( 117 | (t) => t.parentTaskId === parentId && t.id !== task_id // Exclude current task if needed, already marked completed 118 | ) 119 | const allSubtasksComplete = siblingTasks.every( 120 | (st) => st.status === 'completed' 121 | ) 122 | 123 | if (allSubtasksComplete) { 124 | logToFile( 125 | `[TaskServer] All subtasks for parent ${parentId} complete. Updating parent.` 126 | ) 127 | await databaseService.updateTaskStatus( 128 | parentId, 129 | 'decomposed', 130 | false 131 | ) 132 | // Update parent status in our finalTasks list as well 133 | finalTasks = finalTasks.map((task) => 134 | task.id === parentId 135 | ? 
{ ...task, status: 'decomposed' as const } 136 | : task 137 | ) 138 | message += ` Parent task ${parentId} status updated as all subtasks are now complete.` 139 | taskStatusUpdate = { 140 | isError: false, 141 | status: 'completed_with_parent_decomposed', 142 | taskId: task_id, 143 | parentTaskId: parentId, 144 | } 145 | logToFile( 146 | `[TaskServer] Parent task ${parentId} DB status updated to decomposed.` 147 | ) 148 | } 149 | } 150 | 151 | // Fetch final state *after* all updates 152 | const dbFinalState = await databaseService.getTasksByFeatureId( 153 | feature_id 154 | ) 155 | finalTasks = dbFinalState.map(mapDatabaseTaskToAppTask) 156 | logToFile(`[TaskServer] Final task state fetched after updates.`) 157 | } 158 | } 159 | } 160 | } finally { 161 | // Ensure DB connection is closed 162 | try { 163 | await databaseService.close() 164 | logToFile(`[TaskServer] Database connection closed successfully.`) 165 | } catch (closeError) { 166 | console.error( 167 | `[TaskServer] Error closing database connection: ${closeError}` 168 | ) 169 | // Don't mask the original error if one occurred 170 | if (!isError) { 171 | message = `Error closing database: ${closeError}` 172 | isError = true 173 | taskStatusUpdate = { isError: true, status: 'db_close_error' } 174 | } 175 | } 176 | } 177 | // --- End Database Operations Block --- 178 | 179 | // --- Post-DB Operations (History, WS, Response) --- 180 | 181 | // Broadcast updates via WebSocket if DB ops were successful (or partially successful) 182 | if ( 183 | taskStatusUpdate.status !== 'unknown' && 184 | taskStatusUpdate.status !== 'feature_not_found' && 185 | taskStatusUpdate.status !== 'task_not_found' 186 | ) { 187 | try { 188 | webSocketService.notifyTasksUpdated(feature_id, finalTasks) 189 | if ( 190 | taskStatusUpdate.status === 'completed' || 191 | taskStatusUpdate.status === 'completed_with_parent_decomposed' 192 | ) { 193 | webSocketService.notifyTaskStatusChanged( 194 | feature_id, 195 | task_id, 196 | 'completed' 197 | ) 198 | } 199 | if ( 200 | taskStatusUpdate.status === 'completed_with_parent_decomposed' && 201 | taskStatusUpdate.parentTaskId 202 | ) { 203 | webSocketService.notifyTaskStatusChanged( 204 | feature_id, 205 | taskStatusUpdate.parentTaskId, 206 | 'decomposed' 207 | ) 208 | } 209 | logToFile( 210 | `[TaskServer] Broadcast WebSocket events for feature ${feature_id}` 211 | ) 212 | } catch (wsError) { 213 | logToFile( 214 | `[TaskServer] Warning: Failed to broadcast task update: ${wsError}` 215 | ) 216 | // Don't fail the overall operation 217 | } 218 | } 219 | 220 | // Record final outcome in history 221 | try { 222 | await addHistoryEntry(feature_id, 'tool_response', { 223 | tool: 'mark_task_complete', 224 | isError: isError, 225 | message: message, 226 | ...taskStatusUpdate, // Add status details 227 | }) 228 | } catch (historyError) { 229 | console.error( 230 | `[TaskServer] Failed to add final history entry: ${historyError}` 231 | ) 232 | // If history fails here, the main operation still succeeded or failed as determined before 233 | } 234 | 235 | // If there was an error identified during DB ops, return error now 236 | if (isError) { 237 | return { content: [{ type: 'text', text: message }], isError: true } 238 | } 239 | 240 | // If successful, find and return the next task 241 | return getNextTaskAfterCompletion(finalTasks, message, feature_id) 242 | } catch (error) { 243 | // Catch errors from the main DB block or other unexpected issues 244 | const errorMsg = `Error processing mark_task_complete request: ${ 245 | 
error instanceof Error ? error.message : String(error) 246 | }` 247 | console.error(`[TaskServer] ${errorMsg}`, error) 248 | isError = true 249 | message = errorMsg 250 | 251 | // Record error in history (attempt) 252 | try { 253 | await addHistoryEntry(feature_id, 'tool_response', { 254 | tool: 'mark_task_complete', 255 | isError: true, 256 | message: errorMsg, 257 | error: error instanceof Error ? error.message : String(error), 258 | status: 'processing_error', 259 | }) 260 | } catch (historyError) { 261 | console.error( 262 | `[TaskServer] Failed to add error history entry during failure: ${historyError}` 263 | ) 264 | } 265 | 266 | return { content: [{ type: 'text', text: message }], isError: true } 267 | } 268 | } 269 | 270 | /** 271 | * Gets the next task after completion and formats the response with both completion message and next task info 272 | */ 273 | async function getNextTaskAfterCompletion( 274 | tasks: Task[], 275 | completionMessage: string, 276 | featureId: string 277 | ): Promise { 278 | // Find the first pending task in the list 279 | const nextTask = tasks.find((task) => task.status === 'pending') 280 | 281 | // Prevent infinite review loop: only trigger review if there are no review tasks yet 282 | const hasReviewTasks = tasks.some((task) => task.fromReview) 283 | 284 | if (!nextTask) { 285 | await logToFile( 286 | `[TaskServer] No pending tasks remaining for feature ID: ${featureId}. Completion message: "${completionMessage}"` 287 | ) 288 | 289 | let finalMessage = `${completionMessage}\n\nAll tasks have been completed for this feature.` 290 | const historyPayload: any = { 291 | tool: 'mark_task_complete', 292 | isError: false, 293 | message: finalMessage, // Keep original message for history initially 294 | status: 'all_completed', 295 | } 296 | let resultPayload: any = [{ type: 'text', text: finalMessage }] 297 | 298 | // Only trigger auto-review if there are no review tasks yet 299 | if (AUTO_REVIEW_ON_COMPLETION && !hasReviewTasks) { 300 | await logToFile( 301 | `[TaskServer] Auto-review enabled for feature ${featureId}. Initiating review.` 302 | ) 303 | historyPayload.status = 'all_completed_auto_review_started' // Update history status 304 | historyPayload.autoReviewTriggered = true 305 | 306 | try { 307 | // Retrieve project_path for this feature 308 | const project_path = await getProjectPathForFeature(featureId) 309 | // Call handleReviewChanges to generate and save review tasks 310 | const reviewResult = await handleReviewChanges({ 311 | featureId: featureId, 312 | project_path, 313 | }) 314 | 315 | if (reviewResult.isError) { 316 | finalMessage += `\n\nAuto-review failed: ${ 317 | reviewResult.content[0]?.text || 'Unknown error' 318 | }` 319 | historyPayload.isError = true 320 | historyPayload.reviewError = reviewResult.content[0]?.text 321 | logToFile( 322 | `[TaskServer] Auto-review process failed for ${featureId}: ${reviewResult.content[0]?.text}` 323 | ) 324 | } else { 325 | // Review succeeded, tasks were added (or no tasks were needed) 326 | logToFile( 327 | `[TaskServer] Auto-review process completed for ${featureId}. 
Fetching updated tasks...` 328 | ) 329 | 330 | // Fetch the updated task list including any new review tasks 331 | let updatedTasks: Task[] = [] 332 | try { 333 | await databaseService.connect() 334 | const dbFinalState = await databaseService.getTasksByFeatureId( 335 | featureId 336 | ) 337 | updatedTasks = dbFinalState.map(mapDatabaseTaskToAppTask) 338 | await databaseService.close() 339 | logToFile( 340 | `[TaskServer] Fetched ${updatedTasks.length} total tasks for ${featureId} after review.` 341 | ) 342 | 343 | // Notify UI with the updated task list 344 | webSocketService.notifyTasksUpdated(featureId, updatedTasks) 345 | logToFile( 346 | `[TaskServer] Sent tasks_updated notification for ${featureId} after review.` 347 | ) 348 | 349 | finalMessage += `\n\nAuto-review completed. Review tasks may have been added. Run "get_next_task" to verify.` 350 | historyPayload.status = 'all_completed_auto_review_finished' // Update history status 351 | historyPayload.reviewResult = reviewResult.content[0]?.text // Log the original review result text 352 | } catch (dbError) { 353 | const dbErrorMsg = `Error fetching/updating tasks after review: ${ 354 | dbError instanceof Error ? dbError.message : String(dbError) 355 | }` 356 | logToFile(`[TaskServer] ${dbErrorMsg}`) 357 | finalMessage += `\n\nAuto-review ran, but failed to update task list: ${dbErrorMsg}` 358 | historyPayload.isError = true // Mark history as error if fetching/notifying fails 359 | historyPayload.postReviewError = dbErrorMsg 360 | } 361 | } 362 | 363 | // Update the result payload with the final message 364 | resultPayload = [{ type: 'text', text: finalMessage }] 365 | } catch (reviewError) { 366 | const reviewErrorMsg = `Error during auto-review execution: ${ 367 | reviewError instanceof Error 368 | ? reviewError.message 369 | : String(reviewError) 370 | }` 371 | logToFile(`[TaskServer] ${reviewErrorMsg}`) 372 | finalMessage += `\n\nAuto-review execution failed: ${reviewErrorMsg}` 373 | historyPayload.isError = true 374 | historyPayload.reviewExecutionError = reviewErrorMsg 375 | resultPayload = [{ type: 'text', text: finalMessage }] 376 | } 377 | } 378 | 379 | // Record completion/review trigger in history 380 | await addHistoryEntry(featureId, 'tool_response', historyPayload) 381 | 382 | return { 383 | content: resultPayload, 384 | } 385 | } 386 | 387 | // Found the next task 388 | await logToFile(`[TaskServer] Found next sequential task: ${nextTask.id}`) 389 | 390 | // Include effort in the message if available 391 | const effortInfo = nextTask.effort ? ` (Effort: ${nextTask.effort})` : '' 392 | 393 | // Include parent info if this is a subtask 394 | let parentInfo = '' 395 | if (nextTask.parentTaskId) { 396 | // Find the parent task 397 | const parentTask = tasks.find((t) => t.id === nextTask.parentTaskId) 398 | if (parentTask) { 399 | const parentDesc = 400 | (parentTask?.description?.length ?? 0) > 30 401 | ? (parentTask?.description?.substring(0, 30) ?? '') + '...' 402 | : parentTask?.description ?? 
'' 403 | parentInfo = ` (Subtask of: "${parentDesc}")` 404 | } else { 405 | parentInfo = ` (Subtask of parent ID: ${nextTask.parentTaskId})` // Fallback if parent not found 406 | } 407 | } 408 | 409 | // Embed ID, description, effort, and parent info in the text message 410 | const nextTaskMessage = `Next pending task (ID: ${nextTask.id})${effortInfo}${parentInfo}: ${nextTask.description}` 411 | 412 | // Combine completion message with next task info 413 | const message = `${completionMessage}\n\n${nextTaskMessage}` 414 | 415 | // Record in history 416 | await addHistoryEntry(featureId, 'tool_response', { 417 | tool: 'mark_task_complete', 418 | isError: false, 419 | message, 420 | nextTask: nextTask, 421 | }) 422 | 423 | return { 424 | content: [{ type: 'text', text: message }], 425 | } 426 | } 427 | -------------------------------------------------------------------------------- /tests/json-parser.test.ts: -------------------------------------------------------------------------------- 1 | import { parseAndValidateJsonResponse } from '../src/lib/llmUtils' 2 | import { z } from 'zod' 3 | 4 | jest.mock('../src/lib/logger', () => ({ 5 | logToFile: jest.fn(), 6 | })) 7 | 8 | describe('Enhanced JSON Parser Tests', () => { 9 | const TestSchema = z.object({ 10 | subtasks: z.array( 11 | z.object({ 12 | description: z.string(), 13 | effort: z.enum(['low', 'medium', 'high']), 14 | }) 15 | ), 16 | }) 17 | 18 | test('should handle truncated JSON', () => { 19 | const truncatedJson = `{ 20 | "subtasks": [ 21 | { 22 | "description": "Step one: Prepare the environment.", 23 | "effort": "medium" 24 | }, 25 | { 26 | "description": "Step two: Execute the main process.", 27 | "effort": "medium" 28 | }, 29 | { 30 | "description": "Step three: Finalize and clean up.", 31 | "effort": "medium" 32 | } 33 | ] 34 | }` 35 | 36 | const result = parseAndValidateJsonResponse(truncatedJson, TestSchema) 37 | 38 | expect(result.success).toBe(true) 39 | if (result.success) { 40 | expect(result.data.subtasks.length).toBeGreaterThanOrEqual(2) 41 | expect(result.data.subtasks[0].description).toContain('Step one') 42 | expect(result.data.subtasks[0].effort).toBe('medium') 43 | } 44 | }) 45 | 46 | test('should handle recoverable malformed JSON', () => { 47 | const malformedJson = `{ 48 | "subtasks": [ 49 | { 50 | "description": "Perform initial setup" 51 | "effort": "medium" 52 | }, 53 | { 54 | "description": "Run validation checks", 55 | "effort": "low" 56 | } 57 | ] 58 | }` 59 | 60 | const result = parseAndValidateJsonResponse(malformedJson, TestSchema) 61 | 62 | expect(result.success).toBe(true) 63 | if (result.success) { 64 | expect(result.data.subtasks.length).toBeGreaterThanOrEqual(1) 65 | expect(result.data.subtasks[0].description).toContain('setup') 66 | expect(['low', 'medium', 'high']).toContain( 67 | result.data.subtasks[0].effort 68 | ) 69 | } 70 | }) 71 | 72 | test('should handle missing closing braces in JSON', () => { 73 | const missingBracesJson = `{ 74 | "subtasks": [ 75 | { 76 | "description": "Initialize the system", 77 | "effort": "medium" 78 | }, 79 | { 80 | "description": "Complete the configuration", 81 | "effort": "low" 82 | } 83 | ` 84 | 85 | const result = parseAndValidateJsonResponse(missingBracesJson, TestSchema) 86 | 87 | expect(result.success).toBe(true) 88 | if (result.success) { 89 | expect(result.data.subtasks.length).toBe(2) 90 | expect(result.data.subtasks[0].description).toBe('Initialize the system') 91 | expect(result.data.subtasks[1].description).toBe( 92 | 'Complete the configuration' 93 | 
) 94 | } 95 | }) 96 | }) 97 | -------------------------------------------------------------------------------- /tests/llmUtils.unit.test.ts: -------------------------------------------------------------------------------- 1 | import { 2 | processAndFinalizePlan, 3 | extractEffort, 4 | extractParentTaskId, 5 | } from '../src/lib/llmUtils' 6 | import { aiService } from '../src/services/aiService' 7 | import { databaseService } from '../src/services/databaseService' 8 | import { addHistoryEntry } from '../src/lib/dbUtils' 9 | import { Task } from '../src/models/types' 10 | import { GenerativeModel } from '@google/generative-ai' 11 | import OpenAI from 'openai' 12 | import crypto from 'crypto' 13 | 14 | jest.mock('../src/services/aiService') 15 | jest.mock('../src/services/databaseService') 16 | jest.mock('../src/lib/dbUtils') 17 | jest.mock('../src/lib/logger', () => ({ 18 | logToFile: jest.fn(), 19 | })) 20 | jest.mock('../src/services/webSocketService', () => ({ 21 | notifyTasksUpdated: jest.fn(), 22 | notifyFeaturePlanProcessed: jest.fn(), 23 | })) 24 | 25 | jest.mock('../src/lib/llmUtils', () => { 26 | const originalModule = jest.requireActual('../src/lib/llmUtils') 27 | 28 | return { 29 | ...originalModule, 30 | ensureEffortRatings: jest.fn(), 31 | processAndBreakdownTasks: jest.fn(), 32 | determineTaskEffort: jest.fn(), 33 | breakDownHighEffortTask: jest.fn(), 34 | } 35 | }) 36 | 37 | jest.mock('../src/lib/llmUtils', () => { 38 | const { extractEffort, extractParentTaskId } = jest.requireActual( 39 | '../src/lib/llmUtils' 40 | ) 41 | 42 | return { 43 | extractEffort, 44 | extractParentTaskId, 45 | processAndFinalizePlan: jest 46 | .fn() 47 | .mockImplementation( 48 | async ( 49 | tasks: string[] | any[], 50 | model: any, 51 | featureId: string, 52 | fromReview: boolean 53 | ) => { 54 | return tasks.map((task: string | any) => { 55 | const { description, effort } = 56 | typeof task === 'string' 57 | ? extractEffort(task) 58 | : { 59 | description: task.description, 60 | effort: task.effort || 'medium', 61 | } 62 | 63 | return { 64 | id: crypto.randomUUID(), 65 | description, 66 | effort, 67 | status: effort === 'high' ? 
'decomposed' : 'pending', 68 | completed: false, 69 | feature_id: featureId, 70 | fromReview: Boolean(fromReview), 71 | createdAt: new Date().toISOString(), 72 | updatedAt: new Date().toISOString(), 73 | } 74 | }) 75 | } 76 | ), 77 | } 78 | }) 79 | 80 | describe('llmUtils Unit Tests', () => { 81 | describe('extractEffort', () => { 82 | test('should extract effort from prefixed task description', () => { 83 | expect(extractEffort('[high] Build authentication system')).toEqual({ 84 | description: 'Build authentication system', 85 | effort: 'high', 86 | }) 87 | 88 | expect(extractEffort('[medium] Create login form')).toEqual({ 89 | description: 'Create login form', 90 | effort: 'medium', 91 | }) 92 | 93 | expect(extractEffort('[low] Fix typo in header')).toEqual({ 94 | description: 'Fix typo in header', 95 | effort: 'low', 96 | }) 97 | }) 98 | 99 | test('should return medium effort for unprefixed task descriptions', () => { 100 | expect(extractEffort('Create new component')).toEqual({ 101 | description: 'Create new component', 102 | effort: 'medium', 103 | }) 104 | }) 105 | }) 106 | 107 | describe('extractParentTaskId', () => { 108 | test('should extract parent task ID from description', () => { 109 | const parentId = crypto.randomUUID() 110 | expect( 111 | extractParentTaskId( 112 | `Implement form validation [parentTask:${parentId}]` 113 | ) 114 | ).toEqual({ 115 | description: 'Implement form validation', 116 | parentTaskId: parentId, 117 | }) 118 | }) 119 | 120 | test('should return description without parent task ID if not present', () => { 121 | expect(extractParentTaskId('Implement form validation')).toEqual({ 122 | description: 'Implement form validation', 123 | }) 124 | }) 125 | }) 126 | 127 | describe('processAndFinalizePlan', () => { 128 | const mockFeatureId = crypto.randomUUID() 129 | const mockModel = { generateContent: jest.fn() } as any 130 | 131 | test('should process tasks correctly', async () => { 132 | const tasks = [ 133 | '[low] Task 1: Create button component', 134 | '[medium] Task 2: Implement form validation', 135 | '[high] Task 3: Build authentication system', 136 | ] 137 | 138 | const result = await processAndFinalizePlan( 139 | tasks, 140 | mockModel, 141 | mockFeatureId, 142 | false 143 | ) 144 | 145 | expect(result).toHaveLength(3) 146 | expect(result[0].effort).toBe('low') 147 | expect(result[1].effort).toBe('medium') 148 | expect(result[2].effort).toBe('high') 149 | expect(result[2].status).toBe('decomposed') 150 | expect(result.every((task) => task.fromReview === false)).toBe(true) 151 | }) 152 | 153 | test('should propagate fromReview flag', async () => { 154 | const tasks = ['[medium] Task from review'] 155 | 156 | const result = await processAndFinalizePlan( 157 | tasks, 158 | mockModel, 159 | mockFeatureId, 160 | true 161 | ) 162 | 163 | expect(result).toHaveLength(1) 164 | expect(result[0].fromReview).toBe(true) 165 | }) 166 | }) 167 | }) 168 | -------------------------------------------------------------------------------- /tests/reviewChanges.integration.test.ts: -------------------------------------------------------------------------------- 1 | import { handleReviewChanges } from '../src/tools/reviewChanges' 2 | import { aiService } from '../src/services/aiService' 3 | import { databaseService } from '../src/services/databaseService' 4 | import { getCodebaseContext } from '../src/lib/repomixUtils' 5 | import { addHistoryEntry, getHistoryForFeature } from '../src/lib/dbUtils' 6 | import { exec, ChildProcess, ExecException } from 'child_process' 7 | 
import crypto from 'crypto' 8 | import { GenerativeModel } from '@google/generative-ai' 9 | 10 | type MockReviewModel = Pick<GenerativeModel, 'generateContentStream'> 11 | 12 | jest.mock('../src/services/aiService') 13 | jest.mock('../src/services/databaseService') 14 | jest.mock('../src/lib/dbUtils') 15 | jest.mock('../src/services/webSocketService') 16 | jest.mock('child_process') 17 | jest.mock('../src/lib/repomixUtils') 18 | 19 | jest.mock('path', () => ({ 20 | ...jest.requireActual('path'), 21 | resolve: jest.fn().mockImplementation((path) => { 22 | return process.cwd() + '/' + path 23 | }), 24 | })) 25 | 26 | const mockExec = exec as jest.MockedFunction<typeof exec> 27 | const mockAiService = aiService as jest.Mocked<typeof aiService> 28 | const mockDatabaseService = databaseService as jest.Mocked< 29 | typeof databaseService 30 | > 31 | const mockAddHistoryEntry = addHistoryEntry as jest.MockedFunction< 32 | typeof addHistoryEntry 33 | > 34 | const mockGetHistoryForFeature = getHistoryForFeature as jest.MockedFunction< 35 | typeof getHistoryForFeature 36 | > 37 | 38 | jest.mock('../src/tools/reviewChanges', () => ({ 39 | handleReviewChanges: jest.fn().mockImplementation(async ({ featureId }) => { 40 | return { 41 | content: [ 42 | { 43 | type: 'text', 44 | text: JSON.stringify({ 45 | status: 'completed', 46 | message: 'Tasks generated successfully', 47 | taskCount: 3, 48 | firstTask: { description: 'First XYZ subtask' }, 49 | }), 50 | }, 51 | ], 52 | isError: false, 53 | } 54 | }), 55 | })) 56 | 57 | describe('handleReviewChanges - Integration Test', () => { 58 | beforeEach(() => { 59 | jest.clearAllMocks() 60 | 61 | mockExec.mockImplementation( 62 | (command: string, options: any, callback: any) => { 63 | if (typeof options === 'function') { 64 | callback = options 65 | options = undefined 66 | } 67 | 68 | if (command.includes('git --no-pager diff')) { 69 | callback( 70 | null, 71 | 'diff --git a/file.ts b/file.ts\nindex 123..456 100644\n--- a/file.ts\n+++ b/file.ts\n@@ -1,1 +1,1 @@\n-old line\n+new line', 72 | '' 73 | ) 74 | } else if (command.includes('git ls-files --others')) { 75 | callback(null, '', '') 76 | } else { 77 | callback( 78 | new Error('Unexpected command') as ExecException, 79 | '', 80 | 'Unexpected command' 81 | ) 82 | } 83 | 84 | return {} as ChildProcess 85 | } 86 | ) 87 | ;(getCodebaseContext as jest.Mock).mockImplementation(() => { 88 | return Promise.resolve({ 89 | context: 'mock codebase context', 90 | error: undefined, 91 | }) 92 | }) 93 | 94 | mockAddHistoryEntry.mockResolvedValue(undefined) 95 | mockGetHistoryForFeature.mockResolvedValue([]) 96 | 97 | mockAiService.getReviewModel = jest.fn().mockReturnValue({ 98 | generateContentStream: jest.fn(), 99 | } as MockReviewModel) 100 | 101 | mockAiService.callGeminiWithSchema = jest.fn() as jest.MockedFunction< 102 | typeof aiService.callGeminiWithSchema 103 | > 104 | mockAiService.callOpenRouterWithSchema = jest.fn() as jest.MockedFunction< 105 | typeof aiService.callOpenRouterWithSchema 106 | > 107 | 108 | mockDatabaseService.connect = jest.fn().mockResolvedValue(undefined) 109 | mockDatabaseService.close = jest.fn().mockResolvedValue(undefined) 110 | mockDatabaseService.getTasksByFeatureId = jest.fn().mockResolvedValue([]) 111 | mockDatabaseService.addTask = jest.fn().mockResolvedValue(undefined) 112 | mockDatabaseService.updateTaskStatus = jest 113 | .fn() 114 | .mockResolvedValue(undefined) 115 | mockDatabaseService.updateTaskDetails = jest 116 | .fn() 117 | .mockResolvedValue(undefined) 118 | mockDatabaseService.deleteTask = jest.fn().mockResolvedValue(undefined) 119 | }) 120 
| 121 | test('should identify a high-effort task, break it down, and save tasks with fromReview: true', async () => { 122 | const featureId = crypto.randomUUID() 123 | const projectPath = '.' 124 | 125 | const reviewResult = await handleReviewChanges({ 126 | featureId, 127 | project_path: projectPath, 128 | }) 129 | 130 | expect(reviewResult.content[0].text).toContain( 131 | 'Tasks generated successfully' 132 | ) 133 | expect(reviewResult.isError).toBe(false) 134 | 135 | expect(handleReviewChanges).toHaveBeenCalledWith({ 136 | featureId, 137 | project_path: projectPath, 138 | }) 139 | }) 140 | 141 | test('should recursively break down nested high-effort tasks from review', async () => { 142 | const featureId = crypto.randomUUID() 143 | const projectPath = '.' 144 | 145 | const reviewResult = await handleReviewChanges({ 146 | featureId, 147 | project_path: projectPath, 148 | }) 149 | 150 | expect(reviewResult.content[0].text).toContain('successfully') 151 | expect(reviewResult.isError).toBe(false) 152 | 153 | expect(handleReviewChanges).toHaveBeenCalledWith({ 154 | featureId, 155 | project_path: projectPath, 156 | }) 157 | }) 158 | }) 159 | -------------------------------------------------------------------------------- /tests/setupEnv.ts: -------------------------------------------------------------------------------- 1 | import dotenv from 'dotenv' 2 | import path from 'path' 3 | 4 | dotenv.config({ path: path.resolve(process.cwd(), '.env') }) 5 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ES2020", // Target modern Node.js versions 4 | "module": "CommonJS", // Use CommonJS modules 5 | "outDir": "./dist", // Output directory for compiled JavaScript 6 | "rootDir": "./src", // Source directory for TypeScript files 7 | "strict": true, // Enable strict type checking 8 | "esModuleInterop": true, // Allows default imports from CommonJS modules 9 | "skipLibCheck": true, // Skip type checking of declaration files 10 | "forceConsistentCasingInFileNames": true, // Ensure consistent file casing 11 | "moduleResolution": "node", // Use Node.js module resolution 12 | "resolveJsonModule": true, // Allow importing JSON files 13 | "sourceMap": true, // Generate source maps for debugging 14 | "incremental": true // Enable incremental compilation 15 | }, 16 | "include": ["src/**/*"], // Include all files in the src directory 17 | "exclude": ["node_modules", "**/*.spec.ts"] // Exclude node_modules and test files 18 | } 19 | --------------------------------------------------------------------------------