├── .vscode └── extensions.json ├── output.json ├── .changeset ├── red-suns-wash.md ├── beige-doodles-type.md ├── red-oranges-attend.md ├── pre.json └── config.json ├── .prettierignore ├── .rooignore ├── .clineignore ├── .cursorignore ├── .prettierrc ├── tests ├── fixture │ └── test-tasks.json ├── fixtures │ ├── .taskmasterconfig │ ├── sample-claude-response.js │ └── sample-tasks.js ├── setup.js ├── README.md ├── e2e │ └── test_llm_analysis.sh ├── unit │ ├── task-finder.test.js │ └── parse-prd.test.js └── integration │ ├── roo-files-inclusion.test.js │ └── roo-init-functionality.test.js ├── scripts ├── modules │ ├── index.js │ ├── task-manager │ │ ├── task-exists.js │ │ └── is-task-dependent.js │ └── task-manager.js ├── dev.js └── example_prd.txt ├── assets ├── gitignore ├── env.example ├── .taskmasterconfig └── example_prd.txt ├── tasks ├── task_068.txt ├── task_074.txt ├── task_003.txt ├── task_002.txt ├── task_001.txt ├── task_070.txt ├── task_071.txt ├── task_065.txt ├── task_056.txt ├── task_030.txt ├── task_054.txt ├── task_048.txt ├── task_029.txt ├── task_043.txt ├── task_073.txt ├── task_045.txt ├── task_033.txt ├── task_037.txt ├── task_036.txt ├── task_013.txt ├── task_058.txt ├── task_038.txt ├── task_055.txt ├── task_046.txt ├── task_035.txt ├── task_072.txt └── task_044.txt ├── docs ├── licensing.md ├── README.md ├── examples.md └── contributor-docs │ └── testing-roo-integration.md ├── .cursor ├── mcp.json └── rules │ ├── cursor_rules.mdc │ ├── glossary.mdc │ └── self_improve.mdc ├── .npmignore ├── .env.example ├── mcp-server ├── server.js └── src │ ├── core │ ├── direct-functions │ │ ├── cache-stats.js │ │ ├── validate-dependencies.js │ │ └── fix-dependencies.js │ └── utils │ │ └── env-utils.js │ ├── tools │ ├── get-operation-status.js │ ├── initialize-project.js │ ├── fix-dependencies.js │ ├── next-task.js │ ├── complexity-report.js │ ├── validate-dependencies.js │ ├── remove-dependency.js │ ├── remove-task.js │ ├── generate.js │ ├── models.js │ ├── 
get-tasks.js │ ├── clear-subtasks.js │ ├── add-dependency.js │ ├── set-task-status.js │ ├── remove-subtask.js │ ├── expand-task.js │ ├── update-subtask.js │ ├── update-task.js │ └── update.js │ └── index.js ├── .github ├── ISSUE_TEMPLATE │ ├── feedback.md │ ├── bug_report.md │ └── enhancements---feature-requests.md └── workflows │ ├── release.yml │ ├── pre-release.yml │ └── ci.yml ├── .taskmasterconfig ├── .roo ├── mcp.json └── rules │ ├── changeset.md │ ├── architecture.md │ ├── package-manager.md │ ├── glossary.md │ ├── commands.md │ └── self_improve.md ├── .gitignore ├── .clinerules ├── changeset.md ├── architecture.md ├── package-manager.md ├── glossary.md ├── commands.md └── self_improve.md ├── test-version-check.js ├── jest.config.js ├── test-config-manager.js ├── LICENSE ├── mcp-test.js └── test-version-check-full.js /.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | "recommendations": ["esbenp.prettier-vscode"] 3 | } 4 | -------------------------------------------------------------------------------- /output.json: -------------------------------------------------------------------------------- 1 | { 2 | "key": "value", 3 | "nested": { 4 | "prop": true 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /.changeset/red-suns-wash.md: -------------------------------------------------------------------------------- 1 | --- 2 | 'task-master-ai': patch 3 | --- 4 | 5 | Add src directory to exports 6 | -------------------------------------------------------------------------------- /.changeset/beige-doodles-type.md: -------------------------------------------------------------------------------- 1 | --- 2 | 'task-master-ai': patch 3 | --- 4 | 5 | Resolve all issues related to MCP 6 | -------------------------------------------------------------------------------- /.prettierignore: 
-------------------------------------------------------------------------------- 1 | # Ignore artifacts: 2 | build 3 | coverage 4 | .changeset 5 | tasks 6 | package-lock.json 7 | tests/fixture/*.json 8 | -------------------------------------------------------------------------------- /.changeset/red-oranges-attend.md: -------------------------------------------------------------------------------- 1 | --- 2 | 'task-master-ai': patch 3 | --- 4 | 5 | Fix ERR_MODULE_NOT_FOUND when trying to run MCP Server 6 | -------------------------------------------------------------------------------- /.rooignore: -------------------------------------------------------------------------------- 1 | package-lock.json 2 | 3 | # Add directories or file patterns to ignore during indexing (e.g. foo/ or *.csv) 4 | 5 | node_modules/ -------------------------------------------------------------------------------- /.clineignore: -------------------------------------------------------------------------------- 1 | package-lock.json 2 | 3 | # Add directories or file patterns to ignore during indexing (e.g. foo/ or *.csv) 4 | 5 | node_modules/ -------------------------------------------------------------------------------- /.cursorignore: -------------------------------------------------------------------------------- 1 | package-lock.json 2 | 3 | # Add directories or file patterns to ignore during indexing (e.g. 
foo/ or *.csv) 4 | 5 | node_modules/ 6 | 7 | -------------------------------------------------------------------------------- /.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "printWidth": 80, 3 | "tabWidth": 2, 4 | "useTabs": true, 5 | "semi": true, 6 | "singleQuote": true, 7 | "trailingComma": "none", 8 | "bracketSpacing": true, 9 | "arrowParens": "always", 10 | "endOfLine": "lf" 11 | } 12 | -------------------------------------------------------------------------------- /tests/fixture/test-tasks.json: -------------------------------------------------------------------------------- 1 | { 2 | "tasks": [ 3 | { 4 | "id": 1, 5 | "dependencies": [], 6 | "subtasks": [ 7 | { 8 | "id": 1, 9 | "dependencies": [] 10 | } 11 | ] 12 | } 13 | ] 14 | } -------------------------------------------------------------------------------- /.changeset/pre.json: -------------------------------------------------------------------------------- 1 | { 2 | "mode": "pre", 3 | "tag": "rc", 4 | "initialVersions": { 5 | "task-master-ai": "0.13.1" 6 | }, 7 | "changesets": [ 8 | "beige-doodles-type", 9 | "red-oranges-attend", 10 | "red-suns-wash" 11 | ] 12 | } 13 | -------------------------------------------------------------------------------- /scripts/modules/index.js: -------------------------------------------------------------------------------- 1 | /** 2 | * index.js 3 | * Main export point for all Task Master CLI modules 4 | */ 5 | 6 | // Export all modules 7 | export * from './utils.js'; 8 | export * from './ui.js'; 9 | export * from './task-manager.js'; 10 | export * from './commands.js'; 11 | -------------------------------------------------------------------------------- /tests/fixtures/.taskmasterconfig: -------------------------------------------------------------------------------- 1 | { 2 | "models": { 3 | "main": { 4 | "provider": "openai", 5 | "modelId": "gpt-4o" 6 | }, 7 | "research": { 8 | "provider": "perplexity", 9 | 
"modelId": "sonar-pro" 10 | }, 11 | "fallback": { 12 | "provider": "anthropic", 13 | "modelId": "claude-3-haiku-20240307" 14 | } 15 | } 16 | } -------------------------------------------------------------------------------- /.changeset/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://unpkg.com/@changesets/config@3.1.1/schema.json", 3 | "changelog": [ 4 | "@changesets/changelog-github", 5 | { "repo": "LousyBook94/pollinations-task-master" } 6 | ], 7 | "commit": false, 8 | "fixed": [], 9 | "linked": [], 10 | "access": "public", 11 | "baseBranch": "main", 12 | "updateInternalDependencies": "patch", 13 | "ignore": [] 14 | } 15 | -------------------------------------------------------------------------------- /assets/gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | dev-debug.log 8 | 9 | # Dependency directories 10 | node_modules/ 11 | 12 | # Environment variables 13 | .env 14 | 15 | # Editor directories and files 16 | .idea 17 | .vscode 18 | *.suo 19 | *.ntvs* 20 | *.njsproj 21 | *.sln 22 | *.sw? 23 | 24 | # OS specific 25 | .DS_Store 26 | 27 | # Task files 28 | tasks.json 29 | tasks/ -------------------------------------------------------------------------------- /tasks/task_068.txt: -------------------------------------------------------------------------------- 1 | # Task ID: 68 2 | # Title: Ability to create tasks without parsing PRD 3 | # Status: pending 4 | # Dependencies: None 5 | # Priority: medium 6 | # Description: Which just means that when we create a task, if there's no tasks.json, we should create it calling the same function that is done by parse-prd. this lets taskmaster be used without a prd as a starding point. 
7 | # Details: 8 | 9 | 10 | # Test Strategy: 11 | 12 | -------------------------------------------------------------------------------- /tasks/task_074.txt: -------------------------------------------------------------------------------- 1 | # Task ID: 74 2 | # Title: PR Review: better-model-management 3 | # Status: done 4 | # Dependencies: None 5 | # Priority: medium 6 | # Description: will add subtasks 7 | # Details: 8 | 9 | 10 | # Test Strategy: 11 | 12 | 13 | # Subtasks: 14 | ## 1. pull out logWrapper into utils [done] 15 | ### Dependencies: None 16 | ### Description: its being used a lot across direct functions and repeated right now 17 | ### Details: 18 | 19 | 20 | -------------------------------------------------------------------------------- /docs/licensing.md: -------------------------------------------------------------------------------- 1 | # Licensing 2 | 3 | Task Master is licensed under the MIT License with Commons Clause. This means you can: 4 | 5 | ## ✅ Allowed: 6 | 7 | - Use Task Master for any purpose (personal, commercial, academic) 8 | - Modify the code 9 | - Distribute copies 10 | - Create and sell products built using Task Master 11 | 12 | ## ❌ Not Allowed: 13 | 14 | - Sell Task Master itself 15 | - Offer Task Master as a hosted service 16 | - Create competing products based on Task Master 17 | 18 | See the [LICENSE](../LICENSE) file for the complete license text. 
19 | -------------------------------------------------------------------------------- /.cursor/mcp.json: -------------------------------------------------------------------------------- 1 | { 2 | "mcpServers": { 3 | "task-master-ai": { 4 | "command": "node", 5 | "args": ["./mcp-server/server.js"], 6 | "env": { 7 | "ANTHROPIC_API_KEY": "ANTHROPIC_API_KEY_HERE", 8 | "PERPLEXITY_API_KEY": "PERPLEXITY_API_KEY_HERE", 9 | "OPENAI_API_KEY": "OPENAI_API_KEY_HERE", 10 | "GOOGLE_API_KEY": "GOOGLE_API_KEY_HERE", 11 | "XAI_API_KEY": "XAI_API_KEY_HERE", 12 | "OPENROUTER_API_KEY": "OPENROUTER_API_KEY_HERE", 13 | "MISTRAL_API_KEY": "MISTRAL_API_KEY_HERE", 14 | "AZURE_OPENAI_API_KEY": "AZURE_OPENAI_API_KEY_HERE", 15 | "OLLAMA_API_KEY": "OLLAMA_API_KEY_HERE" 16 | } 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | # Development files 2 | .git 3 | .github 4 | .vscode 5 | .idea 6 | .DS_Store 7 | 8 | # Logs 9 | logs 10 | *.log 11 | npm-debug.log* 12 | dev-debug.log 13 | init-debug.log 14 | 15 | # Source files not needed in the package 16 | src 17 | test 18 | tests 19 | docs 20 | examples 21 | .editorconfig 22 | .eslintrc 23 | .prettierrc 24 | .travis.yml 25 | .gitlab-ci.yml 26 | tsconfig.json 27 | jest.config.js 28 | 29 | # Original project files 30 | tasks.json 31 | tasks/ 32 | prd.txt 33 | scripts/prd.txt 34 | .env 35 | 36 | # Temporary files 37 | .tmp 38 | .temp 39 | *.swp 40 | *.swo 41 | 42 | # Node modules 43 | node_modules/ 44 | 45 | # Debug files 46 | *.debug -------------------------------------------------------------------------------- /scripts/dev.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | /** 4 | * dev.js 5 | * Task Master CLI - AI-driven development task management 6 | * 7 | * This is the refactored entry point that uses the modular architecture. 
8 | * It imports functionality from the modules directory and provides a CLI. 9 | */ 10 | 11 | import dotenv from 'dotenv'; 12 | dotenv.config(); 13 | 14 | // Add at the very beginning of the file 15 | if (process.env.DEBUG === '1') { 16 | console.error('DEBUG - dev.js received args:', process.argv.slice(2)); 17 | } 18 | 19 | import { runCLI } from './modules/commands.js'; 20 | 21 | // Run the CLI with the process arguments 22 | runCLI(process.argv); 23 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | # API Keys (Required for using in any role i.e. main/research/fallback -- see `task-master models`) 2 | ANTHROPIC_API_KEY=YOUR_ANTHROPIC_KEY_HERE 3 | PERPLEXITY_API_KEY=YOUR_PERPLEXITY_KEY_HERE 4 | OPENAI_API_KEY=YOUR_OPENAI_KEY_HERE 5 | GOOGLE_API_KEY=YOUR_GOOGLE_KEY_HERE 6 | MISTRAL_API_KEY=YOUR_MISTRAL_KEY_HERE 7 | OPENROUTER_API_KEY=YOUR_OPENROUTER_KEY_HERE 8 | XAI_API_KEY=YOUR_XAI_KEY_HERE 9 | AZURE_OPENAI_API_KEY=YOUR_AZURE_KEY_HERE 10 | 11 | # Custom Provider (for src/ai-providers/custom.js) 12 | # Used for 'custom' provider in .taskmasterconfig. Set your OpenAI-compatible endpoint and secret. 13 | CUSTOM_BASE=https://your-custom-endpoint.com/openai 14 | CUSTOM_API_KEY=YOUR_CUSTOM_API_KEY_HERE 15 | -------------------------------------------------------------------------------- /assets/env.example: -------------------------------------------------------------------------------- 1 | # API Keys (Required to enable respective provider) 2 | ANTHROPIC_API_KEY=your_anthropic_api_key_here # Required: Format: sk-ant-api03-... 3 | PERPLEXITY_API_KEY=your_perplexity_api_key_here # Optional: Format: pplx-... 4 | OPENAI_API_KEY=your_openai_api_key_here # Optional, for OpenAI/OpenRouter models. Format: sk-proj-... 5 | GOOGLE_API_KEY=your_google_api_key_here # Optional, for Google Gemini models. 
6 | MISTRAL_API_KEY=your_mistral_key_here # Optional, for Mistral AI models. 7 | XAI_API_KEY=YOUR_XAI_KEY_HERE # Optional, for xAI AI models. 8 | AZURE_OPENAI_API_KEY=your_azure_key_here # Optional, for Azure OpenAI models (requires endpoint in .taskmasterconfig). -------------------------------------------------------------------------------- /tasks/task_003.txt: -------------------------------------------------------------------------------- 1 | # Task ID: 3 2 | # Title: Implement Basic Task Operations 3 | # Status: done 4 | # Dependencies: 1 5 | # Priority: high 6 | # Description: Create core functionality for managing tasks including listing, creating, updating, and deleting tasks. 7 | # Details: 8 | Implement the following task operations: 9 | - List tasks with filtering options 10 | - Create new tasks with required fields 11 | - Update existing task properties 12 | - Delete tasks 13 | - Change task status (pending/done/deferred) 14 | - Handle dependencies between tasks 15 | - Manage task priorities 16 | 17 | # Test Strategy: 18 | Test each operation with valid and invalid inputs. Verify that dependencies are properly tracked and that status changes are reflected correctly in the tasks.json file. 19 | -------------------------------------------------------------------------------- /tasks/task_002.txt: -------------------------------------------------------------------------------- 1 | # Task ID: 2 2 | # Title: Develop Command Line Interface Foundation 3 | # Status: done 4 | # Dependencies: 1 5 | # Priority: high 6 | # Description: Create the basic CLI structure using Commander.js with command parsing and help documentation. 
7 | # Details: 8 | Implement the CLI foundation including: 9 | - Set up Commander.js for command parsing 10 | - Create help documentation for all commands 11 | - Implement colorized console output for better readability 12 | - Add logging system with configurable levels 13 | - Handle global options (--help, --version, --file, --quiet, --debug, --json) 14 | 15 | # Test Strategy: 16 | Test each command with various parameters to ensure proper parsing. Verify help documentation is comprehensive and accurate. Test logging at different verbosity levels. 17 | -------------------------------------------------------------------------------- /assets/.taskmasterconfig: -------------------------------------------------------------------------------- 1 | { 2 | "models": { 3 | "main": { 4 | "provider": "anthropic", 5 | "modelId": "claude-3-7-sonnet-20250219", 6 | "maxTokens": 120000, 7 | "temperature": 0.2 8 | }, 9 | "research": { 10 | "provider": "perplexity", 11 | "modelId": "sonar-pro", 12 | "maxTokens": 8700, 13 | "temperature": 0.1 14 | }, 15 | "fallback": { 16 | "provider": "anthropic", 17 | "modelId": "claude-3.5-sonnet-20240620", 18 | "maxTokens": 120000, 19 | "temperature": 0.1 20 | } 21 | }, 22 | "global": { 23 | "logLevel": "info", 24 | "debug": false, 25 | "defaultSubtasks": 5, 26 | "defaultPriority": "medium", 27 | "projectName": "Taskmaster", 28 | "ollamaBaseUrl": "http://localhost:11434/api", 29 | "azureOpenaiBaseUrl": "https://your-endpoint.openai.azure.com/" 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /mcp-server/server.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | import TaskMasterMCPServer from './src/index.js'; 4 | import dotenv from 'dotenv'; 5 | import logger from './src/logger.js'; 6 | 7 | // Load environment variables 8 | dotenv.config(); 9 | 10 | /** 11 | * Start the MCP server 12 | */ 13 | async function startServer() { 14 | 
const server = new TaskMasterMCPServer(); 15 | 16 | // Handle graceful shutdown 17 | process.on('SIGINT', async () => { 18 | await server.stop(); 19 | process.exit(0); 20 | }); 21 | 22 | process.on('SIGTERM', async () => { 23 | await server.stop(); 24 | process.exit(0); 25 | }); 26 | 27 | try { 28 | await server.start(); 29 | } catch (error) { 30 | logger.error(`Failed to start MCP server: ${error.message}`); 31 | process.exit(1); 32 | } 33 | } 34 | 35 | // Start the server 36 | startServer(); 37 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feedback.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feedback 3 | about: Give us specific feedback on the product/approach/tech 4 | title: 'feedback: ' 5 | labels: feedback 6 | assignees: '' 7 | --- 8 | 9 | ### Feedback Summary 10 | 11 | Provide a clear summary or direct quote from user feedback. 12 | 13 | ### User Context 14 | 15 | Explain the user's context or scenario in which this feedback was provided. 16 | 17 | ### User Impact 18 | 19 | Describe how this feedback affects the user experience or workflow. 20 | 21 | ### Suggestions 22 | 23 | Provide any initial thoughts, potential solutions, or improvements based on the feedback. 24 | 25 | ### Relevant Screenshots or Examples 26 | 27 | Attach screenshots, logs, or examples that illustrate the feedback. 28 | 29 | ### Additional Notes 30 | 31 | Any additional context or related information. 
32 | -------------------------------------------------------------------------------- /.taskmasterconfig: -------------------------------------------------------------------------------- 1 | { 2 | "models": { 3 | "main": { 4 | "provider": "pollinations", 5 | "modelId": "openai-large", 6 | "maxTokens": 4096, 7 | "temperature": 0.2 8 | }, 9 | "research": { 10 | "provider": "pollinations", 11 | "modelId": "searchgpt", 12 | "maxTokens": 4096, 13 | "temperature": 0.1 14 | }, 15 | "fallback": { 16 | "provider": "pollinations", 17 | "modelId": "openai-large", 18 | "maxTokens": 4096, 19 | "temperature": 0.2 20 | } 21 | }, 22 | "global": { 23 | "logLevel": "info", 24 | "debug": false, 25 | "defaultSubtasks": 5, 26 | "defaultPriority": "medium", 27 | "projectName": "Taskmaster", 28 | "ollamaBaseUrl": "http://localhost:11434/api", 29 | "azureOpenaiBaseUrl": "https://your-endpoint.openai.azure.com/" 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /tasks/task_001.txt: -------------------------------------------------------------------------------- 1 | # Task ID: 1 2 | # Title: Implement Task Data Structure 3 | # Status: done 4 | # Dependencies: None 5 | # Priority: high 6 | # Description: Design and implement the core tasks.json structure that will serve as the single source of truth for the system. 7 | # Details: 8 | Create the foundational data structure including: 9 | - JSON schema for tasks.json 10 | - Task model with all required fields (id, title, description, status, dependencies, priority, details, testStrategy, subtasks) 11 | - Validation functions for the task model 12 | - Basic file system operations for reading/writing tasks.json 13 | - Error handling for file operations 14 | 15 | # Test Strategy: 16 | Verify that the tasks.json structure can be created, read, and validated. Test with sample data to ensure all fields are properly handled and that validation correctly identifies invalid structures. 
17 | -------------------------------------------------------------------------------- /mcp-server/src/core/direct-functions/cache-stats.js: -------------------------------------------------------------------------------- 1 | /** 2 | * cache-stats.js 3 | * Direct function implementation for retrieving cache statistics 4 | */ 5 | 6 | import { contextManager } from '../context-manager.js'; 7 | 8 | /** 9 | * Get cache statistics for monitoring 10 | * @param {Object} args - Command arguments 11 | * @param {Object} log - Logger object 12 | * @returns {Object} - Cache statistics 13 | */ 14 | export async function getCacheStatsDirect(args, log) { 15 | try { 16 | log.info('Retrieving cache statistics'); 17 | const stats = contextManager.getStats(); 18 | return { 19 | success: true, 20 | data: stats 21 | }; 22 | } catch (error) { 23 | log.error(`Error getting cache stats: ${error.message}`); 24 | return { 25 | success: false, 26 | error: { 27 | code: 'CACHE_STATS_ERROR', 28 | message: error.message || 'Unknown error occurred' 29 | } 30 | }; 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: 'bug: ' 5 | labels: bug 6 | assignees: '' 7 | --- 8 | 9 | ### Description 10 | 11 | Detailed description of the problem, including steps to reproduce the issue. 12 | 13 | ### Steps to Reproduce 14 | 15 | 1. Step-by-step instructions to reproduce the issue 16 | 2. Include command examples or UI interactions 17 | 18 | ### Expected Behavior 19 | 20 | Describe clearly what the expected outcome or behavior should be. 21 | 22 | ### Actual Behavior 23 | 24 | Describe clearly what the actual outcome or behavior is. 25 | 26 | ### Screenshots or Logs 27 | 28 | Provide screenshots, logs, or error messages if applicable. 
29 | 30 | ### Environment 31 | 32 | - Task Master version: 33 | - Node.js version: 34 | - Operating system: 35 | - IDE (if applicable): 36 | 37 | ### Additional Context 38 | 39 | Any additional information or context that might help diagnose the issue. 40 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Task Master Documentation 2 | 3 | Welcome to the Task Master documentation. Use the links below to navigate to the information you need: 4 | 5 | ## Getting Started 6 | 7 | - [Configuration Guide](configuration.md) - Set up environment variables and customize Task Master 8 | - [Tutorial](tutorial.md) - Step-by-step guide to getting started with Task Master 9 | 10 | ## Reference 11 | 12 | - [Command Reference](command-reference.md) - Complete list of all available commands 13 | - [Task Structure](task-structure.md) - Understanding the task format and features 14 | 15 | ## Examples & Licensing 16 | 17 | - [Example Interactions](examples.md) - Common Cursor AI interaction examples 18 | - [Licensing Information](licensing.md) - Detailed information about the license 19 | 20 | ## Need More Help? 21 | 22 | If you can't find what you're looking for in these docs, please check the [main README](../README.md) or visit our [GitHub repository](https://github.com/LousyBook94/pollinations-task-master). 
23 | -------------------------------------------------------------------------------- /scripts/modules/task-manager/task-exists.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Checks if a task with the given ID exists 3 | * @param {Array} tasks - Array of tasks to search 4 | * @param {string|number} taskId - ID of task or subtask to check 5 | * @returns {boolean} Whether the task exists 6 | */ 7 | function taskExists(tasks, taskId) { 8 | // Handle subtask IDs (e.g., "1.2") 9 | if (typeof taskId === 'string' && taskId.includes('.')) { 10 | const [parentIdStr, subtaskIdStr] = taskId.split('.'); 11 | const parentId = parseInt(parentIdStr, 10); 12 | const subtaskId = parseInt(subtaskIdStr, 10); 13 | 14 | // Find the parent task 15 | const parentTask = tasks.find((t) => t.id === parentId); 16 | 17 | // If parent exists, check if subtask exists 18 | return ( 19 | parentTask && 20 | parentTask.subtasks && 21 | parentTask.subtasks.some((st) => st.id === subtaskId) 22 | ); 23 | } 24 | 25 | // Handle regular task IDs 26 | const id = parseInt(taskId, 10); 27 | return tasks.some((t) => t.id === id); 28 | } 29 | 30 | export default taskExists; 31 | -------------------------------------------------------------------------------- /.roo/mcp.json: -------------------------------------------------------------------------------- 1 | { 2 | "mcpServers": { 3 | "taskmaster-ai": { 4 | "command": "node", 5 | "args": [ 6 | "./mcp-server/server.js" 7 | ], 8 | "env": { 9 | "MAX_TOKENS": "64000", 10 | "TEMPERATURE": "0.2", 11 | "DEFAULT_SUBTASKS": "5", 12 | "DEFAULT_PRIORITY": "medium" 13 | }, 14 | "alwaysAllow": [ 15 | "get_tasks", 16 | "set_task_status", 17 | "parse_prd", 18 | "update", 19 | "update_task", 20 | "update_subtask", 21 | "generate", 22 | "get_task", 23 | "next_task", 24 | "expand_task", 25 | "add_task", 26 | "add_subtask", 27 | "remove_subtask", 28 | "analyze_project_complexity", 29 | "clear_subtasks", 30 | "expand_all", 31 | 
"remove_dependency", 32 | "validate_dependencies", 33 | "fix_dependencies", 34 | "complexity_report", 35 | "add_dependency", 36 | "remove_task", 37 | "initialize_project" 38 | ], 39 | "disabled": true 40 | } 41 | } 42 | } -------------------------------------------------------------------------------- /tests/setup.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Jest setup file 3 | * 4 | * This file is run before each test suite to set up the test environment. 5 | */ 6 | 7 | // Mock environment variables 8 | process.env.MODEL = 'sonar-pro'; 9 | process.env.MAX_TOKENS = '64000'; 10 | process.env.TEMPERATURE = '0.2'; 11 | process.env.DEBUG = 'false'; 12 | process.env.LOG_LEVEL = 'error'; // Set to error to reduce noise in tests 13 | process.env.DEFAULT_SUBTASKS = '5'; 14 | process.env.DEFAULT_PRIORITY = 'medium'; 15 | process.env.PROJECT_NAME = 'Test Project'; 16 | process.env.PROJECT_VERSION = '1.0.0'; 17 | // Ensure tests don't make real API calls by setting mock API keys 18 | process.env.ANTHROPIC_API_KEY = 'test-mock-api-key-for-tests'; 19 | process.env.PERPLEXITY_API_KEY = 'test-mock-perplexity-key-for-tests'; 20 | 21 | // Add global test helpers if needed 22 | global.wait = (ms) => new Promise((resolve) => setTimeout(resolve, ms)); 23 | 24 | // If needed, silence console during tests 25 | if (process.env.SILENCE_CONSOLE === 'true') { 26 | global.console = { 27 | ...console, 28 | log: () => {}, 29 | info: () => {}, 30 | warn: () => {}, 31 | error: () => {} 32 | }; 33 | } 34 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Dependency directories 2 | node_modules/ 3 | jspm_packages/ 4 | 5 | # Environment variables 6 | .env 7 | .env.local 8 | .env.development.local 9 | .env.test.local 10 | .env.production.local 11 | 12 | # Cursor configuration -- might have ENV variables. 
Included by default 13 | # .cursor/mcp.json 14 | 15 | # Logs 16 | logs 17 | *.log 18 | npm-debug.log* 19 | yarn-debug.log* 20 | yarn-error.log* 21 | lerna-debug.log* 22 | tests/e2e/_runs/ 23 | tests/e2e/log/ 24 | 25 | # Coverage directory used by tools like istanbul 26 | coverage 27 | *.lcov 28 | 29 | # Optional npm cache directory 30 | .npm 31 | 32 | # Optional eslint cache 33 | .eslintcache 34 | 35 | # Optional REPL history 36 | .node_repl_history 37 | 38 | # Output of 'npm pack' 39 | *.tgz 40 | 41 | # Yarn Integrity file 42 | .yarn-integrity 43 | 44 | # dotenv environment variables file 45 | .env.test 46 | 47 | # parcel-bundler cache 48 | .cache 49 | 50 | # Next.js build output 51 | .next 52 | 53 | # Nuxt.js build / generate output 54 | .nuxt 55 | dist 56 | 57 | # Mac files 58 | .DS_Store 59 | 60 | # Debug files 61 | *.debug 62 | init-debug.log 63 | dev-debug.log 64 | 65 | # NPMRC 66 | .npmrc 67 | -------------------------------------------------------------------------------- /.roo/rules/changeset.md: -------------------------------------------------------------------------------- 1 | # Changesets Workflow Guidelines 2 | 3 | Use Changesets to track meaningful, user- or workflow-impacting changes. Add a changeset file if you: 4 | 5 | - Add features, bug fixes, breaking changes, major refactors, public documentation updates, dependency or build/tooling changes. 6 | 7 | Do not add a changeset for: 8 | - Internal docs only 9 | - Trivial code cleanup/comments/typos 10 | - Test refactoring only 11 | - Local/personal config 12 | 13 | **Workflow:** 14 | 1. Stage your work: `git add .` 15 | 2. Run: `npm run changeset` or `npx changeset add` 16 | 3. Select affected package(s), bump type (Patch, Minor, Major), and write a changelog summary (concise, imperative, user-facing, not a commit message). 17 | 4. Stage and commit: `git add .changeset/*.md`, then `git commit -m "feat(...): ..."` 18 | 5. `.changeset/*.md` drives automatic changelogs and versioning on release. 
19 | 20 | **Best practices:** 21 | - Provide a concise summary for users (changelog) AND a detailed Git commit message for maintainers. 22 | - Only add a changeset for changes that concern users/contributors following public docs. 23 | 24 | **Release:** 25 | On release, changesets files are processed to update `package.json` and `CHANGELOG.md`, then deleted. 26 | -------------------------------------------------------------------------------- /.clinerules/changeset.md: -------------------------------------------------------------------------------- 1 | # Changesets Workflow Guidelines 2 | 3 | Use Changesets to track meaningful, user- or workflow-impacting changes. Add a changeset file if you: 4 | 5 | - Add features, bug fixes, breaking changes, major refactors, public documentation updates, dependency or build/tooling changes. 6 | 7 | Do not add a changeset for: 8 | - Internal docs only 9 | - Trivial code cleanup/comments/typos 10 | - Test refactoring only 11 | - Local/personal config 12 | 13 | **Workflow:** 14 | 1. Stage your work: `git add .` 15 | 2. Run: `npm run changeset` or `npx changeset add` 16 | 3. Select affected package(s), bump type (Patch, Minor, Major), and write a changelog summary (concise, imperative, user-facing, not a commit message). 17 | 4. Stage and commit: `git add .changeset/*.md`, then `git commit -m "feat(...): ..."` 18 | 5. `.changeset/*.md` drives automatic changelogs and versioning on release. 19 | 20 | **Best practices:** 21 | - Provide a concise summary for users (changelog) AND a detailed Git commit message for maintainers. 22 | - Only add a changeset for changes that concern users/contributors following public docs. 23 | 24 | **Release:** 25 | On release, changesets files are processed to update `package.json` and `CHANGELOG.md`, then deleted. 
26 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | on: 3 | push: 4 | branches: 5 | - main 6 | 7 | concurrency: ${{ github.workflow }}-${{ github.ref }} 8 | 9 | jobs: 10 | release: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v4 14 | with: 15 | fetch-depth: 0 16 | 17 | - uses: actions/setup-node@v4 18 | with: 19 | node-version: 20 20 | cache: 'npm' 21 | 22 | - name: Cache node_modules 23 | uses: actions/cache@v4 24 | with: 25 | path: | 26 | node_modules 27 | */*/node_modules 28 | key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }} 29 | restore-keys: | 30 | ${{ runner.os }}-node- 31 | 32 | - name: Install Dependencies 33 | run: npm ci 34 | timeout-minutes: 2 35 | 36 | - name: Exit pre-release mode (safety check) 37 | run: npx changeset pre exit || true 38 | 39 | - name: Create Release Pull Request or Publish to npm 40 | uses: changesets/action@v1 41 | with: 42 | publish: npm run release 43 | env: 44 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 45 | NPM_TOKEN: ${{ secrets.NPM_TOKEN }} 46 | -------------------------------------------------------------------------------- /test-version-check.js: -------------------------------------------------------------------------------- 1 | import { 2 | displayUpgradeNotification, 3 | compareVersions 4 | } from './scripts/modules/commands.js'; 5 | 6 | // Simulate different version scenarios 7 | console.log('=== Simulating version check ===\n'); 8 | 9 | // 1. Current version is older than latest (should show update notice) 10 | console.log('Scenario 1: Current version older than latest'); 11 | displayUpgradeNotification('0.9.30', '1.0.0'); 12 | 13 | // 2. 
/**
 * Check if a task is dependent on another task (directly or indirectly).
 * Used to prevent circular dependencies when adding new dependency links.
 *
 * @param {Array} allTasks - Array of all tasks to search when resolving dependency IDs.
 * @param {Object} task - The task (or subtask) to check.
 * @param {number} targetTaskId - The task ID to check dependency against.
 * @param {Set<number>} [visited] - Dependency IDs already explored during this
 *   search. Guards against unbounded recursion when the existing data already
 *   contains a dependency cycle. Callers normally omit this argument.
 * @returns {boolean} Whether the task depends on the target task.
 */
function isTaskDependentOn(allTasks, task, targetTaskId, visited = new Set()) {
	// If the task is a subtask, check if its parent is the target
	if (task.parentTaskId === targetTaskId) {
		return true;
	}

	// Check direct dependencies
	if (task.dependencies && task.dependencies.includes(targetTaskId)) {
		return true;
	}

	// Check dependencies of dependencies (recursive).
	// Skip IDs we have already explored: reachability of targetTaskId from a
	// given task is a pure property of the graph, so a node that previously
	// failed to reach the target cannot succeed later — and without this
	// guard, pre-existing cycles in the data would recurse forever.
	if (task.dependencies) {
		for (const depId of task.dependencies) {
			if (visited.has(depId)) {
				continue;
			}
			visited.add(depId);
			const depTask = allTasks.find((t) => t.id === depId);
			if (
				depTask &&
				isTaskDependentOn(allTasks, depTask, targetTaskId, visited)
			) {
				return true;
			}
		}
	}

	// Check subtasks for dependencies
	if (task.subtasks) {
		for (const subtask of task.subtasks) {
			if (isTaskDependentOn(allTasks, subtask, targetTaskId, visited)) {
				return true;
			}
		}
	}

	return false;
}

export default isTaskDependentOn;
setupFilesAfterEnv: ['/tests/setup.js'] 52 | }; 53 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/enhancements---feature-requests.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Enhancements & feature requests 3 | about: Suggest an idea for this project 4 | title: 'feat: ' 5 | labels: enhancement 6 | assignees: '' 7 | --- 8 | 9 | > "Direct quote or clear summary of user request or need or user story." 10 | 11 | ### Motivation 12 | 13 | Detailed explanation of why this feature is important. Describe the problem it solves or the benefit it provides. 14 | 15 | ### Proposed Solution 16 | 17 | Clearly describe the proposed feature, including: 18 | 19 | - High-level overview of the feature 20 | - Relevant technologies or integrations 21 | - How it fits into the existing workflow or architecture 22 | 23 | ### High-Level Workflow 24 | 25 | 1. Step-by-step description of how the feature will be implemented 26 | 2. Include necessary intermediate milestones 27 | 28 | ### Key Elements 29 | 30 | - Bullet-point list of technical or UX/UI enhancements 31 | - Mention specific integrations or APIs 32 | - Highlight changes needed in existing data models or commands 33 | 34 | ### Example Workflow 35 | 36 | Provide a clear, concrete example demonstrating the feature: 37 | 38 | ```shell 39 | $ task-master [action] 40 | → Expected response/output 41 | ``` 42 | 43 | ### Implementation Considerations 44 | 45 | - Dependencies on external components or APIs 46 | - Backward compatibility requirements 47 | - Potential performance impacts or resource usage 48 | 49 | ### Out of Scope (Future Considerations) 50 | 51 | Clearly list any features or improvements not included but relevant for future iterations. 
52 | -------------------------------------------------------------------------------- /.clinerules/architecture.md: -------------------------------------------------------------------------------- 1 | # Task Master CLI Architecture (Shortened) 2 | 3 | Task Master CLI uses a modular structure for separation of concerns, maintainability, and testability. 4 | 5 | ## Main Modules 6 | 7 | - **commands.js:** Registers CLI commands (Commander.js), delegates to core modules, handles validation. 8 | - **task-manager.js:** Manages task data (CRUD, status, PRD parsing, expansion, complexity analysis). 9 | - **dependency-manager.js:** Manages dependencies, validation, fixes, cycle prevention. 10 | - **ui.js:** Handles all CLI/user output, formatting, colored output, spinners, tables, suggestions. 11 | - **ai-services.js:** (Conceptual) Handles AI calls for PRD parsing, subtask expansion, complexity analysis. 12 | - **utils.js:** Provides config, logging, file IO, utilities, silent mode control. 13 | - **mcp-server/**: Bridges CLI logic to external clients (e.g. Roo) via FastMCP. Implements silent mode and cache as needed. 14 | - **init.js:** Sets up new Task Master projects. 
15 | 16 | ## Execution/Data Flow 17 | 18 | - CLI commands → core modules (`task-manager.js`, `dependency-manager.js`, `init.js`) → UI for output 19 | - MCP server exposes core functions as tools/direct functions 20 | 21 | ## Conventions 22 | 23 | - Kebab-case filenames, camelCase direct functions, snake_case MCP tool names 24 | - Direct functions wrap core logic with silent mode and logging wrappers 25 | - Standardized error handling and file path composition 26 | 27 | ## Testing 28 | 29 | - Modules support unit, integration, E2E tests via dependency injection & logical separation 30 | - Mocking and test structure follow clear division per component -------------------------------------------------------------------------------- /.roo/rules/architecture.md: -------------------------------------------------------------------------------- 1 | # Task Master CLI Architecture (Shortened) 2 | 3 | Task Master CLI uses a modular structure for separation of concerns, maintainability, and testability. 4 | 5 | ## Main Modules 6 | 7 | - **commands.js:** Registers CLI commands (Commander.js), delegates to core modules, handles validation. 8 | - **task-manager.js:** Manages task data (CRUD, status, PRD parsing, expansion, complexity analysis). 9 | - **dependency-manager.js:** Manages dependencies, validation, fixes, cycle prevention. 10 | - **ui.js:** Handles all CLI/user output, formatting, colored output, spinners, tables, suggestions. 11 | - **ai-services.js:** (Conceptual) Handles AI calls for PRD parsing, subtask expansion, complexity analysis. 12 | - **utils.js:** Provides config, logging, file IO, utilities, silent mode control. 13 | - **mcp-server/**: Bridges CLI logic to external clients (e.g. Roo) via FastMCP. Implements silent mode and cache as needed. 14 | - **init.js:** Sets up new Task Master projects. 
15 | 16 | ## Execution/Data Flow 17 | 18 | - CLI commands → core modules (`task-manager.js`, `dependency-manager.js`, `init.js`) → UI for output 19 | - MCP server exposes core functions as tools/direct functions 20 | 21 | ## Conventions 22 | 23 | - Kebab-case filenames, camelCase direct functions, snake_case MCP tool names 24 | - Direct functions wrap core logic with silent mode and logging wrappers 25 | - Standardized error handling and file path composition 26 | 27 | ## Testing 28 | 29 | - Modules support unit, integration, E2E tests via dependency injection & logical separation 30 | - Mocking and test structure follow clear division per component -------------------------------------------------------------------------------- /mcp-server/src/core/utils/env-utils.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Temporarily sets environment variables from session.env, executes an action, 3 | * and restores the original environment variables. 4 | * @param {object | undefined} sessionEnv - The environment object from the session. 5 | * @param {Function} actionFn - An async function to execute with the temporary environment. 6 | * @returns {Promise} The result of the actionFn. 
/**
 * Temporarily sets environment variables from session.env, executes an action,
 * and restores the original environment variables afterwards.
 * @param {object | undefined} sessionEnv - The environment object from the session.
 * @param {Function} actionFn - An async function to execute with the temporary environment.
 * @returns {Promise<any>} The result of the actionFn.
 */
export async function withSessionEnv(sessionEnv, actionFn) {
	const hasOverrides =
		sessionEnv &&
		typeof sessionEnv === 'object' &&
		Object.keys(sessionEnv).length > 0;

	// Nothing to override: run the action against the current environment.
	if (!hasOverrides) {
		return await actionFn();
	}

	// Snapshot the previous value of every key we are about to touch.
	// A key that was absent before is remembered as `undefined`, which tells
	// the restore phase to delete it rather than assign it back.
	const snapshot = new Map();
	for (const [key, value] of Object.entries(sessionEnv)) {
		snapshot.set(key, process.env[key]);
		process.env[key] = value;
	}

	try {
		return await actionFn();
	} finally {
		// Put the environment back exactly as it was before the call,
		// even if actionFn threw.
		for (const [key, previous] of snapshot) {
			if (previous === undefined) {
				delete process.env[key];
			} else {
				process.env[key] = previous;
			}
		}
	}
}
Include: 13 | - User personas 14 | - Key user flows 15 | - UI/UX considerations] 16 | 17 | 18 | # Technical Architecture 19 | [Outline the technical implementation details: 20 | - System components 21 | - Data models 22 | - APIs and integrations 23 | - Infrastructure requirements] 24 | 25 | # Development Roadmap 26 | [Break down the development process into phases: 27 | - MVP requirements 28 | - Future enhancements 29 | - Do not think about timelines whatsoever -- all that matters is scope and detailing exactly what needs to be built in each phase so it can later be cut up into tasks] 30 | 31 | # Logical Dependency Chain 32 | [Define the logical order of development: 33 | - Which features need to be built first (foundation) 34 | - Getting as quickly as possible to something usable/visible front end that works 35 | - Properly pacing and scoping each feature so it is atomic but can also be built upon and improved as development approaches] 36 | 37 | # Risks and Mitigations 38 | [Identify potential risks and how they'll be addressed: 39 | - Technical challenges 40 | - Figuring out the MVP that we can build upon 41 | - Resource constraints] 42 | 43 | # Appendix 44 | [Include any additional information: 45 | - Research findings 46 | - Technical specifications] 47 | -------------------------------------------------------------------------------- /scripts/example_prd.txt: -------------------------------------------------------------------------------- 1 | 2 | # Overview 3 | [Provide a high-level overview of your product here. Explain what problem it solves, who it's for, and why it's valuable.] 4 | 5 | # Core Features 6 | [List and describe the main features of your product. For each feature, include: 7 | - What it does 8 | - Why it's important 9 | - How it works at a high level] 10 | 11 | # User Experience 12 | [Describe the user journey and experience.
Include: 13 | - User personas 14 | - Key user flows 15 | - UI/UX considerations] 16 | 17 | 18 | # Technical Architecture 19 | [Outline the technical implementation details: 20 | - System components 21 | - Data models 22 | - APIs and integrations 23 | - Infrastructure requirements] 24 | 25 | # Development Roadmap 26 | [Break down the development process into phases: 27 | - MVP requirements 28 | - Future enhancements 29 | - Do not think about timelines whatsoever -- all that matters is scope and detailing exactly what needs to be build in each phase so it can later be cut up into tasks] 30 | 31 | # Logical Dependency Chain 32 | [Define the logical order of development: 33 | - Which features need to be built first (foundation) 34 | - Getting as quickly as possible to something usable/visible front end that works 35 | - Properly pacing and scoping each feature so it is atomic but can also be built upon and improved as development approaches] 36 | 37 | # Risks and Mitigations 38 | [Identify potential risks and how they'll be addressed: 39 | - Technical challenges 40 | - Figuring out the MVP that we can build upon 41 | - Resource constraints] 42 | 43 | # Appendix 44 | [Include any additional information: 45 | - Research findings 46 | - Technical specifications] 47 | -------------------------------------------------------------------------------- /.github/workflows/pre-release.yml: -------------------------------------------------------------------------------- 1 | name: Pre-Release (RC) 2 | 3 | on: 4 | workflow_dispatch: # Allows manual triggering from GitHub UI/API 5 | push: 6 | branches: 7 | - 'next' 8 | 9 | concurrency: pre-release-${{ github.ref }} 10 | 11 | jobs: 12 | rc: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v4 16 | with: 17 | fetch-depth: 0 18 | 19 | - uses: actions/setup-node@v4 20 | with: 21 | node-version: 20 22 | cache: 'npm' 23 | 24 | - name: Cache node_modules 25 | uses: actions/cache@v4 26 | with: 27 | path: | 28 | 
node_modules 29 | */*/node_modules 30 | key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }} 31 | restore-keys: | 32 | ${{ runner.os }}-node- 33 | 34 | - name: Install dependencies 35 | run: npm ci 36 | timeout-minutes: 2 37 | 38 | - name: Enter RC mode 39 | run: | 40 | npx changeset pre exit || true 41 | npx changeset pre enter rc 42 | 43 | - name: Version RC packages 44 | run: | 45 | git config user.name "GitHub Actions" 46 | git config user.email "github-actions@example.com" 47 | npx changeset version 48 | git add . 49 | git commit -m "chore: rc version bump" || echo "No changes to commit" 50 | 51 | - name: Create Release Candidate Pull Request or Publish Release Candidate to npm 52 | uses: changesets/action@v1 53 | with: 54 | publish: npm run release 55 | env: 56 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 57 | NPM_TOKEN: ${{ secrets.NPM_TOKEN }} 58 | -------------------------------------------------------------------------------- /.cursor/rules/cursor_rules.mdc: -------------------------------------------------------------------------------- 1 | --- 2 | description: Guidelines for creating and maintaining Cursor rules to ensure consistency and effectiveness. 
3 | globs: .cursor/rules/*.mdc 4 | alwaysApply: true 5 | --- 6 | 7 | - **Required Rule Structure:** 8 | ```markdown 9 | --- 10 | description: Clear, one-line description of what the rule enforces 11 | globs: path/to/files/*.ext, other/path/**/* 12 | alwaysApply: boolean 13 | --- 14 | 15 | - **Main Points in Bold** 16 | - Sub-points with details 17 | - Examples and explanations 18 | ``` 19 | 20 | - **File References:** 21 | - Use `[filename](mdc:path/to/file)` ([filename](mdc:filename)) to reference files 22 | - Example: [prisma.mdc](mdc:.cursor/rules/prisma.mdc) for rule references 23 | - Example: [schema.prisma](mdc:prisma/schema.prisma) for code references 24 | 25 | - **Code Examples:** 26 | - Use language-specific code blocks 27 | ```typescript 28 | // ✅ DO: Show good examples 29 | const goodExample = true; 30 | 31 | // ❌ DON'T: Show anti-patterns 32 | const badExample = false; 33 | ``` 34 | 35 | - **Rule Content Guidelines:** 36 | - Start with high-level overview 37 | - Include specific, actionable requirements 38 | - Show examples of correct implementation 39 | - Reference existing code when possible 40 | - Keep rules DRY by referencing other rules 41 | 42 | - **Rule Maintenance:** 43 | - Update rules when new patterns emerge 44 | - Add examples from actual codebase 45 | - Remove outdated patterns 46 | - Cross-reference related rules 47 | 48 | - **Best Practices:** 49 | - Use bullet points for clarity 50 | - Keep descriptions concise 51 | - Include both DO and DON'T examples 52 | - Reference actual code over theoretical examples 53 | - Use consistent formatting across rules -------------------------------------------------------------------------------- /mcp-server/src/tools/get-operation-status.js: -------------------------------------------------------------------------------- 1 | // mcp-server/src/tools/get-operation-status.js 2 | import { z } from 'zod'; 3 | import { createErrorResponse, createContentResponse } from './utils.js'; // Assuming these utils 
/**
 * Register the get_operation_status tool on the MCP server.
 *
 * The registered tool looks up a background operation by ID through the
 * async operation manager and returns its status payload, or an error
 * response when the operation is unknown or the lookup itself throws.
 *
 * @param {FastMCP} server - FastMCP server instance.
 * @param {AsyncOperationManager} asyncManager - The async operation manager.
 */
export function registerGetOperationStatusTool(server, asyncManager) {
	const execute = async (args, { log }) => {
		try {
			const { operationId } = args;
			log.info(`Checking status for operation ID: ${operationId}`);

			const status = asyncManager.getStatus(operationId);

			// getStatus always returns an object; an unknown operation is
			// signalled via status === 'not_found' rather than null.
			if (status.status === 'not_found') {
				log.warn(`Operation ID not found: ${operationId}`);
				return createErrorResponse(
					status.error?.message || `Operation ID not found: ${operationId}`,
					status.error?.code || 'OPERATION_NOT_FOUND'
				);
			}

			log.info(`Status for ${operationId}: ${status.status}`);
			return createContentResponse(status);
		} catch (error) {
			log.error(`Error in get_operation_status tool: ${error.message}`, {
				stack: error.stack
			});
			return createErrorResponse(
				`Failed to get operation status: ${error.message}`,
				'GET_STATUS_ERROR'
			);
		}
	};

	server.addTool({
		name: 'get_operation_status',
		description:
			'Retrieves the status and result/error of a background operation.',
		parameters: z.object({
			operationId: z.string().describe('The ID of the operation to check.')
		}),
		execute
	});
}
options to target specific tasks or generate comprehensive diagrams for all tasks. 7 | # Details: 8 | The task involves implementing a new command that accepts an optional '--id' parameter: if provided, the command generates a diagram illustrating the chosen task and its dependencies; if omitted, it produces a diagram that includes all tasks. The diagrams should use color coding to reflect task status and arrows to denote dependencies. In addition to CLI rendering, the command should offer an option to save the output as a Markdown (.md) file. Consider integrating with the existing task management system to pull task details and status. Pay attention to formatting consistency and error handling for invalid or missing task IDs. Comments should be added to the code to improve maintainability, and unit tests should cover edge cases such as cyclic dependencies, missing tasks, and invalid input formats. 9 | 10 | # Test Strategy: 11 | Verify the command functionality by testing with both specific task IDs and general invocation: 1) Run the command with a valid '--id' and ensure the resulting diagram accurately depicts the specified task's dependencies with correct color codings for statuses. 2) Execute the command without '--id' to ensure a complete workflow diagram is generated for all tasks. 3) Check that arrows correctly represent dependency relationships. 4) Validate the Markdown (.md) file export option by confirming the file format and content after saving. 5) Test error responses for non-existent task IDs and malformed inputs. 
12 | -------------------------------------------------------------------------------- /test-config-manager.js: -------------------------------------------------------------------------------- 1 | // test-config-manager.js 2 | console.log('=== ENVIRONMENT TEST ==='); 3 | console.log('Working directory:', process.cwd()); 4 | console.log('NODE_PATH:', process.env.NODE_PATH); 5 | 6 | // Test basic imports 7 | try { 8 | console.log('Importing config-manager'); 9 | // Use dynamic import for ESM 10 | const configManagerModule = await import( 11 | './scripts/modules/config-manager.js' 12 | ); 13 | const configManager = configManagerModule.default || configManagerModule; 14 | console.log('Config manager loaded successfully'); 15 | 16 | console.log('Loading supported models'); 17 | // Add after line 14 (after "Config manager loaded successfully") 18 | console.log('Config manager exports:', Object.keys(configManager)); 19 | } catch (error) { 20 | console.error('Import error:', error.message); 21 | console.error(error.stack); 22 | } 23 | 24 | // Test file access 25 | try { 26 | console.log('Checking for .taskmasterconfig'); 27 | // Use dynamic import for ESM 28 | const { readFileSync, existsSync } = await import('fs'); 29 | const { resolve } = await import('path'); 30 | 31 | const configExists = existsSync('./.taskmasterconfig'); 32 | console.log('.taskmasterconfig exists:', configExists); 33 | 34 | if (configExists) { 35 | const config = JSON.parse(readFileSync('./.taskmasterconfig', 'utf-8')); 36 | console.log('Config keys:', Object.keys(config)); 37 | } 38 | 39 | console.log('Checking for supported-models.json'); 40 | const modelsPath = resolve('./scripts/modules/supported-models.json'); 41 | console.log('Models path:', modelsPath); 42 | const modelsExists = existsSync(modelsPath); 43 | console.log('supported-models.json exists:', modelsExists); 44 | } catch (error) { 45 | console.error('File access error:', error.message); 46 | } 47 | 48 | console.log('=== TEST COMPLETE 
==='); 49 | -------------------------------------------------------------------------------- /.roo/rules/package-manager.md: -------------------------------------------------------------------------------- 1 | # Package Manager & Tech Stack Rules 2 | 3 | - **Preferred Package Manager: Bun + bunx** 4 | - All JS/TS tasks and Node projects in this repo use [Bun](https://bun.sh/) (`bun` & `bunx`) for all scripting, module execution, and dev workflows. 5 | - Use `bun install` for dependency installation (never `npm i` or `yarn`). 6 | - Use `bunx` for running CLIs/tools (never `npx`). 7 | 8 | - **Module Runner: FastMCP** 9 | - Task Master MCP tooling is built with [FastMCP](https://github.com/modelcontextprotocol/fastmcp). 10 | - When creating MCP servers, always use FastMCP as the base. 11 | - All server scripts are started via `bunx fastmcp ...` (or `bunx mcp-inspector` for inspection tools). 12 | 13 | - **AI & Generation: Pollinations.AI** 14 | - All AI (task generation, expansion, complexity analysis) uses [Pollinations.AI](https://pollinations.ai/) at `https://text.pollinations.ai/openai`, with: 15 | - **Default model**: `deepseek` 16 | - Configuration: See `POLLINATIONS_API_URL`, `POLLINATIONS_MODEL`, etc. in `.env` and README. 17 | - No code in the main repo should reference Anthropic/Claude/Perplexity/OpenAI SDK keys or dependencies. 18 | 19 | - **Scripts & Automation** 20 | - All CLI scripts for tooling, init, dev, etc., use Bun shebang (`#!/usr/bin/env bun`). 21 | - When in doubt, prefer Bun over Node. 22 | 23 | - **General Guidelines** 24 | - Document any new dependencies in the relevant `bun.lockb` and `package.json`. 25 | - For devops/dev tooling in CI, always prefer fast, cross-platform solutions that work with Bun. 26 | - If a dependency is not Bun-compatible, document required workaround or migration. 27 | 28 | - **Deployment** 29 | - All deployment/build scripts should assume Bun as the runtime. 
30 | - Any future MCP extension servers or automations are to be written for Bun + FastMCP. -------------------------------------------------------------------------------- /.clinerules/package-manager.md: -------------------------------------------------------------------------------- 1 | # Package Manager & Tech Stack Rules 2 | 3 | - **Preferred Package Manager: Bun + bunx** 4 | - All JS/TS tasks and Node projects in this repo use [Bun](https://bun.sh/) (`bun` & `bunx`) for all scripting, module execution, and dev workflows. 5 | - Use `bun install` for dependency installation (never `npm i` or `yarn`). 6 | - Use `bunx` for running CLIs/tools (never `npx`). 7 | 8 | - **Module Runner: FastMCP** 9 | - Task Master MCP tooling is built with [FastMCP](https://github.com/modelcontextprotocol/fastmcp). 10 | - When creating MCP servers, always use FastMCP as the base. 11 | - All server scripts are started via `bunx fastmcp ...` (or `bunx mcp-inspector` for inspection tools). 12 | 13 | - **AI & Generation: Pollinations.AI** 14 | - All AI (task generation, expansion, complexity analysis) uses [Pollinations.AI](https://pollinations.ai/) at `https://text.pollinations.ai/openai`, with: 15 | - **Default model**: `deepseek` 16 | - Configuration: See `POLLINATIONS_API_URL`, `POLLINATIONS_MODEL`, etc. in `.env` and README. 17 | - No code in the main repo should reference Anthropic/Claude/Perplexity/OpenAI SDK keys or dependencies. 18 | 19 | - **Scripts & Automation** 20 | - All CLI scripts for tooling, init, dev, etc., use Bun shebang (`#!/usr/bin/env bun`). 21 | - When in doubt, prefer Bun over Node. 22 | 23 | - **General Guidelines** 24 | - Document any new dependencies in the relevant `bun.lockb` and `package.json`. 25 | - For devops/dev tooling in CI, always prefer fast, cross-platform solutions that work with Bun. 26 | - If a dependency is not Bun-compatible, document required workaround or migration. 
27 | 28 | - **Deployment** 29 | - All deployment/build scripts should assume Bun as the runtime. 30 | - Any future MCP extension servers or automations are to be written for Bun + FastMCP. -------------------------------------------------------------------------------- /tasks/task_071.txt: -------------------------------------------------------------------------------- 1 | # Task ID: 71 2 | # Title: Add Model-Specific maxTokens Override Configuration 3 | # Status: done 4 | # Dependencies: None 5 | # Priority: high 6 | # Description: Implement functionality to allow specifying a maximum token limit for individual AI models within .taskmasterconfig, overriding the role-based maxTokens if the model-specific limit is lower. 7 | # Details: 8 | 1. **Modify `.taskmasterconfig` Structure:** Add a new top-level section `modelOverrides` (e.g., `"modelOverrides": { "o3-mini": { "maxTokens": 100000 } }`). 9 | 2. **Update `config-manager.js`:** 10 | - Modify config loading to read the new `modelOverrides` section. 11 | - Update `getParametersForRole(role)` logic: Fetch role defaults (roleMaxTokens, temperature). Get the modelId for the role. Look up `modelOverrides[modelId].maxTokens` (modelSpecificMaxTokens). Calculate `effectiveMaxTokens = Math.min(roleMaxTokens, modelSpecificMaxTokens ?? Infinity)`. Return `{ maxTokens: effectiveMaxTokens, temperature }`. 12 | 3. **Update Documentation:** Add an example of `modelOverrides` to `.taskmasterconfig.example` or relevant documentation. 13 | 14 | # Test Strategy: 15 | 1. **Unit Tests (`config-manager.js`):** 16 | - Verify `getParametersForRole` returns role defaults when no override exists. 17 | - Verify `getParametersForRole` returns the lower model-specific limit when an override exists and is lower. 18 | - Verify `getParametersForRole` returns the role limit when an override exists but is higher. 19 | - Verify handling of missing `modelOverrides` section. 20 | 2. 
**Integration Tests (`ai-services-unified.js`):** 21 | - Call an AI service (e.g., `generateTextService`) with a config having a model override. 22 | - Mock the underlying provider function. 23 | - Assert that the `maxTokens` value passed to the mocked provider function matches the expected (potentially overridden) minimum value. 24 | -------------------------------------------------------------------------------- /scripts/modules/task-manager.js: -------------------------------------------------------------------------------- 1 | /** 2 | * task-manager.js 3 | * Task management functions for the Task Master CLI 4 | */ 5 | 6 | import { findTaskById } from './utils.js'; 7 | import parsePRD from './task-manager/parse-prd.js'; 8 | import updateTasks from './task-manager/update-tasks.js'; 9 | import updateTaskById from './task-manager/update-task-by-id.js'; 10 | import generateTaskFiles from './task-manager/generate-task-files.js'; 11 | import setTaskStatus from './task-manager/set-task-status.js'; 12 | import updateSingleTaskStatus from './task-manager/update-single-task-status.js'; 13 | import listTasks from './task-manager/list-tasks.js'; 14 | import expandTask from './task-manager/expand-task.js'; 15 | import expandAllTasks from './task-manager/expand-all-tasks.js'; 16 | import clearSubtasks from './task-manager/clear-subtasks.js'; 17 | import addTask from './task-manager/add-task.js'; 18 | import analyzeTaskComplexity from './task-manager/analyze-task-complexity.js'; 19 | import findNextTask from './task-manager/find-next-task.js'; 20 | import addSubtask from './task-manager/add-subtask.js'; 21 | import removeSubtask from './task-manager/remove-subtask.js'; 22 | import updateSubtaskById from './task-manager/update-subtask-by-id.js'; 23 | import removeTask from './task-manager/remove-task.js'; 24 | import taskExists from './task-manager/task-exists.js'; 25 | import isTaskDependentOn from './task-manager/is-task-dependent.js'; 26 | 27 | // Export task manager 
functions 28 | export { 29 | parsePRD, 30 | updateTasks, 31 | updateTaskById, 32 | updateSubtaskById, 33 | generateTaskFiles, 34 | setTaskStatus, 35 | updateSingleTaskStatus, 36 | listTasks, 37 | expandTask, 38 | expandAllTasks, 39 | clearSubtasks, 40 | addTask, 41 | addSubtask, 42 | removeSubtask, 43 | findNextTask, 44 | analyzeTaskComplexity, 45 | removeTask, 46 | findTaskById, 47 | taskExists, 48 | isTaskDependentOn 49 | }; 50 | -------------------------------------------------------------------------------- /tests/README.md: -------------------------------------------------------------------------------- 1 | # Task Master Test Suite 2 | 3 | This directory contains tests for the Task Master CLI. The tests are organized into different categories to ensure comprehensive test coverage. 4 | 5 | ## Test Structure 6 | 7 | - `unit/`: Unit tests for individual functions and components 8 | - `integration/`: Integration tests for testing interactions between components 9 | - `e2e/`: End-to-end tests for testing complete workflows 10 | - `fixtures/`: Test fixtures and sample data 11 | 12 | ## Running Tests 13 | 14 | To run all tests: 15 | 16 | ```bash 17 | npm test 18 | ``` 19 | 20 | To run tests in watch mode (for development): 21 | 22 | ```bash 23 | npm run test:watch 24 | ``` 25 | 26 | To run tests with coverage reporting: 27 | 28 | ```bash 29 | npm run test:coverage 30 | ``` 31 | 32 | ## Testing Approach 33 | 34 | ### Unit Tests 35 | 36 | Unit tests focus on testing individual functions and components in isolation. These tests should be fast and should mock external dependencies. 37 | 38 | ### Integration Tests 39 | 40 | Integration tests focus on testing interactions between components. These tests ensure that components work together correctly. 41 | 42 | ### End-to-End Tests 43 | 44 | End-to-end tests focus on testing complete workflows from a user's perspective. These tests ensure that the CLI works correctly as a whole. 
45 | 46 | ## Test Fixtures 47 | 48 | Test fixtures provide sample data for tests. Fixtures should be small, focused, and representative of real-world data. 49 | 50 | ## Mocking 51 | 52 | For external dependencies like file system operations and API calls, we use mocking to isolate the code being tested. 53 | 54 | - File system operations: Use `mock-fs` to mock the file system 55 | - API calls: Use Jest's mocking capabilities to mock API responses 56 | 57 | ## Test Coverage 58 | 59 | We aim for at least 80% test coverage for all code paths. Coverage reports can be generated with: 60 | 61 | ```bash 62 | npm run test:coverage 63 | ``` 64 | -------------------------------------------------------------------------------- /tasks/task_065.txt: -------------------------------------------------------------------------------- 1 | # Task ID: 65 2 | # Title: Add Bun Support for Taskmaster Installation 3 | # Status: pending 4 | # Dependencies: None 5 | # Priority: medium 6 | # Description: Implement full support for installing and managing Taskmaster using the Bun package manager, ensuring the installation process and user experience are identical to npm, pnpm, and Yarn. 7 | # Details: 8 | Update the Taskmaster installation scripts and documentation to support Bun as a first-class package manager. Ensure that users can install Taskmaster and run all CLI commands (including 'init' via scripts/init.js) using Bun, with the same directory structure, template copying, package.json merging, and MCP config setup as with npm, pnpm, and Yarn. Verify that all dependencies are compatible with Bun and that any Bun-specific configuration (such as lockfile handling or binary linking) is handled correctly. If the installation process includes a website or account setup, document and test these flows for parity; if not, explicitly confirm and document that no such steps are required. 
Update all relevant documentation and installation guides to include Bun instructions for macOS, Linux, and Windows (including WSL and PowerShell). Address any known Bun-specific issues (e.g., sporadic install hangs) with clear troubleshooting guidance. 9 | 10 | # Test Strategy: 11 | 1. Install Taskmaster using Bun on macOS, Linux, and Windows (including WSL and PowerShell), following the updated documentation. 2. Run the full installation and initialization process, verifying that the directory structure, templates, and MCP config are set up identically to npm, pnpm, and Yarn. 3. Execute all CLI commands (including 'init') and confirm functional parity. 4. If a website or account setup is required, test these flows for consistency; if not, confirm and document this. 5. Check for Bun-specific issues (e.g., install hangs) and verify that troubleshooting steps are effective. 6. Ensure the documentation is clear, accurate, and up to date for all supported platforms. 12 | -------------------------------------------------------------------------------- /.clinerules/glossary.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: Glossary of other Roo rules 3 | globs: **/* 4 | alwaysApply: true 5 | --- 6 | 7 | # Glossary of Task Master Roo Rules 8 | 9 | This file provides a quick reference to the purpose of each rule file located in the `.roo/rules` directory. 10 | 11 | - **[`architecture.md`](:.roo/rules/architecture.md)**: Describes the high-level architecture of the Task Master CLI application. 12 | - **[`changeset.md`](:.roo/rules/changeset.md)**: Guidelines for using Changesets (npm run changeset) to manage versioning and changelogs. 13 | - **[`commands.md`](:.roo/rules/commands.md)**: Guidelines for implementing CLI commands using Commander.js. 14 | - **[`roo_rules.md`](:.roo/rules/roo_rules.md)**: Guidelines for creating and maintaining Roo rules to ensure consistency and effectiveness. 
15 | - **[`dependencies.md`](:.roo/rules/dependencies.md)**: Guidelines for managing task dependencies and relationships. 16 | - **[`dev_workflow.md`](:.roo/rules/dev_workflow.md)**: Guide for using Task Master to manage task-driven development workflows. 17 | - **[`glossary.md`](:.roo/rules/glossary.md)**: This file; provides a glossary of other Roo rules. 18 | - **[`mcp.md`](:.roo/rules/mcp.md)**: Guidelines for implementing and interacting with the Task Master MCP Server. 19 | - **[`new_features.md`](:.roo/rules/new_features.md)**: Guidelines for integrating new features into the Task Master CLI. 20 | - **[`self_improve.md`](:.roo/rules/self_improve.md)**: Guidelines for continuously improving Roo rules based on emerging code patterns and best practices. 21 | - **[`taskmaster.md`](:.roo/rules/taskmaster.md)**: Comprehensive reference for Taskmaster MCP tools and CLI commands. 22 | - **[`tasks.md`](:.roo/rules/tasks.md)**: Guidelines for implementing task management operations. 23 | - **[`tests.md`](:.roo/rules/tests.md)**: Guidelines for implementing and maintaining tests for Task Master CLI. 24 | - **[`ui.md`](:.roo/rules/ui.md)**: Guidelines for implementing and maintaining user interface components. 25 | - **[`utilities.md`](:.roo/rules/utilities.md)**: Guidelines for implementing utility functions. 26 | 27 | -------------------------------------------------------------------------------- /.roo/rules/glossary.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: Glossary of other Roo rules 3 | globs: **/* 4 | alwaysApply: true 5 | --- 6 | 7 | # Glossary of Task Master Roo Rules 8 | 9 | This file provides a quick reference to the purpose of each rule file located in the `.roo/rules` directory. 10 | 11 | - **[`architecture.md`](:.roo/rules/architecture.md)**: Describes the high-level architecture of the Task Master CLI application. 
12 | - **[`changeset.md`](:.roo/rules/changeset.md)**: Guidelines for using Changesets (npm run changeset) to manage versioning and changelogs. 13 | - **[`commands.md`](:.roo/rules/commands.md)**: Guidelines for implementing CLI commands using Commander.js. 14 | - **[`roo_rules.md`](:.roo/rules/roo_rules.md)**: Guidelines for creating and maintaining Roo rules to ensure consistency and effectiveness. 15 | - **[`dependencies.md`](:.roo/rules/dependencies.md)**: Guidelines for managing task dependencies and relationships. 16 | - **[`dev_workflow.md`](:.roo/rules/dev_workflow.md)**: Guide for using Task Master to manage task-driven development workflows. 17 | - **[`glossary.md`](:.roo/rules/glossary.md)**: This file; provides a glossary of other Roo rules. 18 | - **[`mcp.md`](:.roo/rules/mcp.md)**: Guidelines for implementing and interacting with the Task Master MCP Server. 19 | - **[`new_features.md`](:.roo/rules/new_features.md)**: Guidelines for integrating new features into the Task Master CLI. 20 | - **[`self_improve.md`](:.roo/rules/self_improve.md)**: Guidelines for continuously improving Roo rules based on emerging code patterns and best practices. 21 | - **[`taskmaster.md`](:.roo/rules/taskmaster.md)**: Comprehensive reference for Taskmaster MCP tools and CLI commands. 22 | - **[`tasks.md`](:.roo/rules/tasks.md)**: Guidelines for implementing task management operations. 23 | - **[`tests.md`](:.roo/rules/tests.md)**: Guidelines for implementing and maintaining tests for Task Master CLI. 24 | - **[`ui.md`](:.roo/rules/ui.md)**: Guidelines for implementing and maintaining user interface components. 25 | - **[`utilities.md`](:.roo/rules/utilities.md)**: Guidelines for implementing utility functions. 
26 | 27 | -------------------------------------------------------------------------------- /mcp-server/src/tools/initialize-project.js: -------------------------------------------------------------------------------- 1 | import { z } from 'zod'; 2 | import { 3 | createErrorResponse, 4 | handleApiResult, 5 | withNormalizedProjectRoot 6 | } from './utils.js'; 7 | import { initializeProjectDirect } from '../core/task-master-core.js'; 8 | 9 | export function registerInitializeProjectTool(server) { 10 | server.addTool({ 11 | name: 'initialize_project', 12 | description: 13 | 'Initializes a new Task Master project structure by calling the core initialization logic. Creates necessary folders and configuration files for Task Master in the current directory.', 14 | parameters: z.object({ 15 | skipInstall: z 16 | .boolean() 17 | .optional() 18 | .default(false) 19 | .describe( 20 | 'Skip installing dependencies automatically. Never do this unless you are sure the project is already installed.' 21 | ), 22 | addAliases: z 23 | .boolean() 24 | .optional() 25 | .default(false) 26 | .describe('Add shell aliases (tm, taskmaster) to shell config file.'), 27 | yes: z 28 | .boolean() 29 | .optional() 30 | .default(true) 31 | .describe( 32 | 'Skip prompts and use default values. Always set to true for MCP tools.' 33 | ), 34 | projectRoot: z 35 | .string() 36 | .describe( 37 | 'The root directory for the project. ALWAYS SET THIS TO THE PROJECT ROOT DIRECTORY. IF NOT SET, THE TOOL WILL NOT WORK.' 
38 | ) 39 | }), 40 | execute: withNormalizedProjectRoot(async (args, context) => { 41 | const { log } = context; 42 | const session = context.session; 43 | 44 | try { 45 | log.info( 46 | `Executing initialize_project tool with args: ${JSON.stringify(args)}` 47 | ); 48 | 49 | const result = await initializeProjectDirect(args, log, { session }); 50 | 51 | return handleApiResult(result, log, 'Initialization failed'); 52 | } catch (error) { 53 | const errorMessage = `Project initialization tool failed: ${error.message || 'Unknown error'}`; 54 | log.error(errorMessage, error); 55 | return createErrorResponse(errorMessage, { details: error.stack }); 56 | } 57 | }) 58 | }); 59 | } 60 | -------------------------------------------------------------------------------- /tests/fixtures/sample-claude-response.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Sample Claude API response for testing 3 | */ 4 | 5 | export const sampleClaudeResponse = { 6 | tasks: [ 7 | { 8 | id: 1, 9 | title: 'Setup Task Data Structure', 10 | description: 'Implement the core task data structure and file operations', 11 | status: 'pending', 12 | dependencies: [], 13 | priority: 'high', 14 | details: 15 | 'Create the tasks.json file structure with support for task properties including ID, title, description, status, dependencies, priority, details, and test strategy. Implement file system operations for reading and writing task data.', 16 | testStrategy: 17 | 'Verify tasks.json is created with the correct structure and that task data can be read from and written to the file.' 18 | }, 19 | { 20 | id: 2, 21 | title: 'Implement CLI Foundation', 22 | description: 23 | 'Create the command-line interface foundation with basic commands', 24 | status: 'pending', 25 | dependencies: [1], 26 | priority: 'high', 27 | details: 28 | 'Set up Commander.js for handling CLI commands. Implement the basic command structure including help documentation. 
Create the foundational command parsing logic.', 29 | testStrategy: 30 | 'Test each command to ensure it properly parses arguments and options. Verify help documentation is displayed correctly.' 31 | }, 32 | { 33 | id: 3, 34 | title: 'Develop Task Management Operations', 35 | description: 36 | 'Implement core operations for creating, reading, updating, and deleting tasks', 37 | status: 'pending', 38 | dependencies: [1], 39 | priority: 'medium', 40 | details: 41 | 'Implement functions for listing tasks, adding new tasks, updating task status, and removing tasks. Include support for filtering tasks by status and other properties.', 42 | testStrategy: 43 | 'Create unit tests for each CRUD operation to verify they correctly modify the task data.' 44 | } 45 | ], 46 | metadata: { 47 | projectName: 'Task Management CLI', 48 | totalTasks: 3, 49 | sourceFile: 'tests/fixtures/sample-prd.txt', 50 | generatedAt: '2023-12-15' 51 | } 52 | }; 53 | -------------------------------------------------------------------------------- /.clinerules/commands.md: -------------------------------------------------------------------------------- 1 | # Command-Line Interface Implementation Guidelines 2 | 3 | **General CLI Rules:** 4 | - Keep action handlers concise; core logic lives in modules, not in CLI wiring. 5 | - Validate required parameters before running. 6 | - Descriptions and option names: use kebab-case (`--file`, `--output`, etc). 7 | - Use confirmation by default on destructive commands; `--yes` skips for scripts. 8 | - Clean up dependencies and references after destructive commands. 9 | - Regenerate files after removal unless `--skip-generate` is set. 10 | - Prefer non-destructive alternatives, e.g., status changes for cancel/defer. 11 | - Use path.join, standard naming for task files. 12 | 13 | **Testing and Error Handling:** 14 | - Handle errors with clear feedback and color. 15 | - Use boxen for key messages and next-step hints. 16 | - Group related commands together. 
17 | 18 | **Command Patterns:** 19 | ```js 20 | program 21 | .command('example') 22 | .description('...') 23 | .option('--flag', '...') 24 | .action(async (opts) => { /* Call implementation */ }); 25 | ``` 26 | - Removal commands: always prompt unless `--yes` specified. 27 | - Edit/removal always cleans up references and regenerates files if not skipped. 28 | 29 | **Subtask Patterns:** 30 | - Add subtask requires parent (`--parent <id>`) and title/description if not converting. 31 | - Remove subtask: can convert to parent, or delete (clean up, regenerate). 32 | 33 | **Flags:** 34 | - Use positive enable/skip styles: `--skip-generate` 35 | - Do not use negated flags like `--no-generate` 36 | 37 | **Version Checking:** 38 | - Asynchronously check version and notify for upgrades after execution. 39 | 40 | **Input Checks:** 41 | - Validate all required params early; exit nonzero on fatal missing input. 42 | 43 | **Error Display:** 44 | - Use colors: info (blue), error (red), warnings (yellow) 45 | - Show available options and help when unknown or missing options used. 46 | 47 | **Import patterns:** 48 | - Only import what's needed, group by module. 49 | - Never create circular dependencies. 50 | 51 | **See also:** add-subtask, remove-subtask, and version check examples for complete patterns. 52 | -------------------------------------------------------------------------------- /.roo/rules/commands.md: -------------------------------------------------------------------------------- 1 | # Command-Line Interface Implementation Guidelines 2 | 3 | **General CLI Rules:** 4 | - Keep action handlers concise; core logic lives in modules, not in CLI wiring. 5 | - Validate required parameters before running. 6 | - Descriptions and option names: use kebab-case (`--file`, `--output`, etc). 7 | - Use confirmation by default on destructive commands; `--yes` skips for scripts. 8 | - Clean up dependencies and references after destructive commands. 
9 | - Regenerate files after removal unless `--skip-generate` is set. 10 | - Prefer non-destructive alternatives, e.g., status changes for cancel/defer. 11 | - Use path.join, standard naming for task files. 12 | 13 | **Testing and Error Handling:** 14 | - Handle errors with clear feedback and color. 15 | - Use boxen for key messages and next-step hints. 16 | - Group related commands together. 17 | 18 | **Command Patterns:** 19 | ```js 20 | program 21 | .command('example') 22 | .description('...') 23 | .option('--flag', '...') 24 | .action(async (opts) => { /* Call implementation */ }); 25 | ``` 26 | - Removal commands: always prompt unless `--yes` specified. 27 | - Edit/removal always cleans up references and regenerates files if not skipped. 28 | 29 | **Subtask Patterns:** 30 | - Add subtask requires parent (`--parent <id>`) and title/description if not converting. 31 | - Remove subtask: can convert to parent, or delete (clean up, regenerate). 32 | 33 | **Flags:** 34 | - Use positive enable/skip styles: `--skip-generate` 35 | - Do not use negated flags like `--no-generate` 36 | 37 | **Version Checking:** 38 | - Asynchronously check version and notify for upgrades after execution. 39 | 40 | **Input Checks:** 41 | - Validate all required params early; exit nonzero on fatal missing input. 42 | 43 | **Error Display:** 44 | - Use colors: info (blue), error (red), warnings (yellow) 45 | - Show available options and help when unknown or missing options used. 46 | 47 | **Import patterns:** 48 | - Only import what's needed, group by module. 49 | - Never create circular dependencies. 50 | 51 | **See also:** add-subtask, remove-subtask, and version check examples for complete patterns. 
52 | -------------------------------------------------------------------------------- /mcp-server/src/index.js: -------------------------------------------------------------------------------- 1 | import { FastMCP } from 'fastmcp'; 2 | import path from 'path'; 3 | import dotenv from 'dotenv'; 4 | import { fileURLToPath } from 'url'; 5 | import fs from 'fs'; 6 | import logger from './logger.js'; 7 | import { registerTaskMasterTools } from './tools/index.js'; 8 | 9 | // Load environment variables 10 | dotenv.config(); 11 | 12 | // Constants 13 | const __filename = fileURLToPath(import.meta.url); 14 | const __dirname = path.dirname(__filename); 15 | 16 | /** 17 | * Main MCP server class that integrates with Task Master 18 | */ 19 | class TaskMasterMCPServer { 20 | constructor() { 21 | // Get version from package.json using synchronous fs 22 | const packagePath = path.join(__dirname, '../../package.json'); 23 | const packageJson = JSON.parse(fs.readFileSync(packagePath, 'utf8')); 24 | 25 | this.options = { 26 | name: 'Task Master MCP Server', 27 | version: packageJson.version 28 | }; 29 | 30 | this.server = new FastMCP(this.options); 31 | this.initialized = false; 32 | 33 | this.server.addResource({}); 34 | 35 | this.server.addResourceTemplate({}); 36 | 37 | // Bind methods 38 | this.init = this.init.bind(this); 39 | this.start = this.start.bind(this); 40 | this.stop = this.stop.bind(this); 41 | 42 | // Setup logging 43 | this.logger = logger; 44 | } 45 | 46 | /** 47 | * Initialize the MCP server with necessary tools and routes 48 | */ 49 | async init() { 50 | if (this.initialized) return; 51 | 52 | // Pass the manager instance to the tool registration function 53 | registerTaskMasterTools(this.server, this.asyncManager); 54 | 55 | this.initialized = true; 56 | 57 | return this; 58 | } 59 | 60 | /** 61 | * Start the MCP server 62 | */ 63 | async start() { 64 | if (!this.initialized) { 65 | await this.init(); 66 | } 67 | 68 | // Start the FastMCP server with increased 
timeout 69 | await this.server.start({ 70 | transportType: 'stdio', 71 | timeout: 120000 // 2 minutes timeout (in milliseconds) 72 | }); 73 | 74 | return this; 75 | } 76 | 77 | /** 78 | * Stop the MCP server 79 | */ 80 | async stop() { 81 | if (this.server) { 82 | await this.server.stop(); 83 | } 84 | } 85 | } 86 | 87 | export default TaskMasterMCPServer; 88 | -------------------------------------------------------------------------------- /.cursor/rules/glossary.mdc: -------------------------------------------------------------------------------- 1 | --- 2 | description: Glossary of other Cursor rules 3 | globs: **/* 4 | alwaysApply: true 5 | --- 6 | 7 | # Glossary of Task Master Cursor Rules 8 | 9 | This file provides a quick reference to the purpose of each rule file located in the `.cursor/rules` directory. 10 | 11 | - **[`architecture.mdc`](mdc:.cursor/rules/architecture.mdc)**: Describes the high-level architecture of the Task Master CLI application. 12 | - **[`changeset.mdc`](mdc:.cursor/rules/changeset.mdc)**: Guidelines for using Changesets (npm run changeset) to manage versioning and changelogs. 13 | - **[`commands.mdc`](mdc:.cursor/rules/commands.mdc)**: Guidelines for implementing CLI commands using Commander.js. 14 | - **[`cursor_rules.mdc`](mdc:.cursor/rules/cursor_rules.mdc)**: Guidelines for creating and maintaining Cursor rules to ensure consistency and effectiveness. 15 | - **[`dependencies.mdc`](mdc:.cursor/rules/dependencies.mdc)**: Guidelines for managing task dependencies and relationships. 16 | - **[`dev_workflow.mdc`](mdc:.cursor/rules/dev_workflow.mdc)**: Guide for using Task Master to manage task-driven development workflows. 17 | - **[`glossary.mdc`](mdc:.cursor/rules/glossary.mdc)**: This file; provides a glossary of other Cursor rules. 18 | - **[`mcp.mdc`](mdc:.cursor/rules/mcp.mdc)**: Guidelines for implementing and interacting with the Task Master MCP Server. 
19 | - **[`new_features.mdc`](mdc:.cursor/rules/new_features.mdc)**: Guidelines for integrating new features into the Task Master CLI. 20 | - **[`self_improve.mdc`](mdc:.cursor/rules/self_improve.mdc)**: Guidelines for continuously improving Cursor rules based on emerging code patterns and best practices. 21 | - **[`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)**: Comprehensive reference for Taskmaster MCP tools and CLI commands. 22 | - **[`tasks.mdc`](mdc:.cursor/rules/tasks.mdc)**: Guidelines for implementing task management operations. 23 | - **[`tests.mdc`](mdc:.cursor/rules/tests.mdc)**: Guidelines for implementing and maintaining tests for Task Master CLI. 24 | - **[`ui.mdc`](mdc:.cursor/rules/ui.mdc)**: Guidelines for implementing and maintaining user interface components. 25 | - **[`utilities.mdc`](mdc:.cursor/rules/utilities.mdc)**: Guidelines for implementing utility functions. 26 | 27 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Task Master License 2 | 3 | MIT License 4 | 5 | Copyright (c) 2025 — Eyal Toledano, Ralph Khreish 6 | 7 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 8 | 9 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 10 | 11 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 12 | 13 | "Commons Clause" License Condition v1.0 14 | 15 | The Software is provided to you by the Licensor under the License (defined below), subject to the following condition: 16 | 17 | Without limiting other conditions in the License, the grant of rights under the License will not include, and the License does not grant to you, the right to Sell the Software. 18 | 19 | For purposes of the foregoing, "Sell" means practicing any or all of the rights granted to you under the License to provide the Software to third parties, for a fee or other consideration (including without limitation fees for hosting or consulting/support services related to the Software), as part of a product or service whose value derives, entirely or substantially, from the functionality of the Software. Any license notice or attribution required by the License must also include this Commons Clause License Condition notice. 20 | 21 | Software: All Task Master associated files (including all files in the GitHub repository "claude-task-master" and in the npm package "task-master-ai"). 
22 | 23 | License: MIT 24 | 25 | Licensor: Eyal Toledano, Ralph Khreish 26 | -------------------------------------------------------------------------------- /mcp-server/src/tools/fix-dependencies.js: -------------------------------------------------------------------------------- 1 | /** 2 | * tools/fix-dependencies.js 3 | * Tool for automatically fixing invalid task dependencies 4 | */ 5 | 6 | import { z } from 'zod'; 7 | import { 8 | handleApiResult, 9 | createErrorResponse, 10 | withNormalizedProjectRoot 11 | } from './utils.js'; 12 | import { fixDependenciesDirect } from '../core/task-master-core.js'; 13 | import { findTasksJsonPath } from '../core/utils/path-utils.js'; 14 | 15 | /** 16 | * Register the fixDependencies tool with the MCP server 17 | * @param {Object} server - FastMCP server instance 18 | */ 19 | export function registerFixDependenciesTool(server) { 20 | server.addTool({ 21 | name: 'fix_dependencies', 22 | description: 'Fix invalid dependencies in tasks automatically', 23 | parameters: z.object({ 24 | file: z.string().optional().describe('Absolute path to the tasks file'), 25 | projectRoot: z 26 | .string() 27 | .describe('The directory of the project. 
Must be an absolute path.') 28 | }), 29 | execute: withNormalizedProjectRoot(async (args, { log, session }) => { 30 | try { 31 | log.info(`Fixing dependencies with args: ${JSON.stringify(args)}`); 32 | 33 | // Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot) 34 | let tasksJsonPath; 35 | try { 36 | tasksJsonPath = findTasksJsonPath( 37 | { projectRoot: args.projectRoot, file: args.file }, 38 | log 39 | ); 40 | } catch (error) { 41 | log.error(`Error finding tasks.json: ${error.message}`); 42 | return createErrorResponse( 43 | `Failed to find tasks.json: ${error.message}` 44 | ); 45 | } 46 | 47 | const result = await fixDependenciesDirect( 48 | { 49 | tasksJsonPath: tasksJsonPath 50 | }, 51 | log 52 | ); 53 | 54 | if (result.success) { 55 | log.info(`Successfully fixed dependencies: ${result.data.message}`); 56 | } else { 57 | log.error(`Failed to fix dependencies: ${result.error.message}`); 58 | } 59 | 60 | return handleApiResult(result, log, 'Error fixing dependencies'); 61 | } catch (error) { 62 | log.error(`Error in fixDependencies tool: ${error.message}`); 63 | return createErrorResponse(error.message); 64 | } 65 | }) 66 | }); 67 | } 68 | -------------------------------------------------------------------------------- /mcp-test.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | import { Config } from 'fastmcp'; 4 | import path from 'path'; 5 | import fs from 'fs'; 6 | 7 | // Log the current directory 8 | console.error(`Current working directory: ${process.cwd()}`); 9 | 10 | try { 11 | console.error('Attempting to load FastMCP Config...'); 12 | 13 | // Check if .cursor/mcp.json exists 14 | const mcpPath = path.join(process.cwd(), '.cursor', 'mcp.json'); 15 | console.error(`Checking if mcp.json exists at: ${mcpPath}`); 16 | 17 | if (fs.existsSync(mcpPath)) { 18 | console.error('mcp.json file found'); 19 | console.error( 20 | `File content: 
${JSON.stringify(JSON.parse(fs.readFileSync(mcpPath, 'utf8')), null, 2)}` 21 | ); 22 | } else { 23 | console.error('mcp.json file not found'); 24 | } 25 | 26 | // Try to create Config 27 | const config = new Config(); 28 | console.error('Config created successfully'); 29 | 30 | // Check if env property exists 31 | if (config.env) { 32 | console.error( 33 | `Config.env exists with keys: ${Object.keys(config.env).join(', ')}` 34 | ); 35 | 36 | // Print each env var value (careful with sensitive values) 37 | for (const [key, value] of Object.entries(config.env)) { 38 | if (key.includes('KEY')) { 39 | console.error(`${key}: [value hidden]`); 40 | } else { 41 | console.error(`${key}: ${value}`); 42 | } 43 | } 44 | } else { 45 | console.error('Config.env does not exist'); 46 | } 47 | } catch (error) { 48 | console.error(`Error loading Config: ${error.message}`); 49 | console.error(`Stack trace: ${error.stack}`); 50 | } 51 | 52 | // Log process.env to see if values from mcp.json were loaded automatically 53 | console.error('\nChecking if process.env already has values from mcp.json:'); 54 | const envVars = [ 55 | 'ANTHROPIC_API_KEY', 56 | 'PERPLEXITY_API_KEY', 57 | 'MODEL', 58 | 'PERPLEXITY_MODEL', 59 | 'MAX_TOKENS', 60 | 'TEMPERATURE', 61 | 'DEFAULT_SUBTASKS', 62 | 'DEFAULT_PRIORITY' 63 | ]; 64 | 65 | for (const varName of envVars) { 66 | if (process.env[varName]) { 67 | if (varName.includes('KEY')) { 68 | console.error(`${varName}: [value hidden]`); 69 | } else { 70 | console.error(`${varName}: ${process.env[varName]}`); 71 | } 72 | } else { 73 | console.error(`${varName}: not set`); 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /mcp-server/src/tools/next-task.js: -------------------------------------------------------------------------------- 1 | /** 2 | * tools/next-task.js 3 | * Tool to find the next task to work on 4 | */ 5 | 6 | import { z } from 'zod'; 7 | import { 8 | handleApiResult, 9 | createErrorResponse, 
10 | withNormalizedProjectRoot 11 | } from './utils.js'; 12 | import { nextTaskDirect } from '../core/task-master-core.js'; 13 | import { findTasksJsonPath } from '../core/utils/path-utils.js'; 14 | 15 | /** 16 | * Register the next-task tool with the MCP server 17 | * @param {Object} server - FastMCP server instance 18 | */ 19 | export function registerNextTaskTool(server) { 20 | server.addTool({ 21 | name: 'next_task', 22 | description: 23 | 'Find the next task to work on based on dependencies and status', 24 | parameters: z.object({ 25 | file: z.string().optional().describe('Absolute path to the tasks file'), 26 | projectRoot: z 27 | .string() 28 | .describe('The directory of the project. Must be an absolute path.') 29 | }), 30 | execute: withNormalizedProjectRoot(async (args, { log, session }) => { 31 | try { 32 | log.info(`Finding next task with args: ${JSON.stringify(args)}`); 33 | 34 | // Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot) 35 | let tasksJsonPath; 36 | try { 37 | tasksJsonPath = findTasksJsonPath( 38 | { projectRoot: args.projectRoot, file: args.file }, 39 | log 40 | ); 41 | } catch (error) { 42 | log.error(`Error finding tasks.json: ${error.message}`); 43 | return createErrorResponse( 44 | `Failed to find tasks.json: ${error.message}` 45 | ); 46 | } 47 | 48 | const result = await nextTaskDirect( 49 | { 50 | tasksJsonPath: tasksJsonPath 51 | }, 52 | log 53 | ); 54 | 55 | if (result.success) { 56 | log.info( 57 | `Successfully found next task: ${result.data?.task?.id || 'No available tasks'}` 58 | ); 59 | } else { 60 | log.error( 61 | `Failed to find next task: ${result.error?.message || 'Unknown error'}` 62 | ); 63 | } 64 | 65 | return handleApiResult(result, log, 'Error finding next task'); 66 | } catch (error) { 67 | log.error(`Error in nextTask tool: ${error.message}`); 68 | return createErrorResponse(error.message); 69 | } 70 | }) 71 | }); 72 | } 73 | 
-------------------------------------------------------------------------------- /tests/e2e/test_llm_analysis.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Script to test the LLM analysis function independently 4 | 5 | # Exit on error 6 | set -u 7 | set -o pipefail 8 | 9 | # Source the helper functions 10 | HELPER_SCRIPT="tests/e2e/e2e_helpers.sh" 11 | if [ -f "$HELPER_SCRIPT" ]; then 12 | source "$HELPER_SCRIPT" 13 | echo "[INFO] Sourced helper script: $HELPER_SCRIPT" 14 | else 15 | echo "[ERROR] Helper script not found at $HELPER_SCRIPT. Exiting." >&2 16 | exit 1 17 | fi 18 | 19 | # --- Configuration --- 20 | # Get the absolute path to the project root (assuming this script is run from the root) 21 | PROJECT_ROOT="$(pwd)" 22 | 23 | # --- Argument Parsing --- 24 | if [ "$#" -ne 2 ]; then 25 | echo "Usage: $0 " >&2 26 | echo "Example: $0 tests/e2e/log/e2e_run_YYYYMMDD_HHMMSS.log tests/e2e/_runs/run_YYYYMMDD_HHMMSS" >&2 27 | exit 1 28 | fi 29 | 30 | LOG_FILE_REL="$1" # Relative path from project root 31 | TEST_RUN_DIR_REL="$2" # Relative path from project root 32 | 33 | # Construct absolute paths 34 | LOG_FILE_ABS="$PROJECT_ROOT/$LOG_FILE_REL" 35 | TEST_RUN_DIR_ABS="$PROJECT_ROOT/$TEST_RUN_DIR_REL" 36 | 37 | # --- Validation --- 38 | if [ ! -f "$LOG_FILE_ABS" ]; then 39 | echo "[ERROR] Log file not found: $LOG_FILE_ABS" >&2 40 | exit 1 41 | fi 42 | 43 | if [ ! -d "$TEST_RUN_DIR_ABS" ]; then 44 | echo "[ERROR] Test run directory not found: $TEST_RUN_DIR_ABS" >&2 45 | exit 1 46 | fi 47 | 48 | if [ ! 
-f "$TEST_RUN_DIR_ABS/.env" ]; then 49 | echo "[ERROR] .env file not found in test run directory: $TEST_RUN_DIR_ABS/.env" >&2 50 | exit 1 51 | fi 52 | 53 | 54 | # --- Execution --- 55 | echo "[INFO] Changing directory to test run directory: $TEST_RUN_DIR_ABS" 56 | cd "$TEST_RUN_DIR_ABS" || { echo "[ERROR] Failed to cd into $TEST_RUN_DIR_ABS"; exit 1; } 57 | 58 | echo "[INFO] Current directory: $(pwd)" 59 | echo "[INFO] Calling analyze_log_with_llm function with log file: $LOG_FILE_ABS" 60 | 61 | # Call the function (sourced earlier) 62 | analyze_log_with_llm "$LOG_FILE_ABS" 63 | ANALYSIS_EXIT_CODE=$? 64 | 65 | echo "[INFO] analyze_log_with_llm finished with exit code: $ANALYSIS_EXIT_CODE" 66 | 67 | # Optional: cd back to original directory 68 | # echo "[INFO] Changing back to project root: $PROJECT_ROOT" 69 | # cd "$PROJECT_ROOT" 70 | 71 | exit $ANALYSIS_EXIT_CODE -------------------------------------------------------------------------------- /tests/unit/task-finder.test.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Task finder tests 3 | */ 4 | 5 | import { findTaskById } from '../../scripts/modules/utils.js'; 6 | import { sampleTasks, emptySampleTasks } from '../fixtures/sample-tasks.js'; 7 | 8 | describe('Task Finder', () => { 9 | describe('findTaskById function', () => { 10 | test('should find a task by numeric ID', () => { 11 | const result = findTaskById(sampleTasks.tasks, 2); 12 | expect(result.task).toBeDefined(); 13 | expect(result.task.id).toBe(2); 14 | expect(result.task.title).toBe('Create Core Functionality'); 15 | expect(result.originalSubtaskCount).toBeNull(); 16 | }); 17 | 18 | test('should find a task by string ID', () => { 19 | const result = findTaskById(sampleTasks.tasks, '2'); 20 | expect(result.task).toBeDefined(); 21 | expect(result.task.id).toBe(2); 22 | expect(result.originalSubtaskCount).toBeNull(); 23 | }); 24 | 25 | test('should find a subtask using dot notation', () => { 26 | const 
result = findTaskById(sampleTasks.tasks, '3.1'); 27 | expect(result.task).toBeDefined(); 28 | expect(result.task.id).toBe(1); 29 | expect(result.task.title).toBe('Create Header Component'); 30 | expect(result.task.isSubtask).toBe(true); 31 | expect(result.task.parentTask.id).toBe(3); 32 | expect(result.originalSubtaskCount).toBeNull(); 33 | }); 34 | 35 | test('should return null for non-existent task ID', () => { 36 | const result = findTaskById(sampleTasks.tasks, 99); 37 | expect(result.task).toBeNull(); 38 | expect(result.originalSubtaskCount).toBeNull(); 39 | }); 40 | 41 | test('should return null for non-existent subtask ID', () => { 42 | const result = findTaskById(sampleTasks.tasks, '3.99'); 43 | expect(result.task).toBeNull(); 44 | expect(result.originalSubtaskCount).toBeNull(); 45 | }); 46 | 47 | test('should return null for non-existent parent task ID in subtask notation', () => { 48 | const result = findTaskById(sampleTasks.tasks, '99.1'); 49 | expect(result.task).toBeNull(); 50 | expect(result.originalSubtaskCount).toBeNull(); 51 | }); 52 | 53 | test('should return null when tasks array is empty', () => { 54 | const result = findTaskById(emptySampleTasks.tasks, 1); 55 | expect(result.task).toBeNull(); 56 | expect(result.originalSubtaskCount).toBeNull(); 57 | }); 58 | }); 59 | }); 60 | -------------------------------------------------------------------------------- /mcp-server/src/tools/complexity-report.js: -------------------------------------------------------------------------------- 1 | /** 2 | * tools/complexity-report.js 3 | * Tool for displaying the complexity analysis report 4 | */ 5 | 6 | import { z } from 'zod'; 7 | import { 8 | handleApiResult, 9 | createErrorResponse, 10 | withNormalizedProjectRoot 11 | } from './utils.js'; 12 | import { complexityReportDirect } from '../core/task-master-core.js'; 13 | import path from 'path'; 14 | 15 | /** 16 | * Register the complexityReport tool with the MCP server 17 | * @param {Object} server - FastMCP 
server instance 18 | */ 19 | export function registerComplexityReportTool(server) { 20 | server.addTool({ 21 | name: 'complexity_report', 22 | description: 'Display the complexity analysis report in a readable format', 23 | parameters: z.object({ 24 | file: z 25 | .string() 26 | .optional() 27 | .describe( 28 | 'Path to the report file (default: scripts/task-complexity-report.json)' 29 | ), 30 | projectRoot: z 31 | .string() 32 | .describe('The directory of the project. Must be an absolute path.') 33 | }), 34 | execute: withNormalizedProjectRoot(async (args, { log, session }) => { 35 | try { 36 | log.info( 37 | `Getting complexity report with args: ${JSON.stringify(args)}` 38 | ); 39 | 40 | // Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot) 41 | const reportPath = args.file 42 | ? path.resolve(args.projectRoot, args.file) 43 | : path.resolve( 44 | args.projectRoot, 45 | 'scripts', 46 | 'task-complexity-report.json' 47 | ); 48 | 49 | const result = await complexityReportDirect( 50 | { 51 | reportPath: reportPath 52 | }, 53 | log 54 | ); 55 | 56 | if (result.success) { 57 | log.info( 58 | `Successfully retrieved complexity report${result.fromCache ? 
' (from cache)' : ''}` 59 | ); 60 | } else { 61 | log.error( 62 | `Failed to retrieve complexity report: ${result.error.message}` 63 | ); 64 | } 65 | 66 | return handleApiResult( 67 | result, 68 | log, 69 | 'Error retrieving complexity report' 70 | ); 71 | } catch (error) { 72 | log.error(`Error in complexity-report tool: ${error.message}`); 73 | return createErrorResponse( 74 | `Failed to retrieve complexity report: ${error.message}` 75 | ); 76 | } 77 | }) 78 | }); 79 | } 80 | -------------------------------------------------------------------------------- /mcp-server/src/core/direct-functions/validate-dependencies.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Direct function wrapper for validateDependenciesCommand 3 | */ 4 | 5 | import { validateDependenciesCommand } from '../../../../scripts/modules/dependency-manager.js'; 6 | import { 7 | enableSilentMode, 8 | disableSilentMode 9 | } from '../../../../scripts/modules/utils.js'; 10 | import fs from 'fs'; 11 | 12 | /** 13 | * Validate dependencies in tasks.json 14 | * @param {Object} args - Function arguments 15 | * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file. 
16 | * @param {Object} log - Logger object 17 | * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>} 18 | */ 19 | export async function validateDependenciesDirect(args, log) { 20 | // Destructure the explicit tasksJsonPath 21 | const { tasksJsonPath } = args; 22 | 23 | if (!tasksJsonPath) { 24 | log.error('validateDependenciesDirect called without tasksJsonPath'); 25 | return { 26 | success: false, 27 | error: { 28 | code: 'MISSING_ARGUMENT', 29 | message: 'tasksJsonPath is required' 30 | } 31 | }; 32 | } 33 | 34 | try { 35 | log.info(`Validating dependencies in tasks: ${tasksJsonPath}`); 36 | 37 | // Use the provided tasksJsonPath 38 | const tasksPath = tasksJsonPath; 39 | 40 | // Verify the file exists 41 | if (!fs.existsSync(tasksPath)) { 42 | return { 43 | success: false, 44 | error: { 45 | code: 'FILE_NOT_FOUND', 46 | message: `Tasks file not found at ${tasksPath}` 47 | } 48 | }; 49 | } 50 | 51 | // Enable silent mode to prevent console logs from interfering with JSON response 52 | enableSilentMode(); 53 | 54 | // Call the original command function using the provided tasksPath 55 | await validateDependenciesCommand(tasksPath); 56 | 57 | // Restore normal logging 58 | disableSilentMode(); 59 | 60 | return { 61 | success: true, 62 | data: { 63 | message: 'Dependencies validated successfully', 64 | tasksPath 65 | } 66 | }; 67 | } catch (error) { 68 | // Make sure to restore normal logging even if there's an error 69 | disableSilentMode(); 70 | 71 | log.error(`Error validating dependencies: ${error.message}`); 72 | return { 73 | success: false, 74 | error: { 75 | code: 'VALIDATION_ERROR', 76 | message: error.message 77 | } 78 | }; 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /mcp-server/src/core/direct-functions/fix-dependencies.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Direct function wrapper for 
fixDependenciesCommand 3 | */ 4 | 5 | import { fixDependenciesCommand } from '../../../../scripts/modules/dependency-manager.js'; 6 | import { 7 | enableSilentMode, 8 | disableSilentMode 9 | } from '../../../../scripts/modules/utils.js'; 10 | import fs from 'fs'; 11 | 12 | /** 13 | * Fix invalid dependencies in tasks.json automatically 14 | * @param {Object} args - Function arguments 15 | * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file. 16 | * @param {Object} log - Logger object 17 | * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>} 18 | */ 19 | export async function fixDependenciesDirect(args, log) { 20 | // Destructure expected args 21 | const { tasksJsonPath } = args; 22 | try { 23 | log.info(`Fixing invalid dependencies in tasks: ${tasksJsonPath}`); 24 | 25 | // Check if tasksJsonPath was provided 26 | if (!tasksJsonPath) { 27 | log.error('fixDependenciesDirect called without tasksJsonPath'); 28 | return { 29 | success: false, 30 | error: { 31 | code: 'MISSING_ARGUMENT', 32 | message: 'tasksJsonPath is required' 33 | } 34 | }; 35 | } 36 | 37 | // Use provided path 38 | const tasksPath = tasksJsonPath; 39 | 40 | // Verify the file exists 41 | if (!fs.existsSync(tasksPath)) { 42 | return { 43 | success: false, 44 | error: { 45 | code: 'FILE_NOT_FOUND', 46 | message: `Tasks file not found at ${tasksPath}` 47 | } 48 | }; 49 | } 50 | 51 | // Enable silent mode to prevent console logs from interfering with JSON response 52 | enableSilentMode(); 53 | 54 | // Call the original command function using the provided path 55 | await fixDependenciesCommand(tasksPath); 56 | 57 | // Restore normal logging 58 | disableSilentMode(); 59 | 60 | return { 61 | success: true, 62 | data: { 63 | message: 'Dependencies fixed successfully', 64 | tasksPath 65 | } 66 | }; 67 | } catch (error) { 68 | // Make sure to restore normal logging even if there's an error 69 | disableSilentMode(); 70 | 71 | log.error(`Error 
fixing dependencies: ${error.message}`); 72 | return { 73 | success: false, 74 | error: { 75 | code: 'FIX_DEPENDENCIES_ERROR', 76 | message: error.message 77 | } 78 | }; 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /mcp-server/src/tools/validate-dependencies.js: -------------------------------------------------------------------------------- 1 | /** 2 | * tools/validate-dependencies.js 3 | * Tool for validating task dependencies 4 | */ 5 | 6 | import { z } from 'zod'; 7 | import { 8 | handleApiResult, 9 | createErrorResponse, 10 | withNormalizedProjectRoot 11 | } from './utils.js'; 12 | import { validateDependenciesDirect } from '../core/task-master-core.js'; 13 | import { findTasksJsonPath } from '../core/utils/path-utils.js'; 14 | 15 | /** 16 | * Register the validateDependencies tool with the MCP server 17 | * @param {Object} server - FastMCP server instance 18 | */ 19 | export function registerValidateDependenciesTool(server) { 20 | server.addTool({ 21 | name: 'validate_dependencies', 22 | description: 23 | 'Check tasks for dependency issues (like circular references or links to non-existent tasks) without making changes.', 24 | parameters: z.object({ 25 | file: z.string().optional().describe('Absolute path to the tasks file'), 26 | projectRoot: z 27 | .string() 28 | .describe('The directory of the project. 
Must be an absolute path.') 29 | }), 30 | execute: withNormalizedProjectRoot(async (args, { log, session }) => { 31 | try { 32 | log.info(`Validating dependencies with args: ${JSON.stringify(args)}`); 33 | 34 | // Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot) 35 | let tasksJsonPath; 36 | try { 37 | tasksJsonPath = findTasksJsonPath( 38 | { projectRoot: args.projectRoot, file: args.file }, 39 | log 40 | ); 41 | } catch (error) { 42 | log.error(`Error finding tasks.json: ${error.message}`); 43 | return createErrorResponse( 44 | `Failed to find tasks.json: ${error.message}` 45 | ); 46 | } 47 | 48 | const result = await validateDependenciesDirect( 49 | { 50 | tasksJsonPath: tasksJsonPath 51 | }, 52 | log 53 | ); 54 | 55 | if (result.success) { 56 | log.info( 57 | `Successfully validated dependencies: ${result.data.message}` 58 | ); 59 | } else { 60 | log.error(`Failed to validate dependencies: ${result.error.message}`); 61 | } 62 | 63 | return handleApiResult(result, log, 'Error validating dependencies'); 64 | } catch (error) { 65 | log.error(`Error in validateDependencies tool: ${error.message}`); 66 | return createErrorResponse(error.message); 67 | } 68 | }) 69 | }); 70 | } 71 | -------------------------------------------------------------------------------- /docs/examples.md: -------------------------------------------------------------------------------- 1 | # Example Cursor AI Interactions 2 | 3 | Here are some common interactions with Cursor AI when using Task Master: 4 | 5 | ## Starting a new project 6 | 7 | ``` 8 | I've just initialized a new project with Claude Task Master. I have a PRD at scripts/prd.txt. 9 | Can you help me parse it and set up the initial tasks? 10 | ``` 11 | 12 | ## Working on tasks 13 | 14 | ``` 15 | What's the next task I should work on? Please consider dependencies and priorities. 16 | ``` 17 | 18 | ## Implementing a specific task 19 | 20 | ``` 21 | I'd like to implement task 4. 
Can you help me understand what needs to be done and how to approach it? 22 | ``` 23 | 24 | ## Managing subtasks 25 | 26 | ``` 27 | I need to regenerate the subtasks for task 3 with a different approach. Can you help me clear and regenerate them? 28 | ``` 29 | 30 | ## Handling changes 31 | 32 | ``` 33 | We've decided to use MongoDB instead of PostgreSQL. Can you update all future tasks to reflect this change? 34 | ``` 35 | 36 | ## Completing work 37 | 38 | ``` 39 | I've finished implementing the authentication system described in task 2. All tests are passing. 40 | Please mark it as complete and tell me what I should work on next. 41 | ``` 42 | 43 | ## Analyzing complexity 44 | 45 | ``` 46 | Can you analyze the complexity of our tasks to help me understand which ones need to be broken down further? 47 | ``` 48 | 49 | ## Viewing complexity report 50 | 51 | ``` 52 | Can you show me the complexity report in a more readable format? 53 | ``` 54 | 55 | ### Breaking Down Complex Tasks 56 | 57 | ``` 58 | Task 5 seems complex. Can you break it down into subtasks? 59 | ``` 60 | 61 | (Agent runs: `task-master expand --id=5`) 62 | 63 | ``` 64 | Please break down task 5 using research-backed generation. 65 | ``` 66 | 67 | (Agent runs: `task-master expand --id=5 --research`) 68 | 69 | ### Updating Tasks with Research 70 | 71 | ``` 72 | We need to update task 15 based on the latest React Query v5 changes. Can you research this and update the task? 73 | ``` 74 | 75 | (Agent runs: `task-master update-task --id=15 --prompt="Update based on React Query v5 changes" --research`) 76 | 77 | ### Adding Tasks with Research 78 | 79 | ``` 80 | Please add a new task to implement user profile image uploads using Cloudinary, research the best approach. 
81 | ``` 82 | 83 | (Agent runs: `task-master add-task --prompt="Implement user profile image uploads using Cloudinary" --research`) 84 | -------------------------------------------------------------------------------- /tests/integration/roo-files-inclusion.test.js: -------------------------------------------------------------------------------- 1 | import { jest } from '@jest/globals'; 2 | import fs from 'fs'; 3 | import path from 'path'; 4 | import os from 'os'; 5 | import { execSync } from 'child_process'; 6 | 7 | describe('Roo Files Inclusion in Package', () => { 8 | // This test verifies that the required Roo files are included in the final package 9 | 10 | test('package.json includes assets/** in the "files" array for Roo source files', () => { 11 | // Read the package.json file 12 | const packageJsonPath = path.join(process.cwd(), 'package.json'); 13 | const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8')); 14 | 15 | // Check if assets/** is included in the files array (which contains Roo files) 16 | expect(packageJson.files).toContain('assets/**'); 17 | }); 18 | 19 | test('init.js creates Roo directories and copies files', () => { 20 | // Read the init.js file 21 | const initJsPath = path.join(process.cwd(), 'scripts', 'init.js'); 22 | const initJsContent = fs.readFileSync(initJsPath, 'utf8'); 23 | 24 | // Check for Roo directory creation (using more flexible pattern matching) 25 | const hasRooDir = initJsContent.includes( 26 | "ensureDirectoryExists(path.join(targetDir, '.roo" 27 | ); 28 | expect(hasRooDir).toBe(true); 29 | 30 | // Check for .roomodes file copying 31 | const hasRoomodes = initJsContent.includes("copyTemplateFile('.roomodes'"); 32 | expect(hasRoomodes).toBe(true); 33 | 34 | // Check for mode-specific patterns (using more flexible pattern matching) 35 | const hasArchitect = initJsContent.includes('architect'); 36 | const hasAsk = initJsContent.includes('ask'); 37 | const hasBoomerang = initJsContent.includes('boomerang'); 38 
| const hasCode = initJsContent.includes('code'); 39 | const hasDebug = initJsContent.includes('debug'); 40 | const hasTest = initJsContent.includes('test'); 41 | 42 | expect(hasArchitect).toBe(true); 43 | expect(hasAsk).toBe(true); 44 | expect(hasBoomerang).toBe(true); 45 | expect(hasCode).toBe(true); 46 | expect(hasDebug).toBe(true); 47 | expect(hasTest).toBe(true); 48 | }); 49 | 50 | test('source Roo files exist in assets directory', () => { 51 | // Verify that the source files for Roo integration exist 52 | expect( 53 | fs.existsSync(path.join(process.cwd(), 'assets', 'roocode', '.roo')) 54 | ).toBe(true); 55 | expect( 56 | fs.existsSync(path.join(process.cwd(), 'assets', 'roocode', '.roomodes')) 57 | ).toBe(true); 58 | }); 59 | }); 60 | -------------------------------------------------------------------------------- /tasks/task_056.txt: -------------------------------------------------------------------------------- 1 | # Task ID: 56 2 | # Title: Refactor Task-Master Files into Node Module Structure 3 | # Status: done 4 | # Dependencies: None 5 | # Priority: medium 6 | # Description: Restructure the task-master files by moving them from the project root into a proper node module structure to improve organization and maintainability. 7 | # Details: 8 | This task involves a significant refactoring of the task-master system to follow better Node.js module practices. Currently, task-master files are located in the project root, which creates clutter and doesn't follow best practices for Node.js applications. The refactoring should: 9 | 10 | 1. Create a dedicated directory structure within node_modules or as a local package 11 | 2. Update all import/require paths throughout the codebase to reference the new module location 12 | 3. Reorganize the files into a logical structure (lib/, utils/, commands/, etc.) 13 | 4. Ensure the module has a proper package.json with dependencies and exports 14 | 5. 
Update any build processes, scripts, or configuration files to reflect the new structure 15 | 6. Maintain backward compatibility where possible to minimize disruption 16 | 7. Document the new structure and any changes to usage patterns 17 | 18 | This is a high-risk refactoring as it touches many parts of the system, so it should be approached methodically with frequent testing. Consider using a feature branch and implementing the changes incrementally rather than all at once. 19 | 20 | # Test Strategy: 21 | Testing for this refactoring should be comprehensive to ensure nothing breaks during the restructuring: 22 | 23 | 1. Create a complete inventory of existing functionality through automated tests before starting 24 | 2. Implement unit tests for each module to verify they function correctly in the new structure 25 | 3. Create integration tests that verify the interactions between modules work as expected 26 | 4. Test all CLI commands to ensure they continue to function with the new module structure 27 | 5. Verify that all import/require statements resolve correctly 28 | 6. Test on different environments (development, staging) to ensure compatibility 29 | 7. Perform regression testing on all features that depend on task-master functionality 30 | 8. Create a rollback plan and test it to ensure we can revert changes if critical issues arise 31 | 9. Conduct performance testing to ensure the refactoring doesn't introduce overhead 32 | 10. 
Have multiple developers test the changes on their local environments before merging 33 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - next 8 | pull_request: 9 | branches: 10 | - main 11 | - next 12 | 13 | permissions: 14 | contents: read 15 | 16 | jobs: 17 | setup: 18 | runs-on: ubuntu-latest 19 | steps: 20 | - uses: actions/checkout@v4 21 | with: 22 | fetch-depth: 0 23 | 24 | - uses: actions/setup-node@v4 25 | with: 26 | node-version: 20 27 | cache: 'npm' 28 | 29 | - name: Install Dependencies 30 | id: install 31 | run: npm ci 32 | timeout-minutes: 2 33 | 34 | - name: Cache node_modules 35 | uses: actions/cache@v4 36 | with: 37 | path: node_modules 38 | key: ${{ runner.os }}-node-modules-${{ hashFiles('**/package-lock.json') }} 39 | 40 | format-check: 41 | needs: setup 42 | runs-on: ubuntu-latest 43 | steps: 44 | - uses: actions/checkout@v4 45 | 46 | - uses: actions/setup-node@v4 47 | with: 48 | node-version: 20 49 | 50 | - name: Restore node_modules 51 | uses: actions/cache@v4 52 | with: 53 | path: node_modules 54 | key: ${{ runner.os }}-node-modules-${{ hashFiles('**/package-lock.json') }} 55 | 56 | - name: Format Check 57 | run: npm run format-check 58 | env: 59 | FORCE_COLOR: 1 60 | 61 | test: 62 | needs: setup 63 | runs-on: ubuntu-latest 64 | steps: 65 | - uses: actions/checkout@v4 66 | 67 | - uses: actions/setup-node@v4 68 | with: 69 | node-version: 20 70 | 71 | - name: Restore node_modules 72 | uses: actions/cache@v4 73 | with: 74 | path: node_modules 75 | key: ${{ runner.os }}-node-modules-${{ hashFiles('**/package-lock.json') }} 76 | 77 | - name: Run Tests 78 | run: | 79 | npm run test:coverage -- --coverageThreshold '{"global":{"branches":0,"functions":0,"lines":0,"statements":0}}' --detectOpenHandles --forceExit 80 | env: 81 | NODE_ENV: test 82 | 
CI: true 83 | FORCE_COLOR: 1 84 | timeout-minutes: 10 85 | 86 | - name: Upload Test Results 87 | if: always() 88 | uses: actions/upload-artifact@v4 89 | with: 90 | name: test-results 91 | path: | 92 | test-results 93 | coverage 94 | junit.xml 95 | retention-days: 30 96 | -------------------------------------------------------------------------------- /tasks/task_030.txt: -------------------------------------------------------------------------------- 1 | # Task ID: 30 2 | # Title: Enhance parse-prd Command to Support Default PRD Path 3 | # Status: done 4 | # Dependencies: None 5 | # Priority: medium 6 | # Description: Modify the parse-prd command to automatically use a default PRD path when no path is explicitly provided, improving user experience by reducing the need for manual path specification. 7 | # Details: 8 | Currently, the parse-prd command requires users to explicitly specify the path to the PRD document. This enhancement should: 9 | 10 | 1. Implement a default PRD path configuration that can be set in the application settings or configuration file. 11 | 2. Update the parse-prd command to check for this default path when no path argument is provided. 12 | 3. Add a configuration option that allows users to set/update the default PRD path through a command like `config set default-prd-path `. 13 | 4. Ensure backward compatibility by maintaining support for explicit path specification. 14 | 5. Add appropriate error handling for cases where the default path is not set or the file doesn't exist. 15 | 6. Update the command's help text to indicate that a default path will be used if none is specified. 16 | 7. Consider implementing path validation to ensure the default path points to a valid PRD document. 17 | 8. If multiple PRD formats are supported (Markdown, PDF, etc.), ensure the default path handling works with all supported formats. 18 | 9. Add logging for default path usage to help with debugging and usage analytics. 19 | 20 | # Test Strategy: 21 | 1. 
Unit tests: 22 | - Test that the command correctly uses the default path when no path is provided 23 | - Test that explicit paths override the default path 24 | - Test error handling when default path is not set 25 | - Test error handling when default path is set but file doesn't exist 26 | 27 | 2. Integration tests: 28 | - Test the full workflow of setting a default path and then using the parse-prd command without arguments 29 | - Test with various file formats if multiple are supported 30 | 31 | 3. Manual testing: 32 | - Verify the command works in a real environment with actual PRD documents 33 | - Test the user experience of setting and using default paths 34 | - Verify help text correctly explains the default path behavior 35 | 36 | 4. Edge cases to test: 37 | - Relative vs. absolute paths for default path setting 38 | - Path with special characters or spaces 39 | - Very long paths approaching system limits 40 | - Permissions issues with the default path location 41 | -------------------------------------------------------------------------------- /tasks/task_054.txt: -------------------------------------------------------------------------------- 1 | # Task ID: 54 2 | # Title: Add Research Flag to Add-Task Command 3 | # Status: done 4 | # Dependencies: None 5 | # Priority: medium 6 | # Description: Enhance the add-task command with a --research flag that allows users to perform quick research on the task topic before finalizing task creation. 7 | # Details: 8 | Modify the existing add-task command to accept a new optional flag '--research'. When this flag is provided, the system should pause the task creation process and invoke the Perplexity research functionality (similar to Task #51) to help users gather information about the task topic before finalizing the task details. The implementation should: 9 | 10 | 1. Update the command parser to recognize the new --research flag 11 | 2. 
When the flag is present, extract the task title/description as the research topic 12 | 3. Call the Perplexity research functionality with this topic 13 | 4. Display research results to the user 14 | 5. Allow the user to refine their task based on the research (modify title, description, etc.) 15 | 6. Continue with normal task creation flow after research is complete 16 | 7. Ensure the research results can be optionally attached to the task as reference material 17 | 8. Add appropriate help text explaining this feature in the command help 18 | 19 | The implementation should leverage the existing Perplexity research command from Task #51, ensuring code reuse where possible. 20 | 21 | # Test Strategy: 22 | Testing should verify both the functionality and usability of the new feature: 23 | 24 | 1. Unit tests: 25 | - Verify the command parser correctly recognizes the --research flag 26 | - Test that the research functionality is properly invoked with the correct topic 27 | - Ensure task creation proceeds correctly after research is complete 28 | 29 | 2. Integration tests: 30 | - Test the complete flow from command invocation to task creation with research 31 | - Verify research results are properly attached to the task when requested 32 | - Test error handling when research API is unavailable 33 | 34 | 3. Manual testing: 35 | - Run the command with --research flag and verify the user experience 36 | - Test with various task topics to ensure research is relevant 37 | - Verify the help documentation correctly explains the feature 38 | - Test the command without the flag to ensure backward compatibility 39 | 40 | 4. 
Edge cases: 41 | - Test with very short/vague task descriptions 42 | - Test with complex technical topics 43 | - Test cancellation of task creation during the research phase 44 | -------------------------------------------------------------------------------- /mcp-server/src/tools/remove-dependency.js: -------------------------------------------------------------------------------- 1 | /** 2 | * tools/remove-dependency.js 3 | * Tool for removing a dependency from a task 4 | */ 5 | 6 | import { z } from 'zod'; 7 | import { 8 | handleApiResult, 9 | createErrorResponse, 10 | withNormalizedProjectRoot 11 | } from './utils.js'; 12 | import { removeDependencyDirect } from '../core/task-master-core.js'; 13 | import { findTasksJsonPath } from '../core/utils/path-utils.js'; 14 | 15 | /** 16 | * Register the removeDependency tool with the MCP server 17 | * @param {Object} server - FastMCP server instance 18 | */ 19 | export function registerRemoveDependencyTool(server) { 20 | server.addTool({ 21 | name: 'remove_dependency', 22 | description: 'Remove a dependency from a task', 23 | parameters: z.object({ 24 | id: z.string().describe('Task ID to remove dependency from'), 25 | dependsOn: z.string().describe('Task ID to remove as a dependency'), 26 | file: z 27 | .string() 28 | .optional() 29 | .describe( 30 | 'Absolute path to the tasks file (default: tasks/tasks.json)' 31 | ), 32 | projectRoot: z 33 | .string() 34 | .describe('The directory of the project. 
Must be an absolute path.') 35 | }), 36 | execute: withNormalizedProjectRoot(async (args, { log, session }) => { 37 | try { 38 | log.info( 39 | `Removing dependency for task ${args.id} from ${args.dependsOn} with args: ${JSON.stringify(args)}` 40 | ); 41 | 42 | // Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot) 43 | let tasksJsonPath; 44 | try { 45 | tasksJsonPath = findTasksJsonPath( 46 | { projectRoot: args.projectRoot, file: args.file }, 47 | log 48 | ); 49 | } catch (error) { 50 | log.error(`Error finding tasks.json: ${error.message}`); 51 | return createErrorResponse( 52 | `Failed to find tasks.json: ${error.message}` 53 | ); 54 | } 55 | 56 | const result = await removeDependencyDirect( 57 | { 58 | tasksJsonPath: tasksJsonPath, 59 | id: args.id, 60 | dependsOn: args.dependsOn 61 | }, 62 | log 63 | ); 64 | 65 | if (result.success) { 66 | log.info(`Successfully removed dependency: ${result.data.message}`); 67 | } else { 68 | log.error(`Failed to remove dependency: ${result.error.message}`); 69 | } 70 | 71 | return handleApiResult(result, log, 'Error removing dependency'); 72 | } catch (error) { 73 | log.error(`Error in removeDependency tool: ${error.message}`); 74 | return createErrorResponse(error.message); 75 | } 76 | }) 77 | }); 78 | } 79 | -------------------------------------------------------------------------------- /mcp-server/src/tools/remove-task.js: -------------------------------------------------------------------------------- 1 | /** 2 | * tools/remove-task.js 3 | * Tool to remove a task by ID 4 | */ 5 | 6 | import { z } from 'zod'; 7 | import { 8 | handleApiResult, 9 | createErrorResponse, 10 | withNormalizedProjectRoot 11 | } from './utils.js'; 12 | import { removeTaskDirect } from '../core/task-master-core.js'; 13 | import { findTasksJsonPath } from '../core/utils/path-utils.js'; 14 | 15 | /** 16 | * Register the remove-task tool with the MCP server 17 | * @param {Object} server - FastMCP server instance 18 | */ 19 | 
export function registerRemoveTaskTool(server) { 20 | server.addTool({ 21 | name: 'remove_task', 22 | description: 'Remove a task or subtask permanently from the tasks list', 23 | parameters: z.object({ 24 | id: z 25 | .string() 26 | .describe( 27 | "ID of the task or subtask to remove (e.g., '5' or '5.2'). Can be comma-separated to update multiple tasks/subtasks at once." 28 | ), 29 | file: z.string().optional().describe('Absolute path to the tasks file'), 30 | projectRoot: z 31 | .string() 32 | .describe('The directory of the project. Must be an absolute path.'), 33 | confirm: z 34 | .boolean() 35 | .optional() 36 | .describe('Whether to skip confirmation prompt (default: false)') 37 | }), 38 | execute: withNormalizedProjectRoot(async (args, { log }) => { 39 | try { 40 | log.info(`Removing task(s) with ID(s): ${args.id}`); 41 | 42 | // Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot) 43 | let tasksJsonPath; 44 | try { 45 | tasksJsonPath = findTasksJsonPath( 46 | { projectRoot: args.projectRoot, file: args.file }, 47 | log 48 | ); 49 | } catch (error) { 50 | log.error(`Error finding tasks.json: ${error.message}`); 51 | return createErrorResponse( 52 | `Failed to find tasks.json: ${error.message}` 53 | ); 54 | } 55 | 56 | log.info(`Using tasks file path: ${tasksJsonPath}`); 57 | 58 | const result = await removeTaskDirect( 59 | { 60 | tasksJsonPath: tasksJsonPath, 61 | id: args.id 62 | }, 63 | log 64 | ); 65 | 66 | if (result.success) { 67 | log.info(`Successfully removed task: ${args.id}`); 68 | } else { 69 | log.error(`Failed to remove task: ${result.error.message}`); 70 | } 71 | 72 | return handleApiResult(result, log, 'Error removing task'); 73 | } catch (error) { 74 | log.error(`Error in remove-task tool: ${error.message}`); 75 | return createErrorResponse(`Failed to remove task: ${error.message}`); 76 | } 77 | }) 78 | }); 79 | } 80 | -------------------------------------------------------------------------------- /tasks/task_048.txt: 
-------------------------------------------------------------------------------- 1 | # Task ID: 48 2 | # Title: Refactor Prompts into Centralized Structure 3 | # Status: pending 4 | # Dependencies: None 5 | # Priority: medium 6 | # Description: Create a dedicated 'prompts' folder and move all prompt definitions from inline function implementations to individual files, establishing a centralized prompt management system. 7 | # Details: 8 | This task involves restructuring how prompts are managed in the codebase: 9 | 10 | 1. Create a new 'prompts' directory at the appropriate level in the project structure 11 | 2. For each existing prompt currently embedded in functions: 12 | - Create a dedicated file with a descriptive name (e.g., 'task_suggestion_prompt.js') 13 | - Extract the prompt text/object into this file 14 | - Export the prompt using the appropriate module pattern 15 | 3. Modify all functions that currently contain inline prompts to import them from the new centralized location 16 | 4. Establish a consistent naming convention for prompt files (e.g., feature_action_prompt.js) 17 | 5. Consider creating an index.js file in the prompts directory to provide a clean import interface 18 | 6. Document the new prompt structure in the project documentation 19 | 7. Ensure that any prompt that requires dynamic content insertion maintains this capability after refactoring 20 | 21 | This refactoring will improve maintainability by making prompts easier to find, update, and reuse across the application. 22 | 23 | # Test Strategy: 24 | Testing should verify that the refactoring maintains identical functionality while improving code organization: 25 | 26 | 1. Automated Tests: 27 | - Run existing test suite to ensure no functionality is broken 28 | - Create unit tests for the new prompt import mechanism 29 | - Verify that dynamically constructed prompts still receive their parameters correctly 30 | 31 | 2. 
Manual Testing: 32 | - Execute each feature that uses prompts and compare outputs before and after refactoring 33 | - Verify that all prompts are properly loaded from their new locations 34 | - Check that no prompt text is accidentally modified during the migration 35 | 36 | 3. Code Review: 37 | - Confirm all prompts have been moved to the new structure 38 | - Verify consistent naming conventions are followed 39 | - Check that no duplicate prompts exist 40 | - Ensure imports are correctly implemented in all files that previously contained inline prompts 41 | 42 | 4. Documentation: 43 | - Verify documentation is updated to reflect the new prompt organization 44 | - Confirm the index.js export pattern works as expected for importing prompts 45 | -------------------------------------------------------------------------------- /mcp-server/src/tools/generate.js: -------------------------------------------------------------------------------- 1 | /** 2 | * tools/generate.js 3 | * Tool to generate individual task files from tasks.json 4 | */ 5 | 6 | import { z } from 'zod'; 7 | import { 8 | handleApiResult, 9 | createErrorResponse, 10 | withNormalizedProjectRoot 11 | } from './utils.js'; 12 | import { generateTaskFilesDirect } from '../core/task-master-core.js'; 13 | import { findTasksJsonPath } from '../core/utils/path-utils.js'; 14 | import path from 'path'; 15 | 16 | /** 17 | * Register the generate tool with the MCP server 18 | * @param {Object} server - FastMCP server instance 19 | */ 20 | export function registerGenerateTool(server) { 21 | server.addTool({ 22 | name: 'generate', 23 | description: 24 | 'Generates individual task files in tasks/ directory based on tasks.json', 25 | parameters: z.object({ 26 | file: z.string().optional().describe('Absolute path to the tasks file'), 27 | output: z 28 | .string() 29 | .optional() 30 | .describe('Output directory (default: same directory as tasks file)'), 31 | projectRoot: z 32 | .string() 33 | .describe('The directory of 
the project. Must be an absolute path.') 34 | }), 35 | execute: withNormalizedProjectRoot(async (args, { log, session }) => { 36 | try { 37 | log.info(`Generating task files with args: ${JSON.stringify(args)}`); 38 | 39 | // Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot) 40 | let tasksJsonPath; 41 | try { 42 | tasksJsonPath = findTasksJsonPath( 43 | { projectRoot: args.projectRoot, file: args.file }, 44 | log 45 | ); 46 | } catch (error) { 47 | log.error(`Error finding tasks.json: ${error.message}`); 48 | return createErrorResponse( 49 | `Failed to find tasks.json: ${error.message}` 50 | ); 51 | } 52 | 53 | const outputDir = args.output 54 | ? path.resolve(args.projectRoot, args.output) 55 | : path.dirname(tasksJsonPath); 56 | 57 | const result = await generateTaskFilesDirect( 58 | { 59 | tasksJsonPath: tasksJsonPath, 60 | outputDir: outputDir 61 | }, 62 | log 63 | ); 64 | 65 | if (result.success) { 66 | log.info(`Successfully generated task files: ${result.data.message}`); 67 | } else { 68 | log.error( 69 | `Failed to generate task files: ${result.error?.message || 'Unknown error'}` 70 | ); 71 | } 72 | 73 | return handleApiResult(result, log, 'Error generating task files'); 74 | } catch (error) { 75 | log.error(`Error in generate tool: ${error.message}`); 76 | return createErrorResponse(error.message); 77 | } 78 | }) 79 | }); 80 | } 81 | -------------------------------------------------------------------------------- /mcp-server/src/tools/models.js: -------------------------------------------------------------------------------- 1 | /** 2 | * models.js 3 | * MCP tool for managing AI model configurations 4 | */ 5 | 6 | import { z } from 'zod'; 7 | import { 8 | handleApiResult, 9 | createErrorResponse, 10 | withNormalizedProjectRoot 11 | } from './utils.js'; 12 | import { modelsDirect } from '../core/task-master-core.js'; 13 | 14 | /** 15 | * Register the models tool with the MCP server 16 | * @param {Object} server - FastMCP server instance 
17 | */ 18 | export function registerModelsTool(server) { 19 | server.addTool({ 20 | name: 'models', 21 | description: 22 | 'Get information about available AI models or set model configurations. Run without arguments to get the current model configuration and API key status for the selected model providers.', 23 | parameters: z.object({ 24 | setMain: z 25 | .string() 26 | .optional() 27 | .describe( 28 | 'Set the primary model for task generation/updates. Model provider API key is required in the MCP config ENV.' 29 | ), 30 | setResearch: z 31 | .string() 32 | .optional() 33 | .describe( 34 | 'Set the model for research-backed operations. Model provider API key is required in the MCP config ENV.' 35 | ), 36 | setFallback: z 37 | .string() 38 | .optional() 39 | .describe( 40 | 'Set the model to use if the primary fails. Model provider API key is required in the MCP config ENV.' 41 | ), 42 | listAvailableModels: z 43 | .boolean() 44 | .optional() 45 | .describe( 46 | 'List all available models not currently in use. Input/output costs values are in dollars (3 is $3.00).' 47 | ), 48 | projectRoot: z 49 | .string() 50 | .optional() 51 | .describe('The directory of the project. 
Must be an absolute path.'), 52 | openrouter: z 53 | .boolean() 54 | .optional() 55 | .describe('Indicates the set model ID is a custom OpenRouter model.'), 56 | ollama: z 57 | .boolean() 58 | .optional() 59 | .describe('Indicates the set model ID is a custom Ollama model.') 60 | }), 61 | execute: withNormalizedProjectRoot(async (args, { log, session }) => { 62 | try { 63 | log.info(`Starting models tool with args: ${JSON.stringify(args)}`); 64 | 65 | // Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot) 66 | const result = await modelsDirect( 67 | { ...args, projectRoot: args.projectRoot }, 68 | log, 69 | { session } 70 | ); 71 | 72 | return handleApiResult(result, log); 73 | } catch (error) { 74 | log.error(`Error in models tool: ${error.message}`); 75 | return createErrorResponse(error.message); 76 | } 77 | }) 78 | }); 79 | } 80 | -------------------------------------------------------------------------------- /tasks/task_029.txt: -------------------------------------------------------------------------------- 1 | # Task ID: 29 2 | # Title: Update Claude 3.7 Sonnet Integration with Beta Header for 128k Token Output 3 | # Status: done 4 | # Dependencies: None 5 | # Priority: medium 6 | # Description: Modify the ai-services.js file to include the beta header 'output-128k-2025-02-19' in Claude 3.7 Sonnet API requests to increase the maximum output token length to 128k tokens. 7 | # Details: 8 | The task involves updating the Claude 3.7 Sonnet integration in the ai-services.js file to take advantage of the new 128k token output capability. Specifically: 9 | 10 | 1. Locate the Claude 3.7 Sonnet API request configuration in ai-services.js 11 | 2. Add the beta header 'output-128k-2025-02-19' to the request headers 12 | 3. Update any related configuration parameters that might need adjustment for the increased token limit 13 | 4. Ensure that token counting and management logic is updated to account for the new 128k token output limit 14 | 5. 
Update any documentation comments in the code to reflect the new capability 15 | 6. Consider implementing a configuration option to enable/disable this feature, as it may be a beta feature subject to change 16 | 7. Verify that the token management logic correctly handles the increased limit without causing unexpected behavior 17 | 8. Ensure backward compatibility with existing code that might assume lower token limits 18 | 19 | The implementation should be clean and maintainable, with appropriate error handling for cases where the beta header might not be supported in the future. 20 | 21 | # Test Strategy: 22 | Testing should verify that the beta header is correctly included and that the system properly handles the increased token limit: 23 | 24 | 1. Unit test: Verify that the API request to Claude 3.7 Sonnet includes the 'output-128k-2025-02-19' header 25 | 2. Integration test: Make an actual API call to Claude 3.7 Sonnet with the beta header and confirm a successful response 26 | 3. Test with a prompt designed to generate a very large response (>20k tokens but <128k tokens) and verify it completes successfully 27 | 4. Test the token counting logic with mock responses of various sizes to ensure it correctly handles responses approaching the 128k limit 28 | 5. Verify error handling by simulating API errors related to the beta header 29 | 6. Test any configuration options for enabling/disabling the feature 30 | 7. Performance test: Measure any impact on response time or system resources when handling very large responses 31 | 8. Regression test: Ensure existing functionality using Claude 3.7 Sonnet continues to work as expected 32 | 33 | Document all test results, including any limitations or edge cases discovered during testing. 
--------------------------------------------------------------------------------
/tests/fixtures/sample-tasks.js:
--------------------------------------------------------------------------------
/**
 * Sample task data for testing
 */

// Fixture shaped like a tasks.json payload: a `meta` header plus a `tasks`
// array covering the statuses and structures the test suite exercises
// (done / in-progress / pending, with and without subtasks and dependencies).
export const sampleTasks = {
	meta: {
		projectName: 'Test Project',
		projectVersion: '1.0.0',
		createdAt: '2023-01-01T00:00:00.000Z',
		updatedAt: '2023-01-01T00:00:00.000Z'
	},
	tasks: [
		// Completed task with no dependencies and no subtasks.
		{
			id: 1,
			title: 'Initialize Project',
			description: 'Set up the project structure and dependencies',
			status: 'done',
			dependencies: [],
			priority: 'high',
			details:
				'Create directory structure, initialize package.json, and install dependencies',
			testStrategy: 'Verify all directories and files are created correctly'
		},
		// In-progress task that depends on task 1 and has mixed-status subtasks
		// (subtask 2 depends on subtask 1 by its local subtask id).
		{
			id: 2,
			title: 'Create Core Functionality',
			description: 'Implement the main features of the application',
			status: 'in-progress',
			dependencies: [1],
			priority: 'high',
			details:
				'Implement user authentication, data processing, and API endpoints',
			testStrategy: 'Write unit tests for all core functions',
			subtasks: [
				{
					id: 1,
					title: 'Implement Authentication',
					description: 'Create user authentication system',
					status: 'done',
					dependencies: []
				},
				{
					id: 2,
					title: 'Set Up Database',
					description: 'Configure database connection and models',
					status: 'pending',
					dependencies: [1]
				}
			]
		},
		// Pending task with subtasks that carry their own `details` field.
		{
			id: 3,
			title: 'Implement UI Components',
			description: 'Create the user interface components',
			status: 'pending',
			dependencies: [2],
			priority: 'medium',
			details: 'Design and implement React components for the user interface',
			testStrategy: 'Test components with React Testing Library',
			subtasks: [
				{
					id: 1,
					title: 'Create Header Component',
					description: 'Implement the header component',
					status: 'pending',
					dependencies: [],
					details: 'Create a responsive header with navigation links'
				},
				{
					id: 2,
					title: 'Create Footer Component',
					description: 'Implement the footer component',
					status: 'pending',
					dependencies: [],
					details: 'Create a footer with copyright information and links'
				}
			]
		}
	]
};

// Minimal fixture: valid meta header with an empty task list, for tests of
// empty-state behavior.
export const emptySampleTasks = {
	meta: {
		projectName: 'Empty Project',
		projectVersion: '1.0.0',
		createdAt: '2023-01-01T00:00:00.000Z',
		updatedAt: '2023-01-01T00:00:00.000Z'
	},
	tasks: []
};

--------------------------------------------------------------------------------
/tasks/task_043.txt:
--------------------------------------------------------------------------------
# Task ID: 43
# Title: Add Research Flag to Add-Task Command
# Status: pending
# Dependencies: None
# Priority: medium
# Description: Implement a '--research' flag for the add-task command that enables users to automatically generate research-related subtasks when creating a new task.
# Details:
Modify the add-task command to accept a new optional flag '--research'. When this flag is provided, the system should automatically generate and attach a set of research-oriented subtasks to the newly created task. These subtasks should follow a standard research methodology structure:

1. Background Investigation: Research existing solutions and approaches
2. Requirements Analysis: Define specific requirements and constraints
3. Technology/Tool Evaluation: Compare potential technologies or tools for implementation
4. Proof of Concept: Create a minimal implementation to validate approach
5. 
Documentation: Document findings and recommendations 15 | 16 | The implementation should: 17 | - Update the command-line argument parser to recognize the new flag 18 | - Create a dedicated function to generate the research subtasks with appropriate descriptions 19 | - Ensure subtasks are properly linked to the parent task 20 | - Update help documentation to explain the new flag 21 | - Maintain backward compatibility with existing add-task functionality 22 | 23 | The research subtasks should be customized based on the main task's title and description when possible, rather than using generic templates. 24 | 25 | # Test Strategy: 26 | Testing should verify both the functionality and usability of the new feature: 27 | 28 | 1. Unit tests: 29 | - Test that the '--research' flag is properly parsed 30 | - Verify the correct number and structure of subtasks are generated 31 | - Ensure subtask IDs are correctly assigned and linked to the parent task 32 | 33 | 2. Integration tests: 34 | - Create a task with the research flag and verify all subtasks appear in the task list 35 | - Test that the research flag works with other existing flags (e.g., --priority, --depends-on) 36 | - Verify the task and subtasks are properly saved to the storage backend 37 | 38 | 3. Manual testing: 39 | - Run 'taskmaster add-task "Test task" --research' and verify the output 40 | - Check that the help documentation correctly describes the new flag 41 | - Verify the research subtasks have meaningful descriptions 42 | - Test the command with and without the flag to ensure backward compatibility 43 | 44 | 4. 
Edge cases: 45 | - Test with very short or very long task descriptions 46 | - Verify behavior when maximum task/subtask limits are reached 47 | -------------------------------------------------------------------------------- /mcp-server/src/tools/get-tasks.js: -------------------------------------------------------------------------------- 1 | /** 2 | * tools/get-tasks.js 3 | * Tool to get all tasks from Task Master 4 | */ 5 | 6 | import { z } from 'zod'; 7 | import { 8 | createErrorResponse, 9 | handleApiResult, 10 | withNormalizedProjectRoot 11 | } from './utils.js'; 12 | import { listTasksDirect } from '../core/task-master-core.js'; 13 | import { findTasksJsonPath } from '../core/utils/path-utils.js'; 14 | 15 | /** 16 | * Register the getTasks tool with the MCP server 17 | * @param {Object} server - FastMCP server instance 18 | */ 19 | export function registerListTasksTool(server) { 20 | server.addTool({ 21 | name: 'get_tasks', 22 | description: 23 | 'Get all tasks from Task Master, optionally filtering by status and including subtasks.', 24 | parameters: z.object({ 25 | status: z 26 | .string() 27 | .optional() 28 | .describe("Filter tasks by status (e.g., 'pending', 'done')"), 29 | withSubtasks: z 30 | .boolean() 31 | .optional() 32 | .describe( 33 | 'Include subtasks nested within their parent tasks in the response' 34 | ), 35 | file: z 36 | .string() 37 | .optional() 38 | .describe( 39 | 'Path to the tasks file (relative to project root or absolute)' 40 | ), 41 | projectRoot: z 42 | .string() 43 | .describe('The directory of the project. 
Must be an absolute path.') 44 | }), 45 | execute: withNormalizedProjectRoot(async (args, { log, session }) => { 46 | try { 47 | log.info(`Getting tasks with filters: ${JSON.stringify(args)}`); 48 | 49 | // Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot) 50 | let tasksJsonPath; 51 | try { 52 | tasksJsonPath = findTasksJsonPath( 53 | { projectRoot: args.projectRoot, file: args.file }, 54 | log 55 | ); 56 | } catch (error) { 57 | log.error(`Error finding tasks.json: ${error.message}`); 58 | return createErrorResponse( 59 | `Failed to find tasks.json: ${error.message}` 60 | ); 61 | } 62 | 63 | const result = await listTasksDirect( 64 | { 65 | tasksJsonPath: tasksJsonPath, 66 | status: args.status, 67 | withSubtasks: args.withSubtasks 68 | }, 69 | log 70 | ); 71 | 72 | log.info( 73 | `Retrieved ${result.success ? result.data?.tasks?.length || 0 : 0} tasks${result.fromCache ? ' (from cache)' : ''}` 74 | ); 75 | return handleApiResult(result, log, 'Error getting tasks'); 76 | } catch (error) { 77 | log.error(`Error getting tasks: ${error.message}`); 78 | return createErrorResponse(error.message); 79 | } 80 | }) 81 | }); 82 | } 83 | 84 | // We no longer need the formatTasksResponse function as we're returning raw JSON data 85 | -------------------------------------------------------------------------------- /.roo/rules/self_improve.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: Guidelines for continuously improving Roo rules based on emerging code patterns and best practices. 
3 | globs: **/* 4 | alwaysApply: true 5 | --- 6 | 7 | - **Rule Improvement Triggers:** 8 | - New code patterns not covered by existing rules 9 | - Repeated similar implementations across files 10 | - Common error patterns that could be prevented 11 | - New libraries or tools being used consistently 12 | - Emerging best practices in the codebase 13 | 14 | - **Analysis Process:** 15 | - Compare new code with existing rules 16 | - Identify patterns that should be standardized 17 | - Look for references to external documentation 18 | - Check for consistent error handling patterns 19 | - Monitor test patterns and coverage 20 | 21 | - **Rule Updates:** 22 | - **Add New Rules When:** 23 | - A new technology/pattern is used in 3+ files 24 | - Common bugs could be prevented by a rule 25 | - Code reviews repeatedly mention the same feedback 26 | - New security or performance patterns emerge 27 | 28 | - **Modify Existing Rules When:** 29 | - Better examples exist in the codebase 30 | - Additional edge cases are discovered 31 | - Related rules have been updated 32 | - Implementation details have changed 33 | 34 | - **Example Pattern Recognition:** 35 | ```typescript 36 | // If you see repeated patterns like: 37 | const data = await prisma.user.findMany({ 38 | select: { id: true, email: true }, 39 | where: { status: 'ACTIVE' } 40 | }); 41 | 42 | // Consider adding to [prisma.md](:.roo/rules/prisma.md): 43 | // - Standard select fields 44 | // - Common where conditions 45 | // - Performance optimization patterns 46 | ``` 47 | 48 | - **Rule Quality Checks:** 49 | - Rules should be actionable and specific 50 | - Examples should come from actual code 51 | - References should be up to date 52 | - Patterns should be consistently enforced 53 | 54 | - **Continuous Improvement:** 55 | - Monitor code review comments 56 | - Track common development questions 57 | - Update rules after major refactors 58 | - Add links to relevant documentation 59 | - Cross-reference related rules 60 | 61 | 
- **Rule Deprecation:** 62 | - Mark outdated patterns as deprecated 63 | - Remove rules that no longer apply 64 | - Update references to deprecated rules 65 | - Document migration paths for old patterns 66 | 67 | - **Documentation Updates:** 68 | - Keep examples synchronized with code 69 | - Update references to external docs 70 | - Maintain links between related rules 71 | - Document breaking changes 72 | 73 | Follow [roo_rules.md](:.roo/rules/roo_rules.md) for proper rule formatting and structure. -------------------------------------------------------------------------------- /.clinerules/self_improve.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: Guidelines for continuously improving Roo rules based on emerging code patterns and best practices. 3 | globs: **/* 4 | alwaysApply: true 5 | --- 6 | 7 | - **Rule Improvement Triggers:** 8 | - New code patterns not covered by existing rules 9 | - Repeated similar implementations across files 10 | - Common error patterns that could be prevented 11 | - New libraries or tools being used consistently 12 | - Emerging best practices in the codebase 13 | 14 | - **Analysis Process:** 15 | - Compare new code with existing rules 16 | - Identify patterns that should be standardized 17 | - Look for references to external documentation 18 | - Check for consistent error handling patterns 19 | - Monitor test patterns and coverage 20 | 21 | - **Rule Updates:** 22 | - **Add New Rules When:** 23 | - A new technology/pattern is used in 3+ files 24 | - Common bugs could be prevented by a rule 25 | - Code reviews repeatedly mention the same feedback 26 | - New security or performance patterns emerge 27 | 28 | - **Modify Existing Rules When:** 29 | - Better examples exist in the codebase 30 | - Additional edge cases are discovered 31 | - Related rules have been updated 32 | - Implementation details have changed 33 | 34 | - **Example Pattern Recognition:** 35 | ```typescript 36 | 
// If you see repeated patterns like: 37 | const data = await prisma.user.findMany({ 38 | select: { id: true, email: true }, 39 | where: { status: 'ACTIVE' } 40 | }); 41 | 42 | // Consider adding to [prisma.md](:.roo/rules/prisma.md): 43 | // - Standard select fields 44 | // - Common where conditions 45 | // - Performance optimization patterns 46 | ``` 47 | 48 | - **Rule Quality Checks:** 49 | - Rules should be actionable and specific 50 | - Examples should come from actual code 51 | - References should be up to date 52 | - Patterns should be consistently enforced 53 | 54 | - **Continuous Improvement:** 55 | - Monitor code review comments 56 | - Track common development questions 57 | - Update rules after major refactors 58 | - Add links to relevant documentation 59 | - Cross-reference related rules 60 | 61 | - **Rule Deprecation:** 62 | - Mark outdated patterns as deprecated 63 | - Remove rules that no longer apply 64 | - Update references to deprecated rules 65 | - Document migration paths for old patterns 66 | 67 | - **Documentation Updates:** 68 | - Keep examples synchronized with code 69 | - Update references to external docs 70 | - Maintain links between related rules 71 | - Document breaking changes 72 | 73 | Follow [roo_rules.md](:.roo/rules/roo_rules.md) for proper rule formatting and structure. 
-------------------------------------------------------------------------------- /mcp-server/src/tools/clear-subtasks.js: -------------------------------------------------------------------------------- 1 | /** 2 | * tools/clear-subtasks.js 3 | * Tool for clearing subtasks from parent tasks 4 | */ 5 | 6 | import { z } from 'zod'; 7 | import { 8 | handleApiResult, 9 | createErrorResponse, 10 | withNormalizedProjectRoot 11 | } from './utils.js'; 12 | import { clearSubtasksDirect } from '../core/task-master-core.js'; 13 | import { findTasksJsonPath } from '../core/utils/path-utils.js'; 14 | 15 | /** 16 | * Register the clearSubtasks tool with the MCP server 17 | * @param {Object} server - FastMCP server instance 18 | */ 19 | export function registerClearSubtasksTool(server) { 20 | server.addTool({ 21 | name: 'clear_subtasks', 22 | description: 'Clear subtasks from specified tasks', 23 | parameters: z 24 | .object({ 25 | id: z 26 | .string() 27 | .optional() 28 | .describe('Task IDs (comma-separated) to clear subtasks from'), 29 | all: z.boolean().optional().describe('Clear subtasks from all tasks'), 30 | file: z 31 | .string() 32 | .optional() 33 | .describe( 34 | 'Absolute path to the tasks file (default: tasks/tasks.json)' 35 | ), 36 | projectRoot: z 37 | .string() 38 | .describe('The directory of the project. 
Must be an absolute path.') 39 | }) 40 | .refine((data) => data.id || data.all, { 41 | message: "Either 'id' or 'all' parameter must be provided", 42 | path: ['id', 'all'] 43 | }), 44 | execute: withNormalizedProjectRoot(async (args, { log, session }) => { 45 | try { 46 | log.info(`Clearing subtasks with args: ${JSON.stringify(args)}`); 47 | 48 | // Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot) 49 | let tasksJsonPath; 50 | try { 51 | tasksJsonPath = findTasksJsonPath( 52 | { projectRoot: args.projectRoot, file: args.file }, 53 | log 54 | ); 55 | } catch (error) { 56 | log.error(`Error finding tasks.json: ${error.message}`); 57 | return createErrorResponse( 58 | `Failed to find tasks.json: ${error.message}` 59 | ); 60 | } 61 | 62 | const result = await clearSubtasksDirect( 63 | { 64 | tasksJsonPath: tasksJsonPath, 65 | id: args.id, 66 | all: args.all 67 | }, 68 | log 69 | ); 70 | 71 | if (result.success) { 72 | log.info(`Subtasks cleared successfully: ${result.data.message}`); 73 | } else { 74 | log.error(`Failed to clear subtasks: ${result.error.message}`); 75 | } 76 | 77 | return handleApiResult(result, log, 'Error clearing subtasks'); 78 | } catch (error) { 79 | log.error(`Error in clearSubtasks tool: ${error.message}`); 80 | return createErrorResponse(error.message); 81 | } 82 | }) 83 | }); 84 | } 85 | -------------------------------------------------------------------------------- /tests/unit/parse-prd.test.js: -------------------------------------------------------------------------------- 1 | // In tests/unit/parse-prd.test.js 2 | // Testing that parse-prd.js handles both .txt and .md files the same way 3 | 4 | import { jest } from '@jest/globals'; 5 | 6 | describe('parse-prd file extension compatibility', () => { 7 | // Test directly that the parse-prd functionality works with different extensions 8 | // by examining the parameter handling in mcp-server/src/tools/parse-prd.js 9 | 10 | test('Parameter description mentions support for 
.md files', () => { 11 | // The parameter description for 'input' in parse-prd.js includes .md files 12 | const description = 13 | 'Absolute path to the PRD document file (.txt, .md, etc.)'; 14 | 15 | // Verify the description explicitly mentions .md files 16 | expect(description).toContain('.md'); 17 | }); 18 | 19 | test('File extension validation is not restricted to .txt files', () => { 20 | // Check for absence of extension validation 21 | const fileValidator = (filePath) => { 22 | // Return a boolean value to ensure the test passes 23 | if (!filePath || filePath.length === 0) { 24 | return false; 25 | } 26 | return true; 27 | }; 28 | 29 | // Test with different extensions 30 | expect(fileValidator('/path/to/prd.txt')).toBe(true); 31 | expect(fileValidator('/path/to/prd.md')).toBe(true); 32 | 33 | // Invalid cases should still fail regardless of extension 34 | expect(fileValidator('')).toBe(false); 35 | }); 36 | 37 | test('Implementation handles all file types the same way', () => { 38 | // This test confirms that the implementation treats all file types equally 39 | // by simulating the core functionality 40 | 41 | const mockImplementation = (filePath) => { 42 | // The parse-prd.js implementation only checks file existence, 43 | // not the file extension, which is what we want to verify 44 | 45 | if (!filePath) { 46 | return { success: false, error: { code: 'MISSING_INPUT_FILE' } }; 47 | } 48 | 49 | // In the real implementation, this would check if the file exists 50 | // But for our test, we're verifying that the same logic applies 51 | // regardless of file extension 52 | 53 | // No special handling for different extensions 54 | return { success: true }; 55 | }; 56 | 57 | // Verify same behavior for different extensions 58 | const txtResult = mockImplementation('/path/to/prd.txt'); 59 | const mdResult = mockImplementation('/path/to/prd.md'); 60 | 61 | // Both should succeed since there's no extension-specific logic 62 | expect(txtResult.success).toBe(true); 
63 | expect(mdResult.success).toBe(true); 64 | 65 | // Both should have the same structure 66 | expect(Object.keys(txtResult)).toEqual(Object.keys(mdResult)); 67 | }); 68 | }); 69 | -------------------------------------------------------------------------------- /docs/contributor-docs/testing-roo-integration.md: -------------------------------------------------------------------------------- 1 | # Testing Roo Integration 2 | 3 | This document provides instructions for testing the Roo integration in the Task Master package. 4 | 5 | ## Running Tests 6 | 7 | To run the tests for the Roo integration: 8 | 9 | ```bash 10 | # Run all tests 11 | npm test 12 | 13 | # Run only Roo integration tests 14 | npm test -- -t "Roo" 15 | 16 | # Run specific test file 17 | npm test -- tests/integration/roo-files-inclusion.test.js 18 | ``` 19 | 20 | ## Manual Testing 21 | 22 | To manually verify that the Roo files are properly included in the package: 23 | 24 | 1. Create a test directory: 25 | 26 | ```bash 27 | mkdir test-tm 28 | cd test-tm 29 | ``` 30 | 31 | 2. Create a package.json file: 32 | 33 | ```bash 34 | npm init -y 35 | ``` 36 | 37 | 3. Install the task-master-ai package locally: 38 | 39 | ```bash 40 | # From the root of the claude-task-master repository 41 | cd .. 42 | npm pack 43 | # This will create a file like task-master-ai-0.12.0.tgz 44 | 45 | # Move back to the test directory 46 | cd test-tm 47 | npm install ../task-master-ai-0.12.0.tgz 48 | ``` 49 | 50 | 4. Initialize a new Task Master project: 51 | 52 | ```bash 53 | npx task-master init --yes 54 | ``` 55 | 56 | 5. 
Verify that all Roo files and directories are created: 57 | 58 | ```bash 59 | # Check that .roomodes file exists 60 | ls -la | grep .roomodes 61 | 62 | # Check that .roo directory exists and contains all mode directories 63 | ls -la .roo 64 | ls -la .roo/rules 65 | ls -la .roo/rules-architect 66 | ls -la .roo/rules-ask 67 | ls -la .roo/rules-boomerang 68 | ls -la .roo/rules-code 69 | ls -la .roo/rules-debug 70 | ls -la .roo/rules-test 71 | ``` 72 | 73 | ## What to Look For 74 | 75 | When running the tests or performing manual verification, ensure that: 76 | 77 | 1. The package includes `.roo/**` and `.roomodes` in the `files` array in package.json 78 | 2. The `prepare-package.js` script verifies the existence of all required Roo files 79 | 3. The `init.js` script creates all necessary .roo directories and copies .roomodes file 80 | 4. All source files for Roo integration exist in `assets/roocode/.roo` and `assets/roocode/.roomodes` 81 | 82 | ## Compatibility 83 | 84 | Ensure that the Roo integration works alongside existing Cursor functionality: 85 | 86 | 1. Initialize a new project that uses both Cursor and Roo: 87 | 88 | ```bash 89 | npx task-master init --yes 90 | ``` 91 | 92 | 2. Verify that both `.cursor` and `.roo` directories are created 93 | 3. Verify that both `.windsurfrules` and `.roomodes` files are created 94 | 4. Confirm that existing functionality continues to work as expected 95 | -------------------------------------------------------------------------------- /tasks/task_073.txt: -------------------------------------------------------------------------------- 1 | # Task ID: 73 2 | # Title: Implement Custom Model ID Support for Ollama/OpenRouter 3 | # Status: in-progress 4 | # Dependencies: None 5 | # Priority: medium 6 | # Description: Allow users to specify custom model IDs for Ollama and OpenRouter providers via CLI flag and interactive setup, with appropriate validation and warnings. 
7 | # Details: 8 | **CLI (`task-master models --set- --custom`):** 9 | - Modify `scripts/modules/task-manager/models.js`: `setModel` function. 10 | - Check internal `available_models.json` first. 11 | - If not found and `--custom` is provided: 12 | - Fetch `https://openrouter.ai/api/v1/models`. (Need to add `https` import). 13 | - If ID found in OpenRouter list: Set `provider: 'openrouter'`, `modelId: `. Warn user about lack of official validation. 14 | - If ID not found in OpenRouter: Assume Ollama. Set `provider: 'ollama'`, `modelId: `. Warn user strongly (model must be pulled, compatibility not guaranteed). 15 | - If not found and `--custom` is *not* provided: Fail with error message guiding user to use `--custom`. 16 | 17 | **Interactive Setup (`task-master models --setup`):** 18 | - Modify `scripts/modules/commands.js`: `runInteractiveSetup` function. 19 | - Add options to `inquirer` choices for each role: `OpenRouter (Enter Custom ID)` and `Ollama (Enter Custom ID)`. 20 | - If `__CUSTOM_OPENROUTER__` selected: 21 | - Prompt for custom ID. 22 | - Fetch OpenRouter list and validate ID exists. Fail setup for that role if not found. 23 | - Update config and show warning if found. 24 | - If `__CUSTOM_OLLAMA__` selected: 25 | - Prompt for custom ID. 26 | - Update config directly (no live validation). 27 | - Show strong Ollama warning. 28 | 29 | # Test Strategy: 30 | **Unit Tests:** 31 | - Test `setModel` logic for internal models, custom OpenRouter (valid/invalid), custom Ollama, missing `--custom` flag. 32 | - Test `runInteractiveSetup` for new custom options flow, including OpenRouter validation success/failure. 33 | 34 | **Integration Tests:** 35 | - Test the `task-master models` command with `--custom` flag variations. 36 | - Test the `task-master models --setup` interactive flow for custom options. 37 | 38 | **Manual Testing:** 39 | - Run `task-master models --setup` and select custom options. 40 | - Run `task-master models --set-main --custom`. 
Verify config and warning. 41 | - Run `task-master models --set-main --custom`. Verify error. 42 | - Run `task-master models --set-main --custom`. Verify config and warning. 43 | - Run `task-master models --set-main ` (without `--custom`). Verify error. 44 | - Check `getModelConfiguration` output reflects custom models correctly. 45 | -------------------------------------------------------------------------------- /.cursor/rules/self_improve.mdc: -------------------------------------------------------------------------------- 1 | --- 2 | description: Guidelines for continuously improving Cursor rules based on emerging code patterns and best practices. 3 | globs: **/* 4 | alwaysApply: true 5 | --- 6 | 7 | - **Rule Improvement Triggers:** 8 | - New code patterns not covered by existing rules 9 | - Repeated similar implementations across files 10 | - Common error patterns that could be prevented 11 | - New libraries or tools being used consistently 12 | - Emerging best practices in the codebase 13 | 14 | - **Analysis Process:** 15 | - Compare new code with existing rules 16 | - Identify patterns that should be standardized 17 | - Look for references to external documentation 18 | - Check for consistent error handling patterns 19 | - Monitor test patterns and coverage 20 | 21 | - **Rule Updates:** 22 | - **Add New Rules When:** 23 | - A new technology/pattern is used in 3+ files 24 | - Common bugs could be prevented by a rule 25 | - Code reviews repeatedly mention the same feedback 26 | - New security or performance patterns emerge 27 | 28 | - **Modify Existing Rules When:** 29 | - Better examples exist in the codebase 30 | - Additional edge cases are discovered 31 | - Related rules have been updated 32 | - Implementation details have changed 33 | 34 | - **Example Pattern Recognition:** 35 | ```typescript 36 | // If you see repeated patterns like: 37 | const data = await prisma.user.findMany({ 38 | select: { id: true, email: true }, 39 | where: { status: 'ACTIVE' } 40 
| }); 41 | 42 | // Consider adding to [prisma.mdc](mdc:.cursor/rules/prisma.mdc): 43 | // - Standard select fields 44 | // - Common where conditions 45 | // - Performance optimization patterns 46 | ``` 47 | 48 | - **Rule Quality Checks:** 49 | - Rules should be actionable and specific 50 | - Examples should come from actual code 51 | - References should be up to date 52 | - Patterns should be consistently enforced 53 | 54 | - **Continuous Improvement:** 55 | - Monitor code review comments 56 | - Track common development questions 57 | - Update rules after major refactors 58 | - Add links to relevant documentation 59 | - Cross-reference related rules 60 | 61 | - **Rule Deprecation:** 62 | - Mark outdated patterns as deprecated 63 | - Remove rules that no longer apply 64 | - Update references to deprecated rules 65 | - Document migration paths for old patterns 66 | 67 | - **Documentation Updates:** 68 | - Keep examples synchronized with code 69 | - Update references to external docs 70 | - Maintain links between related rules 71 | - Document breaking changes 72 | Follow [cursor_rules.mdc](mdc:.cursor/rules/cursor_rules.mdc) for proper rule formatting and structure. 
73 | -------------------------------------------------------------------------------- /tests/integration/roo-init-functionality.test.js: -------------------------------------------------------------------------------- 1 | import { jest } from '@jest/globals'; 2 | import fs from 'fs'; 3 | import path from 'path'; 4 | 5 | describe('Roo Initialization Functionality', () => { 6 | let initJsContent; 7 | 8 | beforeAll(() => { 9 | // Read the init.js file content once for all tests 10 | const initJsPath = path.join(process.cwd(), 'scripts', 'init.js'); 11 | initJsContent = fs.readFileSync(initJsPath, 'utf8'); 12 | }); 13 | 14 | test('init.js creates Roo directories in createProjectStructure function', () => { 15 | // Check if createProjectStructure function exists 16 | expect(initJsContent).toContain('function createProjectStructure'); 17 | 18 | // Check for the line that creates the .roo directory 19 | const hasRooDir = initJsContent.includes( 20 | "ensureDirectoryExists(path.join(targetDir, '.roo'))" 21 | ); 22 | expect(hasRooDir).toBe(true); 23 | 24 | // Check for the line that creates .roo/rules directory 25 | const hasRooRulesDir = initJsContent.includes( 26 | "ensureDirectoryExists(path.join(targetDir, '.roo', 'rules'))" 27 | ); 28 | expect(hasRooRulesDir).toBe(true); 29 | 30 | // Check for the for loop that creates mode-specific directories 31 | const hasRooModeLoop = 32 | initJsContent.includes( 33 | "for (const mode of ['architect', 'ask', 'boomerang', 'code', 'debug', 'test'])" 34 | ) || 35 | (initJsContent.includes('for (const mode of [') && 36 | initJsContent.includes('architect') && 37 | initJsContent.includes('ask') && 38 | initJsContent.includes('boomerang') && 39 | initJsContent.includes('code') && 40 | initJsContent.includes('debug') && 41 | initJsContent.includes('test')); 42 | expect(hasRooModeLoop).toBe(true); 43 | }); 44 | 45 | test('init.js copies Roo files from assets/roocode directory', () => { 46 | // Check for the .roomodes case in the 
copyTemplateFile function 47 | const casesRoomodes = initJsContent.includes("case '.roomodes':"); 48 | expect(casesRoomodes).toBe(true); 49 | 50 | // Check that assets/roocode appears somewhere in the file 51 | const hasRoocodePath = initJsContent.includes("'assets', 'roocode'"); 52 | expect(hasRoocodePath).toBe(true); 53 | 54 | // Check that roomodes file is copied 55 | const copiesRoomodes = initJsContent.includes( 56 | "copyTemplateFile('.roomodes'" 57 | ); 58 | expect(copiesRoomodes).toBe(true); 59 | }); 60 | 61 | test('init.js has code to copy rule files for each mode', () => { 62 | // Look for template copying for rule files 63 | const hasModeRulesCopying = 64 | initJsContent.includes('copyTemplateFile(') && 65 | initJsContent.includes('rules-') && 66 | initJsContent.includes('-rules'); 67 | expect(hasModeRulesCopying).toBe(true); 68 | }); 69 | }); 70 | -------------------------------------------------------------------------------- /mcp-server/src/tools/add-dependency.js: -------------------------------------------------------------------------------- 1 | /** 2 | * tools/add-dependency.js 3 | * Tool for adding a dependency to a task 4 | */ 5 | 6 | import { z } from 'zod'; 7 | import { 8 | handleApiResult, 9 | createErrorResponse, 10 | getProjectRootFromSession, 11 | withNormalizedProjectRoot 12 | } from './utils.js'; 13 | import { addDependencyDirect } from '../core/task-master-core.js'; 14 | import { findTasksJsonPath } from '../core/utils/path-utils.js'; 15 | 16 | /** 17 | * Register the addDependency tool with the MCP server 18 | * @param {Object} server - FastMCP server instance 19 | */ 20 | export function registerAddDependencyTool(server) { 21 | server.addTool({ 22 | name: 'add_dependency', 23 | description: 'Add a dependency relationship between two tasks', 24 | parameters: z.object({ 25 | id: z.string().describe('ID of task that will depend on another task'), 26 | dependsOn: z 27 | .string() 28 | .describe('ID of task that will become a dependency'), 
29 | file: z 30 | .string() 31 | .optional() 32 | .describe( 33 | 'Absolute path to the tasks file (default: tasks/tasks.json)' 34 | ), 35 | projectRoot: z 36 | .string() 37 | .describe('The directory of the project. Must be an absolute path.') 38 | }), 39 | execute: withNormalizedProjectRoot(async (args, { log, session }) => { 40 | try { 41 | log.info( 42 | `Adding dependency for task ${args.id} to depend on ${args.dependsOn}` 43 | ); 44 | 45 | let tasksJsonPath; 46 | try { 47 | tasksJsonPath = findTasksJsonPath( 48 | { projectRoot: args.projectRoot, file: args.file }, 49 | log 50 | ); 51 | } catch (error) { 52 | log.error(`Error finding tasks.json: ${error.message}`); 53 | return createErrorResponse( 54 | `Failed to find tasks.json: ${error.message}` 55 | ); 56 | } 57 | 58 | // Call the direct function with the resolved path 59 | const result = await addDependencyDirect( 60 | { 61 | // Pass the explicitly resolved path 62 | tasksJsonPath: tasksJsonPath, 63 | // Pass other relevant args 64 | id: args.id, 65 | dependsOn: args.dependsOn 66 | }, 67 | log 68 | // Remove context object 69 | ); 70 | 71 | // Log result 72 | if (result.success) { 73 | log.info(`Successfully added dependency: ${result.data.message}`); 74 | } else { 75 | log.error(`Failed to add dependency: ${result.error.message}`); 76 | } 77 | 78 | // Use handleApiResult to format the response 79 | return handleApiResult(result, log, 'Error adding dependency'); 80 | } catch (error) { 81 | log.error(`Error in addDependency tool: ${error.message}`); 82 | return createErrorResponse(error.message); 83 | } 84 | }) 85 | }); 86 | } 87 | -------------------------------------------------------------------------------- /mcp-server/src/tools/set-task-status.js: -------------------------------------------------------------------------------- 1 | /** 2 | * tools/setTaskStatus.js 3 | * Tool to set the status of a task 4 | */ 5 | 6 | import { z } from 'zod'; 7 | import { 8 | handleApiResult, 9 | createErrorResponse, 10 
| withNormalizedProjectRoot 11 | } from './utils.js'; 12 | import { setTaskStatusDirect } from '../core/task-master-core.js'; 13 | import { findTasksJsonPath } from '../core/utils/path-utils.js'; 14 | 15 | /** 16 | * Register the setTaskStatus tool with the MCP server 17 | * @param {Object} server - FastMCP server instance 18 | */ 19 | export function registerSetTaskStatusTool(server) { 20 | server.addTool({ 21 | name: 'set_task_status', 22 | description: 'Set the status of one or more tasks or subtasks.', 23 | parameters: z.object({ 24 | id: z 25 | .string() 26 | .describe( 27 | "Task ID or subtask ID (e.g., '15', '15.2'). Can be comma-separated to update multiple tasks/subtasks at once." 28 | ), 29 | status: z 30 | .string() 31 | .describe( 32 | "New status to set (e.g., 'pending', 'done', 'in-progress', 'review', 'deferred', 'cancelled'." 33 | ), 34 | file: z.string().optional().describe('Absolute path to the tasks file'), 35 | projectRoot: z 36 | .string() 37 | .describe('The directory of the project. 
Must be an absolute path.') 38 | }), 39 | execute: withNormalizedProjectRoot(async (args, { log }) => { 40 | try { 41 | log.info(`Setting status of task(s) ${args.id} to: ${args.status}`); 42 | 43 | // Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot) 44 | let tasksJsonPath; 45 | try { 46 | tasksJsonPath = findTasksJsonPath( 47 | { projectRoot: args.projectRoot, file: args.file }, 48 | log 49 | ); 50 | } catch (error) { 51 | log.error(`Error finding tasks.json: ${error.message}`); 52 | return createErrorResponse( 53 | `Failed to find tasks.json: ${error.message}` 54 | ); 55 | } 56 | 57 | const result = await setTaskStatusDirect( 58 | { 59 | tasksJsonPath: tasksJsonPath, 60 | id: args.id, 61 | status: args.status 62 | }, 63 | log 64 | ); 65 | 66 | if (result.success) { 67 | log.info( 68 | `Successfully updated status for task(s) ${args.id} to "${args.status}": ${result.data.message}` 69 | ); 70 | } else { 71 | log.error( 72 | `Failed to update task status: ${result.error?.message || 'Unknown error'}` 73 | ); 74 | } 75 | 76 | return handleApiResult(result, log, 'Error setting task status'); 77 | } catch (error) { 78 | log.error(`Error in setTaskStatus tool: ${error.message}`); 79 | return createErrorResponse( 80 | `Error setting task status: ${error.message}` 81 | ); 82 | } 83 | }) 84 | }); 85 | } 86 | -------------------------------------------------------------------------------- /tasks/task_045.txt: -------------------------------------------------------------------------------- 1 | # Task ID: 45 2 | # Title: Implement GitHub Issue Import Feature 3 | # Status: pending 4 | # Dependencies: None 5 | # Priority: medium 6 | # Description: Add a '--from-github' flag to the add-task command that accepts a GitHub issue URL and automatically generates a corresponding task with relevant details. 7 | # Details: 8 | Implement a new flag '--from-github' for the add-task command that allows users to create tasks directly from GitHub issues. 
The implementation should: 9 | 10 | 1. Accept a GitHub issue URL as an argument (e.g., 'taskmaster add-task --from-github https://github.com/owner/repo/issues/123') 11 | 2. Parse the URL to extract the repository owner, name, and issue number 12 | 3. Use the GitHub API to fetch the issue details including: 13 | - Issue title (to be used as task title) 14 | - Issue description (to be used as task description) 15 | - Issue labels (to be potentially used as tags) 16 | - Issue assignees (for reference) 17 | - Issue status (open/closed) 18 | 4. Generate a well-formatted task with this information 19 | 5. Include a reference link back to the original GitHub issue 20 | 6. Handle authentication for private repositories using GitHub tokens from environment variables or config file 21 | 7. Implement proper error handling for: 22 | - Invalid URLs 23 | - Non-existent issues 24 | - API rate limiting 25 | - Authentication failures 26 | - Network issues 27 | 8. Allow users to override or supplement the imported details with additional command-line arguments 28 | 9. Add appropriate documentation in help text and user guide 29 | 30 | # Test Strategy: 31 | Testing should cover the following scenarios: 32 | 33 | 1. Unit tests: 34 | - Test URL parsing functionality with valid and invalid GitHub issue URLs 35 | - Test GitHub API response parsing with mocked API responses 36 | - Test error handling for various failure cases 37 | 38 | 2. Integration tests: 39 | - Test with real GitHub public issues (use well-known repositories) 40 | - Test with both open and closed issues 41 | - Test with issues containing various elements (labels, assignees, comments) 42 | 43 | 3. Error case tests: 44 | - Invalid URL format 45 | - Non-existent repository 46 | - Non-existent issue number 47 | - API rate limit exceeded 48 | - Authentication failures for private repos 49 | 50 | 4. 
End-to-end tests: 51 | - Verify that a task created from a GitHub issue contains all expected information 52 | - Verify that the task can be properly managed after creation 53 | - Test the interaction with other flags and commands 54 | 55 | Create mock GitHub API responses for testing to avoid hitting rate limits during development and testing. Use environment variables to configure test credentials if needed. 56 | -------------------------------------------------------------------------------- /mcp-server/src/tools/remove-subtask.js: -------------------------------------------------------------------------------- 1 | /** 2 | * tools/remove-subtask.js 3 | * Tool for removing subtasks from parent tasks 4 | */ 5 | 6 | import { z } from 'zod'; 7 | import { 8 | handleApiResult, 9 | createErrorResponse, 10 | withNormalizedProjectRoot 11 | } from './utils.js'; 12 | import { removeSubtaskDirect } from '../core/task-master-core.js'; 13 | import { findTasksJsonPath } from '../core/utils/path-utils.js'; 14 | 15 | /** 16 | * Register the removeSubtask tool with the MCP server 17 | * @param {Object} server - FastMCP server instance 18 | */ 19 | export function registerRemoveSubtaskTool(server) { 20 | server.addTool({ 21 | name: 'remove_subtask', 22 | description: 'Remove a subtask from its parent task', 23 | parameters: z.object({ 24 | id: z 25 | .string() 26 | .describe( 27 | "Subtask ID to remove in format 'parentId.subtaskId' (required)" 28 | ), 29 | convert: z 30 | .boolean() 31 | .optional() 32 | .describe( 33 | 'Convert the subtask to a standalone task instead of deleting it' 34 | ), 35 | file: z 36 | .string() 37 | .optional() 38 | .describe( 39 | 'Absolute path to the tasks file (default: tasks/tasks.json)' 40 | ), 41 | skipGenerate: z 42 | .boolean() 43 | .optional() 44 | .describe('Skip regenerating task files'), 45 | projectRoot: z 46 | .string() 47 | .describe('The directory of the project. 
Must be an absolute path.') 48 | }), 49 | execute: withNormalizedProjectRoot(async (args, { log }) => { 50 | try { 51 | log.info(`Removing subtask with args: ${JSON.stringify(args)}`); 52 | 53 | // Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot) 54 | let tasksJsonPath; 55 | try { 56 | tasksJsonPath = findTasksJsonPath( 57 | { projectRoot: args.projectRoot, file: args.file }, 58 | log 59 | ); 60 | } catch (error) { 61 | log.error(`Error finding tasks.json: ${error.message}`); 62 | return createErrorResponse( 63 | `Failed to find tasks.json: ${error.message}` 64 | ); 65 | } 66 | 67 | const result = await removeSubtaskDirect( 68 | { 69 | tasksJsonPath: tasksJsonPath, 70 | id: args.id, 71 | convert: args.convert, 72 | skipGenerate: args.skipGenerate 73 | }, 74 | log 75 | ); 76 | 77 | if (result.success) { 78 | log.info(`Subtask removed successfully: ${result.data.message}`); 79 | } else { 80 | log.error(`Failed to remove subtask: ${result.error.message}`); 81 | } 82 | 83 | return handleApiResult(result, log, 'Error removing subtask'); 84 | } catch (error) { 85 | log.error(`Error in removeSubtask tool: ${error.message}`); 86 | return createErrorResponse(error.message); 87 | } 88 | }) 89 | }); 90 | } 91 | -------------------------------------------------------------------------------- /tasks/task_033.txt: -------------------------------------------------------------------------------- 1 | # Task ID: 33 2 | # Title: Create and Integrate Windsurf Rules Document from MDC Files 3 | # Status: done 4 | # Dependencies: None 5 | # Priority: medium 6 | # Description: Develop functionality to generate a .windsurfrules document by combining and refactoring content from three primary .mdc files used for Cursor Rules, ensuring it's properly integrated into the initialization pipeline. 
7 | # Details: 8 | This task involves creating a mechanism to generate a Windsurf-specific rules document by combining three existing MDC (Markdown Content) files that are currently used for Cursor Rules. The implementation should: 9 | 10 | 1. Identify and locate the three primary .mdc files used for Cursor Rules 11 | 2. Extract content from these files and merge them into a single document 12 | 3. Refactor the content to make it Windsurf-specific, replacing Cursor-specific terminology and adapting guidelines as needed 13 | 4. Create a function that generates a .windsurfrules document from this content 14 | 5. Integrate this function into the initialization pipeline 15 | 6. Implement logic to check if a .windsurfrules document already exists: 16 | - If it exists, append the new content to it 17 | - If it doesn't exist, create a new document 18 | 7. Ensure proper error handling for file operations 19 | 8. Add appropriate logging to track the generation and modification of the .windsurfrules document 20 | 21 | The implementation should be modular and maintainable, with clear separation of concerns between content extraction, refactoring, and file operations. 22 | 23 | # Test Strategy: 24 | Testing should verify both the content generation and the integration with the initialization pipeline: 25 | 26 | 1. Unit Tests: 27 | - Test the content extraction function with mock .mdc files 28 | - Test the content refactoring function to ensure Cursor-specific terms are properly replaced 29 | - Test the file operation functions with mock filesystem 30 | 31 | 2. Integration Tests: 32 | - Test the creation of a new .windsurfrules document when none exists 33 | - Test appending to an existing .windsurfrules document 34 | - Test the complete initialization pipeline with the new functionality 35 | 36 | 3. 
Manual Verification: 37 | - Inspect the generated .windsurfrules document to ensure content is properly combined and refactored 38 | - Verify that Cursor-specific terminology has been replaced with Windsurf-specific terminology 39 | - Run the initialization process multiple times to verify idempotence (content isn't duplicated on multiple runs) 40 | 41 | 4. Edge Cases: 42 | - Test with missing or corrupted .mdc files 43 | - Test with an existing but empty .windsurfrules document 44 | - Test with an existing .windsurfrules document that already contains some of the content 45 | -------------------------------------------------------------------------------- /mcp-server/src/tools/expand-task.js: -------------------------------------------------------------------------------- 1 | /** 2 | * tools/expand-task.js 3 | * Tool to expand a task into subtasks 4 | */ 5 | 6 | import { z } from 'zod'; 7 | import { 8 | handleApiResult, 9 | createErrorResponse, 10 | withNormalizedProjectRoot 11 | } from './utils.js'; 12 | import { expandTaskDirect } from '../core/task-master-core.js'; 13 | import { findTasksJsonPath } from '../core/utils/path-utils.js'; 14 | 15 | /** 16 | * Register the expand-task tool with the MCP server 17 | * @param {Object} server - FastMCP server instance 18 | */ 19 | export function registerExpandTaskTool(server) { 20 | server.addTool({ 21 | name: 'expand_task', 22 | description: 'Expand a task into subtasks for detailed implementation', 23 | parameters: z.object({ 24 | id: z.string().describe('ID of task to expand'), 25 | num: z.string().optional().describe('Number of subtasks to generate'), 26 | research: z 27 | .boolean() 28 | .optional() 29 | .default(false) 30 | .describe('Use research role for generation'), 31 | prompt: z 32 | .string() 33 | .optional() 34 | .describe('Additional context for subtask generation'), 35 | file: z 36 | .string() 37 | .optional() 38 | .describe( 39 | 'Path to the tasks file relative to project root (e.g., tasks/tasks.json)' 
40 | ), 41 | projectRoot: z 42 | .string() 43 | .describe('The directory of the project. Must be an absolute path.'), 44 | force: z 45 | .boolean() 46 | .optional() 47 | .default(false) 48 | .describe('Force expansion even if subtasks exist') 49 | }), 50 | execute: withNormalizedProjectRoot(async (args, { log, session }) => { 51 | try { 52 | log.info(`Starting expand-task with args: ${JSON.stringify(args)}`); 53 | 54 | // Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot) 55 | let tasksJsonPath; 56 | try { 57 | tasksJsonPath = findTasksJsonPath( 58 | { projectRoot: args.projectRoot, file: args.file }, 59 | log 60 | ); 61 | } catch (error) { 62 | log.error(`Error finding tasks.json: ${error.message}`); 63 | return createErrorResponse( 64 | `Failed to find tasks.json: ${error.message}` 65 | ); 66 | } 67 | 68 | const result = await expandTaskDirect( 69 | { 70 | tasksJsonPath: tasksJsonPath, 71 | id: args.id, 72 | num: args.num, 73 | research: args.research, 74 | prompt: args.prompt, 75 | force: args.force, 76 | projectRoot: args.projectRoot 77 | }, 78 | log, 79 | { session } 80 | ); 81 | 82 | return handleApiResult(result, log, 'Error expanding task'); 83 | } catch (error) { 84 | log.error(`Error in expand-task tool: ${error.message}`); 85 | return createErrorResponse(error.message); 86 | } 87 | }) 88 | }); 89 | } 90 | -------------------------------------------------------------------------------- /tasks/task_037.txt: -------------------------------------------------------------------------------- 1 | # Task ID: 37 2 | # Title: Add Gemini Support for Main AI Services as Claude Alternative 3 | # Status: done 4 | # Dependencies: None 5 | # Priority: medium 6 | # Description: Implement Google's Gemini API integration as an alternative to Claude for all main AI services, allowing users to switch between different LLM providers. 
7 | # Details: 8 | This task involves integrating Google's Gemini API across all main AI services that currently use Claude: 9 | 10 | 1. Create a new GeminiService class that implements the same interface as the existing ClaudeService 11 | 2. Implement authentication and API key management for Gemini API 12 | 3. Map our internal prompt formats to Gemini's expected input format 13 | 4. Handle Gemini-specific parameters (temperature, top_p, etc.) and response parsing 14 | 5. Update the AI service factory/provider to support selecting Gemini as an alternative 15 | 6. Add configuration options in settings to allow users to select Gemini as their preferred provider 16 | 7. Implement proper error handling for Gemini-specific API errors 17 | 8. Ensure streaming responses are properly supported if Gemini offers this capability 18 | 9. Update documentation to reflect the new Gemini option 19 | 10. Consider implementing model selection if Gemini offers multiple models (e.g., Gemini Pro, Gemini Ultra) 20 | 11. Ensure all existing AI capabilities (summarization, code generation, etc.) maintain feature parity when using Gemini 21 | 22 | The implementation should follow the same pattern as the recent Ollama integration (Task #36) to maintain consistency in how alternative AI providers are supported. 23 | 24 | # Test Strategy: 25 | Testing should verify Gemini integration works correctly across all AI services: 26 | 27 | 1. Unit tests: 28 | - Test GeminiService class methods with mocked API responses 29 | - Verify proper error handling for common API errors 30 | - Test configuration and model selection functionality 31 | 32 | 2. Integration tests: 33 | - Verify authentication and API connection with valid credentials 34 | - Test each AI service with Gemini to ensure proper functionality 35 | - Compare outputs between Claude and Gemini for the same inputs to verify quality 36 | 37 | 3. 
End-to-end tests: 38 | - Test the complete user flow of switching to Gemini and using various AI features 39 | - Verify streaming responses work correctly if supported 40 | 41 | 4. Performance tests: 42 | - Measure and compare response times between Claude and Gemini 43 | - Test with various input lengths to verify handling of context limits 44 | 45 | 5. Manual testing: 46 | - Verify the quality of Gemini responses across different use cases 47 | - Test edge cases like very long inputs or specialized domain knowledge 48 | 49 | All tests should pass with Gemini selected as the provider, and the user experience should be consistent regardless of which provider is selected. 50 | -------------------------------------------------------------------------------- /tasks/task_036.txt: -------------------------------------------------------------------------------- 1 | # Task ID: 36 2 | # Title: Add Ollama Support for AI Services as Claude Alternative 3 | # Status: deferred 4 | # Dependencies: None 5 | # Priority: medium 6 | # Description: Implement Ollama integration as an alternative to Claude for all main AI services, allowing users to run local language models instead of relying on cloud-based Claude API. 7 | # Details: 8 | This task involves creating a comprehensive Ollama integration that can replace Claude across all main AI services in the application. Implementation should include: 9 | 10 | 1. Create an OllamaService class that implements the same interface as the ClaudeService to ensure compatibility 11 | 2. Add configuration options to specify Ollama endpoint URL (default: http://localhost:11434) 12 | 3. Implement model selection functionality to allow users to choose which Ollama model to use (e.g., llama3, mistral, etc.) 13 | 4. Handle prompt formatting specific to Ollama models, ensuring proper system/user message separation 14 | 5. Implement proper error handling for cases where Ollama server is unavailable or returns errors 15 | 6. 
Add fallback mechanism to Claude when Ollama fails or isn't configured 16 | 7. Update the AI service factory to conditionally create either Claude or Ollama service based on configuration 17 | 8. Ensure token counting and rate limiting are appropriately handled for Ollama models 18 | 9. Add documentation for users explaining how to set up and use Ollama with the application 19 | 10. Optimize prompt templates specifically for Ollama models if needed 20 | 21 | The implementation should be toggled through a configuration option (useOllama: true/false) and should maintain all existing functionality currently provided by Claude. 22 | 23 | # Test Strategy: 24 | Testing should verify that Ollama integration works correctly as a drop-in replacement for Claude: 25 | 26 | 1. Unit tests: 27 | - Test OllamaService class methods in isolation with mocked responses 28 | - Verify proper error handling when Ollama server is unavailable 29 | - Test fallback mechanism to Claude when configured 30 | 31 | 2. Integration tests: 32 | - Test with actual Ollama server running locally with at least two different models 33 | - Verify all AI service functions work correctly with Ollama 34 | - Compare outputs between Claude and Ollama for quality assessment 35 | 36 | 3. Configuration tests: 37 | - Verify toggling between Claude and Ollama works as expected 38 | - Test with various model configurations 39 | 40 | 4. Performance tests: 41 | - Measure and compare response times between Claude and Ollama 42 | - Test with different load scenarios 43 | 44 | 5. Manual testing: 45 | - Verify all main AI features work correctly with Ollama 46 | - Test edge cases like very long inputs or specialized tasks 47 | 48 | Create a test document comparing output quality between Claude and various Ollama models to help users understand the tradeoffs. 
49 | -------------------------------------------------------------------------------- /mcp-server/src/tools/update-subtask.js: -------------------------------------------------------------------------------- 1 | /** 2 | * tools/update-subtask.js 3 | * Tool to append additional information to a specific subtask 4 | */ 5 | 6 | import { z } from 'zod'; 7 | import { 8 | handleApiResult, 9 | createErrorResponse, 10 | withNormalizedProjectRoot 11 | } from './utils.js'; 12 | import { updateSubtaskByIdDirect } from '../core/task-master-core.js'; 13 | import { findTasksJsonPath } from '../core/utils/path-utils.js'; 14 | 15 | /** 16 | * Register the update-subtask tool with the MCP server 17 | * @param {Object} server - FastMCP server instance 18 | */ 19 | export function registerUpdateSubtaskTool(server) { 20 | server.addTool({ 21 | name: 'update_subtask', 22 | description: 23 | 'Appends timestamped information to a specific subtask without replacing existing content', 24 | parameters: z.object({ 25 | id: z 26 | .string() 27 | .describe( 28 | 'ID of the subtask to update in format "parentId.subtaskId" (e.g., "5.2"). Parent ID is the ID of the task that contains the subtask.' 29 | ), 30 | prompt: z.string().describe('Information to add to the subtask'), 31 | research: z 32 | .boolean() 33 | .optional() 34 | .describe('Use Perplexity AI for research-backed updates'), 35 | file: z.string().optional().describe('Absolute path to the tasks file'), 36 | projectRoot: z 37 | .string() 38 | .describe('The directory of the project. 
Must be an absolute path.') 39 | }), 40 | execute: withNormalizedProjectRoot(async (args, { log, session }) => { 41 | const toolName = 'update_subtask'; 42 | try { 43 | log.info(`Updating subtask with args: ${JSON.stringify(args)}`); 44 | 45 | let tasksJsonPath; 46 | try { 47 | tasksJsonPath = findTasksJsonPath( 48 | { projectRoot: args.projectRoot, file: args.file }, 49 | log 50 | ); 51 | } catch (error) { 52 | log.error(`${toolName}: Error finding tasks.json: ${error.message}`); 53 | return createErrorResponse( 54 | `Failed to find tasks.json: ${error.message}` 55 | ); 56 | } 57 | 58 | const result = await updateSubtaskByIdDirect( 59 | { 60 | tasksJsonPath: tasksJsonPath, 61 | id: args.id, 62 | prompt: args.prompt, 63 | research: args.research, 64 | projectRoot: args.projectRoot 65 | }, 66 | log, 67 | { session } 68 | ); 69 | 70 | if (result.success) { 71 | log.info(`Successfully updated subtask with ID ${args.id}`); 72 | } else { 73 | log.error( 74 | `Failed to update subtask: ${result.error?.message || 'Unknown error'}` 75 | ); 76 | } 77 | 78 | return handleApiResult(result, log, 'Error updating subtask'); 79 | } catch (error) { 80 | log.error( 81 | `Critical error in ${toolName} tool execute: ${error.message}` 82 | ); 83 | return createErrorResponse( 84 | `Internal tool error (${toolName}): ${error.message}` 85 | ); 86 | } 87 | }) 88 | }); 89 | } 90 | -------------------------------------------------------------------------------- /tasks/task_013.txt: -------------------------------------------------------------------------------- 1 | # Task ID: 13 2 | # Title: Create Cursor Rules Implementation 3 | # Status: done 4 | # Dependencies: 1, 3 5 | # Priority: medium 6 | # Description: Develop the Cursor AI integration rules and documentation. 
7 | # Details: 8 | Implement Cursor rules including: 9 | - Create dev_workflow.mdc documentation 10 | - Implement cursor_rules.mdc 11 | - Add self_improve.mdc 12 | - Design rule integration documentation 13 | - Set up .cursor directory structure 14 | - Document how Cursor AI should interact with the system 15 | 16 | # Test Strategy: 17 | Review rules documentation for clarity and completeness. Test with Cursor AI to verify the rules are properly interpreted and followed. 18 | 19 | # Subtasks: 20 | ## 1. Set up .cursor Directory Structure [done] 21 | ### Dependencies: None 22 | ### Description: Create the required directory structure for Cursor AI integration, including the .cursor folder and rules subfolder. This provides the foundation for storing all Cursor-related configuration files and rule documentation. Ensure proper permissions and gitignore settings are configured to maintain these files correctly. 23 | ### Details: 24 | 25 | 26 | ## 2. Create dev_workflow.mdc Documentation [done] 27 | ### Dependencies: 13.1 28 | ### Description: Develop the dev_workflow.mdc file that documents the development workflow for Cursor AI. This file should outline how Cursor AI should assist with task discovery, implementation, and verification within the project. Include specific examples of commands and interactions that demonstrate the optimal workflow. 29 | ### Details: 30 | 31 | 32 | ## 3. Implement cursor_rules.mdc [done] 33 | ### Dependencies: 13.1 34 | ### Description: Create the cursor_rules.mdc file that defines specific rules and guidelines for how Cursor AI should interact with the codebase. This should include code style preferences, architectural patterns to follow, documentation requirements, and any project-specific conventions that Cursor AI should adhere to when generating or modifying code. 35 | ### Details: 36 | 37 | 38 | ## 4. 
Add self_improve.mdc Documentation [done] 39 | ### Dependencies: 13.1, 13.2, 13.3 40 | ### Description: Develop the self_improve.mdc file that instructs Cursor AI on how to continuously improve its assistance capabilities within the project context. This document should outline how Cursor AI should learn from feedback, adapt to project evolution, and enhance its understanding of the codebase over time. 41 | ### Details: 42 | 43 | 44 | ## 5. Create Cursor AI Integration Documentation [done] 45 | ### Dependencies: 13.1, 13.2, 13.3, 13.4 46 | ### Description: Develop comprehensive documentation on how Cursor AI integrates with the task management system. This should include detailed instructions on how Cursor AI should interpret tasks.json, individual task files, and how it should assist with implementation. Document the specific commands and workflows that Cursor AI should understand and support. 47 | ### Details: 48 | 49 | 50 | -------------------------------------------------------------------------------- /tasks/task_058.txt: -------------------------------------------------------------------------------- 1 | # Task ID: 58 2 | # Title: Implement Elegant Package Update Mechanism for Task-Master 3 | # Status: done 4 | # Dependencies: None 5 | # Priority: medium 6 | # Description: Create a robust update mechanism that handles package updates gracefully, ensuring all necessary files are updated when the global package is upgraded. 7 | # Details: 8 | Develop a comprehensive update system with these components: 9 | 10 | 1. **Update Detection**: When task-master runs, check if the current version matches the installed version. If not, notify the user an update is available. 11 | 12 | 2. **Update Command**: Implement a dedicated `task-master update` command that: 13 | - Updates the global package (`npm -g task-master-ai@latest`) 14 | - Automatically runs necessary initialization steps 15 | - Preserves user configurations while updating system files 16 | 17 | 3. 
**Smart File Management**: 18 | - Create a manifest of core files with checksums 19 | - During updates, compare existing files with the manifest 20 | - Only overwrite files that have changed in the update 21 | - Preserve user-modified files with an option to merge changes 22 | 23 | 4. **Configuration Versioning**: 24 | - Add version tracking to configuration files 25 | - Implement migration paths for configuration changes between versions 26 | - Provide backward compatibility for older configurations 27 | 28 | 5. **Update Notifications**: 29 | - Add a non-intrusive notification when updates are available 30 | - Include a changelog summary of what's new 31 | 32 | This system should work seamlessly with the existing `task-master init` command but provide a more automated and user-friendly update experience. 33 | 34 | # Test Strategy: 35 | Test the update mechanism with these specific scenarios: 36 | 37 | 1. **Version Detection Test**: 38 | - Install an older version, then verify the system correctly detects when a newer version is available 39 | - Test with minor and major version changes 40 | 41 | 2. **Update Command Test**: 42 | - Verify `task-master update` successfully updates the global package 43 | - Confirm all necessary files are updated correctly 44 | - Test with and without user-modified files present 45 | 46 | 3. **File Preservation Test**: 47 | - Modify configuration files, then update 48 | - Verify user changes are preserved while system files are updated 49 | - Test with conflicts between user changes and system updates 50 | 51 | 4. **Rollback Test**: 52 | - Implement and test a rollback mechanism if updates fail 53 | - Verify system returns to previous working state 54 | 55 | 5. **Integration Test**: 56 | - Create a test project with the current version 57 | - Run through the update process 58 | - Verify all functionality continues to work after update 59 | 60 | 6. 
**Edge Case Tests**: 61 | - Test updating with insufficient permissions 62 | - Test updating with network interruptions 63 | - Test updating from very old versions to latest 64 | -------------------------------------------------------------------------------- /mcp-server/src/tools/update-task.js: -------------------------------------------------------------------------------- 1 | /** 2 | * tools/update-task.js 3 | * Tool to update a single task by ID with new information 4 | */ 5 | 6 | import { z } from 'zod'; 7 | import { 8 | handleApiResult, 9 | createErrorResponse, 10 | withNormalizedProjectRoot 11 | } from './utils.js'; 12 | import { updateTaskByIdDirect } from '../core/task-master-core.js'; 13 | import { findTasksJsonPath } from '../core/utils/path-utils.js'; 14 | 15 | /** 16 | * Register the update-task tool with the MCP server 17 | * @param {Object} server - FastMCP server instance 18 | */ 19 | export function registerUpdateTaskTool(server) { 20 | server.addTool({ 21 | name: 'update_task', 22 | description: 23 | 'Updates a single task by ID with new information or context provided in the prompt.', 24 | parameters: z.object({ 25 | id: z 26 | .string() // ID can be number or string like "1.2" 27 | .describe( 28 | "ID of the task (e.g., '15') to update. Subtasks are supported using the update-subtask tool." 29 | ), 30 | prompt: z 31 | .string() 32 | .describe('New information or context to incorporate into the task'), 33 | research: z 34 | .boolean() 35 | .optional() 36 | .describe('Use Perplexity AI for research-backed updates'), 37 | file: z.string().optional().describe('Absolute path to the tasks file'), 38 | projectRoot: z 39 | .string() 40 | .describe('The directory of the project. 
Must be an absolute path.') 41 | }), 42 | execute: withNormalizedProjectRoot(async (args, { log, session }) => { 43 | const toolName = 'update_task'; 44 | try { 45 | log.info( 46 | `Executing ${toolName} tool with args: ${JSON.stringify(args)}` 47 | ); 48 | 49 | let tasksJsonPath; 50 | try { 51 | tasksJsonPath = findTasksJsonPath( 52 | { projectRoot: args.projectRoot, file: args.file }, 53 | log 54 | ); 55 | log.info(`${toolName}: Resolved tasks path: ${tasksJsonPath}`); 56 | } catch (error) { 57 | log.error(`${toolName}: Error finding tasks.json: ${error.message}`); 58 | return createErrorResponse( 59 | `Failed to find tasks.json: ${error.message}` 60 | ); 61 | } 62 | 63 | // 3. Call Direct Function - Include projectRoot 64 | const result = await updateTaskByIdDirect( 65 | { 66 | tasksJsonPath: tasksJsonPath, 67 | id: args.id, 68 | prompt: args.prompt, 69 | research: args.research, 70 | projectRoot: args.projectRoot 71 | }, 72 | log, 73 | { session } 74 | ); 75 | 76 | // 4. Handle Result 77 | log.info( 78 | `${toolName}: Direct function result: success=${result.success}` 79 | ); 80 | return handleApiResult(result, log, 'Error updating task'); 81 | } catch (error) { 82 | log.error( 83 | `Critical error in ${toolName} tool execute: ${error.message}` 84 | ); 85 | return createErrorResponse( 86 | `Internal tool error (${toolName}): ${error.message}` 87 | ); 88 | } 89 | }) 90 | }); 91 | } 92 | -------------------------------------------------------------------------------- /test-version-check-full.js: -------------------------------------------------------------------------------- 1 | import { 2 | checkForUpdate, 3 | displayUpgradeNotification, 4 | compareVersions 5 | } from './scripts/modules/commands.js'; 6 | import fs from 'fs'; 7 | import path from 'path'; 8 | 9 | // Force our current version for testing 10 | process.env.FORCE_VERSION = '0.9.30'; 11 | 12 | // Create a mock package.json in memory for testing 13 | const mockPackageJson = { 14 | name: 
'task-master-ai', 15 | version: '0.9.30' 16 | }; 17 | 18 | // Modified version of checkForUpdate that doesn't use HTTP for testing 19 | async function testCheckForUpdate(simulatedLatestVersion) { 20 | // Get current version - use our forced version 21 | const currentVersion = process.env.FORCE_VERSION || '0.9.30'; 22 | 23 | console.log(`Using simulated current version: ${currentVersion}`); 24 | console.log(`Using simulated latest version: ${simulatedLatestVersion}`); 25 | 26 | // Compare versions 27 | const needsUpdate = 28 | compareVersions(currentVersion, simulatedLatestVersion) < 0; 29 | 30 | return { 31 | currentVersion, 32 | latestVersion: simulatedLatestVersion, 33 | needsUpdate 34 | }; 35 | } 36 | 37 | // Test with current version older than latest (should show update notice) 38 | async function runTest() { 39 | console.log('=== Testing version check scenarios ===\n'); 40 | 41 | // Scenario 1: Update available 42 | console.log( 43 | '\n--- Scenario 1: Update available (Current: 0.9.30, Latest: 1.0.0) ---' 44 | ); 45 | const updateInfo1 = await testCheckForUpdate('1.0.0'); 46 | console.log('Update check results:'); 47 | console.log(`- Current version: ${updateInfo1.currentVersion}`); 48 | console.log(`- Latest version: ${updateInfo1.latestVersion}`); 49 | console.log(`- Update needed: ${updateInfo1.needsUpdate}`); 50 | 51 | if (updateInfo1.needsUpdate) { 52 | console.log('\nDisplaying upgrade notification:'); 53 | displayUpgradeNotification( 54 | updateInfo1.currentVersion, 55 | updateInfo1.latestVersion 56 | ); 57 | } 58 | 59 | // Scenario 2: No update needed (versions equal) 60 | console.log( 61 | '\n--- Scenario 2: No update needed (Current: 0.9.30, Latest: 0.9.30) ---' 62 | ); 63 | const updateInfo2 = await testCheckForUpdate('0.9.30'); 64 | console.log('Update check results:'); 65 | console.log(`- Current version: ${updateInfo2.currentVersion}`); 66 | console.log(`- Latest version: ${updateInfo2.latestVersion}`); 67 | console.log(`- Update needed: 
${updateInfo2.needsUpdate}`); 68 | 69 | // Scenario 3: Development version (current newer than latest) 70 | console.log( 71 | '\n--- Scenario 3: Development version (Current: 0.9.30, Latest: 0.9.0) ---' 72 | ); 73 | const updateInfo3 = await testCheckForUpdate('0.9.0'); 74 | console.log('Update check results:'); 75 | console.log(`- Current version: ${updateInfo3.currentVersion}`); 76 | console.log(`- Latest version: ${updateInfo3.latestVersion}`); 77 | console.log(`- Update needed: ${updateInfo3.needsUpdate}`); 78 | 79 | console.log('\n=== Test complete ==='); 80 | } 81 | 82 | // Run all tests 83 | runTest(); 84 | -------------------------------------------------------------------------------- /tasks/task_038.txt: -------------------------------------------------------------------------------- 1 | # Task ID: 38 2 | # Title: Implement Version Check System with Upgrade Notifications 3 | # Status: done 4 | # Dependencies: None 5 | # Priority: high 6 | # Description: Create a system that checks for newer package versions and displays upgrade notifications when users run any command, informing them to update to the latest version. 7 | # Details: 8 | Implement a version check mechanism that runs automatically with every command execution: 9 | 10 | 1. Create a new module (e.g., `versionChecker.js`) that will: 11 | - Fetch the latest version from npm registry using the npm registry API (https://registry.npmjs.org/task-master-ai/latest) 12 | - Compare it with the current installed version (from package.json) 13 | - Store the last check timestamp to avoid excessive API calls (check once per day) 14 | - Cache the result to minimize network requests 15 | 16 | 2. 
The notification should: 17 | - Use colored text (e.g., yellow background with black text) to be noticeable 18 | - Include the current version and latest version 19 | - Show the exact upgrade command: 'npm i task-master-ai@latest' 20 | - Be displayed at the beginning or end of command output, not interrupting the main content 21 | - Include a small separator line to distinguish it from command output 22 | 23 | 3. Implementation considerations: 24 | - Handle network failures gracefully (don't block command execution if version check fails) 25 | - Add a configuration option to disable update checks if needed 26 | - Ensure the check is lightweight and doesn't significantly impact command performance 27 | - Consider using a package like 'semver' for proper version comparison 28 | - Implement a cooldown period (e.g., only check once per day) to avoid excessive API calls 29 | 30 | 4. The version check should be integrated into the main command execution flow so it runs for all commands automatically. 31 | 32 | # Test Strategy: 33 | 1. Manual testing: 34 | - Install an older version of the package 35 | - Run various commands and verify the update notification appears 36 | - Update to the latest version and confirm the notification no longer appears 37 | - Test with network disconnected to ensure graceful handling of failures 38 | 39 | 2. Unit tests: 40 | - Mock the npm registry response to test different scenarios: 41 | - When a newer version exists 42 | - When using the latest version 43 | - When the registry is unavailable 44 | - Test the version comparison logic with various version strings 45 | - Test the cooldown/caching mechanism works correctly 46 | 47 | 3. Integration tests: 48 | - Create a test that runs a command and verifies the notification appears in the expected format 49 | - Test that the notification appears for all commands 50 | - Verify the notification doesn't interfere with normal command output 51 | 52 | 4. 
Edge cases to test: 53 | - Pre-release versions (alpha/beta) 54 | - Very old versions 55 | - When package.json is missing or malformed 56 | - When npm registry returns unexpected data 57 | -------------------------------------------------------------------------------- /tasks/task_055.txt: -------------------------------------------------------------------------------- 1 | # Task ID: 55 2 | # Title: Implement Positional Arguments Support for CLI Commands 3 | # Status: pending 4 | # Dependencies: None 5 | # Priority: medium 6 | # Description: Upgrade CLI commands to support positional arguments alongside the existing flag-based syntax, allowing for more intuitive command usage. 7 | # Details: 8 | This task involves modifying the command parsing logic in commands.js to support positional arguments as an alternative to the current flag-based approach. The implementation should: 9 | 10 | 1. Update the argument parsing logic to detect when arguments are provided without flag prefixes (--) 11 | 2. Map positional arguments to their corresponding parameters based on their order 12 | 3. For each command in commands.js, define a consistent positional argument order (e.g., for set-status: first arg = id, second arg = status) 13 | 4. Maintain backward compatibility with the existing flag-based syntax 14 | 5. Handle edge cases such as: 15 | - Commands with optional parameters 16 | - Commands with multiple parameters 17 | - Commands that accept arrays or complex data types 18 | 6. Update the help text for each command to show both usage patterns 19 | 7. Modify the cursor rules to work with both input styles 20 | 8. 
Ensure error messages are clear when positional arguments are provided incorrectly 21 | 22 | Example implementations: 23 | - `task-master set-status 25 done` should be equivalent to `task-master set-status --id=25 --status=done` 24 | - `task-master add-task "New task name" "Task description"` should be equivalent to `task-master add-task --name="New task name" --description="Task description"` 25 | 26 | The code should prioritize maintaining the existing functionality while adding this new capability. 27 | 28 | # Test Strategy: 29 | Testing should verify both the new positional argument functionality and continued support for flag-based syntax: 30 | 31 | 1. Unit tests: 32 | - Create tests for each command that verify it works with both positional and flag-based arguments 33 | - Test edge cases like missing arguments, extra arguments, and mixed usage (some positional, some flags) 34 | - Verify help text correctly displays both usage patterns 35 | 36 | 2. Integration tests: 37 | - Test the full CLI with various commands using both syntax styles 38 | - Verify that output is identical regardless of which syntax is used 39 | - Test commands with different numbers of arguments 40 | 41 | 3. Manual testing: 42 | - Run through a comprehensive set of real-world usage scenarios with both syntax styles 43 | - Verify cursor behavior works correctly with both input methods 44 | - Check that error messages are helpful when incorrect positional arguments are provided 45 | 46 | 4. Documentation verification: 47 | - Ensure README and help text accurately reflect the new dual syntax support 48 | - Verify examples in documentation show both styles where appropriate 49 | 50 | All tests should pass with 100% of commands supporting both argument styles without any regression in existing functionality. 
51 | -------------------------------------------------------------------------------- /mcp-server/src/tools/update.js: -------------------------------------------------------------------------------- 1 | /** 2 | * tools/update.js 3 | * Tool to update tasks based on new context/prompt 4 | */ 5 | 6 | import { z } from 'zod'; 7 | import { 8 | handleApiResult, 9 | createErrorResponse, 10 | withNormalizedProjectRoot 11 | } from './utils.js'; 12 | import { updateTasksDirect } from '../core/task-master-core.js'; 13 | import { findTasksJsonPath } from '../core/utils/path-utils.js'; 14 | 15 | /** 16 | * Register the update tool with the MCP server 17 | * @param {Object} server - FastMCP server instance 18 | */ 19 | export function registerUpdateTool(server) { 20 | server.addTool({ 21 | name: 'update', 22 | description: 23 | "Update multiple upcoming tasks (with ID >= 'from' ID) based on new context or changes provided in the prompt. Use 'update_task' instead for a single specific task or 'update_subtask' for subtasks.", 24 | parameters: z.object({ 25 | from: z 26 | .string() 27 | .describe( 28 | "Task ID from which to start updating (inclusive). IMPORTANT: This tool uses 'from', not 'id'" 29 | ), 30 | prompt: z 31 | .string() 32 | .describe('Explanation of changes or new context to apply'), 33 | research: z 34 | .boolean() 35 | .optional() 36 | .describe('Use Perplexity AI for research-backed updates'), 37 | file: z 38 | .string() 39 | .optional() 40 | .describe('Path to the tasks file relative to project root'), 41 | projectRoot: z 42 | .string() 43 | .optional() 44 | .describe( 45 | 'The directory of the project. 
(Optional, usually from session)' 46 | ) 47 | }), 48 | execute: withNormalizedProjectRoot(async (args, { log, session }) => { 49 | const toolName = 'update'; 50 | const { from, prompt, research, file, projectRoot } = args; 51 | 52 | try { 53 | log.info( 54 | `Executing ${toolName} tool with normalized root: ${projectRoot}` 55 | ); 56 | 57 | let tasksJsonPath; 58 | try { 59 | tasksJsonPath = findTasksJsonPath({ projectRoot, file }, log); 60 | log.info(`${toolName}: Resolved tasks path: ${tasksJsonPath}`); 61 | } catch (error) { 62 | log.error(`${toolName}: Error finding tasks.json: ${error.message}`); 63 | return createErrorResponse( 64 | `Failed to find tasks.json within project root '${projectRoot}': ${error.message}` 65 | ); 66 | } 67 | 68 | const result = await updateTasksDirect( 69 | { 70 | tasksJsonPath: tasksJsonPath, 71 | from: from, 72 | prompt: prompt, 73 | research: research, 74 | projectRoot: projectRoot 75 | }, 76 | log, 77 | { session } 78 | ); 79 | 80 | log.info( 81 | `${toolName}: Direct function result: success=${result.success}` 82 | ); 83 | return handleApiResult(result, log, 'Error updating tasks'); 84 | } catch (error) { 85 | log.error( 86 | `Critical error in ${toolName} tool execute: ${error.message}` 87 | ); 88 | return createErrorResponse( 89 | `Internal tool error (${toolName}): ${error.message}` 90 | ); 91 | } 92 | }) 93 | }); 94 | } 95 | -------------------------------------------------------------------------------- /tasks/task_046.txt: -------------------------------------------------------------------------------- 1 | # Task ID: 46 2 | # Title: Implement ICE Analysis Command for Task Prioritization 3 | # Status: pending 4 | # Dependencies: None 5 | # Priority: medium 6 | # Description: Create a new command that analyzes and ranks tasks based on Impact, Confidence, and Ease (ICE) scoring methodology, generating a comprehensive prioritization report. 
7 | # Details: 8 | Develop a new command called `analyze-ice` that evaluates non-completed tasks (excluding those marked as done, cancelled, or deferred) and ranks them according to the ICE methodology: 9 | 10 | 1. Core functionality: 11 | - Calculate an Impact score (how much value the task will deliver) 12 | - Calculate a Confidence score (how certain we are about the impact) 13 | - Calculate an Ease score (how easy it is to implement) 14 | - Compute a total ICE score (sum or product of the three components) 15 | 16 | 2. Implementation details: 17 | - Reuse the filtering logic from `analyze-complexity` to select relevant tasks 18 | - Leverage the LLM to generate scores for each dimension on a scale of 1-10 19 | - For each task, prompt the LLM to evaluate and justify each score based on task description and details 20 | - Create an `ice_report.md` file similar to the complexity report 21 | - Sort tasks by total ICE score in descending order 22 | 23 | 3. CLI rendering: 24 | - Implement a sister command `show-ice-report` that displays the report in the terminal 25 | - Format the output with colorized scores and rankings 26 | - Include options to sort by individual components (impact, confidence, or ease) 27 | 28 | 4. Integration: 29 | - If a complexity report exists, reference it in the ICE report for additional context 30 | - Consider adding a combined view that shows both complexity and ICE scores 31 | 32 | The command should follow the same design patterns as `analyze-complexity` for consistency and code reuse. 33 | 34 | # Test Strategy: 35 | 1. Unit tests: 36 | - Test the ICE scoring algorithm with various mock task inputs 37 | - Verify correct filtering of tasks based on status 38 | - Test the sorting functionality with different ranking criteria 39 | 40 | 2. 
Integration tests: 41 | - Create a test project with diverse tasks and verify the generated ICE report 42 | - Test the integration with existing complexity reports 43 | - Verify that changes to task statuses correctly update the ICE analysis 44 | 45 | 3. CLI tests: 46 | - Verify the `analyze-ice` command generates the expected report file 47 | - Test the `show-ice-report` command renders correctly in the terminal 48 | - Test with various flag combinations and sorting options 49 | 50 | 4. Validation criteria: 51 | - The ICE scores should be reasonable and consistent 52 | - The report should clearly explain the rationale behind each score 53 | - The ranking should prioritize high-impact, high-confidence, easy-to-implement tasks 54 | - Performance should be acceptable even with a large number of tasks 55 | - The command should handle edge cases gracefully (empty projects, missing data) 56 | -------------------------------------------------------------------------------- /tasks/task_035.txt: -------------------------------------------------------------------------------- 1 | # Task ID: 35 2 | # Title: Integrate Grok3 API for Research Capabilities 3 | # Status: cancelled 4 | # Dependencies: None 5 | # Priority: medium 6 | # Description: Replace the current Perplexity API integration with Grok3 API for all research-related functionalities while maintaining existing feature parity. 7 | # Details: 8 | This task involves migrating from Perplexity to Grok3 API for research capabilities throughout the application. Implementation steps include: 9 | 10 | 1. Create a new API client module for Grok3 in `src/api/grok3.ts` that handles authentication, request formatting, and response parsing 11 | 2. Update the research service layer to use the new Grok3 client instead of Perplexity 12 | 3. Modify the request payload structure to match Grok3's expected format (parameters like temperature, max_tokens, etc.) 13 | 4. 
Update response handling to properly parse and extract Grok3's response format 14 | 5. Implement proper error handling for Grok3-specific error codes and messages 15 | 6. Update environment variables and configuration files to include Grok3 API keys and endpoints 16 | 7. Ensure rate limiting and quota management are properly implemented according to Grok3's specifications 17 | 8. Update any UI components that display research provider information to show Grok3 instead of Perplexity 18 | 9. Maintain backward compatibility for any stored research results from Perplexity 19 | 10. Document the new API integration in the developer documentation 20 | 21 | Grok3 API has different parameter requirements and response formats compared to Perplexity, so careful attention must be paid to these differences during implementation. 22 | 23 | # Test Strategy: 24 | Testing should verify that the Grok3 API integration works correctly and maintains feature parity with the previous Perplexity implementation: 25 | 26 | 1. Unit tests: 27 | - Test the Grok3 API client with mocked responses 28 | - Verify proper error handling for various error scenarios (rate limits, authentication failures, etc.) 29 | - Test the transformation of application requests to Grok3-compatible format 30 | 31 | 2. Integration tests: 32 | - Perform actual API calls to Grok3 with test credentials 33 | - Verify that research results are correctly parsed and returned 34 | - Test with various types of research queries to ensure broad compatibility 35 | 36 | 3. End-to-end tests: 37 | - Test the complete research flow from UI input to displayed results 38 | - Verify that all existing research features work with the new API 39 | 40 | 4. Performance tests: 41 | - Compare response times between Perplexity and Grok3 42 | - Ensure the application handles any differences in response time appropriately 43 | 44 | 5. 
Regression tests: 45 | - Verify that existing features dependent on research capabilities continue to work 46 | - Test that stored research results from Perplexity are still accessible and displayed correctly 47 | 48 | Create a test environment with both APIs available to compare results and ensure quality before fully replacing Perplexity with Grok3. 49 | -------------------------------------------------------------------------------- /tasks/task_072.txt: -------------------------------------------------------------------------------- 1 | # Task ID: 72 2 | # Title: Implement PDF Generation for Project Progress and Dependency Overview 3 | # Status: pending 4 | # Dependencies: None 5 | # Priority: medium 6 | # Description: Develop a feature to generate a PDF report summarizing the current project progress and visualizing the dependency chain of tasks. 7 | # Details: 8 | This task involves creating a new CLI command named 'progress-pdf' within the existing project framework to generate a PDF document. The PDF should include: 1) A summary of project progress, detailing completed, in-progress, and pending tasks with their respective statuses and completion percentages if applicable. 2) A visual representation of the task dependency chain, leveraging the output format from the 'diagram' command (Task 70) to include Mermaid diagrams or similar visualizations converted to image format for PDF embedding. Use a suitable PDF generation library (e.g., jsPDF for JavaScript environments or ReportLab for Python) compatible with the project’s tech stack. Ensure the command accepts optional parameters to filter tasks by status or ID for customized reports. Handle large dependency chains by implementing pagination or zoomable image sections in the PDF. Provide error handling for cases where diagram generation or PDF creation fails, logging detailed error messages for debugging. Consider accessibility by ensuring text in the PDF is selectable and images have alt text descriptions. 
Integrate this feature with the existing CLI structure, ensuring it aligns with the project’s configuration settings (e.g., output directory for generated files). Document the command usage and parameters in the project’s help or README file. 9 | 10 | # Test Strategy: 11 | Verify the completion of this task through a multi-step testing approach: 1) Unit Tests: Create tests for the PDF generation logic to ensure data (task statuses and dependencies) is correctly fetched and formatted. Mock the PDF library to test edge cases like empty task lists or broken dependency links. 2) Integration Tests: Run the 'progress-pdf' command via CLI to confirm it generates a PDF file without errors under normal conditions, with filtered task IDs, and with various status filters. Validate that the output file exists in the specified directory and can be opened. 3) Content Validation: Manually or via automated script, check the generated PDF content to ensure it accurately reflects the current project state (compare task counts and statuses against a known project state) and includes dependency diagrams as images. 4) Error Handling Tests: Simulate failures in diagram generation or PDF creation (e.g., invalid output path, library errors) and verify that appropriate error messages are logged and the command exits gracefully. 5) Accessibility Checks: Use a PDF accessibility tool or manual inspection to confirm that text is selectable and images have alt text. Run these tests across different project sizes (small with few tasks, large with complex dependencies) to ensure scalability. Document test results and include a sample PDF output in the project repository for reference. 
12 | -------------------------------------------------------------------------------- /tasks/task_044.txt: -------------------------------------------------------------------------------- 1 | # Task ID: 44 2 | # Title: Implement Task Automation with Webhooks and Event Triggers 3 | # Status: pending 4 | # Dependencies: None 5 | # Priority: medium 6 | # Description: Design and implement a system that allows users to automate task actions through webhooks and event triggers, enabling integration with external services and automated workflows. 7 | # Details: 8 | This feature will enable users to create automated workflows based on task events and external triggers. Implementation should include: 9 | 10 | 1. A webhook registration system that allows users to specify URLs to be called when specific task events occur (creation, status change, completion, etc.) 11 | 2. An event system that captures and processes all task-related events 12 | 3. A trigger definition interface where users can define conditions for automation (e.g., 'When task X is completed, create task Y') 13 | 4. Support for both incoming webhooks (external services triggering actions in Taskmaster) and outgoing webhooks (Taskmaster notifying external services) 14 | 5. A secure authentication mechanism for webhook calls 15 | 6. Rate limiting and retry logic for failed webhook deliveries 16 | 7. Integration with the existing task management system 17 | 8. Command-line interface for managing webhooks and triggers 18 | 9. Payload templating system allowing users to customize the data sent in webhooks 19 | 10. Logging system for webhook activities and failures 20 | 21 | The implementation should be compatible with both the solo/local mode and the multiplayer/remote mode, with appropriate adaptations for each context. When operating in MCP mode, the system should leverage the MCP communication protocol implemented in Task #42. 
22 | 23 | # Test Strategy: 24 | Testing should verify both the functionality and security of the webhook system: 25 | 26 | 1. Unit tests: 27 | - Test webhook registration, modification, and deletion 28 | - Verify event capturing for all task operations 29 | - Test payload generation and templating 30 | - Validate authentication logic 31 | 32 | 2. Integration tests: 33 | - Set up a mock server to receive webhooks and verify payload contents 34 | - Test the complete flow from task event to webhook delivery 35 | - Verify rate limiting and retry behavior with intentionally failing endpoints 36 | - Test webhook triggers creating new tasks and modifying existing ones 37 | 38 | 3. Security tests: 39 | - Verify that authentication tokens are properly validated 40 | - Test for potential injection vulnerabilities in webhook payloads 41 | - Verify that sensitive information is not leaked in webhook payloads 42 | - Test rate limiting to prevent DoS attacks 43 | 44 | 4. Mode-specific tests: 45 | - Verify correct operation in both solo/local and multiplayer/remote modes 46 | - Test the interaction with MCP protocol when in multiplayer mode 47 | 48 | 5. Manual verification: 49 | - Set up integrations with common services (GitHub, Slack, etc.) to verify real-world functionality 50 | - Verify that the CLI interface for managing webhooks works as expected 51 | --------------------------------------------------------------------------------