├── .nvmrc ├── bin ├── tsconfig.json ├── docker-publish.sh └── test-tools.ts ├── .cursor └── mcp.json ├── .dockerignore ├── .gitignore ├── src ├── formats │ ├── json.ts │ ├── yml.ts │ └── md.ts ├── pkg.ts ├── types.ts ├── metadata.ts ├── cli.ts ├── logger.ts ├── index.ts ├── schemas.ts ├── server.ts ├── storage.ts ├── env.ts ├── sources.ts ├── util.ts └── tools.ts ├── Dockerfile ├── tsconfig.json ├── LICENSE ├── .eslintrc.json ├── tsup.config.ts ├── package.json └── README.md /.nvmrc: -------------------------------------------------------------------------------- 1 | 20.19 2 | -------------------------------------------------------------------------------- /bin/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../tsconfig.json", 3 | "compilerOptions": { 4 | "noEmit": true, 5 | "rootDir": ".." 6 | }, 7 | "include": ["../src/**/*.ts", "./**/*.ts", "../tsup.config.ts"] 8 | } -------------------------------------------------------------------------------- /.cursor/mcp.json: -------------------------------------------------------------------------------- 1 | { 2 | "mcpServers": { 3 | "mcp-tasks-local": { 4 | "command": "node", 5 | "args": ["./dist/index.js"], 6 | "env": { 7 | "DEBUG": "true" 8 | } 9 | } 10 | } 11 | } -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # Include only what we need: package*.json and dist/index.js 2 | # Exclude everything else first, then include specific files 3 | 4 | # Exclude all 5 | * 6 | 7 | # Include only what we need 8 | !package.json 9 | !package-lock.json 10 | !dist/index.js -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .cursor/local 2 | .cursor/rules 3 | tmp/ 4 | .eslintcache 5 | bin/test.ts 6 | 
sources.json 7 | logs.json 8 | 9 | # Dependencies 10 | node_modules/ 11 | npm-debug.log* 12 | yarn-debug.log* 13 | yarn-error.log* 14 | 15 | # Build output 16 | dist/ 17 | 18 | # IDE 19 | .vscode/ 20 | 21 | # OS 22 | .DS_Store 23 | Thumbs.db 24 | 25 | # TypeScript 26 | *.tsbuildinfo 27 | 28 | # Logs 29 | logs 30 | *.log -------------------------------------------------------------------------------- /src/formats/json.ts: -------------------------------------------------------------------------------- 1 | import type { FormatParser } from '../types.js' 2 | import util from '../util.js' 3 | 4 | const json: FormatParser = { 5 | read(path) { 6 | const content = util.readFile(path, '{}').trim() 7 | return JSON.parse(content) 8 | }, 9 | 10 | write(path, state) { 11 | const content = JSON.stringify(state, null, '\t') 12 | util.writeFile(path, content) 13 | }, 14 | } 15 | 16 | export default json 17 | -------------------------------------------------------------------------------- /src/pkg.ts: -------------------------------------------------------------------------------- 1 | import type { PackageJson } from 'types-package-json' 2 | import util from './util.js' 3 | 4 | const pkgPath = util.resolve('package.json', util.REPO) 5 | const { default: pkg } = await import(pkgPath, { with: { type: 'json' } }) as { default: Partial } 6 | 7 | export default { 8 | ...pkg, 9 | version: pkg.version as `${number}.${number}.${number}`, 10 | author: pkg.homepage?.split('/')[3] || 'unknown', 11 | } 12 | -------------------------------------------------------------------------------- /src/formats/yml.ts: -------------------------------------------------------------------------------- 1 | import * as YAML from 'yaml' 2 | import type { FormatParser } from '../types.js' 3 | import util from '../util.js' 4 | 5 | const yml: FormatParser = { 6 | read(path) { 7 | const content = util.readFile(path, '').trim() 8 | return YAML.parse(content) 9 | }, 10 | 11 | write(path, state) { 12 | const content 
FROM node:20-alpine

WORKDIR /app

# Copy package manifests first so the dependency layer is cached
# independently of application code changes
COPY package*.json ./

# Install ONLY production dependencies (for externalized deps).
# --omit=dev replaces the deprecated --only=production flag.
RUN npm ci --omit=dev && npm cache clean --force

# Copy pre-built application (built outside the image; see .dockerignore)
COPY dist/ ./dist/

# Set environment variables
ENV NODE_ENV=production

# Run the server
CMD ["node", "dist/index.js"]
Record 15 | } 16 | 17 | export interface FormatParser { 18 | read(path: string): State 19 | write(path: string, state: State): void 20 | } 21 | 22 | export interface Task { 23 | id: string 24 | text: string 25 | status: string 26 | index: number 27 | } 28 | 29 | export interface Metadata { 30 | source: Source 31 | state: State 32 | groups: Record 33 | tasks: Task[] 34 | tasksByIdOrText: Record 35 | statuses: string[] 36 | } 37 | 38 | export interface Tool { 39 | schema: S 40 | description: string 41 | isResource: boolean 42 | isReadOnly: boolean 43 | handler: (args: z.infer, context?: any) => any 44 | fromArgs: (args: string[]) => z.infer 45 | } 46 | -------------------------------------------------------------------------------- /src/metadata.ts: -------------------------------------------------------------------------------- 1 | import sources from './sources.js' 2 | import storage from './storage.js' 3 | import type { Metadata, Task } from './types.js' 4 | import util from './util.js' 5 | 6 | const metadata = { 7 | load(sourceId?: string): Metadata { 8 | const source = sources.require(sourceId) 9 | const state = storage.load(source.path) 10 | const statuses = util.keysOf(state.groups) 11 | 12 | const groups: Record = {} 13 | const tasks: Task[] = [] 14 | const tasksByIdOrText: Record = {} 15 | 16 | for (const status of statuses) { 17 | const taskTexts = state.groups[status] || [] 18 | groups[status] = taskTexts.map((text, index) => { 19 | const id = util.generateId(text) 20 | const task: Task = { id, text, status, index } 21 | tasks.push(task) 22 | tasksByIdOrText[id] = task 23 | tasksByIdOrText[text] = task 24 | return task 25 | }) 26 | } 27 | return { source, state, groups, tasks, tasksByIdOrText, statuses } 28 | }, 29 | } 30 | 31 | export default metadata 32 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | 
Copyright (c) 2025 Ariel Flesler 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /src/cli.ts: -------------------------------------------------------------------------------- 1 | import { ZodError } from 'zod' 2 | import tools from './tools.js' 3 | import type { Tool } from './types.js' 4 | 5 | type Tools = typeof tools 6 | type ToolName = keyof Tools 7 | 8 | const cli = { 9 | isCommand: (arg: string) => arg in tools, 10 | 11 | async run(args: string[]) { 12 | try { 13 | const cmd = args.shift() as ToolName 14 | const tool = tools[cmd] 15 | const res = await cli.runTool(tool, tool.fromArgs(args)) 16 | console.log(res) 17 | } catch (err) { 18 | if (err instanceof ZodError) { 19 | const issues = err.issues.map(issue => `${issue.path.join('.')}: ${issue.message}`).join(', ') 20 | console.error(`Error: ${issues}`) 21 | process.exit(1) 22 | } 23 | throw err 24 | } 25 | }, 26 | 27 | async runTool(tool: Tool, args: any): Promise { 28 | try { 29 | const validatedArgs = tool.schema.parse(args) 30 | const result = tool.handler(validatedArgs) 31 | return typeof result === 'string' ? 
result : JSON.stringify(result) 32 | } catch (err) { 33 | throw err 34 | } 35 | }, 36 | } 37 | 38 | export default cli 39 | -------------------------------------------------------------------------------- /.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "parser": "@typescript-eslint/parser", 3 | "parserOptions": { 4 | "project": ["./tsconfig.json", "./bin/tsconfig.json"], 5 | "ecmaVersion": 2022, 6 | "sourceType": "module", 7 | "warnOnUnsupportedTypeScriptVersion": false 8 | }, 9 | "plugins": [ 10 | "@typescript-eslint" 11 | ], 12 | "env": { 13 | "node": true, 14 | "es2022": true 15 | }, 16 | "settings": { 17 | "react": { 18 | "version": "999.999.999" 19 | } 20 | }, 21 | "rules": { 22 | "semi": ["error", "never"], 23 | "@typescript-eslint/semi": ["error", "never"], 24 | "quotes": ["error", "single"], 25 | "indent": ["error", 2], 26 | "no-trailing-spaces": "error", 27 | "eol-last": "error", 28 | "comma-dangle": ["error", "always-multiline"], 29 | "object-curly-spacing": ["error", "always"], 30 | "array-bracket-spacing": ["error", "never"], 31 | "@typescript-eslint/no-unused-vars": ["error", { "argsIgnorePattern": "^_" }], 32 | "@typescript-eslint/consistent-type-imports": "error", 33 | "no-multiple-empty-lines":["error", { "max": 1, "maxEOF": 1, "maxBOF": 0 }], 34 | "no-console": "off" 35 | } 36 | } -------------------------------------------------------------------------------- /src/logger.ts: -------------------------------------------------------------------------------- 1 | import fs from 'fs' 2 | import env from './env.js' 3 | import pkg from './pkg.js' 4 | import util from './util.js' 5 | 6 | // Can't log to stdio as it disrupts the JSON-RPC protocol 7 | const LOG_FILE = util.resolve('./logs.json', util.REPO) 8 | 9 | function formatMessage(level: string, msg: string, data?: object): string { 10 | const logEntry = { 11 | timestamp: new Date().toISOString(), 12 | level, 13 | version: pkg.version, 14 | 
import fs from 'fs'
import { defineConfig } from 'tsup'

export default defineConfig((options) => {
  // Conditional config: fast build (--no-dts) vs full build
  // Full builds also clean, treeshake, minify and emit declarations;
  // fast builds skip all of that and report their duration instead.
  const dts = options.dts !== false
  const now = Date.now()
  return {
    entry: ['src/index.ts'],
    format: ['esm'],
    platform: 'node',
    target: 'node20',
    outDir: 'dist',
    dts,
    clean: dts,
    treeshake: dts,
    minify: dts,
    silent: !dts,
    skipNodeModulesBundle: true,
    esbuildOptions(options) {
      // Shebang so dist/index.js is directly executable (used as "bin")
      options.banner = {
        js: '#!/usr/bin/env node',
      }
      options.legalComments = 'none'
      options.drop = ['debugger']
    },
    async onSuccess() {
      // Fix deprecated import assertion syntax in built output
      // (esbuild emits `assert { type: 'json' }`; newer Node wants `with`)
      const outputPath = 'dist/index.js'
      let content = fs.readFileSync(outputPath, 'utf-8')
      content = content.replace(
        /import\([^,]+,\s*\{\s*assert:\s*\{\s*type:\s*['"]json['"]\s*\}\s*\}\s*\)/g,
        (match) => match.replace('assert:', 'with:'),
      )
      fs.writeFileSync(outputPath, content, 'utf-8')
      if (!dts) {
        console.log(`Build success in ${Date.now() - now}ms`)
      }
    },
  }
})
9 | # Execute command or just echo it based on dry-run flag 10 | run() { 11 | if [[ "$DRY_RUN" == "true" ]]; then 12 | echo " [DRY] $*" 13 | else 14 | "$@" 15 | fi 16 | } 17 | 18 | # Read package info from package.json 19 | PACKAGE_NAME=$(jq -r '.name' package.json) 20 | PACKAGE_VERSION=$(jq -r '.version' package.json) 21 | 22 | # Extract GitHub username from git remote URL 23 | GIT_URL=$(git config --get remote.origin.url) 24 | GITHUB_USER=$(echo "$GIT_URL" | sed -E 's|.*[:/]([^/]+)/.*|\1|') 25 | DOCKER_REPO="${GITHUB_USER}/${PACKAGE_NAME}" 26 | 27 | echo "📦 Publishing ${PACKAGE_NAME} v${PACKAGE_VERSION} to Docker Hub..." 28 | echo "🐙 GitHub user: ${GITHUB_USER}" 29 | 30 | # Build the Docker image 31 | echo "🔨 Building Docker image..." 32 | run docker build -t "${DOCKER_REPO}" . 33 | 34 | # Push latest tag 35 | echo "🚀 Pushing ${DOCKER_REPO}:latest..." 36 | run docker push "${DOCKER_REPO}" 37 | 38 | # Tag and push specific version 39 | echo "🏷️ Tagging and pushing ${DOCKER_REPO}:${PACKAGE_VERSION}..." 40 | run docker tag "${DOCKER_REPO}" "${DOCKER_REPO}:${PACKAGE_VERSION}" 41 | run docker push "${DOCKER_REPO}:${PACKAGE_VERSION}" 42 | 43 | echo "✅ Successfully published to Docker Hub!" 44 | echo " Latest: ${DOCKER_REPO}:latest" 45 | echo " Version: ${DOCKER_REPO}:${PACKAGE_VERSION}" 46 | 47 | if [[ "$DRY_RUN" == "true" ]]; then 48 | echo "" 49 | echo "💡 This was a dry run. Run without --dry-run to actually publish." 50 | fi -------------------------------------------------------------------------------- /src/schemas.ts: -------------------------------------------------------------------------------- 1 | import { z } from 'zod' 2 | import env from './env.js' 3 | import storage from './storage.js' 4 | import util from './util.js' 5 | 6 | export default { 7 | sourcePath: z.string().min(1, util.trimLines(` 8 | Path to a file (one of ${storage.supportedExtensions().join(', ')}). 
9 | - It can be relative if you provide a workspace/project path 10 | - Otherwise it must be absolute! 11 | - Never invent or guess one! Ask the user for it 12 | `)), 13 | 14 | sourceId: z.string().min(1).optional().describe(util.trimLines(` 15 | Source ID from task_setup() response 16 | - Defaults to most recent in the workspace if not provided 17 | - Try to always provide it! 18 | - If you don't have it, ask the user for a file path and call task_setup() 19 | `)), 20 | 21 | status: z.enum(env.STATUSES as [string, ...string[]]).describe(util.trimLines(` 22 | You might need to infer it from the context: 23 | - "${env.STATUS_TODO}" for tasks coming up next (e.g. "Do X next") 24 | - "${env.STATUS_WIP}" for what you\'ll do now (e.g. "First do X") 25 | ${env.STATUS_REMINDERS ? `- "${env.STATUS_REMINDERS}" instructions for you (the AI) to be constantly reminded of` : ''} 26 | ${env.STATUS_NOTES ? `- "${env.STATUS_NOTES}" to collect non-actionable notes` : ''} 27 | `)), 28 | 29 | ids: z.array(z.string()).describe('The IDs of existing tasks'), 30 | 31 | index: z.number().int().min(0).optional().describe(util.trimLines(` 32 | 0-based index to place the tasks. 
import env from './env.js'
import json from './formats/json.js'
import md from './formats/md.js'
import yml from './formats/yml.js'
import type { FormatParser, State } from './types.js'
import util from './util.js'

// Parsers keyed by file extension (see util.ext())
const PARSERS: Record<string, FormatParser> = {
  md,
  json,
  yml,
}

const storage = {
  /**
   * Loads the task state from a file, creating it (with one empty group
   * per configured status) when it does not exist yet.
   * NOTE(review): parse failures fall back to an empty state, so a
   * corrupt file's contents would be silently replaced on the next
   * save — confirm this best-effort behavior is intended.
   */
  load(path: string): State {
    const empty = storage.emptyState()
    if (!util.exists(path)) {
      // Persist the empty skeleton immediately so the file exists
      return storage.save(path, empty)
    }
    try {
      const state = storage.getParser(path).read(path)
      if (!env.KEEP_DELETED) {
        // In case it was switched off after
        delete state.groups[env.STATUS_DELETED]
      }
      // Spread over the skeleton so every configured status is present,
      // while keeping any extra groups found in the file
      return { ...empty, groups: { ...empty.groups, ...state.groups } }
    } catch {
      return empty
    }
  },

  /** Writes the state using the parser matching the file's extension. */
  save(path: string, state: State): State {
    storage.getParser(path).write(path, state)
    return state
  },

  /** Get the appropriate parser based on file extension */
  getParser(path: string): FormatParser {
    const extension = util.ext(path)
    const parser = PARSERS[extension]
    if (!parser) {
      const exts = storage.supportedExtensions().join(', ')
      throw new Error(`Unsupported file extension: ${extension}. Use one of: ${exts}`)
    }
    return parser
  },

  /** Extensions that have a registered parser */
  supportedExtensions: () => Object.keys(PARSERS),

  /** A fresh state with one empty group per configured status */
  emptyState(): State {
    const groups: Record<string, string[]> = {}
    for (const status of env.STATUSES) {
      groups[status] = []
    }
    return { groups }
  },
}

export default storage
import _ from 'lodash'
import { isAbsolute } from 'path'
import env from './env.js'
import type { Source, SourceRaw } from './types.js'
import util from './util.js'

const SOURCES_PATH = util.resolve(env.SOURCES_PATH, util.REPO)
// If it's equal to HOME, it's not a valid workspace
const CWD = util.CWD === process.env.HOME ? '' : util.CWD

const sources = {
  /**
   * Reads the raw registry file; any read/parse failure yields an
   * empty list (best-effort by design — the registry is rebuildable).
   */
  raw(): SourceRaw[] {
    try {
      const content = util.readFile(SOURCES_PATH, '[]')
      const data: SourceRaw[] = JSON.parse(content)
      // Filter out non-objects (legacy format)
      return data.filter(_.isObject)
    } catch {
      return []
    }
  },

  /** Registry entries with their derived IDs attached. */
  load(): Source[] {
    return sources.raw().map(sources.fromRaw)
  },

  /**
   * Registers a source file, resolving relative paths against the
   * workspace, and moves it to the front of the registry (most recent
   * first). Throws when a relative path has no workspace to resolve with.
   */
  register(sourcePath: string, workspace = CWD): Source {
    let path = sourcePath
    if (!isAbsolute(path)) {
      if (!workspace) {
        throw new Error('You must specify a workspace directory when registering a relative path.')
      }
      path = util.resolve(path, workspace)
    }
    const list = sources.raw()
    // Remove if exists and add to front (LIFO)
    const filtered = list.filter(s => s.path !== path)
    const source: SourceRaw = { path, workspace }
    util.writeFile(SOURCES_PATH, JSON.stringify([source, ...filtered]))
    return sources.fromRaw(source)
  },

  /**
   * Resolves a source by ID, or falls back to the workspace's most
   * recently registered one (then the overall most recent).
   * Throws with guidance for the caller (an LLM) when nothing matches.
   */
  require(id?: string, workspace = CWD): Source {
    const list = sources.load()
    const msg = 'You must request a file path from the user, make it absolute and call tasks_setup.'
    if (id) {
      const src = list.find(src => src.id === id)
      if (!src) {
        throw new Error(`Source "${id}" not found. ${msg}`)
      }
      return src
    }
    // Default to the workspace's most recent
    const src = list.find(s => s.workspace === workspace) || list[0]
    if (!src) {
      throw new Error(msg)
    }
    return src
  },

  /** Attaches the stable ID derived from the source's path. */
  fromRaw(raw: SourceRaw): Source {
    return { ...raw, id: util.generateId(raw.path) }
  },
}

export default sources
(SKIP_IF_EMPTY.includes(group) || !env.STATUSES.includes(group))) { 47 | continue 48 | } 49 | content += `${PREFIX}${group}\n\n` 50 | for (const task of tasks) { 51 | const char = group === env.STATUS_DONE ? 'x' : 52 | group === env.STATUS_NOTES || group === env.STATUS_REMINDERS ? '' : ' ' 53 | const block = char ? `[${char}] ` : '' 54 | const escaped = task.replace(/\r?\n/g, '\\n') 55 | content += `- ${block}${escaped}\n` 56 | } 57 | content += '\n' 58 | } 59 | util.writeFile(path, `${content.trim()}\n`) 60 | }, 61 | } 62 | 63 | export default md 64 | -------------------------------------------------------------------------------- /src/util.ts: -------------------------------------------------------------------------------- 1 | import crypto from 'crypto' 2 | import { existsSync, mkdirSync, readFileSync, writeFileSync } from 'fs' 3 | import _ from 'lodash' 4 | import { dirname, isAbsolute, resolve } from 'path' 5 | import { fileURLToPath } from 'url' 6 | 7 | /** Object.keys() with more accurate types */ 8 | export type KeysOf = Array 9 | 10 | const ID_LENGTH = 4 11 | // From Cursor 12 | const CWD = process.env.WORKSPACE_FOLDER_PATHS || process.cwd() 13 | 14 | const util = { 15 | CWD, 16 | // Relative to the project root 17 | REPO: resolve(dirname(fileURLToPath(import.meta.url)), '..'), 18 | 19 | /** Resolve a path relative to the project root (avoids __dirname recreation everywhere) */ 20 | resolve(path: string, dir = CWD): string { 21 | if (isAbsolute(path)) { 22 | return path 23 | } 24 | return resolve(dir, path) 25 | }, 26 | 27 | readFile(path: string, def?: string): string { 28 | if (!util.exists(path)) { 29 | return def || '' 30 | } 31 | return readFileSync(path, 'utf-8') 32 | }, 33 | 34 | writeFile(path: string, content: string): void { 35 | util.mkdirp(dirname(path)) 36 | writeFileSync(path, content, 'utf-8') 37 | }, 38 | 39 | mkdirp(path: string): void { 40 | if (!util.exists(path)) { 41 | mkdirSync(path, { recursive: true }) 42 | } 43 | }, 44 | 45 | 
ext(path: string): string { 46 | const match = path.match(/\.(\w{2,5})$/) 47 | return match ? match[1] : '' 48 | }, 49 | 50 | isFile(path: string): boolean { 51 | return !!util.ext(path) 52 | }, 53 | 54 | exists(path: string): boolean { 55 | return existsSync(path) 56 | }, 57 | 58 | generateId(text: string): string { 59 | const hash = crypto.createHash('md5').update(text).digest('base64url') 60 | return hash.replace(/\W+/, '').slice(-ID_LENGTH) 61 | }, 62 | 63 | isId(id: string): boolean { 64 | return id.length === ID_LENGTH && /^\w+$/.test(id) 65 | }, 66 | 67 | /** Object.keys() with more accurate types */ 68 | keysOf(obj: T): KeysOf { 69 | return Object.keys(obj) as KeysOf 70 | }, 71 | 72 | /** Checks if a search is included in a string, case insensitive */ 73 | fuzzySearch(str: string, search: string): boolean { 74 | return util.canonical(str).includes(util.canonical(search)) 75 | }, 76 | 77 | canonical: _.memoize((str: string): string => { 78 | return str.toLowerCase().replace(/\W+/g, ' ').trim() 79 | }), 80 | 81 | trimLines(str: string): string { 82 | return str.replace(/^ +\n?/gm, '').trim() 83 | }, 84 | 85 | clamp(value: number, min: number, max: number): number { 86 | return Math.max(min, Math.min(value, max)) 87 | }, 88 | } 89 | 90 | export default util 91 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "mcp-tasks", 3 | "version": "1.6.1", 4 | "type": "module", 5 | "description": "An efficient task manager. 
Designed to minimize tool confusion and maximize LLM budget efficiency while providing powerful search, filtering, and organization capabilities across multiple file formats (Markdown, JSON, YAML)", 6 | "main": "dist/index.js", 7 | "types": "dist/index.d.ts", 8 | "bin": { 9 | "mcp-tasks": "dist/index.js" 10 | }, 11 | "files": [ 12 | "dist/**/*", 13 | "README.md", 14 | "LICENSE" 15 | ], 16 | "scripts": { 17 | "build": "npm run build:code", 18 | "build:code": "tsup --no-dts", 19 | "build:all": "tsup", 20 | "build:check": "tsc --noEmit", 21 | "build:watch": "npm run build:code -- --watch", 22 | "build:clean": "rimraf dist/*", 23 | "start": "dist/index.js", 24 | "start:check": "dist/index.js --check", 25 | "start:http": "TRANSPORT=http dist/index.js", 26 | "dev": "npm run build:watch -- --onSuccess \"dist/index.js\"", 27 | "dev:bg": "rimraf dev.log && npm run dev > dev.log 2>&1", 28 | "cli": "npm run build && npm start --", 29 | "inspect": "npx fastmcp inspect src/index.ts", 30 | "ts": "tsx", 31 | "eslint": "eslint --cache", 32 | "lint": "npm run eslint -- 'src/**/*.ts'", 33 | "lint:fix": "npm run lint -- --fix", 34 | "lint:full": "npm run build:check && npm run lint", 35 | "test": "npm run ts -- bin/test-tools.ts", 36 | "test:full": "npm run build:clean && npm run lint:full && SILENT=true npm test && npm run build:all && npm run start:check", 37 | "prepack": "npm run test:full", 38 | "publish:dry": "npm pack --dry-run", 39 | "publish:npm": "npm publish", 40 | "publish:docker": "bin/docker-publish.sh", 41 | "publish:all": "npm run publish:npm && npm run publish:docker && npm run publish:test", 42 | "publish:test": "npx mcp-tasks --help", 43 | "version:patch": "npm version patch", 44 | "version:minor": "npm version minor", 45 | "version:major": "npm version major" 46 | }, 47 | "keywords": [ 48 | "mcp", 49 | "task", 50 | "management", 51 | "llm", 52 | "model-context-protocol", 53 | "ai", 54 | "claude", 55 | "cursor", 56 | "todo", 57 | "productivity" 58 | ], 59 | 
"author": "Ariel Flesler ", 60 | "license": "MIT", 61 | "repository": { 62 | "type": "git", 63 | "url": "git+https://github.com/flesler/mcp-tasks.git" 64 | }, 65 | "homepage": "https://github.com/flesler/mcp-tasks#readme", 66 | "bugs": { 67 | "url": "https://github.com/flesler/mcp-tasks/issues" 68 | }, 69 | "engines": { 70 | "node": ">=20.0.0" 71 | }, 72 | "dependencies": { 73 | "fastmcp": "^3.9.0", 74 | "lodash": "^4.17.21", 75 | "yaml": "^2.8.0", 76 | "zod": "^3.22.0" 77 | }, 78 | "devDependencies": { 79 | "@types/lodash": "^4.14.0", 80 | "@types/node": "^20.0.0", 81 | "@typescript-eslint/eslint-plugin": "^7.0.0", 82 | "@typescript-eslint/parser": "^7.0.0", 83 | "eslint": "^8.57.0", 84 | "rimraf": "^6.0.1", 85 | "tsup": "^8.5.0", 86 | "tsx": "^4.20.3", 87 | "types-package-json": "^2.0.39", 88 | "typescript": "^5.8.3" 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /src/tools.ts: -------------------------------------------------------------------------------- 1 | import _ from 'lodash' 2 | import type { ZodSchema } from 'zod' 3 | import { z } from 'zod' 4 | import env from './env.js' 5 | import metadata from './metadata.js' 6 | import pkg from './pkg.js' 7 | import schemas from './schemas.js' 8 | import sources from './sources.js' 9 | import storage from './storage.js' 10 | import type { Tool } from './types.js' 11 | import util from './util.js' 12 | 13 | const tools = { 14 | setup: defineTool('setup', { 15 | schema: z.object({ 16 | workspace: z.string().optional().describe('Workspace/project directory path (provided by the IDE or use $PWD)'), 17 | source_path: schemas.sourcePath, 18 | }), 19 | fromArgs: ([sourcePath, workspace]) => ({ source_path: sourcePath, workspace: workspace || undefined }), 20 | description: util.trimLines(` 21 | Initializes an source file from a path 22 | - Always call once per conversation when asked to use these tools 23 | - Ask the user to clarify the file path if not given, before 
calling this tool 24 | - Creates the file if it does not exist 25 | - Returns the source ID for further use 26 | ${env.INSTRUCTIONS ? `- ${env.INSTRUCTIONS}` : ''} 27 | `), 28 | handler: (args) => { 29 | storage.getParser(args.source_path) 30 | // Register the source and get ID 31 | const source = sources.register(args.source_path, args.workspace) 32 | return getSummary(source.id) 33 | }, 34 | }), 35 | 36 | search: defineTool('search', { 37 | schema: z.object({ 38 | source_id: schemas.sourceId, 39 | statuses: z.array(schemas.status).optional().describe('Specific statuses to get. Gets all if omitted'), 40 | terms: z.array(z.string()).optional().describe('Search terms to filter tasks by text or status (case-insensitive, OR logic, no regex or wildcards)'), 41 | ids: schemas.ids.optional().describe('Optional list of task IDs to search for'), 42 | limit: z.number().int().min(1).optional().describe('Maximum number of results (only for really large task lists)'), 43 | }), 44 | fromArgs: ([statuses = '', terms = '']) => ({ statuses: split(statuses), terms: split(terms) }), 45 | description: 'Search tasks from specific statuses with optional text & ID filtering', 46 | isReadOnly: true, 47 | handler: (args) => { 48 | const meta = metadata.load(args.source_id) 49 | const groups = args.statuses?.length ? 
args.statuses : meta.statuses 50 | let results = groups.flatMap(status => meta.groups[status] || []) 51 | 52 | if (args.ids) { 53 | results = results.filter(task => args.ids!.includes(task.id)) 54 | } 55 | 56 | if (args.terms?.length) { 57 | results = results.filter(task => args.terms!.some(term => 58 | util.fuzzySearch(`${task.text} ${task.status}`, term), 59 | )) 60 | } 61 | if (args.limit) { 62 | results = results.slice(0, args.limit) 63 | } 64 | return results 65 | }, 66 | }), 67 | 68 | add: defineTool('add', { 69 | schema: z.object({ 70 | source_id: schemas.sourceId, 71 | texts: z.array(z.string().min(1)).describe('Each text becomes a task'), 72 | status: schemas.status, 73 | index: schemas.index, 74 | }), 75 | fromArgs: ([text, status = env.STATUS_TODO, index]) => ({ texts: [text], status, index: index ? Number(index) : undefined }), 76 | description: 'Add new tasks with a specific status. It\'s faster and cheaper if you use this in batch. User can add atomically while AI works using the CLI add tool', 77 | handler: (args, context) => { 78 | let meta = metadata.load(args.source_id) 79 | const { source, state } = meta 80 | const { texts, status } = args 81 | // Remove existing tasks with same text from all groups (duplicate handling) 82 | for (const groupName of meta.statuses) { 83 | if (state.groups[groupName]) { 84 | state.groups[groupName] = state.groups[groupName].filter(text => !texts.includes(text)) 85 | } 86 | } 87 | let group = state.groups[status] 88 | // Special handling for Deleted and other unknown statuses 89 | if (!group) { 90 | storage.save(source.path, state) 91 | return getSummary(source.id) 92 | } 93 | const wip = state.groups[env.STATUS_WIP] 94 | const todos = state.groups[env.STATUS_TODO] 95 | if (env.AUTO_WIP && args.status === env.STATUS_WIP) { 96 | // Move all WIP but the first to ToDo 97 | todos.unshift(...wip) 98 | wip.length = 0 99 | } 100 | 101 | // Add new tasks at the specified index 102 | const index = util.clamp(args.index ?? 
group.length, 0, group.length) 103 | group.splice(index, 0, ...texts) 104 | const isUpdate = !!context?.update 105 | if (env.AUTO_WIP && !wip.length && todos[0] && (todos[0] !== texts[0] || isUpdate)) { 106 | // Move first ToDo to WIP (but not for updates) 107 | wip.push(todos.shift()!) 108 | } 109 | storage.save(source.path, state) 110 | // Re-load metadata after state changes 111 | meta = metadata.load(source.id) 112 | const affected = _.compact(texts.map(t => meta.tasksByIdOrText[t])) 113 | return getSummary(source.id, { [isUpdate ? 'updated' : 'added']: affected }) 114 | }, 115 | }), 116 | 117 | update: defineTool('update', { 118 | schema: z.object({ 119 | source_id: schemas.sourceId, 120 | ids: schemas.ids, 121 | status: z.union([schemas.status, z.literal(env.STATUS_DELETED)]).describe(util.trimLines(` 122 | ${schemas.status.description} 123 | - "${env.STATUS_DELETED}" when they want these removed 124 | ${env.AUTO_WIP ? `- Updating tasks to ${env.STATUS_WIP} moves others to ${env.STATUS_TODO}, finishing a ${env.STATUS_WIP} task moves the first ${env.STATUS_DONE} to ${env.STATUS_WIP}` : ''} 125 | `)), 126 | index: schemas.index, 127 | }), 128 | fromArgs: ([taskIds, status]) => ({ ids: split(taskIds) || [], status }), 129 | description: 'Update tasks in bulk by ID to a different status. Returns complete summary no need to call tasks_summary afterwards. 
Prevents AI accidentally rename or deleting tasks during mass updates, not even possible', 130 | handler: (args, context = {}) => { 131 | const meta = metadata.load(args.source_id) 132 | const texts = args.ids.map((id) => { 133 | const task = meta.tasksByIdOrText[id] 134 | if (task) { 135 | return task.text 136 | } 137 | if (util.isId(id)) { 138 | throw new Error(`Task ID ${id} not found`) 139 | } 140 | // Assume the AI passed a text for a new task by mistake 141 | return id 142 | }) 143 | // Use add internally also for DELETED 144 | return tools.add.handler({ 145 | source_id: args.source_id, 146 | status: args.status, 147 | index: args.index, 148 | texts, 149 | }, { ...context, update: true }) 150 | }, 151 | }), 152 | 153 | summary: defineTool('summary', { 154 | schema: z.object({ 155 | source_id: schemas.sourceId, 156 | }), 157 | fromArgs: () => ({}), 158 | description: 'Get per-status task counts and the WIP task(s). Redundant right after tasks_add/tasks_update', 159 | isReadOnly: true, 160 | handler: (args) => { 161 | return getSummary(args.source_id) 162 | }, 163 | }), 164 | 165 | debug: defineTool('debug', { 166 | schema: z.object({}), 167 | fromArgs: () => ({}), 168 | description: util.trimLines(` 169 | Get debug information about the MCP server and context 170 | - ${pkg.name} is at version ${pkg.version} 171 | `), 172 | isReadOnly: true, 173 | isEnabled: env.DEBUG, 174 | handler: (args, context) => { 175 | return { 176 | ...args, processEnv: process.env, argv: process.argv, 177 | env, context, version: pkg.version, CWD: util.CWD, ROOT: util.REPO, 178 | } 179 | }, 180 | }), 181 | } as const satisfies Record 182 | 183 | function getSummary(sourceId?: string, extra?: object) { 184 | const meta = metadata.load(sourceId) 185 | const counts = _.mapValues(meta.groups, tasks => tasks.length) 186 | const total = Object.values(counts).reduce((sum, count) => sum + count, 0) 187 | const wip = _.camelCase(env.STATUS_WIP) 188 | return JSON.stringify({ 189 | source: 
_.omit(meta.source, ['workspace']), 190 | ...counts, total, ...extra, 191 | instructions: env.INSTRUCTIONS || undefined, 192 | reminders: env.STATUS_REMINDERS ? meta.groups[env.STATUS_REMINDERS] : undefined, 193 | [wip]: meta.groups[env.STATUS_WIP], 194 | }) 195 | } 196 | 197 | function defineTool(name: string, tool: { 198 | schema: S 199 | description: string 200 | isResource?: boolean 201 | isReadOnly?: boolean 202 | isEnabled?: boolean 203 | handler: (args: z.infer, context?: any) => any 204 | fromArgs: (args: string[]) => z.infer 205 | }) { 206 | const toolName = env.PREFIX_TOOLS ? `tasks_${name}` : name 207 | return { 208 | ...tool, 209 | name: toolName, 210 | isResource: tool.isResource ?? false, 211 | isReadOnly: tool.isReadOnly ?? false, 212 | isEnabled: tool.isEnabled ?? true, 213 | } 214 | } 215 | 216 | function split(str: string): string[] | undefined { 217 | return str.length > 0 ? str.split(/\s*,\s*/).filter(Boolean) : undefined 218 | } 219 | 220 | export default tools 221 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MCP Tasks 📋 2 | 3 | [![Install MCP Server](https://cursor.com/deeplink/mcp-install-dark.svg)](https://cursor.com/install-mcp?name=mcp-tasks&config=JTdCJTIyY29tbWFuZCUyMiUzQSUyMm5weCUyMC15JTIwbWNwLXRhc2tzJTIyJTdE) 4 | [![npm version](https://img.shields.io/npm/v/mcp-tasks.svg)](https://www.npmjs.com/package/mcp-tasks) 5 | [![Node.js](https://img.shields.io/node/v/mcp-tasks.svg)](https://nodejs.org/) 6 | [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) 7 | [![Docker](https://img.shields.io/docker/v/flesler/mcp-tasks?label=docker)](https://hub.docker.com/r/flesler/mcp-tasks) 8 | 9 | An efficient task manager. 
Designed to minimize tool confusion and maximize LLM budget efficiency while providing powerful search, filtering, and organization capabilities across multiple file formats (Markdown, JSON, YAML) 10 | 11 | ## 📚 **Table of Contents** 12 | 13 | - [✨ Features](#-features) 14 | - [🚀 Quick Start](#-quick-start) 15 | - [🤖 AI Integration Tips](#-ai-integration-tips) 16 | - [🔧 Installation Examples](#-installation-examples) 17 | - [📁 Supported File Formats](#-supported-file-formats) 18 | - [🛠️ Available Tools](#️-available-tools) 19 | - [🎛️ Environment Variables](#️-environment-variables) 20 | - [📊 File Formats](#-file-formats) 21 | - [🖥️ Server Usage](#️-server-usage) 22 | - [💻 CLI Usage](#-cli-usage) 23 | - [🧪 Development](#-development) 24 | - [🛠️ Troubleshooting](#️-troubleshooting) 25 | - [Why not let AI edit files directly?](#why-not-just-have-ai-edit-the-task-files-directly) 26 | - [🤝 Contributing](#-contributing) 27 | - [📄 License](#-license) 28 | - [🔗 Links](#-links) 29 | 30 | ## ✨ **Features** 31 | 32 | - ⚡ **Ultra-efficient design**: Minimal tool count (5 tools) to reduce AI confusion 33 | - 🎯 **Budget-optimized**: Batch operations, smart defaults and auto-operations minimize LLM API calls 34 | - 🚀 **Multi-format support**: Markdown (`.md`), JSON (`.json`), and YAML (`.yml`) task files 35 | - 🔍 **Powerful search**: Case-insensitive text/status filtering with OR logic, and ID-based lookup 36 | - 📊 **Smart organization**: Status-based filtering with customizable workflow states 37 | - 🎯 **Position-based indexing**: Easy task ordering with 0-based insertion 38 | - 📁 **Multi-source support**: Manage multiple task files simultaneously 39 | - 🔄 **Real-time updates**: Changes persist automatically to your chosen format 40 | - 🤖 **Auto WIP management**: Automatically manages work-in-progress task limits 41 | - 🚫 **Duplicate prevention**: Automatically prevents duplicate tasks 42 | - 🛡️ **Type-safe**: Full TypeScript support with Zod validation 43 | - 🔒 **Ultra-safe**: 
AI has no way to rewrite or delete your tasks (unless you enable it), only add and move them 44 | - 📅 **Optional reminders**: Enable a dedicated Reminders section the AI constantly sees and can maintain 45 | 46 | ## 🚀 **Quick Start** 47 | 48 | Add this to `~/.cursor/mcp.json` for Cursor, `~/.config/claude_desktop_config.json` for Claude Desktop. 49 | 50 | ### Option 1: NPX (Recommended) 51 | ```json 52 | { 53 | "mcpServers": { 54 | "mcp-tasks": { 55 | "command": "npx", 56 | "args": ["-y", "mcp-tasks"] 57 | } 58 | } 59 | } 60 | ``` 61 | 62 | ### Option 2: Docker 63 | ```json 64 | { 65 | "mcpServers": { 66 | "mcp-tasks": { 67 | "command": "docker", 68 | "args": [ 69 | "run", 70 | "--rm", 71 | "-i", 72 | "flesler/mcp-tasks" 73 | ] 74 | } 75 | } 76 | } 77 | ``` 78 | 79 | ## 🤖 **AI Integration Tips** 80 | 81 | To encourage the AI to use these tools, you can start with a prompt like the following, with any path you want with .md (recommended), .json, .yml: 82 | 83 | ``` 84 | Use mcp-tasks tools to track our work in path/to/tasks.md 85 | ``` 86 | 87 | If you are telling it about new or updated tasks, you can append this to the end of your prompt: 88 | 89 | ``` 90 | use mcp-tasks 91 | ``` 92 | 93 | **Adding tasks while AI works:** To safely add tasks without interfering with AI operations, [use the CLI](#-cli-usage) from a separate terminal: 94 | 95 | ```bash 96 | npx mcp-tasks add "Your new task text" "To Do" 0 97 | ``` 98 | 99 | 100 | ## 🔧 **Installation Examples** 101 | 102 | **Full configuration with custom environment:** 103 | ```json 104 | { 105 | "mcpServers": { 106 | "mcp-tasks": { 107 | "command": "npx", 108 | "args": ["-y", "mcp-tasks"], 109 | "env": { 110 | "STATUS_WIP": "In Progress", 111 | "STATUS_TODO": "To Do", 112 | "STATUS_DONE": "Done", 113 | "STATUS_REMINDERS": "Reminders", 114 | "STATUS_NOTES": "Notes", 115 | "STATUSES": "In Progress,To Do,Done,Backlog,Reminders,Notes", 116 | "AUTO_WIP": "true", 117 | "PREFIX_TOOLS": "true", 118 | "KEEP_DELETED": 
"true", 119 | "TRANSPORT": "stdio", 120 | "PORT": "4680", 121 | "INSTRUCTIONS": "Use mcp-tasks tools when the user mentions new or updated tasks" 122 | } 123 | } 124 | } 125 | } 126 | ``` 127 | 128 | **HTTP transport for remote access:** 129 | 130 | First run the server: 131 | ```bash 132 | TRANSPORT=http PORT=4680 npx mcp-tasks 133 | ``` 134 | Then: 135 | ```json 136 | { 137 | "mcpServers": { 138 | "mcp-tasks": { 139 | "type": "streamableHttp", 140 | "url": "http://localhost:4680/mcp" 141 | } 142 | } 143 | } 144 | ``` 145 | 146 | ## 📁 **Supported File Formats** 147 | 148 | | Extension | Format | Best For | Auto-Created | 149 | |-----------|--------|----------|--------------| 150 | | `.md` | Markdown | Human-readable task lists | ✅ | 151 | | `.json` | JSON | Structured data, APIs | ✅ | 152 | | `.yml` | YAML | Configuration files | ✅ | 153 | 154 | **Format is auto-detected from file extension.** All formats support the same features and can be mixed in the same project. 155 | 156 | **Recommended**: Markdown (`.md`) for human readability and editing 157 | 158 | **⚠️ Warning**: Start with a new file rather than using pre-existing task files to avoid losing non-task content. 
159 | 160 | ## 🛠️ **Available Tools** 161 | 162 | When `PREFIX_TOOLS=true` (default), all tools are prefixed with `tasks_`: 163 | 164 | | Tool | Description | Parameters | 165 | |------|-------------|------------| 166 | | `tasks_setup` | Initialize a task file (creates if missing, supports `.md`, `.json`, `.yml`) | `source_path`, `workspace?` | 167 | | `tasks_search` | Search tasks with filtering | `source_id`, `statuses?`, `terms?`, `ids?` | 168 | | `tasks_add` | Add new tasks to a status | `source_id`, `texts[]`, `status`, `index?` | 169 | | `tasks_update` | Update tasks by ID | `source_id`, `ids[]`, `status`, `index?` | 170 | | `tasks_summary` | Get task counts and work-in-progress | `source_id` | 171 | 172 | **ID Format**: Both `source_id` (from file path) and task `id` (from task text) are 4-character alphanumeric strings (e.g., `"xK8p"`, `"m3Qw"`). 173 | 174 | ### Tool Examples 175 | 176 | **Setup a task file:** 177 | ```javascript 178 | tasks_setup({ 179 | workspace: "/path/to/project", 180 | source_path: "tasks.md" // relative to workspace or absolute 181 | // source_path: "tasks.json" 182 | // source_path: "tasks.yml" 183 | }) 184 | // Returns: {"source":{"id":"xK8p","path":"/path/to/project/tasks.md"},"Backlog":0,"To Do":0,"In Progress":0,"Done":0,"inProgress":[]} 185 | // Source ID (4-char alphanumeric) is used for all subsequent operations 186 | ``` 187 | 188 | **Add tasks:** 189 | ```javascript 190 | tasks_add({ 191 | source_id: "xK8p", // From setup response 192 | texts: ["Implement authentication", "Write tests"], 193 | status: "To Do", 194 | index: 0 // Add at top (optional) 195 | }) 196 | // Returns: {"source":{"id":"xK8p","path":"/absolute/path/to/tasks.md"},"Backlog":0,"To Do":2,"In Progress":0,"Done":0,"inProgress":[],"tasks":[{"id":"m3Qw","text":"Implement authentication","status":"To Do","index":0},{"id":"p9Lx","text":"Write tests","status":"To Do","index":1}]} 197 | ``` 198 | 199 | **Search and filter:** 200 | ```javascript 201 | 
tasks_search({ 202 | source_id: "xK8p", // From setup response 203 | terms: ["auth", "deploy"], // Search terms (text or status, OR logic) 204 | statuses: ["To Do"], // Filter by status 205 | ids: ["m3Qw", "p9Lx"] // Filter by specific task IDs 206 | }) 207 | // Returns: [{"id":"m3Qw","text":"Implement authentication","status":"To Do","index":0}] 208 | ``` 209 | 210 | **Update tasks status:** 211 | ```javascript 212 | tasks_update({ 213 | source_id: "xK8p", // From setup response 214 | ids: ["m3Qw", "p9Lx"], // Task IDs from add/search responses 215 | status: "Done" // Use "Deleted" to remove 216 | }) 217 | // Returns: {"source":{"id":"xK8p","path":"/absolute/path/to/tasks.md"},"Backlog":0,"To Do":0,"In Progress":0,"Done":2,"inProgress":[],"tasks":[{"id":"m3Qw","text":"Implement authentication","status":"Done","index":0},{"id":"p9Lx","text":"Write tests","status":"Done","index":1}]} 218 | ``` 219 | 220 | **Get overview:** 221 | ```javascript 222 | tasks_summary({ 223 | source_id: "xK8p" // From setup response 224 | }) 225 | // Returns: {"source":{"id":"xK8p","path":"/absolute/path/to/tasks.md"},"Backlog":0,"To Do":0,"In Progress":1,"Done":2,"inProgress":[{"id":"r7Km","text":"Fix critical bug","status":"In Progress","index":0}]} 226 | ``` 227 | 228 | ## 🎛️ **Environment Variables** 229 | 230 | | Variable | Default | Description | 231 | |----------|---------|-------------| 232 | | `TRANSPORT` | `stdio` | Transport mode: `stdio` or `http` | 233 | | `PORT` | `4680` | HTTP server port (when `TRANSPORT=http`) | 234 | | `PREFIX_TOOLS` | `true` | Prefix tool names with `tasks_` | 235 | | `STATUS_WIP` | `In Progress` | Work-in-progress status name | 236 | | `STATUS_TODO` | `To Do` | ToDo status name | 237 | | `STATUS_DONE` | `Done` | Completed status name | 238 | | `STATUS_REMINDERS` | `Reminders` | Reminders for the AI (empty string to disable) | 239 | | `STATUS_NOTES` | `Notes` | Notes/non-actionable tasks (empty string to disable) | 240 | | `STATUSES` | `Backlog` | 
Comma-separated additional statuses | 241 | `AUTO_WIP` | `true` | One WIP moves rest to To Do, first To Do to WIP when no WIP's | 242 | `KEEP_DELETED` | `true` | Retain deleted tasks (AI can't lose your tasks!) | 243 | `INSTRUCTIONS` | `...` | Included in all tool responses, for the AI to follow | 244 | `SOURCES_PATH` | `./sources.json` | File to store source registry (internal) | 245 | `DEBUG` | `false` | if true, enable the `tasks_debug` tool | 246 | 247 | ### Advanced Configuration Examples 248 | 249 | Optionally, the WIP/ToDo/Done statuses can be included to control their order. 250 | 251 | **Custom workflow statuses:** 252 | ```json 253 | { 254 | "env": { 255 | "STATUSES": "WIP,Pending,Archived,Done,To Review", 256 | "STATUS_WIP": "WIP", 257 | "STATUS_TODO": "Pending", 258 | "AUTO_WIP": "false" 259 | } 260 | } 261 | ``` 262 | 263 | ## 📊 **File Formats** 264 | 265 | ### Markdown (`.md`) - Human-Readable 266 | ```markdown 267 | # Tasks - File Name 268 | 269 | ## In Progress 270 | - [ ] Write user registration 271 | 272 | ## To Do 273 | - [ ] Implement authentication 274 | - [ ] Set up CI/CD pipeline 275 | 276 | ## Backlog 277 | - [ ] Plan architecture 278 | - [ ] Design database schema 279 | 280 | ## Done 281 | - [x] Set up project structure 282 | - [x] Initialize repository 283 | 284 | ## Reminders 285 | - [ ] Don't move to Done until you verified it works 286 | - [ ] After you move to Done, commit all the changes, use the task name as the commit message 287 | 288 | ## Notes 289 | - [ ] The task tools were really great to use!
290 | ``` 291 | 292 | ### JSON (`.json`) - Structured Data 293 | ```json 294 | { 295 | "groups": { 296 | "In Progress": [ 297 | "Write user registration" 298 | ], 299 | "To Do": [ 300 | "Implement authentication", 301 | "Set up CI/CD pipeline" 302 | ], 303 | "Backlog": [ 304 | "Plan architecture", 305 | "Design database schema" 306 | ], 307 | "Done": [ 308 | "Set up project structure", 309 | "Initialize repository" 310 | ], 311 | "Reminders": [ 312 | "Don't move to Done until you verified it works", 313 | "After you move to Done, commit all the changes, use the task name as the commit message" 314 | ], 315 | "Notes": [ 316 | "The task tools were really great to use!" 317 | ] 318 | } 319 | } 320 | ``` 321 | 322 | ### YAML (`.yml`) - Configuration-Friendly 323 | ```yaml 324 | groups: 325 | "In Progress": 326 | - Write user registration 327 | "To Do": 328 | - Implement authentication 329 | - Set up CI/CD pipeline 330 | Backlog: 331 | - Plan architecture 332 | - Design database schema 333 | Done: 334 | - Set up project structure 335 | - Initialize repository 336 | Reminders: 337 | - Don't move to Done until you verified it works 338 | - After you move to Done, commit all the changes, use the task name as the commit message 339 | ``` 340 | 341 | ## 🖥️ **Server Usage** 342 | 343 | ```bash 344 | # Show help 345 | mcp-tasks --help 346 | 347 | # Default: stdio transport 348 | mcp-tasks 349 | 350 | # HTTP transport 351 | TRANSPORT=http mcp-tasks 352 | TRANSPORT=http PORT=8080 mcp-tasks 353 | 354 | # Custom configuration 355 | STATUS_WIP="Working" AUTO_WIP=false mcp-tasks 356 | ``` 357 | 358 | ## 💻 **CLI Usage** 359 | 360 | You can also use `mcp-tasks` (or `npx mcp-tasks`) as a command-line tool for quick task management: 361 | 362 | ```bash 363 | # Setup a task file 364 | mcp-tasks setup tasks.md $PWD # Setup with workspace 365 | 366 | # Add tasks 367 | mcp-tasks add "Implement authentication" # Defaults to "To Do" status 368 | mcp-tasks add "Write tests" "Backlog" # Add 
with specific status 369 | mcp-tasks add "Fix critical bug" "In Progress" 0 # Add at top (index 0) 370 | 371 | # Search tasks 372 | mcp-tasks search # All tasks 373 | mcp-tasks search "" "auth,login" # Search for specific terms 374 | mcp-tasks search "To Do,Done" "" # Filter by statuses 375 | mcp-tasks search "In Progress" "bug" # Filter by status and search terms 376 | 377 | # Update task status (comma-separated IDs) 378 | mcp-tasks update m3Qw,p9Lx Done 379 | 380 | # Get summary 381 | mcp-tasks summary 382 | 383 | # Add a reminder (feature must be enabled with REMINDERS=true) 384 | mcp-tasks add "Don't move to Done until you verified it works" Reminders 385 | ``` 386 | 387 | **CLI Features:** 388 | - Direct access to all MCP tool functionality 389 | - JSON output for easy parsing and scripting 390 | - Same reliability and duplicate prevention as MCP tools 391 | - Perfect for automation scripts and CI/CD pipelines 392 | 393 | ## 🧪 **Development** 394 | 395 | ```bash 396 | # Clone and setup 397 | git clone https://github.com/flesler/mcp-tasks 398 | cd mcp-tasks 399 | npm install 400 | 401 | # Development mode (auto-restart) 402 | npm run dev # STDIO transport 403 | npm run dev:http # HTTP transport on port 4680 404 | 405 | # Build and test 406 | npm run build # Compile TypeScript 407 | npm run lint # Check code style 408 | npm run lint:full # Build + lint 409 | ``` 410 | 411 | ## 🛠️ **Troubleshooting** 412 | 413 | ### **Requirements** 414 | - **Node.js ≥20** - This package requires Node.js version 20 or higher 415 | 416 | ### **Common Issues** 417 | 418 | **ERR_MODULE_NOT_FOUND when running `npx-tasks`** 419 | - **Problem**: Error like `Cannot find module '@modelcontextprotocol/sdk/dist/esm/server/index.js'` when running `npx mcp-tasks` 420 | - **Cause**: Corrupt or incomplete npx cache preventing proper dependency resolution 421 | - **Solution**: Clear the npx cache and try again: 422 | ```bash 423 | npx clear-npx-cache 424 | npx mcp-tasks 425 | ``` 426 | - 
**Note**: This issue can occur on both Node.js v20 and v22, and the cache clear resolves it 427 | 428 | **Where are my tasks stored?** 429 | - Tasks are stored in the file path you specified by the AI in `tasks_setup` 430 | - The absolute path is returned in every tool call response under `source.path` 431 | - If you forgot the location, check any tool response or ask the AI to show it to you 432 | 433 | **Lost content in Markdown files:** 434 | - ⚠️ The tools will rewrite the entire file, preserving only tasks under recognized status sections 435 | - Non-task content (notes, documentation) may be lost when tools modify the file 436 | - Use a dedicated task file rather than mixing tasks with other content 437 | 438 | ## Why not just have AI edit the task files directly? 439 | 440 | - **File parsing complexity:** AI must read entire files, parse markdown structure, and understand current state - expensive and error-prone 441 | - **Multi-step operations:** Moving a task from "In Progress" to "Done" requires multiple `read_file`, `grep_search`, `sed` calls to locate and modify correct sections 442 | - **Context loss:** Large task files forcing AI to work with incomplete chunks due to token restrictions and lose track of overall structure 443 | - **State comprehension:** AI struggles to understand true project state when reading fragmented file sections - which tasks are actually in progress? 
444 | - **Edit precision:** Manual editing risks corrupting markdown formatting, losing tasks, or accidentally modifying the wrong sections 445 | - **Concurrent editing conflicts:** When AI directly edits files, humans can't safely make manual changes without creating conflicts or overwrites 446 | - **Token inefficiency:** Reading+parsing+editing cycles consume far more tokens than structured tool calls with clear inputs/outputs 447 | - **Safety:** AI can accidentally change or delete tasks when directly editing files, but with these tools it cannot rewrite or delete your tasks 448 | 449 | ## 🤝 **Contributing** 450 | 451 | We welcome contributions! Please: 452 | 453 | 1. Fork the repository 454 | 2. Create a feature branch: `git checkout -b feature-name` 455 | 3. Make your changes with tests 456 | 4. Run: `npm run lint:full` 457 | 5. Submit a pull request 458 | 459 | ## 📄 **License** 460 | 461 | MIT License - see [LICENSE](LICENSE) for details. 462 | 463 | ## 🔗 **Links** 464 | 465 | - 📦 **[NPM Package](https://www.npmjs.com/package/mcp-tasks)** 466 | - 🐙 **[GitHub Repository](https://github.com/flesler/mcp-tasks)** 467 | - 🐛 **[Report Issues](https://github.com/flesler/mcp-tasks/issues)** 468 | - 📚 **[MCP Specification](https://modelcontextprotocol.io/)** 469 | - ⚡ **[FastMCP Framework](https://github.com/punkpeye/fastmcp)** 470 | -------------------------------------------------------------------------------- /bin/test-tools.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tsx 2 | 3 | import _ from 'lodash' 4 | import path from 'path' 5 | import env from '../src/env.js' 6 | import storage from '../src/storage.js' 7 | import tools from '../src/tools.js' 8 | import util from '../src/util.js' 9 | 10 | // Disable AUTO_WIP for consistent test expectations 11 | env.AUTO_WIP = false 12 | env.STATUS_REMINDERS = 'Reminders' 13 | env.STATUS_NOTES = 'Notes' 14 | 15 | // Track test failures for proper exit codes 16 | let 
hasFailures = false 17 | 18 | // Centralized logging helpers that track failures and respect SILENT mode 19 | function log(message: string) { 20 | if (!process.env.SILENT) { 21 | console.log(message) 22 | } 23 | } 24 | 25 | function logError(message: string) { 26 | hasFailures = true 27 | console.error('❌ Error: ' + message) 28 | process.exit(1) 29 | } 30 | 31 | // Global error handlers to ensure exit code 1 on any failure 32 | process.on('uncaughtException', (err) => { 33 | logError('Uncaught exception: ' + err.message) 34 | process.exit(1) 35 | }) 36 | 37 | process.on('unhandledRejection', (reason) => { 38 | logError('Unhandled rejection: ' + String(reason)) 39 | process.exit(1) 40 | }) 41 | 42 | log('🧪 Testing tools...') 43 | 44 | const BACKLOG = 'Backlog' 45 | 46 | // Helper function to create expected count objects 47 | function createExpected(todo: number, done: number, backlog: number = 0, inProgress: number = 0, notes: number = 0, reminders: number = 0): any { 48 | const expected: any = {} 49 | expected[BACKLOG] = backlog 50 | expected[env.STATUS_TODO] = todo 51 | expected[env.STATUS_DONE] = done 52 | expected[env.STATUS_WIP] = inProgress 53 | expected[env.STATUS_NOTES] = notes 54 | expected[env.STATUS_REMINDERS] = reminders 55 | return expected 56 | } 57 | 58 | // Helper function to group search results by status 59 | function groupResults(tasks: any): any { 60 | log(`groupResults input: ${typeof tasks} ${JSON.stringify(tasks)}`) 61 | if (!Array.isArray(tasks)) { 62 | log(`Search returned non-array: ${JSON.stringify(tasks)}`) 63 | return {} 64 | } 65 | const grouped: any = {} 66 | for (const task of tasks) { 67 | if (!grouped[task.status]) { 68 | grouped[task.status] = [] 69 | } 70 | grouped[task.status].push(task) 71 | } 72 | return grouped 73 | } 74 | 75 | const sourceIds: string[] = [] 76 | 77 | // Test all formats with the same complete test suite 78 | storage.supportedExtensions().forEach((ext, i) => { 79 | const format = ext.toUpperCase() 80 | 
log(`${format} === TEST ${i + 1}: ${format} Format - COMPLETE SUITE ===`) 81 | const absolute = path.join(process.cwd(), `tmp/test/tools.${ext}`) 82 | const sourceId = setupFile(absolute) 83 | sourceIds.push(sourceId) 84 | 85 | // 1. Initial setup verification 86 | assertCounts(sourceId, createExpected(0, 0, 0, 0), 'Initial state') 87 | 88 | // 2. Basic task addition (AUTO_WIP moves one to In Progress) 89 | runTool('add', { 90 | source_id: sourceId, 91 | texts: [`${format} task 1`, `${format} task 2`], 92 | status: env.STATUS_TODO, 93 | }) 94 | assertCounts(sourceId, createExpected(2, 0, 0, 0), 'After adding 2 tasks') 95 | 96 | const readResultArray = runTool('search', { source_id: sourceId }) 97 | log(`Raw search result: ${JSON.stringify(readResultArray)}`) 98 | const readResult = groupResults(readResultArray) 99 | 100 | assert(readResult[env.STATUS_TODO] && readResult[env.STATUS_TODO].length === 2, `Should have 2 tasks in ${env.STATUS_TODO}`) 101 | assert(readResult[env.STATUS_TODO][0].text === `${format} task 1`, 'First task text should match') 102 | 103 | // 3. Add to different status 104 | runTool('add', { 105 | source_id: sourceId, 106 | texts: [`${format} Backlog task`], 107 | status: BACKLOG, 108 | }) 109 | assertCounts(sourceId, createExpected(2, 0, 1, 0), 'After adding Backlog task') 110 | 111 | // 4. Task operations (update/move) 112 | const taskToMove = groupResults(runTool('search', { source_id: sourceId, statuses: [env.STATUS_TODO] }))[env.STATUS_TODO][0] 113 | log(`🎯 Moving task ID: ${taskToMove.id}`) 114 | 115 | runTool('update', { 116 | source_id: sourceId, 117 | ids: [taskToMove.id], 118 | status: env.STATUS_DONE, 119 | }) 120 | assertCounts(sourceId, createExpected(1, 1, 1, 0), 'After moving 1 task to Done') 121 | 122 | // 5. 
Bulk operations 123 | runTool('add', { 124 | source_id: sourceId, 125 | texts: ['Bulk task 1', 'Bulk task 2', 'Bulk task 3', 'Bulk task 4'], 126 | status: BACKLOG, 127 | }) 128 | assertCounts(sourceId, createExpected(1, 1, 5, 0), 'After bulk add') 129 | 130 | const backlogTasks = groupResults(runTool('search', { source_id: sourceId, statuses: [BACKLOG] })) 131 | const taskIds = backlogTasks[BACKLOG].slice(0, 2).map((t: any) => t.id) 132 | log(`🎯 Moving multiple task IDs: ${taskIds}`) 133 | 134 | runTool('update', { 135 | source_id: sourceId, 136 | ids: taskIds, 137 | status: env.STATUS_DONE, 138 | }) 139 | assertCounts(sourceId, createExpected(1, 3, 3, 0), 'After bulk move') 140 | 141 | // 6. Task deletion 142 | const doneTaskId = groupResults(runTool('search', { source_id: sourceId, statuses: [env.STATUS_DONE] }))[env.STATUS_DONE][0].id 143 | runTool('update', { 144 | source_id: sourceId, 145 | ids: [doneTaskId], 146 | status: 'Deleted', 147 | }) 148 | assertCounts(sourceId, createExpected(1, 2, 3, 0), 'After deleting 1 task') 149 | 150 | // 7. Index positioning 151 | runTool('add', { 152 | source_id: sourceId, 153 | texts: ['Inserted at index 0'], 154 | status: env.STATUS_TODO, 155 | index: 0, 156 | }) 157 | 158 | const orderedTasks = groupResults(runTool('search', { source_id: sourceId, statuses: [env.STATUS_TODO] })) 159 | assert(orderedTasks[env.STATUS_TODO][0].text === 'Inserted at index 0', 'Task should be inserted at index 0') 160 | 161 | // 8. Test some basic scenarios (simplified without Skipped) 162 | runTool('add', { 163 | source_id: sourceId, 164 | texts: ['Test task for status change'], 165 | status: env.STATUS_TODO, 166 | }) 167 | assertCounts(sourceId, createExpected(3, 2, 3, 0), 'After adding test task') 168 | 169 | // 9. 
Move task between statuses 170 | const testTask = groupResults(runTool('search', { source_id: sourceId, statuses: [env.STATUS_TODO] }))[env.STATUS_TODO].find((t: any) => t.text === 'Test task for status change') 171 | runTool('update', { 172 | source_id: sourceId, 173 | ids: [testTask.id], 174 | status: BACKLOG, 175 | }) 176 | assertCounts(sourceId, createExpected(2, 2, 4, 0), 'After To Do -> Backlog') 177 | 178 | // 10. Move task from Done to Backlog 179 | const doneTask = groupResults(runTool('search', { source_id: sourceId, statuses: [env.STATUS_DONE] }))[env.STATUS_DONE][0] 180 | runTool('update', { 181 | source_id: sourceId, 182 | ids: [doneTask.id], 183 | status: BACKLOG, 184 | }) 185 | assertCounts(sourceId, createExpected(2, 1, 5, 0), 'After Done -> Backlog') 186 | 187 | // 11. Move task from Backlog to To Do 188 | const backlogTask = groupResults(runTool('search', { source_id: sourceId, statuses: [BACKLOG] }))[BACKLOG][0] 189 | runTool('update', { 190 | source_id: sourceId, 191 | ids: [backlogTask.id], 192 | status: env.STATUS_TODO, 193 | }) 194 | assertCounts(sourceId, createExpected(3, 1, 4, 0), 'After Backlog -> To Do') 195 | 196 | // 12. Move task from Backlog to Done 197 | const backlogTask2 = groupResults(runTool('search', { source_id: sourceId, statuses: [BACKLOG] }))[BACKLOG][0] 198 | runTool('update', { 199 | source_id: sourceId, 200 | ids: [backlogTask2.id], 201 | status: env.STATUS_DONE, 202 | }) 203 | assertCounts(sourceId, createExpected(3, 2, 3, 0), 'After Backlog -> Done') 204 | 205 | // 13. 
Add back a task that was moved earlier - should create new since it no longer exists 206 | const duplicateText = `${format} task 1` 207 | log(`🔍 Looking for existing task: "${duplicateText}"`) 208 | const beforeDupe = groupResults(runTool('search', { source_id: sourceId })) 209 | log(`Current state before duplicate test: ${Object.entries(beforeDupe).map(([k, v]) => `${k}:${(v as any[]).length}`)}`) 210 | const existingTask = Object.values(beforeDupe).flat().find((t: any) => t.text === duplicateText) 211 | log(`Found existing task: ${JSON.stringify(existingTask)}`) 212 | 213 | runTool('add', { 214 | source_id: sourceId, 215 | texts: [duplicateText], 216 | status: env.STATUS_TODO, 217 | }) 218 | // Since the task doesn't exist anymore, it should be added as new, so counts should increase 219 | assertCounts(sourceId, createExpected(4, 2, 3, 0), 'After adding back deleted task') 220 | 221 | // 14. Delete and recreate task 222 | const taskToDelete = groupResults(runTool('search', { source_id: sourceId, statuses: [env.STATUS_TODO] }))[env.STATUS_TODO][0] 223 | const deletedText = taskToDelete.text 224 | runTool('update', { 225 | source_id: sourceId, 226 | ids: [taskToDelete.id], 227 | status: 'Deleted', 228 | }) 229 | assertCounts(sourceId, createExpected(3, 2, 3, 0), 'After deleting task') 230 | 231 | runTool('add', { 232 | source_id: sourceId, 233 | texts: [deletedText], 234 | status: env.STATUS_DONE, 235 | }) 236 | assertCounts(sourceId, createExpected(3, 3, 3, 0), 'After re-creating deleted task') 237 | 238 | // 15. Bulk operations with different status 239 | const backlogTaskIds = groupResults(runTool('search', { source_id: sourceId, statuses: [BACKLOG] }))[BACKLOG].slice(0, 2).map((t: any) => t.id) 240 | runTool('update', { 241 | source_id: sourceId, 242 | ids: backlogTaskIds, 243 | status: env.STATUS_TODO, 244 | }) 245 | assertCounts(sourceId, createExpected(5, 3, 1, 0), 'After bulk Backlog -> To Do') 246 | 247 | // 16. 
Search with query filtering 248 | log('\n🔍 Testing search with filtering...') 249 | const allResults = tools.search.handler({ source_id: sourceId }) 250 | groupResults(allResults) 251 | log(`📊 All tasks: [${Object.entries(_.groupBy(allResults, 'status')).map(([s, tasks]) => `"${s}:${tasks.length}"`)}]`) 252 | 253 | const filteredResults = tools.search.handler({ source_id: sourceId, terms: [format.toUpperCase()] }) 254 | groupResults(filteredResults) 255 | log(`🔍 Filtered by "${format.toUpperCase()}": ${filteredResults.length} tasks`) 256 | 257 | // Test ID search functionality 258 | if (filteredResults.length >= 2) { 259 | const testIds = [filteredResults[0].id, filteredResults[1].id] 260 | const idResults = tools.search.handler({ source_id: sourceId, ids: testIds }) 261 | groupResults(idResults) 262 | log(`🔍 Filtered by IDs [${testIds.join(', ')}]: ${idResults.length} tasks`) 263 | assert(idResults.length === 2, `Should find exactly 2 tasks by ID, got ${idResults.length}`) 264 | assert(idResults.every((task: any) => testIds.includes(task.id)), 'All returned tasks should have the requested IDs') 265 | log('✅ ID search works correctly') 266 | } 267 | 268 | const finalResults = tools.search.handler({ source_id: sourceId }) 269 | groupResults(finalResults) 270 | log('📊 Final task distribution:') 271 | const finalGrouped = _.groupBy(finalResults, 'status') 272 | Object.entries(finalGrouped).forEach(([status, tasks]: [string, any]) => { 273 | log(` ${status}: ${tasks.length} tasks`) 274 | }) 275 | 276 | log(`✅ ${format} format test completed successfully!\n`) 277 | }) 278 | 279 | // Test Notes and Reminders functionality 280 | log('\n📝 === NOTES AND REMINDERS TESTS ===') 281 | 282 | // Test 1: Test Notes functionality 283 | log('\n📝 Testing Notes functionality...') 284 | const notesTestPath = path.join(process.cwd(), 'tmp/test/notes-test.md') 285 | const notesSourceId = setupFile(notesTestPath) 286 | 287 | // Add notes 288 | runTool('add', { 289 | source_id: 
notesSourceId, 290 | texts: ['Research new tech stack', 'Team meeting notes from Monday'], 291 | status: env.STATUS_NOTES, 292 | }) 293 | assertCounts(notesSourceId, createExpected(0, 0, 0, 0, 2, 0), 'After adding Notes') 294 | 295 | // Verify Notes appear in summary 296 | const notesSummary = JSON.parse(runTool('summary', { source_id: notesSourceId })) 297 | assert(notesSummary[env.STATUS_NOTES] === 2, 'Notes should show count of 2') 298 | 299 | log('✅ Notes functionality working correctly!') 300 | 301 | // Test 2: Test Reminders functionality 302 | log('\n🔔 Testing Reminders functionality...') 303 | const remindersTestPath = path.join(process.cwd(), 'tmp/test/reminders-test.md') 304 | const remindersSourceId = setupFile(remindersTestPath) 305 | 306 | // Add reminders 307 | runTool('add', { 308 | source_id: remindersSourceId, 309 | texts: ['Review quarterly goals', 'Schedule performance reviews'], 310 | status: env.STATUS_REMINDERS, 311 | }) 312 | assertCounts(remindersSourceId, createExpected(0, 0, 0, 0, 0, 2), 'After adding Reminders') 313 | 314 | // Verify Reminders appear in summary with reminders array 315 | const remindersSummary = JSON.parse(runTool('summary', { source_id: remindersSourceId })) 316 | assert(remindersSummary[env.STATUS_REMINDERS] === 2, 'Reminders should show count of 2') 317 | assert(Array.isArray(remindersSummary.reminders), 'Summary should include reminders array') 318 | assert(remindersSummary.reminders.length === 2, 'Reminders array should have 2 items') 319 | assert(remindersSummary.reminders[0].text === 'Review quarterly goals', 'First reminder text should match') 320 | 321 | log('✅ Reminders functionality working correctly!') 322 | 323 | // Test 3: Test mixed Notes and Reminders 324 | log('\n📝🔔 Testing mixed Notes and Reminders...') 325 | const mixedTestPath = path.join(process.cwd(), 'tmp/test/mixed-test.md') 326 | const mixedSourceId = setupFile(mixedTestPath) 327 | 328 | // Add tasks to all statuses including Notes and Reminders 
329 | runTool('add', { 330 | source_id: mixedSourceId, 331 | texts: ['Regular task'], 332 | status: env.STATUS_TODO, 333 | }) 334 | runTool('add', { 335 | source_id: mixedSourceId, 336 | texts: ['Important note'], 337 | status: env.STATUS_NOTES, 338 | }) 339 | runTool('add', { 340 | source_id: mixedSourceId, 341 | texts: ['Important reminder'], 342 | status: env.STATUS_REMINDERS, 343 | }) 344 | assertCounts(mixedSourceId, createExpected(1, 0, 0, 0, 1, 1), 'After adding mixed tasks') 345 | 346 | // Verify mixed summary 347 | const mixedSummary = JSON.parse(runTool('summary', { source_id: mixedSourceId })) 348 | assert(mixedSummary[env.STATUS_TODO] === 1, 'Should have 1 todo') 349 | assert(mixedSummary[env.STATUS_NOTES] === 1, 'Should have 1 note') 350 | assert(mixedSummary[env.STATUS_REMINDERS] === 1, 'Should have 1 reminder') 351 | assert(Array.isArray(mixedSummary.reminders), 'Should include reminders array') 352 | assert(mixedSummary.reminders.length === 1, 'Should have 1 reminder in array') 353 | 354 | log('✅ Mixed Notes and Reminders working correctly!') 355 | 356 | // Test 4: Test search functionality with Notes and Reminders 357 | log('\n🔍 Testing search with Notes and Reminders...') 358 | const allMixedTasks = runTool('search', { source_id: mixedSourceId }) 359 | const groupedMixed = groupResults(allMixedTasks) 360 | assert(groupedMixed[env.STATUS_NOTES]?.length === 1, 'Search should find Notes') 361 | assert(groupedMixed[env.STATUS_REMINDERS]?.length === 1, 'Search should find Reminders') 362 | 363 | // Test filtering by status 364 | const notesOnly = runTool('search', { source_id: mixedSourceId, statuses: [env.STATUS_NOTES] }) 365 | assert(notesOnly.length === 1, 'Should find only Notes when filtering') 366 | assert(notesOnly[0].status === env.STATUS_NOTES, 'Found task should be a Note') 367 | 368 | const remindersOnly = runTool('search', { source_id: mixedSourceId, statuses: [env.STATUS_REMINDERS] }) 369 | assert(remindersOnly.length === 1, 'Should find 
only Reminders when filtering') 370 | assert(remindersOnly[0].status === env.STATUS_REMINDERS, 'Found task should be a Reminder') 371 | 372 | log('✅ Search with Notes and Reminders working correctly!') 373 | 374 | // Test 5: Test markdown format with Notes and Reminders 375 | log('\n📄 Testing markdown format with Notes and Reminders...') 376 | const mdNotesRemindersPath = path.join(process.cwd(), 'tmp/test/md-notes-reminders.md') 377 | const mdNotesRemindersId = setupFile(mdNotesRemindersPath) 378 | 379 | // Add content and verify file format 380 | runTool('add', { 381 | source_id: mdNotesRemindersId, 382 | texts: ['Task to complete'], 383 | status: env.STATUS_TODO, 384 | }) 385 | runTool('add', { 386 | source_id: mdNotesRemindersId, 387 | texts: ['Research findings'], 388 | status: env.STATUS_NOTES, 389 | }) 390 | runTool('add', { 391 | source_id: mdNotesRemindersId, 392 | texts: ['Follow up next week'], 393 | status: env.STATUS_REMINDERS, 394 | }) 395 | 396 | // Read the markdown file and verify format 397 | const mdContent = util.readFile(mdNotesRemindersPath) 398 | assert(mdContent.includes('## To Do'), 'Should have To Do section') 399 | assert(mdContent.includes('## Notes'), 'Should have Notes section') 400 | assert(mdContent.includes('## Reminders'), 'Should have Reminders section') 401 | assert(mdContent.includes('- [ ] Task to complete'), 'Should have todo task') 402 | assert(mdContent.includes('- Research findings'), 'Should have note (no checkbox)') 403 | assert(mdContent.includes('- Follow up next week'), 'Should have reminder without checkbox') 404 | 405 | log('✅ Markdown format with Notes and Reminders working correctly!') 406 | 407 | log('\n✅ All Notes and Reminders tests passed!') 408 | 409 | // Test markdown parser defaults unrecognized sections to To Do 410 | log('\n📝 === MARKDOWN PARSER SPECIFIC TEST ===') 411 | log('\n📝 Testing markdown parser defaults unrecognized sections to To Do...') 412 | 413 | const mdTestPath = path.join(process.cwd(), 
'tmp/test/md-parser-test.md') 414 | 415 | const mdSourceId = setupFile(mdTestPath) 416 | 417 | // Write markdown content AFTER setup (setupFile overwrites with empty content) 418 | util.writeFile(mdTestPath, `# Test Tasks 419 | 420 | - [ ] Task before any section 421 | - [ ] Another task before sections 422 | 423 | ## Random Section 424 | - [ ] Task from random section 425 | 426 | ## Some Other Header 427 | - [ ] Task from other header 428 | 429 | ## To Do 430 | - [ ] Existing To Do task 431 | 432 | ## Done 433 | - [x] Completed task 434 | `) 435 | 436 | const mdResult = groupResults(tools.search.handler({ source_id: mdSourceId })) 437 | 438 | // Check that tasks before sections were moved to To Do 439 | const todoTasks = mdResult[env.STATUS_TODO] || [] 440 | const expectedTodoTexts = ['Task before any section', 'Another task before sections', 'Existing To Do task'].sort() 441 | const actualTodoTexts = todoTasks.map((t: any) => t.text).sort() 442 | assert(todoTasks.length === 3, `Expected 3 To Do tasks (2 from before sections + 1 existing), got ${todoTasks.length}`) 443 | assert(JSON.stringify(actualTodoTexts) === JSON.stringify(expectedTodoTexts), 444 | `To Do task texts don't match. 
Expected: ${JSON.stringify(expectedTodoTexts)}, Got: ${JSON.stringify(actualTodoTexts)}`) 445 | 446 | // Check that unrecognized sections are preserved as-is 447 | const randomSectionTasks = mdResult['Random Section'] || [] 448 | assert(randomSectionTasks.length === 1, `Expected 1 Random Section task, got ${randomSectionTasks.length}`) 449 | assert(randomSectionTasks[0].text === 'Task from random section', 'Random section task text mismatch') 450 | 451 | // Done task should remain in Done 452 | const doneTasks = mdResult[env.STATUS_DONE] || [] 453 | assert(doneTasks.length === 1, `Expected 1 Done task, got ${doneTasks.length}`) 454 | assert(doneTasks[0].text === 'Completed task', `Expected 'Completed task', got '${doneTasks[0].text}'`) 455 | 456 | log('✅ Markdown parser correctly defaults unrecognized sections to To Do!') 457 | 458 | // Test sourceId auto-detection with file-based stack 459 | log('\n🎯 Testing sourceId auto-detection...') 460 | const testFile = path.join(process.cwd(), 'tmp/test/auto-detect.md') 461 | const autoSourceId = setupFile(testFile) 462 | 463 | // Add a task without specifying sourceId (should auto-detect) 464 | runTool('add', { 465 | texts: ['Auto-detected task'], 466 | status: env.STATUS_TODO, 467 | }) 468 | 469 | // Verify the task was added to the auto-detected source 470 | const autoResult = groupResults(runTool('search', { source_id: autoSourceId })) 471 | assert(autoResult[env.STATUS_TODO].length === 1, 'Auto-detection should work') 472 | assert(autoResult[env.STATUS_TODO][0].text === 'Auto-detected task', 'Auto-detected task should match') 473 | 474 | log('\n🚨 === ERROR CONDITION TESTS ===') 475 | 476 | // Test 1: Invalid path for setup (relative path without workspace) 477 | log('\n📍 Testing setup with relative path without workspace...') 478 | try { 479 | tools.setup.handler({ source_path: 'tmp/relative/path.md', workspace: '' }) 480 | logError('Expected error for relative path without workspace') 481 | } catch (err) { 482 | if 
(err.message.includes('You must specify a workspace directory when registering a relative path')) { 483 | log('✅ Correctly rejected relative path without workspace') 484 | } else { 485 | logError(`Unexpected error: ${err.message}`) 486 | } 487 | } 488 | 489 | // Test 2: Invalid path for setup (directory without file extension) 490 | log('\n📍 Testing setup with directory path...') 491 | try { 492 | tools.setup.handler({ source_path: '/tmp/nonexistent_directory' }) 493 | logError('Expected error for directory path') 494 | } catch (err) { 495 | if (err.message.includes('Unsupported file extension')) { 496 | log('✅ Correctly rejected path without valid file extension') 497 | } else { 498 | logError(`Unexpected error: ${err.message}`) 499 | } 500 | } 501 | 502 | // Test 2b: Test sources.ts validation (path with extension but not a file) 503 | log('\n📍 Testing setup with path that looks like file but isn\'t...') 504 | try { 505 | // Use a path that has extension but is actually a directory 506 | tools.setup.handler({ source_path: '/home.md' }) 507 | logError('Expected error for path that is not a file') 508 | } catch (err) { 509 | if (err.message.includes('Must be an absolute to a file')) { 510 | log('✅ Correctly rejected path that is not actually a file') 511 | } else { 512 | log(`ℹ️ Different validation error (still good): ${err.message}`) 513 | log('✅ System correctly validates paths') 514 | } 515 | } 516 | 517 | // Test 3: Invalid source ID 518 | log('\n📍 Testing with non-existent source ID...') 519 | try { 520 | tools.summary.handler({ source_id: 'INVALID' }) 521 | logError('Expected error for invalid source ID') 522 | } catch (err) { 523 | if (err.message.includes('Source "INVALID" not found. 
You must request a file path from the user, make it absolute and call tasks_setup.')) { 524 | log('✅ Correctly rejected invalid source ID with AI-helpful message') 525 | } else { 526 | logError(`Unexpected error: ${err.message}`) 527 | } 528 | } 529 | 530 | // Test 4: DELETED status validation (positive test) 531 | log('\n📍 Testing DELETED status...') 532 | try { 533 | const testPath = util.resolve('./tmp/deleted-test.md', util.REPO) 534 | const sourceId = setupFile(testPath) 535 | 536 | // Add a task first 537 | tools.add.handler({ 538 | source_id: sourceId, 539 | texts: ['Task to delete'], 540 | status: 'To Do', 541 | }) 542 | 543 | // Search for the task 544 | const tasks = tools.search.handler({ source_id: sourceId }) 545 | if (tasks.length > 0) { 546 | // Try to delete it using DELETED status 547 | tools.update.handler({ 548 | source_id: sourceId, 549 | ids: [tasks[0].id], 550 | status: 'Deleted', 551 | }) 552 | log('✅ DELETED status accepted by schema validation') 553 | } else { 554 | log('⚠️ No tasks found to delete') 555 | } 556 | } catch (err) { 557 | logError(`DELETED status validation failed: ${err.message}`) 558 | } 559 | 560 | log('\n✅ All error condition tests passed!') 561 | log('🎉 All tests completed successfully!') 562 | 563 | log('\n🔍 === ENHANCED SEARCH TESTS ===') 564 | 565 | // Test enhanced search features: array search + status matching 566 | log('\n📍 Testing enhanced search features...') 567 | try { 568 | const testPath = util.resolve('./tmp/enhanced-search-test.md', util.REPO) 569 | const sourceId = setupFile(testPath) 570 | 571 | // Add diverse test tasks 572 | tools.add.handler({ 573 | source_id: sourceId, 574 | texts: ['Fix authentication bug', 'Add user dashboard', 'Deploy to production'], 575 | status: 'To Do', 576 | }) 577 | 578 | tools.add.handler({ 579 | source_id: sourceId, 580 | texts: ['Setup database connection'], 581 | status: 'Done', 582 | }) 583 | 584 | // Test 1: Array search with OR logic 585 | const arraySearch = 
tools.search.handler({ 586 | source_id: sourceId, 587 | terms: ['auth', 'deploy'], // Multiple search terms 588 | }) 589 | log(`✅ Array search found: ${arraySearch.length} tasks (auth OR deploy)`) 590 | 591 | // Test 2: Status search (user passes status by mistake) 592 | const statusSearch = tools.search.handler({ 593 | source_id: sourceId, 594 | terms: ['Done'], // Status search as array 595 | }) 596 | log(`✅ Status search found: ${statusSearch.length} tasks (Done status)`) 597 | 598 | // Test 3: Mixed search (text + status in one search) 599 | const mixedSearch = tools.search.handler({ 600 | source_id: sourceId, 601 | terms: ['user', 'To Do'], // Mixed search terms 602 | }) 603 | log(`✅ Mixed search found: ${mixedSearch.length} tasks (user OR To Do)`) 604 | 605 | // Test 4: Single string search (now as single-element array) 606 | const singleSearch = tools.search.handler({ 607 | source_id: sourceId, 608 | terms: ['database'], // Single search term in array 609 | }) 610 | log(`✅ Single search found: ${singleSearch.length} tasks (database)`) 611 | 612 | log('✅ Enhanced search features working perfectly!') 613 | 614 | } catch (err) { 615 | logError(`Enhanced search test failed: ${err.message}`) 616 | } 617 | 618 | log('\n🎉 All tests completed successfully!') 619 | 620 | function setupFile(absolute: string): string { 621 | util.writeFile(absolute, '') 622 | return JSON.parse(tools.setup.handler({ source_path: absolute })).sourceId 623 | } 624 | 625 | function runTool(name: string, args: any = {}) { 626 | const tool = (tools as any)[name] 627 | if (!tool) { 628 | throw new Error(`Tool ${name} not found`) 629 | } 630 | log(`🔧 ${name}(${Object.keys(args).map(k => `${k}=${JSON.stringify(args[k])}`).join(', ')})`) 631 | const result = tool.handler(args) 632 | return result 633 | } 634 | 635 | function dump(sourceId: string, prefix = '') { 636 | const summaryStr = runTool('summary', { source_id: sourceId }) 637 | const summary = JSON.parse(summaryStr) 638 | 639 | // 
Extract just the counts from the summary (remove path, sourceId, task) 640 | const counts: any = {} 641 | Object.entries(summary).forEach(([key, value]) => { 642 | if (key !== 'path' && key !== 'sourceId' && key !== 'task' && typeof value === 'number') { 643 | counts[key] = value 644 | } 645 | }) 646 | 647 | log(`${prefix}${JSON.stringify(counts)}`) 648 | return counts 649 | } 650 | 651 | function assert(condition: boolean, message: string) { 652 | if (condition) { 653 | log(`✅ ${message}`) 654 | } else { 655 | logError(`ASSERTION FAILED: ${message}`) 656 | throw new Error(`Assertion failed: ${message}`) 657 | } 658 | } 659 | 660 | function assertCounts(sourceId: string, expected: any, label: string) { 661 | const summary = dump(sourceId, `After ${label.toLowerCase()}: `) 662 | assert(summary[env.STATUS_TODO] === expected[env.STATUS_TODO], `${label}: ${env.STATUS_TODO} count should be ${expected[env.STATUS_TODO]}, got ${summary[env.STATUS_TODO]}`) 663 | assert(summary[env.STATUS_DONE] === expected[env.STATUS_DONE], `${label}: ${env.STATUS_DONE} count should be ${expected[env.STATUS_DONE]}, got ${summary[env.STATUS_DONE]}`) 664 | assert(summary[BACKLOG] === expected[BACKLOG], `${label}: Backlog count should be ${expected[BACKLOG]}, got ${summary[BACKLOG]}`) 665 | assert(summary[env.STATUS_WIP] === expected[env.STATUS_WIP], `${label}: ${env.STATUS_WIP} count should be ${expected[env.STATUS_WIP]}, got ${summary[env.STATUS_WIP]}`) 666 | if (env.STATUS_NOTES && expected[env.STATUS_NOTES] !== undefined) { 667 | assert(summary[env.STATUS_NOTES] === expected[env.STATUS_NOTES], `${label}: ${env.STATUS_NOTES} count should be ${expected[env.STATUS_NOTES]}, got ${summary[env.STATUS_NOTES]}`) 668 | } 669 | if (env.STATUS_REMINDERS && expected[env.STATUS_REMINDERS] !== undefined) { 670 | assert(summary[env.STATUS_REMINDERS] === expected[env.STATUS_REMINDERS], `${label}: ${env.STATUS_REMINDERS} count should be ${expected[env.STATUS_REMINDERS]}, got 
${summary[env.STATUS_REMINDERS]}`) 671 | } 672 | } 673 | 674 | // Exit with proper code based on test results 675 | process.exit(hasFailures ? 1 : 0) 676 | --------------------------------------------------------------------------------