├── .llm-context ├── templates │ ├── highlights.hbs │ ├── files.hbs │ ├── context.hbs │ └── prompt.hbs └── config.json ├── src ├── utils │ └── globAsync.ts ├── types │ └── index.ts ├── types.ts └── services │ ├── FileWatcherService.ts │ ├── TemplateService.ts │ ├── LoggingService.ts │ ├── ProfileService.ts │ └── CodeAnalysisService.ts ├── .cursor └── mcp.json ├── INITIAL.md ├── Dockerfile ├── examples ├── getFiles_request_schema.json └── getfiles_response_schema.json ├── smithery.yaml ├── tsconfig.json ├── .claude ├── settings.local.json └── commands │ ├── execute-prp.md │ └── generate-prp.md ├── package.json ├── LICENSE ├── GETFILES.md ├── .gitignore ├── docs ├── mcp-logging-research-findings.md ├── mcp-logging-implementation-summary.md ├── technical-design.md ├── mcp-logging-implementation-brief.md ├── logging-service-technical-spec.md └── mcp-logging-implementation-plan.md ├── CLAUDE.md ├── STREAMING.md ├── README.md └── PRPs ├── templates └── prp_base.md ├── getFiles-mcp-tool.md └── EXAMPLE_multi_agent_prp.md /.llm-context/templates/highlights.hbs: -------------------------------------------------------------------------------- 1 | {{#each highlights}} 2 | ### {{path}} 3 | 4 | ``` 5 | {{{outline}}} 6 | ``` 7 | 8 | {{/each}} -------------------------------------------------------------------------------- /.llm-context/templates/files.hbs: -------------------------------------------------------------------------------- 1 | {{#each files}} 2 | ### {{path}} 3 | 4 | ```{{language}} 5 | {{{content}}} 6 | ``` 7 | 8 | {{/each}} -------------------------------------------------------------------------------- /src/utils/globAsync.ts: -------------------------------------------------------------------------------- 1 | import { glob } from 'glob'; 2 | 3 | // Shared async glob helper for path-pattern matching. 4 | // glob v9+ (package.json pins ^10) is already promise-based and no longer 5 | // accepts a callback, so wrapping it in util.promisify produced a function 6 | // whose returned promise never settles. Re-export glob directly; the call 7 | // shape (pattern, options?) => Promise<string[]> is unchanged for callers. 8 | export const globAsync = glob; -------------------------------------------------------------------------------- /.cursor/mcp.json:
-------------------------------------------------------------------------------- 1 | { 2 | "mcpServers": { 3 | "fileContext": { 4 | "command": "node", 5 | "args": [ 6 | "C:\\Projects\\mcp-servers\\mcp-context-server\\dist\\index.js" 7 | ] 8 | 9 | } 10 | } 11 | } -------------------------------------------------------------------------------- /INITIAL.md: -------------------------------------------------------------------------------- 1 | ## FEATURE: 2 | 3 | [Insert your feature here] 4 | 5 | ## EXAMPLES: 6 | 7 | [Provide and explain examples that you have in the `examples/` folder] 8 | 9 | ## DOCUMENTATION: 10 | 11 | [List out any documentation (web pages, sources for an MCP server like Crawl4AI RAG, etc.) that will need to be referenced during development] 12 | 13 | ## OTHER CONSIDERATIONS: 14 | 15 | [Any other considerations or specific requirements - great place to include gotchas that you see AI coding assistants miss with your projects a lot] 16 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Generated by https://smithery.ai. See: https://smithery.ai/docs/config#dockerfile 2 | FROM node:lts-alpine 3 | 4 | WORKDIR /app 5 | 6 | # Copy package files 7 | COPY package*.json ./ 8 | 9 | # Install dependencies 10 | RUN npm install 11 | 12 | # Copy the rest of the project 13 | COPY . . 
14 | 15 | # Build the project 16 | RUN npm run build 17 | 18 | # Set environment variables defaults 19 | ENV MAX_CACHE_SIZE=1000 20 | ENV CACHE_TTL=3600000 21 | ENV MAX_FILE_SIZE=1048576 22 | 23 | # Start the server 24 | CMD ["node", "dist/index.js"] 25 | -------------------------------------------------------------------------------- /examples/getFiles_request_schema.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "object", 3 | "properties": { 4 | "filePathList": { 5 | "type": "array", 6 | "description": "The list of file paths for the file content to return.", 7 | "minItems": 1, 8 | "items": { 9 | "type": "object", 10 | "properties": { 11 | "fileName": { 12 | "type": "string", 13 | "description": "Path and file name for the file to be retrieved." 14 | } 15 | }, 16 | "required": [ 17 | "fileName" 18 | ] 19 | } 20 | } 21 | }, 22 | "required": [ 23 | "filePathList" 24 | ] 25 | } -------------------------------------------------------------------------------- /smithery.yaml: -------------------------------------------------------------------------------- 1 | # Smithery configuration file: https://smithery.ai/docs/config#smitheryyaml 2 | 3 | startCommand: 4 | type: stdio 5 | configSchema: 6 | # JSON Schema defining the configuration options for the MCP. 7 | type: object 8 | properties: {} 9 | commandFunction: 10 | # A JS function that produces the CLI command based on the given config to start the MCP on stdio. 
11 | |- 12 | (config) => ({ command: 'node', args: ['dist/index.js'], env: { MAX_CACHE_SIZE: process.env.MAX_CACHE_SIZE || '1000', CACHE_TTL: process.env.CACHE_TTL || '3600000', MAX_FILE_SIZE: process.env.MAX_FILE_SIZE || '1048576' } }) 13 | exampleConfig: {} 14 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "skipLibCheck": true, 4 | "target": "es2020", 5 | "module": "NodeNext", 6 | "moduleResolution": "NodeNext", 7 | "esModuleInterop": true, 8 | "outDir": "./dist", 9 | "rootDir": "./src", 10 | "strict": true, 11 | "declaration": true, 12 | "sourceMap": true, 13 | "resolveJsonModule": true, 14 | "allowJs": true, 15 | "paths": { 16 | "@typescript-eslint/*": [ 17 | "./node_modules/@typescript-eslint/*" 18 | ] 19 | } 20 | }, 21 | "include": [ 22 | "src/**/*" 23 | ], 24 | "exclude": [ 25 | "node_modules", 26 | "build" 27 | ] 28 | } -------------------------------------------------------------------------------- /.llm-context/templates/context.hbs: -------------------------------------------------------------------------------- 1 | {{#if prompt}} 2 | {{{prompt}}} 3 | {{/if}} 4 | {{#if project_notes}} 5 | {{{project_notes}}} 6 | {{/if}} 7 | {{#if user_notes}} 8 | {{{user_notes}}} 9 | {{/if}} 10 | # Repository Content: **{{project_name}}** 11 | 12 | > Generation timestamp: {{timestamp}} 13 | > Use lc-list-modified-files tool to track changes since generation 14 | 15 | {{#if sample_requested_files}} 16 | ## File Access Instructions 17 | 18 | Missing/incomplete files (marked "✓" or "○" in the repository structure) can be retrieved using the lc-get-files tool.
19 | {{/if}} 20 | 21 | ## Repository Structure 22 | 23 | ``` 24 | {{{folder_structure_diagram}}} 25 | ``` 26 | 27 | {{#if files}} 28 | ## Current Files 29 | 30 | {{> files}} 31 | {{/if}} 32 | 33 | {{#if highlights}} 34 | ## Code Outlines 35 | 36 | {{> highlights}} 37 | {{/if}} -------------------------------------------------------------------------------- /.claude/settings.local.json: -------------------------------------------------------------------------------- 1 | { 2 | "permissions": { 3 | "allow": [ 4 | "Bash(grep:*)", 5 | "Bash(ls:*)", 6 | "Bash(source:*)", 7 | "Bash(find:*)", 8 | "Bash(mv:*)", 9 | "Bash(mkdir:*)", 10 | "Bash(tree:*)", 11 | "Bash(ruff:*)", 12 | "Bash(touch:*)", 13 | "Bash(cat:*)", 14 | "Bash(ruff check:*)", 15 | "Bash(pytest:*)", 16 | "Bash(python:*)", 17 | "Bash(python -m pytest:*)", 18 | "Bash(python3 -m pytest:*)", 19 | "WebFetch(domain:docs.anthropic.com)", 20 | "WebFetch(domain:modelcontextprotocol.io)", 21 | "mcp__thinking__sequentialThinking", 22 | "Bash(npm run build:*)", 23 | "Bash(rm:*)", 24 | "Bash(timeout 10s npm run dev)", 25 | "Bash(timeout 5s npm run dev:*)", 26 | "Bash(node:*)" 27 | ], 28 | "deny": [] 29 | } 30 | } -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "file-context-server", 3 | "version": "1.0.0", 4 | "description": "File context server for Model Context Protocol", 5 | "main": "dist/index.js", 6 | "type": "module", 7 | "scripts": { 8 | "build": "tsc", 9 | "start": "node dist/index.js", 10 | "dev": "tsc && node dist/index.js", 11 | "test": "jest" 12 | }, 13 | "dependencies": { 14 | "@modelcontextprotocol/sdk": "^1.15.0", 15 | "@typescript-eslint/parser": "^6.0.0", 16 | "@typescript-eslint/types": "^6.0.0", 17 | "chokidar": "^4.0.3", 18 | "express": "^5.1.0", 19 | "glob": "^10.0.0", 20 | "handlebars": "^4.7.8", 21 | "lru-cache": "^10.4.3", 22 | "mime-types": "^2.1.35", 23 
| "typescript": "^5.0.0" 24 | }, 25 | "devDependencies": { 26 | "@types/express": "^5.0.3", 27 | "@types/glob": "^8.1.0", 28 | "@types/jest": "^30.0.0", 29 | "@types/mime-types": "^2.1.1", 30 | "@types/node": "^20.0.0", 31 | "jest": "^30.0.4", 32 | "ts-node": "^10.9.1" 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 bsmi021 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /GETFILES.md: -------------------------------------------------------------------------------- 1 | ## FEATURE: 2 | 3 | A new MCP server tool to accept an array of filenames (and paths) and the tool quickly retreives each file and appends them to a result set. 
The result set should be a fixed schema that is predictable for the MCP client to work with. The point of this feature is to allow an MCP client to provide a list of file names to the MCP server, and the MCP server uses the paths to retrieve the files, the file information is then converted to the response object type and returned to the client. 4 | 5 | ## EXAMPLES: 6 | 7 | [Provide and explain examples that you have in the `examples/` folder] 8 | 9 | - getFiles_request_schema.json: request schema for retrieving files 10 | - getfiles_response_schema.json: response schema for the new tool, provides attributes for all relevant information 11 | 12 | ## DOCUMENTATION: 13 | 14 | [List out any documentation (web pages, sources for an MCP server like Crawl4AI RAG, etc.) that will need to be referenced during development] 15 | @./docs/llms-full.txt 16 | @./docs/mcp-typescript-readme.md 17 | 18 | ## OTHER CONSIDERATIONS: 19 | 20 | [Any other considerations or specific requirements - great place to include gotchas that you see AI coding assistants miss with your projects a lot] 21 | -------------------------------------------------------------------------------- /examples/getfiles_response_schema.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://json-schema.org/draft-07/schema#", 3 | "title": "File List", 4 | "description": "A list of file objects.", 5 | "type": "array", 6 | "items": { 7 | "type": "object", 8 | "title": "File", 9 | "description": "Represents a single file with its metadata and content.", 10 | "properties": { 11 | "fileName": { 12 | "type": "string", 13 | "description": "The name of the file, including its extension." 14 | }, 15 | "content": { 16 | "type": "string", 17 | "description": "Holds the full file content in plain text."
18 | }, 19 | "fileSize": { 20 | "type": "integer", 21 | "description": "File size in bytes.", 22 | "minimum": 0 23 | }, 24 | "lastModifiedDateTime": { 25 | "type": "string", 26 | "format": "date-time", 27 | "description": "The last modified date and time of the file in ISO 8601 format (e.g., '2024-07-04T15:04:05Z')." 28 | } 29 | }, 30 | "required": [ 31 | "fileName", 32 | "content", 33 | "fileSize", 34 | "lastModifiedDateTime" 35 | ] 36 | } 37 | } -------------------------------------------------------------------------------- /.claude/commands/execute-prp.md: -------------------------------------------------------------------------------- 1 | # Execute BASE PRP 2 | 3 | Implement a feature using the PRP file. 4 | 5 | ## PRP File: $ARGUMENTS 6 | 7 | ## Execution Process 8 | 9 | 1. **Load PRP** 10 | - Read the specified PRP file 11 | - Understand all context and requirements 12 | - Follow all instructions in the PRP and extend the research if needed 13 | - Ensure you have all needed context to implement the PRP fully 14 | - Do more web searches and codebase exploration as needed 15 | 16 | 2. **ULTRATHINK** 17 | - Think hard before you execute the plan. Create a comprehensive plan addressing all requirements. 18 | - Break down complex tasks into smaller, manageable steps using your todos tools. 19 | - Use the TodoWrite tool to create and track your implementation plan. 20 | - Identify implementation patterns from existing code to follow. 21 | 22 | 3. **Execute the plan** 23 | - Execute the PRP 24 | - Implement all the code 25 | 26 | 4. **Validate** 27 | - Run each validation command 28 | - Fix any failures 29 | - Re-run until all pass 30 | 31 | 5. **Complete** 32 | - Ensure all checklist items done 33 | - Run final validation suite 34 | - Report completion status 35 | - Read the PRP again to ensure you have implemented everything 36 | 37 | 6.
**Reference the PRP** 38 | - You can always reference the PRP again if needed 39 | 40 | Note: If validation fails, use error patterns in PRP to fix and retry. -------------------------------------------------------------------------------- /.llm-context/templates/prompt.hbs: -------------------------------------------------------------------------------- 1 | # LLM Instructions 2 | 3 | ## Role and Context 4 | You are a code-aware AI assistant analyzing the provided repository content. Your task is to help understand, modify, and improve the codebase while maintaining its integrity and following best practices. 5 | 6 | ## Guidelines 7 | 1. Always analyze the full context before making suggestions 8 | 2. Consider dependencies and potential side effects 9 | 3. Maintain consistent code style 10 | 4. Preserve existing functionality unless explicitly asked to change it 11 | 5. Document significant changes 12 | 6. Handle errors gracefully 13 | 14 | ## Response Structure 15 | 1. First, acknowledge the specific files or code sections you're working with 16 | 2. Explain your understanding of the current implementation 17 | 3. Present your suggestions or changes clearly 18 | 4. Include any necessary warnings about potential impacts 19 | 5. Provide context for your decisions 20 | 21 | ## Code Style 22 | - Follow the project's existing conventions 23 | - Maintain consistent indentation and formatting 24 | - Use clear, descriptive names 25 | - Add appropriate comments for complex logic 26 | 27 | ## Security Considerations 28 | - Never expose sensitive information 29 | - Validate inputs appropriately 30 | - Handle errors securely 31 | - Follow security best practices for the language/framework 32 | 33 | ## Performance 34 | - Consider efficiency in your suggestions 35 | - Highlight potential performance impacts 36 | - Suggest optimizations when relevant 37 | 38 | Remember to maintain a balance between ideal solutions and practical constraints within the existing codebase. 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Dependencies 2 | node_modules/ 3 | .pnp/ 4 | .pnp.js 5 | 6 | # Build output 7 | build/ 8 | dist/ 9 | out/ 10 | *.tsbuildinfo 11 | 12 | # Environment variables 13 | .env 14 | .env.local 15 | .env.*.local 16 | 17 | # IDE and editor files 18 | .idea/ 19 | .vscode/ 20 | *.swp 21 | *.swo 22 | *~ 23 | 24 | # Logs 25 | logs/ 26 | *.log 27 | npm-debug.log* 28 | yarn-debug.log* 29 | yarn-error.log* 30 | 31 | # Testing 32 | coverage/ 33 | .nyc_output/ 34 | 35 | # Operating System 36 | .DS_Store 37 | Thumbs.db 38 | 39 | # Temporary files 40 | *.tmp 41 | *.temp 42 | .cache/ 43 | 44 | # Debug files 45 | .debug/ 46 | *.debug 47 | 48 | # TypeScript source maps 49 | *.map 50 | 51 | # Optional npm cache directory 52 | .npm 53 | 54 | # Optional eslint cache 55 | .eslintcache 56 | 57 | # Optional REPL history 58 | .node_repl_history 59 | 60 | # Output of 'npm pack' 61 | *.tgz 62 | 63 | # Yarn Integrity file 64 | .yarn-integrity 65 | 66 | # dotenv environment variable files 67 | .env 68 | .env.test 69 | .env.production 70 | 71 | # parcel-bundler cache (https://parceljs.org/) 72 | .cache 73 | .parcel-cache 74 | 75 | # Next.js build output 76 | .next 77 | out 78 | 79 | # Nuxt.js build / generate output 80 | .nuxt 81 | dist 82 | 83 | # Gatsby files 84 | .cache/ 85 | # Comment in the public line in if your project uses Gatsby and not Next.js 86 | # public 87 | 88 | # vuepress build output 89 | .vuepress/dist 90 | 91 | # Serverless directories 92 | .serverless/ 93 | 94 | # FuseBox cache 95 | .fusebox/ 96 | 97 | # DynamoDB Local files 98 | .dynamodb/ 99 | 100 | # TernJS port file 101 | .tern-port 102 | 103 | # Stores VSCode versions used for testing VSCode extensions 104 | .vscode-test 105 | 106 | # yarn v2 107 | .yarn/cache 108 | .yarn/unplugged 109 | .yarn/build-state.yml 110 | .yarn/install-state.gz 111 | 
.pnp.* 112 | 113 | # Local backup files 114 | *.bak 115 | 116 | rules/ -------------------------------------------------------------------------------- /src/types/index.ts: -------------------------------------------------------------------------------- 1 | export interface FileMetadata { 2 | size: number; 3 | mimeType: string; 4 | modifiedTime: string; 5 | createdTime: string; 6 | isDirectory: boolean; 7 | } 8 | 9 | export interface FileContent { 10 | content: string; 11 | metadata: FileMetadata; 12 | encoding: string; 13 | truncated: boolean; 14 | totalLines: number; 15 | } 16 | 17 | export interface DirectoryContent { 18 | files: { [path: string]: FileContent }; 19 | metadata: { 20 | totalFiles: number; 21 | totalSize: number; 22 | truncated: boolean; 23 | searchPath: string; 24 | fileTypes?: string[]; 25 | timestamp: string; 26 | }; 27 | } 28 | 29 | export interface FileEntry { 30 | path: string; 31 | name: string; 32 | metadata: FileMetadata; 33 | } 34 | 35 | export interface SearchResult { 36 | matches: Array<{ 37 | path: string; 38 | line: number; 39 | content: string; 40 | context: { 41 | before: string[]; 42 | after: string[]; 43 | }; 44 | }>; 45 | totalMatches: number; 46 | } 47 | 48 | export interface SearchOptions { 49 | recursive?: boolean; 50 | includeHidden?: boolean; 51 | contextLines?: number; 52 | fileTypes?: string[]; 53 | excludePatterns?: string[]; 54 | } 55 | 56 | export enum FileErrorCode { 57 | FILE_NOT_FOUND = 'FILE_NOT_FOUND', 58 | INVALID_PATH = 'INVALID_PATH', 59 | FILE_TOO_LARGE = 'FILE_TOO_LARGE', 60 | PERMISSION_DENIED = 'PERMISSION_DENIED', 61 | UNKNOWN_ERROR = 'UNKNOWN_ERROR' 62 | } 63 | 64 | export class FileOperationError extends Error { 65 | constructor( 66 | public code: FileErrorCode, 67 | message: string, 68 | public path: string 69 | ) { 70 | super(message); 71 | this.name = 'FileOperationError'; 72 | } 73 | } 74 | 75 | export interface TaskResult { 76 | success: boolean; 77 | data?: T; 78 | error?: Error; 79 | duration: 
number; 80 | } 81 | 82 | export interface ProcessFileResult { 83 | lines: number; 84 | size: number; 85 | truncated: boolean; 86 | } 87 | 88 | export interface FileProcessingResult { 89 | content: FileContent; 90 | error?: Error; 91 | } -------------------------------------------------------------------------------- /.claude/commands/generate-prp.md: -------------------------------------------------------------------------------- 1 | # Create PRP 2 | 3 | ## Feature file: $ARGUMENTS 4 | 5 | Generate a complete PRP for general feature implementation with thorough research. Ensure context is passed to the AI agent to enable self-validation and iterative refinement. Read the feature file first to understand what needs to be created, how the examples provided help, and any other considerations. 6 | 7 | The AI agent only gets the context you are appending to the PRP and training data. Assume the AI agent has access to the codebase and the same knowledge cutoff as you, so it's important that your research findings are included or referenced in the PRP. The Agent has Websearch capabilities, so pass URLs to documentation and examples. 8 | 9 | ## Research Process 10 | 11 | 1. **Codebase Analysis** 12 | - Search for similar features/patterns in the codebase 13 | - Identify files to reference in PRP 14 | - Note existing conventions to follow 15 | - Check test patterns for validation approach 16 | 17 | 2. **External Research** 18 | - Search for similar features/patterns online 19 | - Library documentation (include specific URLs) 20 | - Implementation examples (GitHub/StackOverflow/blogs) 21 | - Best practices and common pitfalls 22 | 23 | 3. **User Clarification** (if needed) 24 | - Specific patterns to mirror and where to find them? 25 | - Integration requirements and where to find them?
26 | 27 | ## PRP Generation 28 | 29 | Using PRPs/templates/prp_base.md as template: 30 | 31 | ### Critical Context to Include and pass to the AI agent as part of the PRP 32 | - **Documentation**: URLs with specific sections 33 | - **Code Examples**: Real snippets from codebase 34 | - **Gotchas**: Library quirks, version issues 35 | - **Patterns**: Existing approaches to follow 36 | 37 | ### Implementation Blueprint 38 | - Start with pseudocode showing approach 39 | - Reference real files for patterns 40 | - Include error handling strategy 41 | - List tasks to be completed to fulfill the PRP in the order they should be completed 42 | 43 | ### Validation Gates (Must be Executable) e.g. for Python 44 | ```bash 45 | # Syntax/Style 46 | ruff check --fix && mypy . 47 | 48 | # Unit Tests 49 | uv run pytest tests/ -v 50 | 51 | ``` 52 | 53 | *** CRITICAL AFTER YOU ARE DONE RESEARCHING AND EXPLORING THE CODEBASE BEFORE YOU START WRITING THE PRP *** 54 | 55 | *** ULTRATHINK ABOUT THE PRP AND PLAN YOUR APPROACH THEN START WRITING THE PRP *** 56 | 57 | ## Output 58 | Save as: `PRPs/{feature-name}.md` 59 | 60 | ## Quality Checklist 61 | - [ ] All necessary context included 62 | - [ ] Validation gates are executable by AI 63 | - [ ] References existing patterns 64 | - [ ] Clear implementation path 65 | - [ ] Error handling documented 66 | 67 | Score the PRP on a scale of 1-10 (confidence level to succeed in one-pass implementation using Claude Code) 68 | 69 | Remember: The goal is one-pass implementation success through comprehensive context.
-------------------------------------------------------------------------------- /.llm-context/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "templates": { 3 | "context": "context.j2", 4 | "files": "files.j2", 5 | "highlights": "highlights.j2" 6 | }, 7 | "profiles": { 8 | "code": { 9 | "gitignores": { 10 | "full_files": [ 11 | ".git/", 12 | "node_modules/", 13 | "dist/", 14 | "build/", 15 | ".env", 16 | ".env.*", 17 | "*.min.*", 18 | "*.bundle.*" 19 | ], 20 | "outline_files": [ 21 | ".git/", 22 | "node_modules/", 23 | "dist/", 24 | "build/", 25 | ".env", 26 | ".env.*", 27 | "*.min.*", 28 | "*.bundle.*" 29 | ] 30 | }, 31 | "only_includes": { 32 | "full_files": [ 33 | "**/*" 34 | ], 35 | "outline_files": [ 36 | "**/*" 37 | ] 38 | }, 39 | "settings": { 40 | "no_media": true, 41 | "with_user_notes": false 42 | } 43 | }, 44 | "code-prompt": { 45 | "gitignores": { 46 | "full_files": [ 47 | ".git/", 48 | "node_modules/", 49 | "dist/", 50 | "build/", 51 | ".env", 52 | ".env.*", 53 | "*.min.*", 54 | "*.bundle.*" 55 | ], 56 | "outline_files": [ 57 | ".git/", 58 | "node_modules/", 59 | "dist/", 60 | "build/", 61 | ".env", 62 | ".env.*", 63 | "*.min.*", 64 | "*.bundle.*" 65 | ] 66 | }, 67 | "only_includes": { 68 | "full_files": [ 69 | "**/*" 70 | ], 71 | "outline_files": [ 72 | "**/*" 73 | ] 74 | }, 75 | "settings": { 76 | "no_media": true, 77 | "with_user_notes": false 78 | }, 79 | "base": "code", 80 | "prompt": "prompt.md" 81 | }, 82 | "code-file": { 83 | "gitignores": { 84 | "full_files": [ 85 | ".git/", 86 | "node_modules/", 87 | "dist/", 88 | "build/", 89 | ".env", 90 | ".env.*", 91 | "*.min.*", 92 | "*.bundle.*" 93 | ], 94 | "outline_files": [ 95 | ".git/", 96 | "node_modules/", 97 | "dist/", 98 | "build/", 99 | ".env", 100 | ".env.*", 101 | "*.min.*", 102 | "*.bundle.*" 103 | ] 104 | }, 105 | "only_includes": { 106 | "full_files": [ 107 | "**/*" 108 | ], 109 | "outline_files": [ 110 | "**/*" 111 | ] 112 | }, 113 
| "settings": { 114 | "no_media": true, 115 | "with_user_notes": false, 116 | "context_file": "project-context.md.tmp" 117 | }, 118 | "base": "code" 119 | } 120 | } 121 | } -------------------------------------------------------------------------------- /docs/mcp-logging-research-findings.md: -------------------------------------------------------------------------------- 1 | # MCP Logging Research Findings 2 | 3 | ## Summary of Research on @modelcontextprotocol/sdk@latest Logging API 4 | 5 | This document summarizes the research conducted on implementing MCP SDK logging in the file-context-server. 6 | 7 | ### Key Findings 8 | 9 | #### 1. MCP SDK Version 10 | - Current project uses: `@modelcontextprotocol/sdk@1.15.0` (latest) 11 | - SDK includes full logging API support with client-server communication 12 | 13 | #### 2. Logging API Structure 14 | ```typescript 15 | // Server method available 16 | server.sendLoggingMessage({ 17 | level: LoggingLevel, // 8 levels: debug, info, notice, warning, error, critical, alert, emergency 18 | logger?: string, // Optional logger name 19 | data: unknown // Any JSON serializable content 20 | }); 21 | 22 | // Client control via logging/setLevel requests 23 | // Real-time streaming to MCP clients 24 | ``` 25 | 26 | #### 3. Current State Analysis 27 | - **43+ console.error statements** across 5 files 28 | - **No structured logging** or client visibility 29 | - **stderr output only** - invisible to MCP clients 30 | - **No log level control** or filtering 31 | 32 | #### 4. Implementation Architecture Designed 33 | - **LoggingService class** with level filtering and context enrichment 34 | - **Server integration** for client logging/setLevel requests 35 | - **Service injection pattern** for all existing services 36 | - **Performance timing** and structured error context 37 | - **Graceful fallback** to console logging for development 38 | 39 | ### Documentation Created 40 | 41 | 1. 
**[MCP Logging Implementation Brief](./mcp-logging-implementation-brief.md)** (11.3KB) 42 | - Complete project brief with timeline, risks, and success criteria 43 | - Detailed phase-by-phase implementation plan 44 | - Configuration design and testing strategy 45 | 46 | 2. **[MCP Logging Implementation Summary](./mcp-logging-implementation-summary.md)** (6.6KB) 47 | - Quick reference guide for developers 48 | - File-by-file migration plan with priorities 49 | - Key implementation points and validation steps 50 | 51 | 3. **[LoggingService Technical Specification](./logging-service-technical-spec.md)** (11.3KB) 52 | - Complete TypeScript interface definitions 53 | - Implementation details with code examples 54 | - Integration patterns and error handling strategy 55 | 56 | ### Key Implementation Points 57 | 58 | #### Migration Strategy 59 | - **Gradual replacement** of 43+ console.error calls 60 | - **Enhanced context** with operation details, file paths, timing 61 | - **Level filtering** based on client preferences 62 | - **Backwards compatibility** maintained throughout 63 | 64 | #### Technical Benefits 65 | - **Real-time visibility** for MCP clients 66 | - **Structured debugging** with rich context 67 | - **Performance monitoring** with operation timing 68 | - **Client-controlled verbosity** via log levels 69 | 70 | #### Files Requiring Migration 71 | | File | Console Calls | Priority | Context | 72 | |------|---------------|----------|---------| 73 | | `src/index.ts` | 17 | High | Tool operations, file paths | 74 | | `src/services/ProfileService.ts` | 13 | Medium | Profile management | 75 | | `src/services/FileWatcherService.ts` | 9 | Medium | File system events | 76 | | `src/services/CodeAnalysisService.ts` | 2 | Low | Analysis operations | 77 | | `src/services/TemplateService.ts` | 2 | Low | Template rendering | 78 | 79 | ### Next Steps 80 | 81 | The research is complete and ready for implementation. The project brief provides: 82 | 83 | 1. 
✅ **Complete understanding** of MCP SDK logging capabilities 84 | 2. ✅ **Detailed implementation plan** with timelines and priorities 85 | 3. ✅ **Technical specifications** for all required components 86 | 4. ✅ **Migration strategy** for 43+ existing log statements 87 | 5. ✅ **Testing approach** and success criteria 88 | 6. ✅ **Risk assessment** and mitigation strategies 89 | 90 | ### Implementation Timeline 91 | - **Week 1**: LoggingService foundation and testing 92 | - **Week 2**: Server integration and client communication 93 | - **Week 3-4**: Service migration and context enhancement 94 | - **Final**: Documentation, testing, and performance validation 95 | 96 | This research provides everything needed to implement professional, structured logging that enhances debugging capabilities while maintaining full backwards compatibility. -------------------------------------------------------------------------------- /CLAUDE.md: -------------------------------------------------------------------------------- 1 | # CLAUDE.md 2 | 3 | This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. 4 | 5 | ## Development Commands 6 | 7 | ### Build and Run 8 | - `npm run build` - Compile TypeScript to JavaScript in `/dist` 9 | - `npm run dev` - Build and run the server in development mode 10 | - `npm start` - Start the compiled server from `/dist` 11 | - `npm test` - Run Jest tests 12 | 13 | ### Environment Configuration 14 | - `MAX_CACHE_SIZE` - Maximum cached entries (default: 1000) 15 | - `CACHE_TTL` - Cache time-to-live in milliseconds (default: 1 hour) 16 | - `MAX_FILE_SIZE` - Maximum file size in bytes for reading 17 | 18 | ## Architecture Overview 19 | 20 | This is a Model Context Protocol (MCP) server that provides file system context and analysis to LLMs. The server is built using the MCP SDK and provides tools for reading, searching, and analyzing code files. 
21 | 22 | ### Core Components 23 | 24 | **Main Server Class (`src/index.ts`):** 25 | - `FileContextServer` - Main server implementation handling MCP protocol 26 | - Implements streaming, compression, and chunking for large files 27 | - Provides file watching with automatic cache invalidation 28 | - Handles cross-platform path compatibility (Windows/macOS/Linux) 29 | 30 | **Services (`src/services/`):** 31 | - `CodeAnalysisService` - Analyzes code complexity, dependencies, and quality metrics 32 | - `FileWatcherService` - Real-time file system monitoring with chokidar 33 | - `ProfileService` - Context generation profiles for different use cases 34 | - `TemplateService` - Template rendering for prompts and context 35 | 36 | **Type Definitions (`src/types.ts`):** 37 | - Complete type definitions for file operations, caching, and analysis 38 | - Error handling with custom `FileOperationError` class 39 | - Profile and context specification interfaces 40 | 41 | ### Key Features 42 | 43 | **File Operations:** 44 | - Cross-platform path handling using POSIX-style paths for globs 45 | - Streaming support for large files with configurable chunk sizes 46 | - Automatic encoding detection (UTF-8, UTF-16LE with BOM support) 47 | - LRU caching with TTL for performance optimization 48 | 49 | **Code Analysis:** 50 | - Cyclomatic complexity calculation 51 | - Dependency extraction and analysis 52 | - Code quality metrics (duplicate lines, long lines, complex functions) 53 | - Language-specific analysis for TypeScript, JavaScript, Python 54 | 55 | **Search and Filtering:** 56 | - Regex pattern matching with context lines 57 | - File type filtering by extension 58 | - Recursive directory traversal 59 | - Automatic exclusion of common artifact directories 60 | 61 | **Profile System:** 62 | - Multiple context generation profiles (code, code-prompt, code-file) 63 | - Configurable file selection and exclusion rules 64 | - Template-based prompt generation 65 | 66 | ### Tool Interface 67 | 
68 | The server exposes these MCP tools: 69 | - `list_context_files` - List files with metadata 70 | - `read_context` - Read file contents with chunking support 71 | - `search_context` - Search for patterns in files 72 | - `get_chunk_count` - Calculate chunks before reading large content 73 | - `set_profile` - Switch context generation profiles 74 | - `get_profile_context` - Generate structured repository context 75 | - `generate_outline` - Create code structure outlines 76 | 77 | ### Path Handling 78 | 79 | **Critical for cross-platform compatibility:** 80 | - Use `path.posix.join()` for glob patterns 81 | - Use `path.normalize()` for file system operations 82 | - All internal paths are resolved to absolute paths 83 | - Glob patterns use forward slashes regardless of OS 84 | 85 | ### Caching Strategy 86 | 87 | **LRU Cache Implementation:** 88 | - File content caching with modification time validation 89 | - Automatic cache invalidation on file changes 90 | - Configurable cache size and TTL 91 | - Cache statistics tracking for performance monitoring 92 | 93 | ### Error Handling 94 | 95 | **Structured Error Management:** 96 | - Custom `FileOperationError` class with error codes 97 | - Specific error codes: `FILE_NOT_FOUND`, `PERMISSION_DENIED`, `INVALID_PATH`, `FILE_TOO_LARGE` 98 | - Graceful fallback for inaccessible files 99 | - Detailed error context for debugging 100 | 101 | ### Testing 102 | 103 | Run tests with `npm test` using Jest framework. The project uses TypeScript with strict type checking enabled. -------------------------------------------------------------------------------- /STREAMING.md: -------------------------------------------------------------------------------- 1 | ## FEATURE: 2 | 3 | Implementation of streamableHTTP API endpoints for the Model Context Protocol (MCP) using the TypeScript SDK (@modelcontextprotocol/sdk). 
This feature enables MCP servers to support remote HTTP communication through a modern "Streamable HTTP" transport that unifies client-to-server messaging and server-to-client streaming into a single endpoint architecture. This replaces the previous HTTP+SSE dual-endpoint approach while maintaining backward compatibility for existing SSE clients. 4 | 5 | ## EXAMPLES: 6 | 7 | [Provide and explain examples that you have in the `examples/` folder] 8 | 9 | - https://modelcontextprotocol.io/docs/concepts/transports 10 | 11 | ## DOCUMENTATION: 12 | 13 | [List out any documentation (web pages, sources for an MCP server like Crawl4AI RAG, etc.) that will need to be referenced during development] 14 | 15 | ### Core MCP Resources: 16 | @./docs/mcp-typescript-readme.md 17 | @./docs/technical-design.md 18 | @./docs/llms-full.txt 19 | 20 | ### Official Specification: 21 | - https://spec.modelcontextprotocol.io/specification/ 22 | - https://github.com/modelcontextprotocol/specification/pull/206 (StreamableHTTP RFC) 23 | 24 | ### TypeScript SDK Documentation: 25 | - https://www.npmjs.com/package/@modelcontextprotocol/sdk 26 | - https://github.com/modelcontextprotocol/typescript-sdk 27 | 28 | ### HTTP Transport Guides: 29 | - Building Your First Model Context Protocol Server: https://thenewstack.io/building-your-first-model-context-protocol-server/ 30 | - Understanding MCP Recent Change Around HTTP+SSE: https://blog.christianposta.com/ai/understanding-mcp-recent-change-around-http-sse/ 31 | 32 | ### Real-time Communication References: 33 | - Server-Sent Events (SSE) specification: https://html.spec.whatwg.org/multipage/server-sent-events.html 34 | - Express.js SSE middleware patterns 35 | - HTTP streaming and content negotiation standards 36 | 37 | ## OTHER CONSIDERATIONS: 38 | 39 | [Any other considerations or specific requirements - great place to include gotchas that you see AI coding assistants miss with your projects a lot] 40 | 41 | ### Protocol Evolution & Compatibility: 42 
| - **Backward Compatibility Critical**: Must support existing SSE clients during transition period 43 | - **Session Management**: Server-generated session IDs for stateful connections vs stateless operation modes 44 | - **Content Negotiation**: Proper handling of Accept headers to determine JSON vs SSE response format 45 | - **Error Handling**: Graceful degradation when clients don't support streamable responses 46 | 47 | ### Express.js Integration Gotchas: 48 | - **CORS Configuration**: Must expose 'mcp-session-id' header for cross-origin requests 49 | - **Body Parser Limits**: Ensure JSON payload size limits accommodate large tool responses 50 | - **Streaming Response Handling**: Proper cleanup of SSE connections on client disconnect 51 | - **Load Balancer Compatibility**: Session sticky routing or Redis-based session storage for horizontally scaled deployments 52 | 53 | ### Security & Infrastructure: 54 | - **DNS Rebinding Protection**: Enable allowedHosts/allowedOrigins validation for production 55 | - **Authentication Integration**: Bearer token support in Authorization headers for each request 56 | - **Rate Limiting**: Apply appropriate throttling for streaming endpoints to prevent abuse 57 | - **Proxy Support**: Ensure compatibility with reverse proxies and CDN edge deployments 58 | 59 | ### Performance Optimizations: 60 | - **Connection Pooling**: Manage SSE connection lifecycle efficiently 61 | - **Buffering Strategy**: Balance between real-time responsiveness and resource utilization 62 | - **Memory Management**: Prevent memory leaks from long-lived streaming connections 63 | - **Compression Support**: gzip/deflate for JSON responses, streaming-friendly compression for SSE 64 | 65 | ### Development & Testing: 66 | - **Local Development**: Use tools like MCP Inspector for debugging streamable connections 67 | - **Integration Testing**: Verify behavior with both stateful and stateless server configurations 68 | - **Browser Compatibility**: Test EventSource 
API behavior across different browsers 69 | - **Network Resilience**: Handle reconnection scenarios and connection state management 70 | 71 | ### Migration Strategy: 72 | - **Dual Transport Support**: Run both old HTTP+SSE and new StreamableHTTP during transition 73 | - **Client Detection**: Implement feature detection to route to appropriate transport 74 | - **Monitoring**: Track usage patterns to determine when legacy SSE support can be deprecated 75 | - **Documentation**: Provide clear migration guide for existing MCP server implementations 76 | -------------------------------------------------------------------------------- /src/types.ts: -------------------------------------------------------------------------------- 1 | export interface FileMetadata { 2 | size: number; 3 | mimeType: string; 4 | modifiedTime: string; 5 | createdTime: string; 6 | isDirectory: boolean; 7 | analysis?: CodeAnalysis; 8 | lastAnalyzed?: string; 9 | } 10 | 11 | export interface FileContent { 12 | content: string; 13 | metadata: FileMetadata; 14 | encoding: string; 15 | truncated: boolean; 16 | totalLines: number; 17 | path: string; 18 | } 19 | 20 | export interface CodeAnalysis { 21 | definitions?: string[]; 22 | imports?: string[]; 23 | complexity?: number; 24 | } 25 | 26 | export interface CacheEntry { 27 | content: FileContent; 28 | lastModified: number; 29 | lastAccessed: Date; 30 | } 31 | 32 | export interface EnhancedCacheEntry extends CacheEntry { 33 | size: number; 34 | hits: number; 35 | } 36 | 37 | export interface FileEntry { 38 | path: string; 39 | name: string; 40 | metadata: FileMetadata; 41 | } 42 | 43 | export enum FileErrorCode { 44 | INVALID_PATH = 'INVALID_PATH', 45 | FILE_NOT_FOUND = 'FILE_NOT_FOUND', 46 | FILE_TOO_LARGE = 'FILE_TOO_LARGE', 47 | UNKNOWN_ERROR = 'UNKNOWN_ERROR', 48 | INVALID_CHUNK = 'INVALID_CHUNK' 49 | } 50 | 51 | export class FileOperationError extends Error { 52 | constructor( 53 | public code: FileErrorCode, 54 | message: string, 55 | public 
path: string 56 | ) { 57 | super(message); 58 | this.name = 'FileOperationError'; 59 | } 60 | } 61 | 62 | export interface SearchOptions { 63 | recursive?: boolean; 64 | includeHidden?: boolean; 65 | fileTypes?: string[]; 66 | } 67 | 68 | export interface SearchResult { 69 | matches: Array<{ 70 | path: string; 71 | line: number; 72 | content: string; 73 | context: { 74 | before: string[]; 75 | after: string[]; 76 | }; 77 | }>; 78 | } 79 | 80 | export interface DirectoryContent { 81 | files: Record; 82 | totalSize: number; 83 | totalFiles: number; 84 | } 85 | 86 | export interface EnhancedSearchOptions extends SearchOptions { 87 | maxResults?: number; 88 | contextLines?: number; 89 | ignoreCase?: boolean; 90 | } 91 | 92 | export interface ChunkMetadata { 93 | id: string; 94 | path: string; 95 | startOffset: number; 96 | endOffset: number; 97 | type: 'code' | 'text' | 'markdown'; 98 | relevanceScore: number; 99 | semanticContext?: string; 100 | } 101 | 102 | export interface CompressedChunk { 103 | id: string; 104 | compressedData: Buffer; 105 | originalSize: number; 106 | compressionRatio: number; 107 | } 108 | 109 | export interface ContentChunk { 110 | metadata: ChunkMetadata; 111 | content: string | CompressedChunk; 112 | lastAccessed: number; 113 | accessCount: number; 114 | } 115 | 116 | export interface ChunkingStrategy { 117 | maxChunkSize: number; 118 | minChunkSize: number; 119 | preferredBoundaries: RegExp[]; 120 | compressionThreshold: number; 121 | } 122 | 123 | export interface MemoryPressureEvent { 124 | timestamp: number; 125 | currentUsage: number; 126 | threshold: number; 127 | availableMemory: number; 128 | } 129 | 130 | export interface CacheStats { 131 | totalSize: number; 132 | chunkCount: number; 133 | compressionRatio: number; 134 | hitRate: number; 135 | evictionCount: number; 136 | } 137 | 138 | export interface Profile { 139 | name: string; 140 | description?: string; 141 | settings: { 142 | no_media: boolean; 143 | with_user_notes: 
boolean; 144 | context_file?: string; 145 | }; 146 | gitignores: { 147 | full_files: string[]; 148 | outline_files: string[]; 149 | }; 150 | only_includes: { 151 | full_files: string[]; 152 | outline_files: string[]; 153 | }; 154 | prompt?: string; 155 | } 156 | 157 | export interface ProfileState { 158 | profile_name: string; 159 | full_files: string[]; 160 | outline_files: string[]; 161 | excluded_files: string[]; 162 | timestamp: number; 163 | } 164 | 165 | export interface ProfileConfig { 166 | profiles: Record; 167 | default_profile: string; 168 | } 169 | 170 | export interface ContextSpec { 171 | profile: Profile; 172 | state: ProfileState; 173 | } 174 | 175 | export interface FileOutline { 176 | path: string; 177 | outline: string; 178 | metadata: FileMetadata; 179 | } 180 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # File Context Server 2 | [![smithery badge](https://smithery.ai/badge/@bsmi021/mcp-file-context-server)](https://smithery.ai/server/@bsmi021/mcp-file-context-server) 3 | 4 | A Model Context Protocol (MCP) server that provides file system context to Large Language Models (LLMs). This server enables LLMs to read, search, and analyze code files with advanced caching and real-time file watching capabilities. 
5 | 6 | ## Features 7 | 8 | - **File Operations** 9 | - Read file and directory contents 10 | - List files with detailed metadata 11 | - Real-time file watching and cache invalidation 12 | - Support for multiple file encodings 13 | - Recursive directory traversal 14 | - File type filtering 15 | 16 | - **Code Analysis** 17 | - Cyclomatic complexity calculation 18 | - Dependency extraction 19 | - Comment analysis 20 | - Quality metrics: 21 | - Duplicate lines detection 22 | - Long lines detection (>100 characters) 23 | - Complex function identification 24 | - Line counts (total, non-empty, comments) 25 | 26 | - **Smart Caching** 27 | - LRU (Least Recently Used) caching strategy 28 | - Automatic cache invalidation on file changes 29 | - Size-aware caching with configurable limits 30 | - Cache statistics and performance metrics 31 | - Last read result caching for efficient searches 32 | 33 | - **Advanced Search** 34 | - Regex pattern matching 35 | - Context-aware results with configurable surrounding lines 36 | - File type filtering 37 | - Multi-pattern search support 38 | - Cached result searching 39 | - Exclusion patterns 40 | 41 | ## Installation 42 | 43 | ### Installing via Smithery 44 | 45 | To install File Context Server for Claude Desktop automatically via [Smithery](https://smithery.ai/server/@bsmi021/mcp-file-context-server): 46 | 47 | ```bash 48 | npx -y @smithery/cli install @bsmi021/mcp-file-context-server --client claude 49 | ``` 50 | 51 | ### Manual Installation 52 | ```bash 53 | npm install @modelcontextprotocol/file-context-server 54 | ``` 55 | 56 | ## Usage 57 | 58 | ### Starting the Server 59 | 60 | ```bash 61 | npx file-context-server 62 | ``` 63 | 64 | ### Available Tools 65 | 66 | 1. **list_context_files** 67 | - Lists files in a directory with detailed metadata 68 | 69 | ```json 70 | { 71 | "path": "./src", 72 | "recursive": true, 73 | "includeHidden": false 74 | } 75 | ``` 76 | 77 | 2. 
**read_context** 78 | - Reads file or directory contents with metadata 79 | 80 | ```json 81 | { 82 | "path": "./src/index.ts", 83 | "encoding": "utf8", 84 | "maxSize": 1000000, 85 | "recursive": true, 86 | "fileTypes": ["ts", "js"] 87 | } 88 | ``` 89 | 90 | 3. **search_context** 91 | - Searches for patterns in files with context 92 | 93 | ```json 94 | { 95 | "pattern": "function.*", 96 | "path": "./src", 97 | "options": { 98 | "recursive": true, 99 | "contextLines": 2, 100 | "fileTypes": ["ts"] 101 | } 102 | } 103 | ``` 104 | 105 | 4. **analyze_code** 106 | - Analyzes code files for quality metrics 107 | 108 | ```json 109 | { 110 | "path": "./src", 111 | "recursive": true, 112 | "metrics": ["complexity", "dependencies", "quality"] 113 | } 114 | ``` 115 | 116 | 5. **cache_stats** 117 | - Gets cache statistics and performance metrics 118 | 119 | ```json 120 | { 121 | "detailed": true 122 | } 123 | ``` 124 | 125 | ## Error Handling 126 | 127 | The server provides detailed error messages with specific error codes: 128 | 129 | - `FILE_NOT_FOUND`: File or directory does not exist 130 | - `PERMISSION_DENIED`: Access permission issues 131 | - `INVALID_PATH`: Invalid file path format 132 | - `FILE_TOO_LARGE`: File exceeds size limit 133 | - `ENCODING_ERROR`: File encoding issues 134 | - `UNKNOWN_ERROR`: Unexpected errors 135 | 136 | ## Configuration 137 | 138 | Environment variables for customization: 139 | 140 | - `MAX_CACHE_SIZE`: Maximum number of cached entries (default: 1000) 141 | - `CACHE_TTL`: Cache time-to-live in milliseconds (default: 1 hour) 142 | - `MAX_FILE_SIZE`: Maximum file size in bytes for reading 143 | 144 | ## Development 145 | 146 | ```bash 147 | # Install dependencies 148 | npm install 149 | 150 | # Build 151 | npm run build 152 | 153 | # Run tests 154 | npm test 155 | 156 | # Start in development mode 157 | npm run dev 158 | ``` 159 | 160 | ## License 161 | 162 | MIT 163 | 164 | ## Contributing 165 | 166 | Contributions are welcome! 
Please read our [Contributing Guide](CONTRIBUTING.md) for details on our code of conduct and the process for submitting pull requests. 167 | 168 | ## Cross-Platform Path Compatibility 169 | 170 | **Note:** As of April 2025, all file and directory path handling in File Context Server has been updated for improved cross-platform compatibility (Windows, macOS, Linux): 171 | 172 | - All glob patterns use POSIX-style paths (forward slashes) internally, ensuring consistent file matching regardless of OS. 173 | - All file system operations (reading, writing, stat, etc.) use normalized absolute paths for reliability. 174 | - If you are developing or extending the server, use `path.posix.join` for glob patterns and `path.normalize` for file system access. 175 | - This change prevents issues with path separators and file matching on different operating systems. 176 | 177 | No changes are required for end users, but developers should follow these conventions when contributing to the project. 178 | -------------------------------------------------------------------------------- /src/services/FileWatcherService.ts: -------------------------------------------------------------------------------- 1 | import { EventEmitter } from 'events'; 2 | import { watch } from 'chokidar'; 3 | import * as path from 'path'; 4 | import { LoggingService } from './LoggingService.js'; 5 | 6 | /** 7 | * Service for monitoring file system changes 8 | */ 9 | export class FileWatcherService extends EventEmitter { 10 | private watchers: Map = new Map(); 11 | private logger?: LoggingService; 12 | 13 | constructor(logger?: LoggingService) { 14 | super(); 15 | this.logger = logger; 16 | } 17 | 18 | /** 19 | * Start watching a path for changes 20 | */ 21 | public async watch(targetPath: string): Promise { 22 | if (this.watchers.has(targetPath)) { 23 | return; 24 | } 25 | 26 | if (!this.watchers.has(targetPath)) { 27 | this.watchers.set(targetPath, watch(targetPath, { 28 | persistent: true, 29 | 
ignoreInitial: true, 30 | usePolling: true, 31 | interval: 100, 32 | binaryInterval: 300, 33 | awaitWriteFinish: { 34 | stabilityThreshold: 2000, 35 | pollInterval: 100 36 | } 37 | })); 38 | 39 | this.setupWatcherEvents(targetPath); 40 | } else { 41 | this.watchers.get(targetPath).add(targetPath); 42 | } 43 | 44 | await this.logger?.info('Started watching path', { 45 | targetPath, 46 | operation: 'file_watch_start' 47 | }); 48 | } 49 | 50 | /** 51 | * Stop watching a path 52 | */ 53 | public async unwatch(targetPath: string): Promise { 54 | if (this.watchers.has(targetPath)) { 55 | await this.watchers.get(targetPath).unwatch(targetPath); 56 | this.watchers.delete(targetPath); 57 | await this.logger?.info('Stopped watching path', { 58 | targetPath, 59 | operation: 'file_watch_stop' 60 | }); 61 | } 62 | } 63 | 64 | /** 65 | * Close all watchers 66 | */ 67 | public async close(): Promise { 68 | if (this.watchers.size > 0) { 69 | for (const watcher of this.watchers.values()) { 70 | await watcher.close(); 71 | } 72 | this.watchers.clear(); 73 | await this.logger?.info('File watcher closed', { 74 | operation: 'file_watch_close' 75 | }); 76 | } 77 | } 78 | 79 | /** 80 | * Get list of watched paths 81 | */ 82 | public getWatchedPaths(): string[] { 83 | return Array.from(this.watchers.keys()); 84 | } 85 | 86 | /** 87 | * Setup watcher event handlers 88 | */ 89 | private setupWatcherEvents(targetPath: string): void { 90 | if (!this.watchers.has(targetPath)) return; 91 | 92 | const watcher = this.watchers.get(targetPath); 93 | 94 | // File added 95 | watcher.on('add', (filePath: string) => { 96 | this.logger?.debug('File added', { 97 | filePath, 98 | targetPath, 99 | operation: 'file_watch_event' 100 | }); 101 | this.emit('fileAdded', filePath); 102 | }); 103 | 104 | // File changed 105 | watcher.on('change', (filePath: string) => { 106 | this.logger?.debug('File changed', { 107 | filePath, 108 | targetPath, 109 | operation: 'file_watch_event' 110 | }); 111 | 
this.emit('fileChanged', filePath); 112 | }); 113 | 114 | // File deleted 115 | watcher.on('unlink', (filePath: string) => { 116 | this.logger?.debug('File removed', { 117 | filePath, 118 | targetPath, 119 | operation: 'file_watch_event' 120 | }); 121 | this.emit('fileDeleted', filePath); 122 | }); 123 | 124 | // Directory added 125 | watcher.on('addDir', (dirPath: string) => { 126 | this.logger?.debug('Directory added', { 127 | dirPath, 128 | targetPath, 129 | operation: 'file_watch_event' 130 | }); 131 | this.emit('directoryAdded', dirPath); 132 | }); 133 | 134 | // Directory deleted 135 | watcher.on('unlinkDir', (dirPath: string) => { 136 | this.logger?.debug('Directory removed', { 137 | dirPath, 138 | targetPath, 139 | operation: 'file_watch_event' 140 | }); 141 | this.emit('directoryDeleted', dirPath); 142 | }); 143 | 144 | // Error handling 145 | watcher.on('error', (error: Error) => { 146 | this.logger?.error('Watcher error', error, { 147 | targetPath, 148 | operation: 'file_watch_event' 149 | }); 150 | this.emit('error', error); 151 | }); 152 | 153 | // Ready event 154 | watcher.on('ready', () => { 155 | this.logger?.info('Initial scan complete, ready for changes', { 156 | targetPath, 157 | operation: 'file_watch_ready' 158 | }); 159 | this.emit('ready'); 160 | }); 161 | } 162 | 163 | /** 164 | * Check if a path is being watched 165 | */ 166 | public isWatching(targetPath: string): boolean { 167 | return this.watchers.has(targetPath); 168 | } 169 | 170 | /** 171 | * Get watcher status 172 | */ 173 | public getStatus(): { isWatching: boolean; watchedPaths: string[]; ready: boolean } { 174 | return { 175 | isWatching: this.watchers.size > 0, 176 | watchedPaths: Array.from(this.watchers.keys()), 177 | ready: this.watchers.size > 0 && this.watchers.get(Array.from(this.watchers.keys())[0])?.getWatched !== undefined 178 | }; 179 | } 180 | } 181 | -------------------------------------------------------------------------------- /src/services/TemplateService.ts: 
-------------------------------------------------------------------------------- 1 | import { promises as fs } from 'fs'; 2 | import * as path from 'path'; 3 | import Handlebars from 'handlebars'; 4 | import { LoggingService } from './LoggingService.js'; 5 | 6 | type CompiledTemplate = ReturnType; 7 | 8 | interface TemplateMap { 9 | context: string; 10 | files: string; 11 | highlights: string; 12 | prompt: string; 13 | } 14 | 15 | const DEFAULT_TEMPLATES: TemplateMap = { 16 | context: `{{#if prompt}} 17 | {{{prompt}}} 18 | {{/if}} 19 | {{#if project_notes}} 20 | {{{project_notes}}} 21 | {{/if}} 22 | {{#if user_notes}} 23 | {{{user_notes}}} 24 | {{/if}} 25 | # Repository Content: **{{project_name}}** 26 | 27 | > 🕒 Generation timestamp: {{timestamp}} 28 | > 📝 Use \`lc-list-modified-files\` to track changes since generation 29 | 30 | {{#if sample_requested_files}} 31 | ## 📂 File Access Guide 32 | 33 | Files in the repository structure are marked as: 34 | - ✓ Full content available 35 | - ○ Outline available 36 | - ✗ Excluded/not loaded 37 | 38 | To retrieve missing files, use the \`lc-get-files\` tool: 39 | \`\`\`json 40 | { 41 | "path": "{{project_root}}", 42 | "files": ["path/to/file"] 43 | } 44 | \`\`\` 45 | {{/if}} 46 | 47 | ## 📁 Repository Structure 48 | \`\`\` 49 | {{{folder_structure_diagram}}} 50 | \`\`\` 51 | 52 | {{#if files}} 53 | ## 📄 Current Files 54 | {{> files}} 55 | {{/if}} 56 | 57 | {{#if highlights}} 58 | ## 🔍 Code Outlines 59 | {{> highlights}} 60 | {{/if}} 61 | 62 | ## 🔄 Next Steps 63 | 1. Use \`lc-list-modified-files\` to check for changes 64 | 2. Request specific files with \`lc-get-files\` 65 | 3. 
Search code with \`search_context\``, 66 | 67 | files: `{{#each files}} 68 | ### 📄 {{path}} 69 | {{#if metadata.analysis}} 70 | > 📊 Complexity: {{metadata.analysis.complexity}} | 🔗 Dependencies: {{metadata.analysis.imports.length}} 71 | {{/if}} 72 | 73 | \`\`\`{{language}} 74 | {{{content}}} 75 | \`\`\` 76 | 77 | {{/each}}`, 78 | 79 | highlights: `{{#each highlights}} 80 | ### 🔍 {{path}} 81 | {{#if metadata.analysis}} 82 | > 📊 Complexity: {{metadata.analysis.complexity}} 83 | {{/if}} 84 | 85 | \`\`\` 86 | {{{outline}}} 87 | \`\`\` 88 | 89 | {{/each}}`, 90 | 91 | prompt: `# LLM Analysis Guide 92 | 93 | ## 🎯 Role 94 | Expert code analyst and developer focusing on understanding and improving the codebase. 95 | 96 | ## 📋 Guidelines 97 | 1. 🔍 Analyze context before suggesting changes 98 | 2. 🔗 Consider dependencies and side effects 99 | 3. 📝 Follow project's code style 100 | 4. ⚠️ Preserve existing functionality 101 | 5. 📚 Document significant changes 102 | 6. 🛡️ Handle errors gracefully 103 | 104 | ## 💡 Response Structure 105 | 1. Acknowledge files/code being analyzed 106 | 2. Explain current implementation 107 | 3. Present suggestions clearly 108 | 4. Highlight potential impacts 109 | 5. 
Provide rationale for decisions 110 | 111 | ## 🎨 Code Style 112 | - Match existing conventions 113 | - Use consistent formatting 114 | - Choose clear names 115 | - Add helpful comments 116 | 117 | ## 🔒 Security 118 | - Protect sensitive data 119 | - Validate inputs 120 | - Handle errors securely 121 | - Follow best practices 122 | 123 | ## ⚡ Performance 124 | - Consider efficiency 125 | - Note performance impacts 126 | - Suggest optimizations 127 | 128 | Remember to balance ideal solutions with practical constraints.` 129 | }; 130 | 131 | export class TemplateService { 132 | private templates: Map; 133 | private projectRoot: string; 134 | private templatesDir: string; 135 | private logger?: LoggingService; 136 | 137 | constructor(projectRoot: string, logger?: LoggingService) { 138 | this.logger = logger; 139 | this.projectRoot = projectRoot; 140 | this.templatesDir = path.join(projectRoot, '.llm-context', 'templates'); 141 | this.templates = new Map(); 142 | } 143 | 144 | public async initialize(): Promise { 145 | // Create templates directory if it doesn't exist 146 | await fs.mkdir(this.templatesDir, { recursive: true }); 147 | 148 | // Initialize default templates if they don't exist 149 | for (const [name, content] of Object.entries(DEFAULT_TEMPLATES)) { 150 | const templatePath = path.join(this.templatesDir, `${name}.hbs`); 151 | if (!await this.fileExists(templatePath)) { 152 | await fs.writeFile(templatePath, content); 153 | } 154 | } 155 | 156 | // Load all templates 157 | await this.loadTemplates(); 158 | } 159 | 160 | private async fileExists(filePath: string): Promise { 161 | try { 162 | await fs.access(filePath); 163 | return true; 164 | } catch { 165 | return false; 166 | } 167 | } 168 | 169 | private async loadTemplates(): Promise { 170 | // Register partials first 171 | const filesContent = await this.readTemplate('files'); 172 | const highlightsContent = await this.readTemplate('highlights'); 173 | Handlebars.registerPartial('files', filesContent); 
174 | Handlebars.registerPartial('highlights', highlightsContent); 175 | 176 | // Compile and cache templates 177 | for (const name of Object.keys(DEFAULT_TEMPLATES)) { 178 | const content = await this.readTemplate(name as keyof TemplateMap); 179 | this.templates.set(name, Handlebars.compile(content)); 180 | } 181 | } 182 | 183 | private async readTemplate(name: keyof TemplateMap): Promise { 184 | const templatePath = path.join(this.templatesDir, `${name}.hbs`); 185 | try { 186 | return await fs.readFile(templatePath, 'utf8'); 187 | } catch (error) { 188 | await this.logger?.warning('Error reading template, using default', { 189 | templateName: name, 190 | templatePath, 191 | error: error instanceof Error ? error.message : String(error), 192 | operation: 'read_template' 193 | }); 194 | return DEFAULT_TEMPLATES[name]; 195 | } 196 | } 197 | 198 | public async render(templateName: string, context: any): Promise { 199 | const template = this.templates.get(templateName); 200 | if (!template) { 201 | throw new Error(`Template '${templateName}' not found`); 202 | } 203 | 204 | try { 205 | return template(context); 206 | } catch (error) { 207 | await this.logger?.error('Error rendering template', error as Error, { 208 | templateName, 209 | operation: 'render_template' 210 | }); 211 | throw error; 212 | } 213 | } 214 | 215 | public async getPrompt(): Promise { 216 | return this.render('prompt', {}); 217 | } 218 | } -------------------------------------------------------------------------------- /PRPs/templates/prp_base.md: -------------------------------------------------------------------------------- 1 | name: "Base PRP Template v2 - Context-Rich with Validation Loops" 2 | description: | 3 | 4 | ## Purpose 5 | Template optimized for AI agents to implement features with sufficient context and self-validation capabilities to achieve working code through iterative refinement. 6 | 7 | ## Core Principles 8 | 1. 
**Context is King**: Include ALL necessary documentation, examples, and caveats 9 | 2. **Validation Loops**: Provide executable tests/lints the AI can run and fix 10 | 3. **Information Dense**: Use keywords and patterns from the codebase 11 | 4. **Progressive Success**: Start simple, validate, then enhance 12 | 5. **Global rules**: Be sure to follow all rules in CLAUDE.md 13 | 14 | --- 15 | 16 | ## Goal 17 | [What needs to be built - be specific about the end state and desires] 18 | 19 | ## Why 20 | - [Business value and user impact] 21 | - [Integration with existing features] 22 | - [Problems this solves and for whom] 23 | 24 | ## What 25 | [User-visible behavior and technical requirements] 26 | 27 | ### Success Criteria 28 | - [ ] [Specific measurable outcomes] 29 | 30 | ## All Needed Context 31 | 32 | ### Documentation & References (list all context needed to implement the feature) 33 | ```yaml 34 | # MUST READ - Include these in your context window 35 | - url: [Official API docs URL] 36 | why: [Specific sections/methods you'll need] 37 | 38 | - file: [path/to/example.py] 39 | why: [Pattern to follow, gotchas to avoid] 40 | 41 | - doc: [Library documentation URL] 42 | section: [Specific section about common pitfalls] 43 | critical: [Key insight that prevents common errors] 44 | 45 | - docfile: [PRPs/ai_docs/file.md] 46 | why: [docs that the user has pasted in to the project] 47 | 48 | ``` 49 | 50 | ### Current Codebase tree (run `tree` in the root of the project) to get an overview of the codebase 51 | ```bash 52 | 53 | ``` 54 | 55 | ### Desired Codebase tree with files to be added and responsibility of file 56 | ```bash 57 | 58 | ``` 59 | 60 | ### Known Gotchas of our codebase & Library Quirks 61 | ```python 62 | # CRITICAL: [Library name] requires [specific setup] 63 | # Example: FastAPI requires async functions for endpoints 64 | # Example: This ORM doesn't support batch inserts over 1000 records 65 | # Example: We use pydantic v2 and 66 | ``` 67 | 68 | ## 
Implementation Blueprint 69 | 70 | ### Data models and structure 71 | 72 | Create the core data models to ensure type safety and consistency. 73 | ```python 74 | Examples: 75 | - orm models 76 | - pydantic models 77 | - pydantic schemas 78 | - pydantic validators 79 | 80 | ``` 81 | 82 | ### list of tasks to be completed to fulfill the PRP in the order they should be completed 83 | 84 | ```yaml 85 | Task 1: 86 | MODIFY src/existing_module.py: 87 | - FIND pattern: "class OldImplementation" 88 | - INJECT after line containing "def __init__" 89 | - PRESERVE existing method signatures 90 | 91 | CREATE src/new_feature.py: 92 | - MIRROR pattern from: src/similar_feature.py 93 | - MODIFY class name and core logic 94 | - KEEP error handling pattern identical 95 | 96 | ...(...) 97 | 98 | Task N: 99 | ... 100 | 101 | ``` 102 | 103 | 104 | ### Per task pseudocode as needed added to each task 105 | ```python 106 | 107 | # Task 1 108 | # Pseudocode with CRITICAL details don't write entire code 109 | async def new_feature(param: str) -> Result: 110 | # PATTERN: Always validate input first (see src/validators.py) 111 | validated = validate_input(param) # raises ValidationError 112 | 113 | # GOTCHA: This library requires connection pooling 114 | async with get_connection() as conn: # see src/db/pool.py 115 | # PATTERN: Use existing retry decorator 116 | @retry(attempts=3, backoff=exponential) 117 | async def _inner(): 118 | # CRITICAL: API returns 429 if >10 req/sec 119 | await rate_limiter.acquire() 120 | return await external_api.call(validated) 121 | 122 | result = await _inner() 123 | 124 | # PATTERN: Standardized response format 125 | return format_response(result) # see src/utils/responses.py 126 | ``` 127 | 128 | ### Integration Points 129 | ```yaml 130 | DATABASE: 131 | - migration: "Add column 'feature_enabled' to users table" 132 | - index: "CREATE INDEX idx_feature_lookup ON users(feature_id)" 133 | 134 | CONFIG: 135 | - add to: config/settings.py 136 | - pattern:
"FEATURE_TIMEOUT = int(os.getenv('FEATURE_TIMEOUT', '30'))" 137 | 138 | ROUTES: 139 | - add to: src/api/routes.py 140 | - pattern: "router.include_router(feature_router, prefix='/feature')" 141 | ``` 142 | 143 | ## Validation Loop 144 | 145 | ### Level 1: Syntax & Style 146 | ```bash 147 | # Run these FIRST - fix any errors before proceeding 148 | ruff check src/new_feature.py --fix # Auto-fix what's possible 149 | mypy src/new_feature.py # Type checking 150 | 151 | # Expected: No errors. If errors, READ the error and fix. 152 | ``` 153 | 154 | ### Level 2: Unit Tests each new feature/file/function use existing test patterns 155 | ```python 156 | # CREATE test_new_feature.py with these test cases: 157 | def test_happy_path(): 158 | """Basic functionality works""" 159 | result = new_feature("valid_input") 160 | assert result.status == "success" 161 | 162 | def test_validation_error(): 163 | """Invalid input raises ValidationError""" 164 | with pytest.raises(ValidationError): 165 | new_feature("") 166 | 167 | def test_external_api_timeout(): 168 | """Handles timeouts gracefully""" 169 | with mock.patch('external_api.call', side_effect=TimeoutError): 170 | result = new_feature("valid") 171 | assert result.status == "error" 172 | assert "timeout" in result.message 173 | ``` 174 | 175 | ```bash 176 | # Run and iterate until passing: 177 | uv run pytest test_new_feature.py -v 178 | # If failing: Read error, understand root cause, fix code, re-run (never mock to pass) 179 | ``` 180 | 181 | ### Level 3: Integration Test 182 | ```bash 183 | # Start the service 184 | uv run python -m src.main --dev 185 | 186 | # Test the endpoint 187 | curl -X POST http://localhost:8000/feature \ 188 | -H "Content-Type: application/json" \ 189 | -d '{"param": "test_value"}' 190 | 191 | # Expected: {"status": "success", "data": {...}} 192 | # If error: Check logs at logs/app.log for stack trace 193 | ``` 194 | 195 | ## Final validation Checklist 196 | - [ ] All tests pass: `uv run pytest 
tests/ -v` 197 | - [ ] No linting errors: `uv run ruff check src/` 198 | - [ ] No type errors: `uv run mypy src/` 199 | - [ ] Manual test successful: [specific curl/command] 200 | - [ ] Error cases handled gracefully 201 | - [ ] Logs are informative but not verbose 202 | - [ ] Documentation updated if needed 203 | 204 | --- 205 | 206 | ## Anti-Patterns to Avoid 207 | - ❌ Don't create new patterns when existing ones work 208 | - ❌ Don't skip validation because "it should work" 209 | - ❌ Don't ignore failing tests - fix them 210 | - ❌ Don't use sync functions in async context 211 | - ❌ Don't hardcode values that should be config 212 | - ❌ Don't catch all exceptions - be specific -------------------------------------------------------------------------------- /docs/mcp-logging-implementation-summary.md: -------------------------------------------------------------------------------- 1 | # MCP Logging Implementation Summary 2 | 3 | ## Quick Reference Guide 4 | 5 | ### Current State 6 | - **43+ console.error statements** across 5 files needing migration 7 | - **No structured logging** or client visibility 8 | - **Stderr output only** - not visible to MCP clients 9 | 10 | ### MCP SDK Logging API (v1.15.0) 11 | ```typescript 12 | // Server method available 13 | await server.sendLoggingMessage({ 14 | level: "debug" | "info" | "notice" | "warning" | "error" | "critical" | "alert" | "emergency", 15 | logger?: "file-context-server", // Optional logger name 16 | data: message | object // Any JSON serializable content 17 | }); 18 | 19 | // Client can control logging via: 20 | // logging/setLevel request with desired level 21 | ``` 22 | 23 | ### Implementation Architecture 24 | 25 | #### LoggingService Interface 26 | ```typescript 27 | class LoggingService { 28 | constructor(server: Server, config: LoggingConfig); 29 | 30 | // Core methods 31 | async debug(message: string, context?: object): Promise; 32 | async info(message: string, context?: object): Promise; 33 | async 
warning(message: string, context?: object): Promise; 34 | async error(message: string, error?: Error, context?: object): Promise; 35 | 36 | // Configuration 37 | setLevel(level: LoggingLevel): void; 38 | shouldLog(level: LoggingLevel): boolean; 39 | } 40 | ``` 41 | 42 | ### Migration Strategy 43 | 44 | #### Phase 1: Service Creation (Week 1) 45 | 1. Create `src/services/LoggingService.ts` 46 | 2. Implement core logging methods with level filtering 47 | 3. Add configuration support and fallback to console 48 | 49 | #### Phase 2: Server Integration (Week 2) 50 | 1. Integrate LoggingService into FileContextServer constructor 51 | 2. Handle client `logging/setLevel` requests 52 | 3. Add logging to server capabilities 53 | 54 | #### Phase 3: Replace Console Logging (Week 3-4) 55 | ```typescript 56 | // Before 57 | console.error(`Error reading file ${path}:`, error); 58 | 59 | // After 60 | await logger.error("Failed to read file", error, { 61 | filePath: path, 62 | operation: "read_context", 63 | toolName: request.params.name 64 | }); 65 | ``` 66 | 67 | ### File-by-File Migration Plan 68 | 69 | | File | Console Calls | Migration Priority | Context Needed | 70 | |------|---------------|-------------------|----------------| 71 | | `src/index.ts` | 17 | High | Tool name, file paths, request context | 72 | | `src/services/ProfileService.ts` | 13 | Medium | Profile names, config paths | 73 | | `src/services/FileWatcherService.ts` | 9 | Medium | File paths, watcher events | 74 | | `src/services/CodeAnalysisService.ts` | 2 | Low | Analysis type, file info | 75 | | `src/services/TemplateService.ts` | 2 | Low | Template names, render context | 76 | 77 | ### Key Implementation Points 78 | 79 | #### 1. Server Integration 80 | ```typescript 81 | // In FileContextServer constructor 82 | private loggingService: LoggingService; 83 | 84 | constructor(config: Partial = {}) { 85 | // ... 
existing setup 86 | this.loggingService = new LoggingService(this.server, { 87 | defaultLevel: 'info', 88 | enableConsoleLogging: process.env.NODE_ENV === 'development', 89 | loggerName: 'file-context-server' 90 | }); 91 | 92 | // Handle client logging requests 93 | this.server.setRequestHandler(SetLevelRequestSchema, async (request) => { 94 | this.loggingService.setLevel(request.params.level); 95 | return {}; 96 | }); 97 | } 98 | ``` 99 | 100 | #### 2. Service Injection 101 | ```typescript 102 | // Pass logging service to all services 103 | this.fileWatcherService = new FileWatcherService(this.loggingService); 104 | this.profileService = new ProfileService(process.cwd(), this.loggingService); 105 | this.templateService = new TemplateService(process.cwd(), this.loggingService); 106 | this.codeAnalysisService = new CodeAnalysisService(this.loggingService); 107 | ``` 108 | 109 | #### 3. Enhanced Error Context 110 | ```typescript 111 | // Example enhanced logging 112 | await this.loggingService.error("File access validation failed", error, { 113 | filePath: resolvedPath, 114 | requestedPath: filePath, 115 | operation: "validateAccess", 116 | timestamp: Date.now(), 117 | workingDirectory: process.cwd() 118 | }); 119 | ``` 120 | 121 | ### Configuration Options 122 | 123 | #### Environment Variables 124 | ```bash 125 | MCP_LOG_LEVEL=info # Default: info 126 | MCP_LOG_CONSOLE_FALLBACK=true # Default: false in production 127 | MCP_LOG_PERFORMANCE=true # Default: false 128 | ``` 129 | 130 | #### Runtime Configuration 131 | ```typescript 132 | interface LoggingConfig { 133 | defaultLevel: LoggingLevel; 134 | enableConsoleLogging: boolean; 135 | loggerName: string; 136 | performance: { 137 | enabled: boolean; 138 | slowOperationThreshold: number; // ms 139 | }; 140 | } 141 | ``` 142 | 143 | ### Testing Validation 144 | 145 | #### Critical Tests Needed 146 | 1. **Level Filtering**: Verify only appropriate levels are sent to client 147 | 2. 
**Message Format**: Ensure all logged objects are JSON serializable 148 | 3. **Performance**: Measure logging overhead (<5% impact) 149 | 4. **Error Handling**: Fallback when sendLoggingMessage fails 150 | 5. **Client Integration**: Test with actual MCP clients 151 | 152 | #### Manual Verification 153 | 1. Start server with MCP client 154 | 2. Send `logging/setLevel` request with level "debug" 155 | 3. Trigger various operations (file reads, profile changes, etc.) 156 | 4. Verify structured log messages appear in client with proper context 157 | 158 | ### Success Metrics 159 | 160 | - ✅ Zero console.error statements remain in production code 161 | - ✅ All log messages include relevant context objects 162 | - ✅ Client can control log levels and see real-time messages 163 | - ✅ Performance impact <5% for typical operations 164 | - ✅ Error messages include stack traces and operational context 165 | 166 | ### Implementation Checklist 167 | 168 | #### Core Service (Week 1) 169 | - [ ] Create LoggingService class with all methods 170 | - [ ] Implement level filtering logic 171 | - [ ] Add JSON serialization with circular reference handling 172 | - [ ] Create configuration interface and defaults 173 | - [ ] Add fallback console logging for development 174 | 175 | #### Server Integration (Week 2) 176 | - [ ] Modify FileContextServer to use LoggingService 177 | - [ ] Handle logging/setLevel requests from clients 178 | - [ ] Add logging capability to server initialization 179 | - [ ] Update service constructors to accept LoggingService 180 | 181 | #### Migration (Weeks 3-4) 182 | - [ ] Replace all 17 console.error calls in index.ts 183 | - [ ] Replace all 13 console.error calls in ProfileService 184 | - [ ] Replace all 9 console.error calls in FileWatcherService 185 | - [ ] Replace all 2 console.error calls in CodeAnalysisService 186 | - [ ] Replace all 2 console.error calls in TemplateService 187 | - [ ] Add structured context to all log messages 188 | - [ ] Remove console 
logging dependencies 189 | 190 | #### Testing & Documentation 191 | - [ ] Create unit tests for LoggingService 192 | - [ ] Add integration tests with mock MCP client 193 | - [ ] Update README with logging configuration options 194 | - [ ] Create example logging configurations 195 | - [ ] Performance benchmarking and optimization 196 | 197 | This implementation will provide professional, structured logging while maintaining all existing functionality and improving debuggability for MCP clients. -------------------------------------------------------------------------------- /src/services/LoggingService.ts: -------------------------------------------------------------------------------- 1 | import { Server } from '@modelcontextprotocol/sdk/server/index.js'; 2 | import { LoggingLevel } from '@modelcontextprotocol/sdk/types.js'; 3 | 4 | export interface LoggingConfig { 5 | defaultLevel: LoggingLevel; 6 | enableConsoleLogging: boolean; 7 | loggerName: string; 8 | performance: { 9 | enabled: boolean; 10 | slowOperationThreshold: number; // milliseconds 11 | }; 12 | serialization: { 13 | maxDepth: number; 14 | maxLength: number; 15 | includeStackTrace: boolean; 16 | }; 17 | } 18 | 19 | export interface LogContext { 20 | [key: string]: unknown; 21 | timestamp?: number; 22 | operation?: string; 23 | filePath?: string; 24 | toolName?: string; 25 | requestId?: string; 26 | duration?: number; 27 | } 28 | 29 | export interface PerformanceTimer { 30 | start(): void; 31 | end(operation: string, context?: LogContext): Promise; 32 | } 33 | 34 | export const DEFAULT_LOGGING_CONFIG: LoggingConfig = { 35 | defaultLevel: 'info', 36 | enableConsoleLogging: false, 37 | loggerName: 'file-context-server', 38 | performance: { 39 | enabled: false, 40 | slowOperationThreshold: 1000 41 | }, 42 | serialization: { 43 | maxDepth: 5, 44 | maxLength: 2000, 45 | includeStackTrace: true 46 | } 47 | }; 48 | 49 | class PerformanceTimerImpl implements PerformanceTimer { 50 | private startTime: number = 
0; 51 | 52 | constructor(private loggingService: LoggingService) {} 53 | 54 | start(): void { 55 | this.startTime = performance.now(); 56 | } 57 | 58 | async end(operation: string, context: LogContext = {}): Promise { 59 | const duration = performance.now() - this.startTime; 60 | const logContext = { 61 | ...context, 62 | operation, 63 | duration: Math.round(duration * 100) / 100 // Round to 2 decimal places 64 | }; 65 | 66 | if (duration > this.loggingService.config.performance.slowOperationThreshold) { 67 | await this.loggingService.warning(`Slow operation detected: ${operation}`, logContext); 68 | } else if (this.loggingService.config.performance.enabled) { 69 | await this.loggingService.debug(`Operation completed: ${operation}`, logContext); 70 | } 71 | } 72 | } 73 | 74 | export class LoggingService { 75 | private readonly levelPriority: Record = { 76 | debug: 0, 77 | info: 1, 78 | notice: 2, 79 | warning: 3, 80 | error: 4, 81 | critical: 5, 82 | alert: 6, 83 | emergency: 7 84 | }; 85 | 86 | public readonly config: LoggingConfig; 87 | private currentLevel: LoggingLevel; 88 | 89 | constructor(private server: Server, config: Partial = {}) { 90 | this.config = { 91 | ...DEFAULT_LOGGING_CONFIG, 92 | ...config 93 | }; 94 | this.currentLevel = this.config.defaultLevel; 95 | } 96 | 97 | // Core logging methods 98 | async debug(message: string, context?: LogContext): Promise { 99 | await this.log('debug', message, context); 100 | } 101 | 102 | async info(message: string, context?: LogContext): Promise { 103 | await this.log('info', message, context); 104 | } 105 | 106 | async notice(message: string, context?: LogContext): Promise { 107 | await this.log('notice', message, context); 108 | } 109 | 110 | async warning(message: string, context?: LogContext): Promise { 111 | await this.log('warning', message, context); 112 | } 113 | 114 | async error(message: string, error?: Error, context?: LogContext): Promise { 115 | await this.log('error', message, context, error); 116 | 
} 117 | 118 | async critical(message: string, error?: Error, context?: LogContext): Promise { 119 | await this.log('critical', message, context, error); 120 | } 121 | 122 | async alert(message: string, error?: Error, context?: LogContext): Promise { 123 | await this.log('alert', message, context, error); 124 | } 125 | 126 | async emergency(message: string, error?: Error, context?: LogContext): Promise { 127 | await this.log('emergency', message, context, error); 128 | } 129 | 130 | // Configuration methods 131 | setLevel(level: LoggingLevel): void { 132 | this.currentLevel = level; 133 | } 134 | 135 | getLevel(): LoggingLevel { 136 | return this.currentLevel; 137 | } 138 | 139 | shouldLog(level: LoggingLevel): boolean { 140 | return this.levelPriority[level] >= this.levelPriority[this.currentLevel]; 141 | } 142 | 143 | // Utility methods 144 | createTimer(): PerformanceTimer { 145 | return new PerformanceTimerImpl(this); 146 | } 147 | 148 | private async log(level: LoggingLevel, message: string, context?: LogContext, error?: Error): Promise { 149 | if (!this.shouldLog(level)) { 150 | return; 151 | } 152 | 153 | try { 154 | const logData = { 155 | message, 156 | timestamp: Date.now(), 157 | level, 158 | logger: this.config.loggerName, 159 | ...this.sanitizeContext(context || {}), 160 | ...(error && { error: this.formatError(error) }) 161 | }; 162 | 163 | await this.server.sendLoggingMessage({ 164 | level, 165 | logger: this.config.loggerName, 166 | data: logData 167 | }); 168 | } catch (loggingError) { 169 | // Fallback to console if MCP logging fails 170 | this.fallbackLog(level, message, { context, error, loggingError }); 171 | } 172 | } 173 | 174 | private sanitizeContext(context: LogContext): LogContext { 175 | try { 176 | // Remove circular references and limit depth 177 | const sanitized = JSON.parse(JSON.stringify(context, this.getCircularReplacer(), 2)); 178 | 179 | // Truncate large strings 180 | return this.truncateValues(sanitized) as LogContext; 181 | } 
catch (error) { 182 | return { 183 | contextError: 'Failed to serialize context', 184 | originalKeys: Object.keys(context) 185 | }; 186 | } 187 | } 188 | 189 | private getCircularReplacer() { 190 | const seen = new WeakSet(); 191 | return (key: string, value: unknown) => { 192 | if (typeof value === 'object' && value !== null) { 193 | if (seen.has(value)) { 194 | return '[Circular Reference]'; 195 | } 196 | seen.add(value); 197 | } 198 | return value; 199 | }; 200 | } 201 | 202 | private truncateValues(obj: unknown, maxLength = this.config.serialization.maxLength): unknown { 203 | if (typeof obj === 'string') { 204 | return obj.length > maxLength ? obj.substring(0, maxLength) + '...' : obj; 205 | } 206 | if (Array.isArray(obj)) { 207 | return obj.map(item => this.truncateValues(item, maxLength)); 208 | } 209 | if (typeof obj === 'object' && obj !== null) { 210 | const result: Record = {}; 211 | for (const [key, value] of Object.entries(obj)) { 212 | result[key] = this.truncateValues(value, maxLength); 213 | } 214 | return result; 215 | } 216 | return obj; 217 | } 218 | 219 | private formatError(error: Error): object { 220 | const errorObj = error as any; // Type assertion to access potential cause property 221 | return { 222 | name: error.name, 223 | message: error.message, 224 | stack: this.config.serialization.includeStackTrace ? 
error.stack : undefined, 225 | ...(errorObj.cause && { cause: this.formatError(errorObj.cause as Error) }) 226 | }; 227 | } 228 | 229 | private fallbackLog(level: LoggingLevel, message: string, data?: unknown): void { 230 | if (this.config.enableConsoleLogging) { 231 | const timestamp = new Date().toISOString(); 232 | const logLine = `[${timestamp}] ${level.toUpperCase()}: ${message}`; 233 | 234 | if (level === 'error' || level === 'critical') { 235 | console.error(logLine, data); 236 | } else { 237 | console.log(logLine, data); 238 | } 239 | } 240 | } 241 | } -------------------------------------------------------------------------------- /src/services/ProfileService.ts: -------------------------------------------------------------------------------- 1 | import { promises as fs } from 'fs'; 2 | import * as path from 'path'; 3 | import { Profile, ProfileConfig, ProfileState, ContextSpec } from '../types.js'; 4 | import { glob } from 'glob'; 5 | import { promisify } from 'util'; 6 | import { LoggingService } from './LoggingService.js'; 7 | 8 | const globAsync = promisify(glob); 9 | 10 | const DEFAULT_IGNORE_PATTERNS = [ 11 | '.git/', 12 | 'node_modules/', 13 | 'dist/', 14 | 'build/', 15 | '.env', 16 | '.env.*', 17 | '*.min.*', 18 | '*.bundle.*', 19 | ]; 20 | 21 | const INCLUDE_ALL = ['**/*']; 22 | 23 | export class ProfileService { 24 | private config: ProfileConfig; 25 | private state: ProfileState; 26 | private projectRoot: string; 27 | private activeProfile: Profile | null; 28 | private readonly configPath: string; 29 | private logger?: LoggingService; 30 | 31 | constructor(projectRoot: string, logger?: LoggingService) { 32 | this.logger = logger; 33 | this.logger?.debug('ProfileService initializing', { 34 | projectRoot, 35 | operation: 'profile_service_init' 36 | }); 37 | this.projectRoot = projectRoot; 38 | this.configPath = path.join(projectRoot, '.llm-context', 'config.toml'); 39 | this.config = this.createDefaultConfig(); 40 | this.state = { 41 | 
profile_name: 'code', 42 | full_files: [], 43 | outline_files: [], 44 | excluded_files: [], 45 | timestamp: Date.now() 46 | }; 47 | this.activeProfile = null; 48 | } 49 | 50 | private createDefaultConfig(): ProfileConfig { 51 | this.logger?.debug('Creating default configuration', { 52 | operation: 'create_default_config' 53 | }); 54 | const defaultProfile = this.createDefaultProfile(); 55 | return { 56 | profiles: { 57 | code: defaultProfile, 58 | 'code-prompt': { 59 | ...defaultProfile, 60 | name: 'code-prompt', 61 | prompt: 'prompt.md' 62 | } 63 | }, 64 | default_profile: 'code' 65 | }; 66 | } 67 | 68 | private createDefaultProfile(): Profile { 69 | return { 70 | name: 'code', 71 | gitignores: { 72 | full_files: DEFAULT_IGNORE_PATTERNS, 73 | outline_files: DEFAULT_IGNORE_PATTERNS 74 | }, 75 | only_includes: { 76 | full_files: INCLUDE_ALL, 77 | outline_files: INCLUDE_ALL 78 | }, 79 | settings: { 80 | no_media: true, 81 | with_user_notes: false 82 | } 83 | }; 84 | } 85 | 86 | public async initialize(): Promise { 87 | await this.logger?.info('ProfileService starting initialization', { 88 | projectRoot: this.projectRoot, 89 | configPath: this.configPath, 90 | operation: 'profile_service_init' 91 | }); 92 | await this.loadConfig(); 93 | await this.loadState(); 94 | } 95 | 96 | private async loadConfig(): Promise { 97 | const configPath = path.join(this.projectRoot, '.llm-context'); 98 | try { 99 | await fs.mkdir(configPath, { recursive: true }); 100 | await this.logger?.debug('Created config directory', { 101 | configPath, 102 | operation: 'load_config' 103 | }); 104 | 105 | // Create default config if it doesn't exist 106 | const configFile = path.join(configPath, 'config.json'); 107 | if (!await this.fileExists(configFile)) { 108 | await this.logger?.info('Creating default config file', { 109 | configFile, 110 | operation: 'load_config' 111 | }); 112 | const defaultConfig = this.createDefaultConfig(); 113 | await fs.writeFile(configFile, 
JSON.stringify(defaultConfig, null, 2)); 114 | this.config = defaultConfig; 115 | } else { 116 | await this.logger?.debug('Loading existing config file', { 117 | configFile, 118 | operation: 'load_config' 119 | }); 120 | const content = await fs.readFile(configFile, 'utf8'); 121 | this.config = JSON.parse(content); 122 | } 123 | 124 | // Log available profiles 125 | await this.logger?.info('Configuration loaded successfully', { 126 | availableProfiles: Object.keys(this.config.profiles), 127 | currentProfile: this.state.profile_name, 128 | operation: 'load_config' 129 | }); 130 | } catch (error) { 131 | await this.logger?.error('Failed to initialize configuration', error as Error, { 132 | projectRoot: this.projectRoot, 133 | configPath, 134 | operation: 'load_config' 135 | }); 136 | throw error; 137 | } 138 | } 139 | 140 | private async loadState(): Promise { 141 | const statePath = path.join(this.projectRoot, '.llm-context', 'state.json'); 142 | if (!await this.fileExists(statePath)) { 143 | await this.logger?.info('Creating default state file', { 144 | statePath, 145 | operation: 'load_state' 146 | }); 147 | await fs.writeFile(statePath, JSON.stringify(this.state, null, 2)); 148 | } else { 149 | await this.logger?.debug('Loading existing state file', { 150 | statePath, 151 | operation: 'load_state' 152 | }); 153 | const content = await fs.readFile(statePath, 'utf8'); 154 | this.state = JSON.parse(content); 155 | } 156 | } 157 | 158 | private async fileExists(filePath: string): Promise { 159 | try { 160 | await fs.access(filePath); 161 | return true; 162 | } catch { 163 | return false; 164 | } 165 | } 166 | 167 | public async setProfile(profileName: string): Promise { 168 | await this.logger?.info('Attempting to set profile', { 169 | profileName, 170 | availableProfiles: Object.keys(this.config.profiles), 171 | operation: 'set_profile' 172 | }); 173 | 174 | if (!this.config.profiles[profileName]) { 175 | throw new Error(`Profile '${profileName}' does not exist. 
Available profiles: ${Object.keys(this.config.profiles).join(', ')}`); 176 | } 177 | 178 | this.state = { 179 | ...this.state, 180 | profile_name: profileName, 181 | timestamp: Date.now() 182 | }; 183 | 184 | await this.saveState(); 185 | await this.logger?.info('Successfully set profile', { 186 | profileName, 187 | operation: 'set_profile' 188 | }); 189 | } 190 | 191 | public getContextSpec(): ContextSpec { 192 | const profile = this.resolveProfile(this.state.profile_name); 193 | return { 194 | profile, 195 | state: this.state 196 | }; 197 | } 198 | 199 | private resolveProfile(profileName: string): Profile { 200 | const profile = this.config.profiles[profileName]; 201 | if (!profile) { 202 | this.logger?.warning('Profile not found, using default', { 203 | requestedProfile: profileName, 204 | defaultProfile: this.config.default_profile, 205 | operation: 'resolve_profile' 206 | }); 207 | return this.config.profiles[this.config.default_profile]; 208 | } 209 | return profile; 210 | } 211 | 212 | private async saveState(): Promise { 213 | const statePath = path.join(this.projectRoot, '.llm-context', 'state.json'); 214 | await fs.writeFile(statePath, JSON.stringify(this.state, null, 2)); 215 | await this.logger?.debug('State saved successfully', { 216 | statePath, 217 | state: this.state, 218 | operation: 'save_state' 219 | }); 220 | } 221 | 222 | public async updateFileSelection(fullFiles: string[], outlineFiles: string[]): Promise { 223 | this.state = { 224 | ...this.state, 225 | full_files: fullFiles, 226 | outline_files: outlineFiles, 227 | timestamp: Date.now() 228 | }; 229 | 230 | await this.saveState(); 231 | } 232 | 233 | public getProfile(): Profile { 234 | return this.resolveProfile(this.state.profile_name); 235 | } 236 | 237 | public getState(): ProfileState { 238 | return this.state; 239 | } 240 | 241 | public async getActiveProfile(): Promise<{ profile: Profile }> { 242 | if (!this.activeProfile) { 243 | throw new Error('No active profile'); 244 | } 245 | 
return { profile: this.activeProfile }; 246 | } 247 | 248 | public async selectFiles(): Promise { 249 | if (!this.activeProfile) { 250 | throw new Error('No active profile'); 251 | } 252 | 253 | const fullFiles = await this.getFilteredFiles( 254 | this.activeProfile.gitignores.full_files, 255 | this.activeProfile.only_includes.full_files 256 | ); 257 | 258 | const outlineFiles = await this.getFilteredFiles( 259 | this.activeProfile.gitignores.outline_files, 260 | this.activeProfile.only_includes.outline_files 261 | ); 262 | 263 | this.state = { 264 | ...this.state, 265 | full_files: fullFiles, 266 | outline_files: outlineFiles, 267 | timestamp: Date.now() 268 | }; 269 | 270 | await this.saveState(); 271 | } 272 | 273 | private async getFilteredFiles(ignorePatterns: string[], includePatterns: string[]): Promise { 274 | const allFiles: string[] = []; 275 | for (const pattern of includePatterns) { 276 | const files = await globAsync(pattern, { 277 | ignore: ignorePatterns, 278 | nodir: true, 279 | dot: true 280 | }) as string[]; 281 | allFiles.push(...files); 282 | } 283 | return [...new Set(allFiles)]; 284 | } 285 | } -------------------------------------------------------------------------------- /docs/technical-design.md: -------------------------------------------------------------------------------- 1 | # Technical Design: MCP File Context Server Initial Read Optimization 2 | 3 | ## 1. 
System Requirements 4 | 5 | ### 1.1 Hardware Requirements 6 | - CPU: Multi-core processor (recommended minimum 4 cores) 7 | - Memory: Minimum 8GB RAM, recommended 16GB for large codebases 8 | - Storage: SSD recommended for optimal I/O performance 9 | 10 | ### 1.2 Software Requirements 11 | - Node.js 18.0 or higher 12 | - TypeScript 4.5 or higher 13 | - Operating System: Cross-platform (Windows, Linux, macOS) 14 | 15 | ### 1.3 Dependencies 16 | ```typescript 17 | interface DependencyRequirements { 18 | required: { 19 | 'lru-cache': '^7.0.0', 20 | 'chokidar': '^3.5.0', 21 | '@modelcontextprotocol/sdk': '^1.0.0', 22 | 'mime-types': '^2.1.0' 23 | }, 24 | optional: { 25 | 'worker-threads': '^1.0.0', // For parallel processing 26 | 'node-worker-threads-pool': '^1.5.0' // For worker pool management 27 | } 28 | } 29 | ``` 30 | 31 | ## 2. Architecture Components 32 | 33 | ### 2.1 Memory Management System 34 | ```typescript 35 | interface MemoryConfig { 36 | maxCacheSize: number; // Maximum memory for cache in MB 37 | workerMemoryLimit: number; // Memory limit per worker in MB 38 | gcThreshold: number; // GC trigger threshold (0.8 = 80%) 39 | emergencyFreeThreshold: number; // Emergency memory release threshold 40 | } 41 | 42 | class MemoryManager { 43 | private memoryUsage: number; 44 | private totalAllocated: number; 45 | 46 | constructor(private config: MemoryConfig) { 47 | this.setupMemoryMonitoring(); 48 | } 49 | 50 | private setupMemoryMonitoring() { 51 | if (globalThis.gc) { 52 | // Register memory pressure handlers 53 | this.setupMemoryPressureHandlers(); 54 | } 55 | } 56 | 57 | private shouldTriggerGC(): boolean { 58 | const usage = process.memoryUsage(); 59 | return (usage.heapUsed / usage.heapTotal) > this.config.gcThreshold; 60 | } 61 | } 62 | ``` 63 | 64 | ### 2.2 Worker Pool Management 65 | ```typescript 66 | interface WorkerPoolConfig { 67 | minWorkers: number; 68 | maxWorkers: number; 69 | idleTimeout: number; // ms before releasing idle worker 70 | 
taskTimeout: number; // ms before task timeout 71 | } 72 | 73 | class WorkerPoolManager { 74 | private workerPool: Map; 75 | private taskQueue: Queue; 76 | private activeWorkers: number; 77 | 78 | constructor(private config: WorkerPoolConfig) { 79 | this.initializeWorkerPool(); 80 | } 81 | 82 | private async initializeWorkerPool() { 83 | const initialWorkers = Math.min( 84 | this.config.minWorkers, 85 | os.cpus().length 86 | ); 87 | 88 | for (let i = 0; i < initialWorkers; i++) { 89 | await this.addWorker(); 90 | } 91 | } 92 | } 93 | ``` 94 | 95 | ### 2.3 Enhanced Cache Service Configuration 96 | ```typescript 97 | interface CacheConfig { 98 | maxSize: number; 99 | parallelProcessing: { 100 | enabled: boolean; 101 | maxWorkers?: number; 102 | chunkSize?: number; 103 | }; 104 | preloadStrategy: { 105 | enabled: boolean; 106 | maxPreloadItems: number; 107 | preloadDepth: number; 108 | }; 109 | progressiveLoading: { 110 | enabled: boolean; 111 | priorityLevels: number; 112 | }; 113 | memoryManagement: { 114 | maxMemoryPercent: number; 115 | gcThreshold: number; 116 | emergencyThreshold: number; 117 | }; 118 | storage: { 119 | persistToDisk: boolean; 120 | compressionLevel?: number; 121 | storageLocation?: string; 122 | }; 123 | } 124 | ``` 125 | 126 | ## 3. 
Core Components 127 | 128 | ### 3.1 Parallel File Reader Service 129 | ```typescript 130 | class ParallelFileReader { 131 | private workerPool: WorkerPoolManager; 132 | private memoryManager: MemoryManager; 133 | 134 | constructor(config: FileReaderConfig) { 135 | this.workerPool = new WorkerPoolManager({ 136 | minWorkers: 2, 137 | maxWorkers: os.cpus().length, 138 | idleTimeout: 60000, 139 | taskTimeout: 30000 140 | }); 141 | 142 | this.memoryManager = new MemoryManager({ 143 | maxCacheSize: config.maxCacheSize, 144 | workerMemoryLimit: config.workerMemoryLimit, 145 | gcThreshold: 0.8, 146 | emergencyFreeThreshold: 0.95 147 | }); 148 | } 149 | 150 | async readFileChunked(filepath: string, options: ReadOptions): Promise { 151 | const stats = await fs.stat(filepath); 152 | const chunks: Buffer[] = []; 153 | const chunkSize = this.calculateOptimalChunkSize(stats.size); 154 | 155 | // Distribute chunks to worker pool 156 | const chunkTasks = this.createChunkTasks(filepath, stats.size, chunkSize); 157 | const results = await this.workerPool.executeBatch(chunkTasks); 158 | 159 | return this.assembleResults(results, stats); 160 | } 161 | } 162 | ``` 163 | 164 | ### 3.2 Messaging Architecture 165 | ```typescript 166 | interface Message { 167 | id: string; 168 | type: MessageType; 169 | payload: any; 170 | metadata: { 171 | timestamp: number; 172 | priority: number; 173 | timeout?: number; 174 | }; 175 | } 176 | 177 | class MessageBroker { 178 | private subscriptions: Map>; 179 | private priorityQueue: PriorityQueue; 180 | 181 | constructor() { 182 | this.subscriptions = new Map(); 183 | this.priorityQueue = new PriorityQueue(); 184 | } 185 | 186 | async publish(message: Message): Promise { 187 | const handlers = this.subscriptions.get(message.type); 188 | if (handlers) { 189 | await Promise.all( 190 | Array.from(handlers).map(handler => 191 | handler(message.payload) 192 | ) 193 | ); 194 | } 195 | } 196 | } 197 | ``` 198 | 199 | ### 3.3 File Analysis Pipeline 200 | 
```typescript 201 | interface AnalysisPipeline { 202 | stages: Array<{ 203 | name: string; 204 | processor: (chunk: Buffer) => Promise; 205 | priority: number; 206 | }>; 207 | } 208 | 209 | class FileAnalyzer { 210 | private pipeline: AnalysisPipeline; 211 | private messageBroker: MessageBroker; 212 | 213 | constructor() { 214 | this.pipeline = { 215 | stages: [ 216 | { 217 | name: 'header', 218 | processor: this.analyzeFileHeader, 219 | priority: 1 220 | }, 221 | { 222 | name: 'content', 223 | processor: this.analyzeContent, 224 | priority: 2 225 | }, 226 | { 227 | name: 'metadata', 228 | processor: this.extractMetadata, 229 | priority: 3 230 | } 231 | ] 232 | }; 233 | } 234 | } 235 | ``` 236 | 237 | ## 4. Performance Optimization Strategies 238 | 239 | ### 4.1 Memory Management Strategies 240 | ```typescript 241 | class OptimizedMemoryStrategy { 242 | private readonly BUFFER_POOL_SIZE = 8192; 243 | private bufferPool: Buffer[]; 244 | 245 | constructor() { 246 | this.bufferPool = Array(10).fill(null) 247 | .map(() => Buffer.allocUnsafe(this.BUFFER_POOL_SIZE)); 248 | } 249 | 250 | private acquireBuffer(): Buffer { 251 | return this.bufferPool.pop() || 252 | Buffer.allocUnsafe(this.BUFFER_POOL_SIZE); 253 | } 254 | 255 | private releaseBuffer(buffer: Buffer) { 256 | if (this.bufferPool.length < 10) { 257 | this.bufferPool.push(buffer); 258 | } 259 | } 260 | } 261 | ``` 262 | 263 | ### 4.2 Worker Thread Management 264 | ```typescript 265 | class WorkerThreadManager { 266 | private workers: Worker[]; 267 | private taskQueue: PriorityQueue; 268 | private activeWorkers: Set; 269 | 270 | constructor(private config: WorkerConfig) { 271 | this.workers = []; 272 | this.taskQueue = new PriorityQueue(); 273 | this.activeWorkers = new Set(); 274 | } 275 | 276 | async executeTask(task: Task): Promise { 277 | const worker = await this.getAvailableWorker(); 278 | this.activeWorkers.add(worker); 279 | 280 | try { 281 | return await this.runTaskInWorker(worker, task); 282 | } 
finally { 283 | this.activeWorkers.delete(worker); 284 | this.releaseWorker(worker); 285 | } 286 | } 287 | } 288 | ``` 289 | 290 | ### 4.3 I/O Optimization 291 | ```typescript 292 | class IOOptimizer { 293 | private readonly PAGE_SIZE = 4096; 294 | private readonly READ_AHEAD = 4; 295 | 296 | constructor(private config: IOConfig) { 297 | this.initializeIOBuffers(); 298 | } 299 | 300 | private async readWithReadAhead( 301 | fd: number, 302 | position: number, 303 | size: number 304 | ): Promise { 305 | // Implement read-ahead buffering 306 | const readAheadSize = this.PAGE_SIZE * this.READ_AHEAD; 307 | const buffer = Buffer.allocUnsafe(readAheadSize); 308 | 309 | await fs.read(fd, buffer, 0, readAheadSize, position); 310 | return buffer.slice(0, size); 311 | } 312 | } 313 | ``` 314 | 315 | ## 5. Integration Points 316 | 317 | ### 5.1 Cache Service Integration 318 | ```typescript 319 | class EnhancedCacheService extends CacheService { 320 | private fileReader: ParallelFileReader; 321 | private memoryManager: MemoryManager; 322 | private messageBroker: MessageBroker; 323 | 324 | constructor(config: CacheServiceConfig) { 325 | super(config); 326 | this.initializeServices(config); 327 | } 328 | 329 | private async initializeServices(config: CacheServiceConfig) { 330 | this.fileReader = new ParallelFileReader(config); 331 | this.memoryManager = new MemoryManager(config.memoryManagement); 332 | this.messageBroker = new MessageBroker(); 333 | 334 | await this.setupMessageHandlers(); 335 | } 336 | } 337 | ``` 338 | 339 | ### 5.2 Event System Integration 340 | ```typescript 341 | interface EventConfig { 342 | maxListeners: number; 343 | errorThreshold: number; 344 | debugMode: boolean; 345 | } 346 | 347 | class EventSystem { 348 | private eventEmitter: EventEmitter; 349 | private errorCount: Map; 350 | 351 | constructor(private config: EventConfig) { 352 | this.eventEmitter = new EventEmitter(); 353 | this.errorCount = new Map(); 354 | 355 | this.setupErrorHandling(); 356 | 
} 357 | 358 | private setupErrorHandling() { 359 | this.eventEmitter.on('error', (error: Error) => { 360 | this.handleError(error); 361 | }); 362 | } 363 | } 364 | ``` 365 | 366 | ## 6. Configuration Examples 367 | 368 | ### 6.1 Development Configuration 369 | ```typescript 370 | const devConfig: CacheServiceConfig = { 371 | maxSize: 1000, 372 | parallelProcessing: { 373 | enabled: true, 374 | maxWorkers: 4, 375 | chunkSize: 1024 * 1024 376 | }, 377 | memoryManagement: { 378 | maxMemoryPercent: 70, 379 | gcThreshold: 0.8, 380 | emergencyThreshold: 0.95 381 | }, 382 | storage: { 383 | persistToDisk: true, 384 | compressionLevel: 1, 385 | storageLocation: './cache' 386 | } 387 | }; 388 | ``` 389 | 390 | ### 6.2 Production Configuration 391 | ```typescript 392 | const prodConfig: CacheServiceConfig = { 393 | maxSize: 5000, 394 | parallelProcessing: { 395 | enabled: true, 396 | maxWorkers: 8, 397 | chunkSize: 2 * 1024 * 1024 398 | }, 399 | memoryManagement: { 400 | maxMemoryPercent: 85, 401 | gcThreshold: 0.75, 402 | emergencyThreshold: 0.9 403 | }, 404 | storage: { 405 | persistToDisk: true, 406 | compressionLevel: 4, 407 | storageLocation: '/var/cache/mcp' 408 | } 409 | }; 410 | ``` 411 | 412 | ### 6.3 Containerized Configuration 413 | ```typescript 414 | const containerConfig: CacheServiceConfig = { 415 | maxSize: 2000, 416 | parallelProcessing: { 417 | enabled: true, 418 | maxWorkers: 2, 419 | chunkSize: 512 * 1024 420 | }, 421 | memoryManagement: { 422 | maxMemoryPercent: 60, 423 | gcThreshold: 0.7, 424 | emergencyThreshold: 0.85 425 | }, 426 | storage: { 427 | persistToDisk: false 428 | } 429 | }; 430 | ``` -------------------------------------------------------------------------------- /docs/mcp-logging-implementation-brief.md: -------------------------------------------------------------------------------- 1 | # MCP Logging Implementation Project Brief 2 | 3 | ## Executive Summary 4 | 5 | This document outlines the implementation plan for integrating the Model 
Context Protocol (MCP) SDK's logging API into the file-context-server. The goal is to replace console-based logging with structured MCP logging that provides better visibility, control, and debugging capabilities for clients. 6 | 7 | ## Current State Analysis 8 | 9 | ### Existing Logging Patterns 10 | The codebase currently uses `console.error()` extensively across multiple files: 11 | 12 | - **src/index.ts**: 17 console.error instances 13 | - **src/services/FileWatcherService.ts**: 9 console.error instances 14 | - **src/services/ProfileService.ts**: 13 console.error instances 15 | - **src/services/CodeAnalysisService.ts**: 2 console.error instances 16 | - **src/services/TemplateService.ts**: 2 console.error instances 17 | 18 | **Total**: 43+ logging statements that output to stderr 19 | 20 | ### Current Logging Categories 21 | 1. **Server Lifecycle**: Service initialization, startup, shutdown 22 | 2. **File Operations**: File access, validation, reading errors 23 | 3. **File Watching**: File system events, watcher lifecycle 24 | 4. **Profile Management**: Profile loading, switching, validation 25 | 5. **Cache Operations**: Cache hits, misses, invalidation 26 | 6. **Error Handling**: Tool execution errors, validation failures 27 | 28 | ### Limitations of Current Approach 29 | - No structured logging format 30 | - No log level control 31 | - Stderr output not visible to MCP clients 32 | - No centralized logging configuration 33 | - No log filtering or categorization 34 | - Limited debugging information for clients 35 | 36 | ## MCP Logging API Capabilities 37 | 38 | ### Core Components (@modelcontextprotocol/sdk@1.15.0) 39 | 40 | #### 1. Logging Levels 41 | ```typescript 42 | type LoggingLevel = "debug" | "info" | "notice" | "warning" | 43 | "error" | "critical" | "alert" | "emergency"; 44 | ``` 45 | 46 | #### 2. 
Server Method 47 | ```typescript 48 | async sendLoggingMessage(params: { 49 | level: LoggingLevel; 50 | logger?: string; // Optional logger name 51 | data: unknown; // Any JSON serializable content 52 | }): Promise<void> 53 | ``` 54 | 55 | #### 3. Client Control 56 | - Clients can request specific log levels via `logging/setLevel` requests 57 | - Server receives logging level preferences and filters accordingly 58 | - Real-time log streaming to clients 59 | 60 | #### 4. Message Structure 61 | ```typescript 62 | { 63 | method: "notifications/message", 64 | params: { 65 | level: LoggingLevel, 66 | logger?: string, 67 | data: any, // Message, object, or structured data 68 | _meta?: object // Optional metadata 69 | } 70 | } 71 | ``` 72 | 73 | ## Implementation Strategy 74 | 75 | ### Phase 1: Logging Service Architecture 76 | 77 | #### 1.1 Create LoggingService 78 | ```typescript 79 | interface LoggingServiceConfig { 80 | defaultLevel: LoggingLevel; 81 | enableConsoleLogging: boolean; // Fallback for development 82 | loggerName: string; 83 | bufferSize?: number; // For batching 84 | enableTimestamps: boolean; 85 | } 86 | 87 | class LoggingService { 88 | private server: Server; 89 | private config: LoggingServiceConfig; 90 | private currentLevel: LoggingLevel; 91 | private loggerName: string; 92 | 93 | async log(level: LoggingLevel, message: string, context?: object): Promise<void>; 94 | async debug(message: string, context?: object): Promise<void>; 95 | async info(message: string, context?: object): Promise<void>; 96 | async warning(message: string, context?: object): Promise<void>; 97 | async error(message: string, error?: Error, context?: object): Promise<void>; 98 | 99 | setLevel(level: LoggingLevel): void; 100 | shouldLog(level: LoggingLevel): boolean; 101 | } 102 | ``` 103 | 104 | #### 1.2 Integration Points 105 | - **FileContextServer**: Main server class integration 106 | - **Services**: Inject logging service into all service classes 107 | - **Error Handling**: Structured error logging with stack 
traces 108 | - **Performance Monitoring**: Log timing and cache statistics 109 | 110 | ### Phase 2: Migration Strategy 111 | 112 | #### 2.1 Gradual Replacement 113 | 1. Create LoggingService alongside existing console logging 114 | 2. Replace console.error with structured logging methods 115 | 3. Add contextual information to log messages 116 | 4. Remove console logging after validation 117 | 118 | #### 2.2 Log Level Mapping 119 | ```typescript 120 | // Current -> MCP Logging Level 121 | console.error("Error: ...") -> logger.error(message, error) 122 | console.error("Debug: ...") -> logger.debug(message) 123 | console.error("Info: ...") -> logger.info(message) 124 | console.error("Warning: ...") -> logger.warning(message) 125 | ``` 126 | 127 | #### 2.3 Enhanced Context 128 | ```typescript 129 | // Before 130 | console.error(`Error reading file ${path}:`, error); 131 | 132 | // After 133 | await logger.error("Failed to read file", error, { 134 | filePath: path, 135 | operation: "read_context", 136 | fileSize: stats?.size, 137 | encoding: "utf8" 138 | }); 139 | ``` 140 | 141 | ### Phase 3: Advanced Features 142 | 143 | #### 3.1 Structured Logging Categories 144 | ```typescript 145 | enum LogCategory { 146 | SERVER_LIFECYCLE = "server", 147 | FILE_OPERATIONS = "files", 148 | CACHE_OPERATIONS = "cache", 149 | SECURITY = "security", 150 | PERFORMANCE = "performance", 151 | USER_ACTIONS = "user" 152 | } 153 | ``` 154 | 155 | #### 3.2 Performance Logging 156 | ```typescript 157 | interface PerformanceContext { 158 | operation: string; 159 | duration: number; 160 | cacheHit?: boolean; 161 | fileCount?: number; 162 | bytesProcessed?: number; 163 | } 164 | 165 | await logger.info("Operation completed", { 166 | category: LogCategory.PERFORMANCE, 167 | ...performanceContext 168 | }); 169 | ``` 170 | 171 | #### 3.3 Request Correlation 172 | ```typescript 173 | interface RequestContext { 174 | requestId: string; 175 | toolName: string; 176 | userId?: string; 177 | startTime: 
number; 178 | } 179 | ``` 180 | 181 | ## Detailed Implementation Plan 182 | 183 | ### Step 1: LoggingService Foundation 184 | 1. Create `src/services/LoggingService.ts` 185 | 2. Define interfaces and configuration types 186 | 3. Implement core logging methods 187 | 4. Add level filtering logic 188 | 5. Create factory function for service instances 189 | 190 | ### Step 2: Server Integration 191 | 1. Modify `FileContextServer` constructor to accept LoggingService 192 | 2. Handle `logging/setLevel` requests from clients 193 | 3. Initialize default logging configuration 194 | 4. Add logging service to server capabilities 195 | 196 | ### Step 3: Service Migration 197 | 1. **FileWatcherService**: Replace 9 console.error calls 198 | 2. **ProfileService**: Replace 13 console.error calls 199 | 3. **CodeAnalysisService**: Replace 2 console.error calls 200 | 4. **TemplateService**: Replace 2 console.error calls 201 | 202 | ### Step 4: Main Server Migration 203 | 1. Replace 17 console.error calls in index.ts 204 | 2. Add structured context to file operations 205 | 3. Enhance error reporting with stack traces 206 | 4. Add performance logging for tool operations 207 | 208 | ### Step 5: Configuration & Environment 209 | 1. Add logging configuration to environment variables 210 | 2. Create development vs production logging profiles 211 | 3. Add configuration for log levels per category 212 | 4. 
Support for client-requested logging levels 213 | 214 | ## Configuration Design 215 | 216 | ### Environment Variables 217 | ```bash 218 | MCP_LOG_LEVEL=info # Default log level 219 | MCP_LOG_CONSOLE_FALLBACK=true # Enable console logging 220 | MCP_LOG_BUFFER_SIZE=100 # Message buffering 221 | MCP_LOG_PERFORMANCE_ENABLED=true # Performance logging 222 | MCP_LOG_CATEGORIES=server,files,cache # Enabled categories 223 | ``` 224 | 225 | ### Configuration File 226 | ```typescript 227 | interface LoggingConfig { 228 | defaultLevel: LoggingLevel; 229 | enableConsoleLogging: boolean; 230 | categories: { 231 | [category: string]: { 232 | enabled: boolean; 233 | level: LoggingLevel; 234 | }; 235 | }; 236 | performance: { 237 | enabled: boolean; 238 | thresholds: { 239 | slow_operation_ms: number; 240 | large_file_bytes: number; 241 | }; 242 | }; 243 | formatting: { 244 | includeTimestamp: boolean; 245 | includeStackTrace: boolean; 246 | maxMessageLength: number; 247 | }; 248 | } 249 | ``` 250 | 251 | ## Testing Strategy 252 | 253 | ### Unit Tests 254 | 1. LoggingService level filtering 255 | 2. Message formatting and serialization 256 | 3. Configuration loading and validation 257 | 4. Error handling and fallback behavior 258 | 259 | ### Integration Tests 260 | 1. Client logging level requests 261 | 2. Service integration with structured logging 262 | 3. Performance logging accuracy 263 | 4. Message delivery to clients 264 | 265 | ### Manual Testing 266 | 1. Client log visibility in MCP applications 267 | 2. Log level filtering behavior 268 | 3. Performance impact measurement 269 | 4. 
Error reproduction with enhanced context 270 | 271 | ## Performance Considerations 272 | 273 | ### Logging Overhead 274 | - **Asynchronous logging**: Non-blocking message sending 275 | - **Level filtering**: Early return for disabled levels 276 | - **Message buffering**: Batch small messages 277 | - **Lazy evaluation**: Context objects computed only when needed 278 | 279 | ### Memory Management 280 | - **Buffer limits**: Prevent memory leaks from log buffering 281 | - **Message size limits**: Truncate large objects 282 | - **Circular reference detection**: Safe serialization 283 | 284 | ### Network Efficiency 285 | - **Compression**: Consider message compression for large logs 286 | - **Batching**: Group related log messages 287 | - **Throttling**: Rate limiting for high-frequency events 288 | 289 | ## Migration Timeline 290 | 291 | ### Week 1: Foundation 292 | - [ ] Create LoggingService implementation 293 | - [ ] Add configuration interfaces 294 | - [ ] Create unit tests for core functionality 295 | 296 | ### Week 2: Server Integration 297 | - [ ] Integrate LoggingService into FileContextServer 298 | - [ ] Handle client logging/setLevel requests 299 | - [ ] Add logging capability advertisement 300 | 301 | ### Week 3: Service Migration 302 | - [ ] Migrate FileWatcherService logging 303 | - [ ] Migrate ProfileService logging 304 | - [ ] Migrate CodeAnalysisService logging 305 | - [ ] Migrate TemplateService logging 306 | 307 | ### Week 4: Main Server & Polish 308 | - [ ] Migrate main server logging 309 | - [ ] Add structured context to all log messages 310 | - [ ] Performance logging implementation 311 | - [ ] Documentation and examples 312 | 313 | ## Risk Assessment 314 | 315 | ### High Risk 316 | - **Breaking Changes**: Ensure logging doesn't affect core functionality 317 | - **Performance Impact**: Monitor CPU and memory usage 318 | - **Client Compatibility**: Verify logging works across MCP clients 319 | 320 | ### Medium Risk 321 | - **Message Format**: Ensure 
JSON serialization handles all data types 322 | - **Error Handling**: Fallback when logging service fails 323 | - **Configuration**: Validate all configuration options 324 | 325 | ### Low Risk 326 | - **Log Level Changes**: Runtime log level adjustment 327 | - **Message Content**: Formatting and truncation logic 328 | 329 | ## Success Criteria 330 | 331 | ### Functional Requirements 332 | 1. ✅ All console.error calls replaced with structured logging 333 | 2. ✅ Client-controlled log level filtering working 334 | 3. ✅ Structured context included in all log messages 335 | 4. ✅ Performance logging for operations >100ms 336 | 5. ✅ Error logs include stack traces and context 337 | 338 | ### Non-Functional Requirements 339 | 1. ✅ <5% performance overhead from logging 340 | 2. ✅ Memory usage remains stable under load 341 | 3. ✅ Log messages appear in real-time in clients 342 | 4. ✅ Configuration supports all deployment environments 343 | 5. ✅ 100% backwards compatibility maintained 344 | 345 | ## Future Enhancements 346 | 347 | ### Advanced Features 348 | - **Log Analytics**: Structured data for log analysis 349 | - **Metrics Collection**: Operational metrics via logging 350 | - **Distributed Tracing**: Request correlation across services 351 | - **Log Rotation**: File-based logging for debugging 352 | 353 | ### Monitoring Integration 354 | - **Health Checks**: Server health via log patterns 355 | - **Alerting**: Critical error detection and notification 356 | - **Dashboard Integration**: Log data visualization 357 | 358 | ## Conclusion 359 | 360 | This implementation will significantly improve the debugging and monitoring capabilities of the file-context-server while maintaining all existing functionality. The structured approach ensures minimal risk while providing maximum benefit to both developers and end users. 
361 | 362 | The phased implementation allows for incremental testing and validation, ensuring the logging system is robust and performant before full deployment. -------------------------------------------------------------------------------- /docs/logging-service-technical-spec.md: -------------------------------------------------------------------------------- 1 | # LoggingService Technical Specification 2 | 3 | ## Interface Definition 4 | 5 | ### Core Types 6 | ```typescript 7 | import { Server } from '@modelcontextprotocol/sdk/server/index.js'; 8 | import { LoggingLevel } from '@modelcontextprotocol/sdk/types.js'; 9 | 10 | export interface LoggingConfig { 11 | defaultLevel: LoggingLevel; 12 | enableConsoleLogging: boolean; 13 | loggerName: string; 14 | performance: { 15 | enabled: boolean; 16 | slowOperationThreshold: number; // milliseconds 17 | }; 18 | serialization: { 19 | maxDepth: number; 20 | maxLength: number; 21 | includeStackTrace: boolean; 22 | }; 23 | } 24 | 25 | export interface LogContext { 26 | [key: string]: unknown; 27 | timestamp?: number; 28 | operation?: string; 29 | filePath?: string; 30 | toolName?: string; 31 | requestId?: string; 32 | duration?: number; 33 | } 34 | 35 | export interface PerformanceTimer { 36 | start(): void; 37 | end(operation: string, context?: LogContext): Promise<void>; 38 | } 39 | ``` 40 | 41 | ### LoggingService Class 42 | ```typescript 43 | export class LoggingService { 44 | private server: Server; 45 | private config: LoggingConfig; 46 | private currentLevel: LoggingLevel; 47 | private readonly levelPriority: Record<LoggingLevel, number>; 48 | 49 | constructor(server: Server, config: Partial<LoggingConfig> = {}); 50 | 51 | // Core logging methods 52 | async debug(message: string, context?: LogContext): Promise<void>; 53 | async info(message: string, context?: LogContext): Promise<void>; 54 | async notice(message: string, context?: LogContext): Promise<void>; 55 | async warning(message: string, context?: LogContext): Promise<void>; 56 | async error(message: string, error?: Error, 
context?: LogContext): Promise<void>; 57 | async critical(message: string, error?: Error, context?: LogContext): Promise<void>; 58 | 59 | // Configuration methods 60 | setLevel(level: LoggingLevel): void; 61 | getLevel(): LoggingLevel; 62 | shouldLog(level: LoggingLevel): boolean; 63 | 64 | // Utility methods 65 | createTimer(): PerformanceTimer; 66 | private sanitizeContext(context: LogContext): LogContext; 67 | private formatError(error: Error): object; 68 | private fallbackLog(level: LoggingLevel, message: string, data?: unknown): void; 69 | } 70 | ``` 71 | 72 | ## Implementation Details 73 | 74 | ### Level Priority System 75 | ```typescript 76 | private readonly levelPriority: Record<LoggingLevel, number> = { 77 | debug: 0, 78 | info: 1, 79 | notice: 2, 80 | warning: 3, 81 | error: 4, 82 | critical: 5, 83 | alert: 6, 84 | emergency: 7 85 | }; 86 | ``` 87 | 88 | ### Core Logging Implementation 89 | ```typescript 90 | private async log(level: LoggingLevel, message: string, context?: LogContext, error?: Error): Promise<void> { 91 | if (!this.shouldLog(level)) { 92 | return; 93 | } 94 | 95 | try { 96 | const logData = { 97 | message, 98 | timestamp: Date.now(), 99 | level, 100 | logger: this.config.loggerName, 101 | ...this.sanitizeContext(context || {}), 102 | ...(error && { error: this.formatError(error) }) 103 | }; 104 | 105 | await this.server.sendLoggingMessage({ 106 | level, 107 | logger: this.config.loggerName, 108 | data: logData 109 | }); 110 | } catch (loggingError) { 111 | // Fallback to console if MCP logging fails 112 | this.fallbackLog(level, message, { context, error, loggingError }); 113 | } 114 | } 115 | ``` 116 | 117 | ### Performance Timer Implementation 118 | ```typescript 119 | class PerformanceTimerImpl implements PerformanceTimer { 120 | private startTime: number; 121 | 122 | constructor(private loggingService: LoggingService) {} 123 | 124 | start(): void { 125 | this.startTime = performance.now(); 126 | } 127 | 128 | async end(operation: string, context: LogContext = {}): Promise<void> 
{ 129 | const duration = performance.now() - this.startTime; 130 | const logContext = { 131 | ...context, 132 | operation, 133 | duration: Math.round(duration * 100) / 100 // Round to 2 decimal places 134 | }; 135 | 136 | if (duration > this.loggingService.config.performance.slowOperationThreshold) { 137 | await this.loggingService.warning(`Slow operation detected: ${operation}`, logContext); 138 | } else if (this.loggingService.config.performance.enabled) { 139 | await this.loggingService.debug(`Operation completed: ${operation}`, logContext); 140 | } 141 | } 142 | } 143 | ``` 144 | 145 | ### Context Sanitization 146 | ```typescript 147 | private sanitizeContext(context: LogContext): LogContext { 148 | try { 149 | // Remove circular references and limit depth 150 | const sanitized = JSON.parse(JSON.stringify(context, this.getCircularReplacer(), 2)); 151 | 152 | // Truncate large strings 153 | return this.truncateValues(sanitized); 154 | } catch (error) { 155 | return { 156 | contextError: 'Failed to serialize context', 157 | originalKeys: Object.keys(context) 158 | }; 159 | } 160 | } 161 | 162 | private getCircularReplacer() { 163 | const seen = new WeakSet(); 164 | return (key: string, value: unknown) => { 165 | if (typeof value === 'object' && value !== null) { 166 | if (seen.has(value)) { 167 | return '[Circular Reference]'; 168 | } 169 | seen.add(value); 170 | } 171 | return value; 172 | }; 173 | } 174 | 175 | private truncateValues(obj: unknown, maxLength = 1000): unknown { 176 | if (typeof obj === 'string') { 177 | return obj.length > maxLength ? obj.substring(0, maxLength) + '...' 
: obj; 178 | } 179 | if (Array.isArray(obj)) { 180 | return obj.map(item => this.truncateValues(item, maxLength)); 181 | } 182 | if (typeof obj === 'object' && obj !== null) { 183 | const result: Record = {}; 184 | for (const [key, value] of Object.entries(obj)) { 185 | result[key] = this.truncateValues(value, maxLength); 186 | } 187 | return result; 188 | } 189 | return obj; 190 | } 191 | ``` 192 | 193 | ### Error Formatting 194 | ```typescript 195 | private formatError(error: Error): object { 196 | return { 197 | name: error.name, 198 | message: error.message, 199 | stack: this.config.serialization.includeStackTrace ? error.stack : undefined, 200 | ...(error.cause && { cause: this.formatError(error.cause as Error) }) 201 | }; 202 | } 203 | ``` 204 | 205 | ## Integration Pattern 206 | 207 | ### Server Integration 208 | ```typescript 209 | // In FileContextServer constructor 210 | export class FileContextServer { 211 | private loggingService: LoggingService; 212 | 213 | constructor(config: Partial = {}) { 214 | // ... 
existing initialization 215 | 216 | // Initialize logging service 217 | this.loggingService = new LoggingService(this.server, { 218 | defaultLevel: process.env.MCP_LOG_LEVEL as LoggingLevel || 'info', 219 | enableConsoleLogging: process.env.NODE_ENV === 'development', 220 | loggerName: 'file-context-server', 221 | performance: { 222 | enabled: process.env.MCP_LOG_PERFORMANCE === 'true', 223 | slowOperationThreshold: 1000 224 | }, 225 | serialization: { 226 | maxDepth: 5, 227 | maxLength: 2000, 228 | includeStackTrace: true 229 | } 230 | }); 231 | 232 | // Handle client logging level requests 233 | this.server.setRequestHandler(SetLevelRequestSchema, async (request) => { 234 | this.loggingService.setLevel(request.params.level); 235 | await this.loggingService.info('Log level changed', { 236 | newLevel: request.params.level, 237 | requestedBy: 'client' 238 | }); 239 | return {}; 240 | }); 241 | 242 | // Pass logging service to other services 243 | this.fileWatcherService = new FileWatcherService(this.loggingService); 244 | this.profileService = new ProfileService(process.cwd(), this.loggingService); 245 | // ... etc 246 | } 247 | } 248 | ``` 249 | 250 | ### Service Integration Pattern 251 | ```typescript 252 | // Example: FileWatcherService integration 253 | export class FileWatcherService extends EventEmitter { 254 | constructor(private logger?: LoggingService) { 255 | super(); 256 | // ... existing initialization 257 | } 258 | 259 | async watch(targetPath: string): Promise { 260 | const timer = this.logger?.createTimer(); 261 | timer?.start(); 262 | 263 | try { 264 | // ... 
existing watch logic 265 | await this.logger?.info('File watcher started', { 266 | targetPath, 267 | operation: 'watch_start' 268 | }); 269 | 270 | timer?.end('watch_initialization', { targetPath }); 271 | } catch (error) { 272 | await this.logger?.error('Failed to start file watcher', error, { 273 | targetPath, 274 | operation: 'watch_start' 275 | }); 276 | throw error; 277 | } 278 | } 279 | } 280 | ``` 281 | 282 | ## Migration Examples 283 | 284 | ### Before/After Comparisons 285 | 286 | #### Simple Error Logging 287 | ```typescript 288 | // Before 289 | console.error(`Error reading file ${filePath}:`, error); 290 | 291 | // After 292 | await this.loggingService.error('Failed to read file', error, { 293 | filePath, 294 | operation: 'read_file', 295 | encoding: 'utf8' 296 | }); 297 | ``` 298 | 299 | #### Debug Information 300 | ```typescript 301 | // Before 302 | console.error('[FileContextServer] Services initialized'); 303 | 304 | // After 305 | await this.loggingService.info('Server services initialized', { 306 | operation: 'server_startup', 307 | services: ['fileWatcher', 'profile', 'template', 'codeAnalysis'] 308 | }); 309 | ``` 310 | 311 | #### Performance Sensitive Operations 312 | ```typescript 313 | // Before 314 | const files = await this.listFiles(dirPath); 315 | console.error(`Found files: ${files.length}`); 316 | 317 | // After 318 | const timer = this.loggingService.createTimer(); 319 | timer.start(); 320 | 321 | const files = await this.listFiles(dirPath); 322 | 323 | await timer.end('list_files', { 324 | directory: dirPath, 325 | fileCount: files.length, 326 | recursive: options.recursive 327 | }); 328 | ``` 329 | 330 | ## Default Configuration 331 | 332 | ```typescript 333 | export const DEFAULT_LOGGING_CONFIG: LoggingConfig = { 334 | defaultLevel: 'info', 335 | enableConsoleLogging: false, 336 | loggerName: 'file-context-server', 337 | performance: { 338 | enabled: false, 339 | slowOperationThreshold: 1000 340 | }, 341 | serialization: { 342 | 
maxDepth: 5, 343 | maxLength: 2000, 344 | includeStackTrace: true 345 | } 346 | }; 347 | ``` 348 | 349 | ## Error Handling Strategy 350 | 351 | ### Logging Service Failures 352 | ```typescript 353 | private fallbackLog(level: LoggingLevel, message: string, data?: unknown): void { 354 | if (this.config.enableConsoleLogging) { 355 | const timestamp = new Date().toISOString(); 356 | const logLine = `[${timestamp}] ${level.toUpperCase()}: ${message}`; 357 | 358 | if (level === 'error' || level === 'critical') { 359 | console.error(logLine, data); 360 | } else { 361 | console.log(logLine, data); 362 | } 363 | } 364 | } 365 | ``` 366 | 367 | ### Graceful Degradation 368 | 1. **MCP Connection Issues**: Fall back to console logging 369 | 2. **Serialization Errors**: Log error with minimal context 370 | 3. **Performance Timer Failures**: Continue operation without timing 371 | 4. **Configuration Errors**: Use default configuration with warning 372 | 373 | ## Testing Strategy 374 | 375 | ### Unit Tests 376 | ```typescript 377 | describe('LoggingService', () => { 378 | let mockServer: jest.Mocked; 379 | let loggingService: LoggingService; 380 | 381 | beforeEach(() => { 382 | mockServer = { 383 | sendLoggingMessage: jest.fn().mockResolvedValue(undefined) 384 | } as any; 385 | 386 | loggingService = new LoggingService(mockServer, { 387 | defaultLevel: 'debug', 388 | enableConsoleLogging: false, 389 | loggerName: 'test-logger' 390 | }); 391 | }); 392 | 393 | test('should filter messages based on log level', async () => { 394 | loggingService.setLevel('warning'); 395 | 396 | await loggingService.debug('debug message'); 397 | await loggingService.warning('warning message'); 398 | 399 | expect(mockServer.sendLoggingMessage).toHaveBeenCalledTimes(1); 400 | expect(mockServer.sendLoggingMessage).toHaveBeenCalledWith({ 401 | level: 'warning', 402 | logger: 'test-logger', 403 | data: expect.objectContaining({ 404 | message: 'warning message' 405 | }) 406 | }); 407 | }); 408 | 409 | 
test('should handle circular references in context', async () => { 410 | const circular: any = { name: 'test' }; 411 | circular.self = circular; 412 | 413 | await loggingService.info('test message', { circular }); 414 | 415 | expect(mockServer.sendLoggingMessage).toHaveBeenCalledWith({ 416 | level: 'info', 417 | logger: 'test-logger', 418 | data: expect.objectContaining({ 419 | circular: expect.objectContaining({ 420 | self: '[Circular Reference]' 421 | }) 422 | }) 423 | }); 424 | }); 425 | }); 426 | ``` 427 | 428 | This implementation provides a robust, feature-complete logging service that integrates seamlessly with the MCP protocol while maintaining performance and reliability. -------------------------------------------------------------------------------- /docs/mcp-logging-implementation-plan.md: -------------------------------------------------------------------------------- 1 | # MCP Logging Implementation Plan 2 | 3 | ## Executive Summary 4 | 5 | This plan outlines the step-by-step implementation of MCP SDK logging to replace the current 43+ console.error statements across 5 files. The implementation will provide real-time, structured logging visible to MCP clients with configurable log levels. 6 | 7 | ## Implementation Phases 8 | 9 | ### Phase 1: Foundation Setup (Week 1) 10 | 11 | #### 1.1 Create LoggingService Infrastructure 12 | **Priority: Critical** 13 | **Estimated Time: 2-3 days** 14 | 15 | **Tasks:** 16 | 1. Create `src/services/LoggingService.ts` with complete interface 17 | 2. Implement level filtering and context enrichment 18 | 3. Add graceful fallback to console logging 19 | 4. 
Create comprehensive TypeScript types 20 | 21 | **Files to Create:** 22 | - `src/services/LoggingService.ts` (new) 23 | - `src/types/logging.ts` (new, extract from types.ts) 24 | 25 | **Key Features:** 26 | - 8 log levels (debug → emergency) with priority filtering 27 | - Structured context enrichment with operation timing 28 | - JSON serialization with depth/length limits 29 | - Performance timer utilities 30 | - Error formatting with stack traces 31 | 32 | #### 1.2 Server Integration 33 | **Priority: Critical** 34 | **Estimated Time: 1-2 days** 35 | 36 | **Tasks:** 37 | 1. Integrate LoggingService into FileContextServer constructor 38 | 2. Add `logging/setLevel` request handler 39 | 3. Initialize logging service before other services 40 | 4. Add logging configuration to server startup 41 | 42 | **Files to Modify:** 43 | - `src/index.ts` (constructor, request handlers) 44 | 45 | ### Phase 2: Service Migration (Week 2) 46 | 47 | #### 2.1 High Priority Files 48 | **Priority: High** 49 | **Estimated Time: 2-3 days** 50 | 51 | **Target: `src/index.ts` (17 console statements)** 52 | ```typescript 53 | // Current 54 | console.error(`Error reading file ${path}:`, error); 55 | 56 | // Target 57 | await this.logger.error("Failed to read file", error, { 58 | filePath: path, 59 | operation: "read_context", 60 | toolName: request.params.name, 61 | fileSize: stats?.size, 62 | encoding: detectedEncoding 63 | }); 64 | ``` 65 | 66 | **Migration Strategy:** 67 | 1. Inject LoggingService into FileContextServer constructor 68 | 2. Replace console.error with structured logging calls 69 | 3. Add operational context (file paths, tool names, sizes) 70 | 4. Include performance timing for file operations 71 | 5. 
Maintain error object for stack traces 72 | 73 | #### 2.2 Medium Priority Files 74 | **Priority: Medium** 75 | **Estimated Time: 2-3 days** 76 | 77 | **Target Files:** 78 | - `src/services/ProfileService.ts` (13 console statements) 79 | - `src/services/FileWatcherService.ts` (9 console statements) 80 | 81 | **ProfileService Migration:** 82 | ```typescript 83 | // Current 84 | console.error('[ProfileService] Failed to initialize:', error); 85 | 86 | // Target 87 | await this.logger.error("Profile service initialization failed", error, { 88 | operation: "profile_init", 89 | projectRoot: this.projectRoot, 90 | configPath: this.configPath, 91 | availableProfiles: Object.keys(this.config?.profiles || {}) 92 | }); 93 | ``` 94 | 95 | **FileWatcherService Migration:** 96 | ```typescript 97 | // Current 98 | console.error(`File ${filePath} has been changed`); 99 | 100 | // Target 101 | await this.logger.debug("File change detected", { 102 | operation: "file_watch", 103 | filePath, 104 | event: "change", 105 | watcherPath: this.targetPath 106 | }); 107 | ``` 108 | 109 | ### Phase 3: Remaining Services (Week 3) 110 | 111 | #### 3.1 Low Priority Files 112 | **Priority: Low** 113 | **Estimated Time: 1-2 days** 114 | 115 | **Target Files:** 116 | - `src/services/CodeAnalysisService.ts` (2 console statements) 117 | - `src/services/TemplateService.ts` (2 console statements) 118 | 119 | #### 3.2 Service Constructor Injection 120 | **Priority: Medium** 121 | **Estimated Time: 1-2 days** 122 | 123 | **Tasks:** 124 | 1. Update all service constructors to accept LoggingService 125 | 2. Update FileContextServer to pass logger to all services 126 | 3. 
Ensure dependency injection pattern consistency 127 | 128 | ### Phase 4: Testing & Validation (Week 4) 129 | 130 | #### 4.1 Unit Testing 131 | **Estimated Time: 2-3 days** 132 | 133 | **Test Files to Create:** 134 | - `src/services/__tests__/LoggingService.test.ts` 135 | - Integration tests for each migrated service 136 | 137 | **Test Coverage:** 138 | - Level filtering behavior 139 | - Context enrichment accuracy 140 | - Error serialization 141 | - Performance timing 142 | - Fallback logging behavior 143 | - Client setLevel request handling 144 | 145 | #### 4.2 Integration Testing 146 | **Estimated Time: 1-2 days** 147 | 148 | **Validation:** 149 | - MCP client can receive structured logs 150 | - Log level filtering works correctly 151 | - Performance impact is minimal 152 | - No regression in existing functionality 153 | 154 | ## File-by-File Migration Details 155 | 156 | ### 1. `src/index.ts` (17 statements) 157 | **Lines to Migrate:** File operations, tool execution, cache operations 158 | **Context Needed:** filePath, toolName, operation, fileSize, encoding, duration 159 | **Log Levels:** error (file operations), warning (cache issues), info (operations) 160 | 161 | ### 2. `src/services/ProfileService.ts` (13 statements) 162 | **Lines to Migrate:** Initialization, configuration, profile switching 163 | **Context Needed:** projectRoot, profileName, configPath, availableProfiles 164 | **Log Levels:** debug (state changes), error (failures), info (operations) 165 | 166 | ### 3. `src/services/FileWatcherService.ts` (9 statements) 167 | **Lines to Migrate:** File system events, watcher lifecycle 168 | **Context Needed:** filePath, watcherPath, event, dirPath 169 | **Log Levels:** debug (events), info (lifecycle), error (failures) 170 | 171 | ### 4. 
`src/services/CodeAnalysisService.ts` (2 statements) 172 | **Lines to Migrate:** Analysis failures 173 | **Context Needed:** analysisType, filePath, analysisOptions 174 | **Log Levels:** warning (analysis failures) 175 | 176 | ### 5. `src/services/TemplateService.ts` (2 statements) 177 | **Lines to Migrate:** Template operations 178 | **Context Needed:** templateName, templatePath, renderContext 179 | **Log Levels:** error (template failures) 180 | 181 | ## Implementation Code Examples 182 | 183 | ### LoggingService Creation 184 | ```typescript 185 | // src/services/LoggingService.ts 186 | import { Server } from '@modelcontextprotocol/sdk/server/index.js'; 187 | import { LoggingLevel } from '@modelcontextprotocol/sdk/types.js'; 188 | 189 | export interface LoggingConfig { 190 | defaultLevel: LoggingLevel; 191 | enableConsoleLogging: boolean; 192 | loggerName: string; 193 | performance: { 194 | enabled: boolean; 195 | slowOperationThreshold: number; 196 | }; 197 | } 198 | 199 | export class LoggingService { 200 | private server: Server; 201 | private config: LoggingConfig; 202 | private currentLevel: LoggingLevel; 203 | 204 | constructor(server: Server, config: Partial<LoggingConfig> = {}) { 205 | this.server = server; 206 | this.config = { 207 | defaultLevel: 'info', 208 | enableConsoleLogging: true, 209 | loggerName: 'file-context-server', 210 | performance: { 211 | enabled: true, 212 | slowOperationThreshold: 1000 213 | }, 214 | ...config 215 | }; 216 | this.currentLevel = this.config.defaultLevel; 217 | } 218 | 219 | async error(message: string, error?: Error, context?: LogContext): Promise<void> { 220 | await this.log('error', message, context, error); 221 | } 222 | 223 | private async log(level: LoggingLevel, message: string, context?: LogContext, error?: Error): Promise<void> { 224 | if (!this.shouldLog(level)) return; 225 | 226 | const logData = { 227 | message, 228 | timestamp: Date.now(), 229 | level, 230 | logger: this.config.loggerName, 231 | ...context, 232 | ...(error && { 
error: this.formatError(error) }) 233 | }; 234 | 235 | try { 236 | await this.server.sendLoggingMessage({ 237 | level, 238 | logger: this.config.loggerName, 239 | data: logData 240 | }); 241 | } catch (err) { 242 | this.fallbackLog(level, message, logData); 243 | } 244 | } 245 | } 246 | ``` 247 | 248 | ### Server Integration 249 | ```typescript 250 | // src/index.ts modifications 251 | import { LoggingService } from './services/LoggingService.js'; 252 | 253 | export class FileContextServer { 254 | private server: Server; 255 | private logger: LoggingService; 256 | // ... other services 257 | 258 | constructor() { 259 | this.server = new Server(/* ... */); 260 | this.logger = new LoggingService(this.server); 261 | 262 | // Initialize other services with logger 263 | this.profileService = new ProfileService(projectRoot, this.logger); 264 | this.fileWatcher = new FileWatcherService(this.logger); 265 | // ... 266 | 267 | // Add logging level handler 268 | this.server.setRequestHandler(ListRequestsRequestSchema, async () => ({ 269 | requests: [ 270 | // ... existing requests 271 | { 272 | name: "logging/setLevel", 273 | description: "Set the logging level" 274 | } 275 | ] 276 | })); 277 | 278 | this.server.setRequestHandler(CallRequestSchema, async (request) => { 279 | if (request.params.name === "logging/setLevel") { 280 | const { level } = request.params.arguments as { level: LoggingLevel }; 281 | this.logger.setLevel(level); 282 | return { success: true }; 283 | } 284 | // ... 
existing handlers 285 | }); 286 | } 287 | } 288 | ``` 289 | 290 | ### Service Migration Example 291 | ```typescript 292 | // Before (src/services/ProfileService.ts) 293 | console.error('[ProfileService] Failed to initialize:', error); 294 | 295 | // After 296 | await this.logger.error("Profile service initialization failed", error, { 297 | operation: "profile_init", 298 | projectRoot: this.projectRoot, 299 | configPath: this.configPath, 300 | timestamp: Date.now() 301 | }); 302 | ``` 303 | 304 | ## Testing Strategy 305 | 306 | ### Unit Tests 307 | ```typescript 308 | // src/services/__tests__/LoggingService.test.ts 309 | describe('LoggingService', () => { 310 | test('should filter logs by level', async () => { 311 | const mockServer = { sendLoggingMessage: jest.fn() }; 312 | const logger = new LoggingService(mockServer as any); 313 | 314 | logger.setLevel('error'); 315 | await logger.debug('debug message'); 316 | await logger.error('error message'); 317 | 318 | expect(mockServer.sendLoggingMessage).toHaveBeenCalledTimes(1); 319 | expect(mockServer.sendLoggingMessage).toHaveBeenCalledWith({ 320 | level: 'error', 321 | logger: 'file-context-server', 322 | data: expect.objectContaining({ message: 'error message' }) 323 | }); 324 | }); 325 | }); 326 | ``` 327 | 328 | ### Integration Tests 329 | ```typescript 330 | // Test MCP client can receive logs 331 | test('client receives structured logs', async () => { 332 | const transport = new TestTransport(); 333 | const client = new Client({ name: "test-client", version: "1.0.0" }, { 334 | capabilities: { logging: {} } 335 | }); 336 | 337 | await client.connect(transport); 338 | 339 | // Trigger server operation that logs 340 | await client.request({ method: "tools/call", params: { name: "read_context", arguments: { path: "test.txt" } } }); 341 | 342 | // Verify logging message received 343 | expect(transport.receivedMessages).toContainEqual( 344 | expect.objectContaining({ 345 | method: "notifications/message", 346 | 
params: expect.objectContaining({ 347 | level: "info", 348 | logger: "file-context-server" 349 | }) 350 | }) 351 | ); 352 | }); 353 | ``` 354 | 355 | ## Success Criteria 356 | 357 | ### Functional Requirements 358 | - [ ] All 43+ console statements migrated to MCP logging 359 | - [ ] Client can control log levels via `logging/setLevel` 360 | - [ ] Structured context included in all log messages 361 | - [ ] Error objects properly serialized with stack traces 362 | - [ ] Performance timing for slow operations (>1s) 363 | - [ ] Graceful fallback to console when MCP unavailable 364 | 365 | ### Performance Requirements 366 | - [ ] Logging overhead < 5ms per message 367 | - [ ] No impact on file operation performance 368 | - [ ] Memory usage stable with large log volumes 369 | 370 | ### Compatibility Requirements 371 | - [ ] No breaking changes to existing API 372 | - [ ] Works with all existing MCP clients 373 | - [ ] Backwards compatible console logging for development 374 | 375 | ## Risk Mitigation 376 | 377 | ### High Risk: Breaking Changes 378 | **Mitigation:** Implement feature flags and graceful degradation 379 | 380 | ### Medium Risk: Performance Impact 381 | **Mitigation:** Async logging, level filtering, message batching 382 | 383 | ### Low Risk: Client Compatibility 384 | **Mitigation:** Optional logging capability, fallback behavior 385 | 386 | ## Timeline Summary 387 | 388 | | Week | Phase | Focus | Deliverables | 389 | |------|-------|-------|--------------| 390 | | 1 | Foundation | LoggingService + Server Integration | Core service, types, integration | 391 | | 2 | Migration | High/Medium priority files | 39/43 statements migrated | 392 | | 3 | Completion | Remaining files + polish | All migrations complete | 393 | | 4 | Validation | Testing + documentation | Production ready | 394 | 395 | ## Next Steps 396 | 397 | 1. **Start with Phase 1.1**: Create the LoggingService infrastructure 398 | 2. 
**Test early**: Implement basic functionality and test with simple MCP client 399 | 3. **Migrate incrementally**: One file at a time, testing each migration 400 | 4. **Monitor performance**: Ensure no degradation in file operations 401 | 5. **Document patterns**: Create migration examples for future reference 402 | 403 | This plan provides a structured approach to implementing professional, client-visible logging while maintaining full backwards compatibility and ensuring a smooth transition from console-based logging. -------------------------------------------------------------------------------- /PRPs/getFiles-mcp-tool.md: -------------------------------------------------------------------------------- 1 | name: "getFiles MCP Tool Implementation" 2 | description: | 3 | 4 | ## Purpose 5 | Implement a new MCP server tool that accepts an array of file paths and quickly retrieves each file's content and metadata, returning a predictable schema for MCP clients. This enables efficient batch file retrieval for MCP client applications. 6 | 7 | ## Core Principles 8 | 1. **Context is King**: Include ALL necessary documentation, examples, and caveats 9 | 2. **Validation Loops**: Provide executable tests/lints the AI can run and fix 10 | 3. **Information Dense**: Use keywords and patterns from the codebase 11 | 4. **Progressive Success**: Start simple, validate, then enhance 12 | 5. **Global rules**: Be sure to follow all rules in CLAUDE.md 13 | 14 | --- 15 | 16 | ## Goal 17 | Implement a new MCP tool called `getFiles` that accepts an array of file paths and returns structured file data (content, metadata) in a fixed, predictable schema that MCP clients can reliably work with. 
18 | 19 | ## Why 20 | - **Business value**: Enables efficient batch file retrieval for MCP clients 21 | - **Integration**: Provides predictable interface for file access across different MCP client applications 22 | - **Problems solved**: Eliminates need for multiple individual file requests, provides standardized file metadata format 23 | 24 | ## What 25 | A new MCP tool that: 26 | - Accepts array of file paths via `filePathList` parameter 27 | - Returns structured array of file objects with content and metadata 28 | - Uses existing file reading infrastructure for consistency 29 | - Handles errors gracefully with partial results 30 | - Follows established security and validation patterns 31 | 32 | ### Success Criteria 33 | - [ ] `getFiles` tool defined in MCP tool schema 34 | - [ ] Tool accepts `filePathList` array as per examples/getFiles_request_schema.json 35 | - [ ] Returns file objects matching examples/getFiles_response_schema.json 36 | - [ ] Handles non-existent files gracefully without failing entire request 37 | - [ ] Uses existing security validation (path access checks) 38 | - [ ] All validation gates pass (build, tests, lint) 39 | 40 | ## All Needed Context 41 | 42 | ### Documentation & References 43 | ```yaml 44 | # MUST READ - Include these in your context window 45 | - file: /mnt/c/Projects/mcp-servers/mcp-context-server/src/index.ts 46 | why: Contains FileContextServer class and existing tool implementation patterns 47 | critical: handleReadFile, handleListFiles methods show file reading patterns 48 | 49 | - file: /mnt/c/Projects/mcp-servers/mcp-context-server/src/tools.ts 50 | why: Tool schema definitions and input validation patterns 51 | critical: Shows how to define new tools with proper inputSchema 52 | 53 | - file: /mnt/c/Projects/mcp-servers/mcp-context-server/src/types.ts 54 | why: FileMetadata, FileContent interfaces and error handling patterns 55 | critical: FileOperationError class and FileErrorCode enum for error handling 56 | 57 | - 
file: /mnt/c/Projects/mcp-servers/mcp-context-server/examples/getFiles_request_schema.json 58 | why: Exact request schema that must be implemented 59 | critical: filePathList array with fileName objects 60 | 61 | - file: /mnt/c/Projects/mcp-servers/mcp-context-server/examples/getFiles_response_schema.json 62 | why: Exact response schema that must be returned 63 | critical: Array of file objects with fileName, content, fileSize, lastModifiedDateTime 64 | 65 | - url: https://modelcontextprotocol.io/specification/2025-06-18 66 | why: MCP protocol specifications for tool development 67 | critical: Tool security principles and response format requirements 68 | 69 | - file: /mnt/c/Projects/mcp-servers/mcp-context-server/CLAUDE.md 70 | why: Development commands and architecture patterns 71 | critical: npm run build, npm run dev, npm test commands for validation 72 | ``` 73 | 74 | ### Current Codebase tree (relevant files) 75 | ```bash 76 | src/ 77 | ├── index.ts # Main FileContextServer class with tool handlers 78 | ├── tools.ts # MCP tool schema definitions 79 | ├── types.ts # TypeScript interfaces and error classes 80 | └── services/ # Supporting services (FileWatcherService, etc.) 
81 | 82 | examples/ 83 | ├── getFiles_request_schema.json # Required request format 84 | └── getFiles_response_schema.json # Required response format 85 | 86 | docs/ 87 | ├── llms-full.txt # MCP client compatibility information 88 | └── mcp-typescript-readme.md # MCP TypeScript SDK patterns 89 | ``` 90 | 91 | ### Desired Codebase tree with files to be modified 92 | ```bash 93 | src/ 94 | ├── index.ts # ADD: handleGetFiles method, ADD: case 'getFiles' to switch 95 | ├── tools.ts # ADD: getFiles tool schema definition 96 | └── types.ts # No changes needed (existing interfaces sufficient) 97 | ``` 98 | 99 | ### Known Gotchas & Library Quirks 100 | ```typescript 101 | // CRITICAL: MCP requires explicit user consent before tool invocation 102 | // CRITICAL: Always use path.resolve() for security validation via validateAccess() 103 | // CRITICAL: Use existing createJsonResponse() method for consistent response format 104 | // CRITICAL: Handle file errors gracefully - return partial results, don't fail entire request 105 | // CRITICAL: FileMetadata.modifiedTime uses ISO string format (toISOString()) 106 | // CRITICAL: Tool schema must match exact format in tools.ts pattern 107 | // CRITICAL: Use existing readFileWithEncoding() method for consistent file reading 108 | // CRITICAL: Follow existing error handling with FileOperationError class 109 | ``` 110 | 111 | ## Implementation Blueprint 112 | 113 | ### Data models and structure 114 | 115 | Using existing interfaces from types.ts - no new models needed: 116 | ```typescript 117 | // Existing interfaces to leverage: 118 | interface FileMetadata { 119 | size: number; 120 | mimeType: string; 121 | modifiedTime: string; // ISO format 122 | createdTime: string; 123 | isDirectory: boolean; 124 | // ... 
other optional fields 125 | } 126 | 127 | // Response will be array of objects matching getFiles_response_schema.json: 128 | interface GetFilesResponse { 129 | fileName: string; 130 | content: string; 131 | fileSize: number; 132 | lastModifiedDateTime: string; // ISO format 133 | } 134 | ``` 135 | 136 | ### List of tasks to be completed in order 137 | 138 | ```yaml 139 | Task 1: Add getFiles tool schema to tools.ts 140 | MODIFY src/tools.ts: 141 | - FIND: export const tools: typeof ToolSchema[] = [ 142 | - ADD: New tool object after existing tools 143 | - PATTERN: Mirror existing tool structures with name, description, inputSchema 144 | - SCHEMA: Match examples/getFiles_request_schema.json exactly 145 | 146 | Task 2: Add getFiles case to request handler 147 | MODIFY src/index.ts: 148 | - FIND: switch (request.params.name) { 149 | - ADD: case 'getFiles': return await this.handleGetFiles(request.params.arguments); 150 | - PATTERN: Follow existing case statements for other tools 151 | 152 | Task 3: Implement handleGetFiles method 153 | CREATE new private method in FileContextServer class: 154 | - PATTERN: Follow handleReadFile method structure 155 | - VALIDATE: Use existing validateAccess for security 156 | - READ: Use existing readFileWithEncoding and getFileMetadata methods 157 | - ERROR: Handle FileOperationError gracefully, continue with other files 158 | - RETURN: Use createJsonResponse with array matching response schema 159 | 160 | Task 4: Test implementation 161 | RUN validation commands: 162 | - npm run build (compile TypeScript) 163 | - npm run dev (test server startup) 164 | - npm test (run existing tests) 165 | - Manual test with MCP client or curl 166 | ``` 167 | 168 | ### Per task pseudocode 169 | 170 | ```typescript 171 | // Task 1: Tool schema (add to tools.ts) 172 | { 173 | name: 'getFiles', 174 | description: 'Retrieve multiple files by their paths, returning content and metadata for each file', 175 | inputSchema: { 176 | type: 'object', 177 | 
properties: { 178 | filePathList: { 179 | type: 'array', 180 | description: 'The list of file paths for the file content to return.', 181 | minItems: 1, 182 | items: { 183 | type: 'object', 184 | properties: { 185 | fileName: { 186 | type: 'string', 187 | description: 'Path and file name for the file to be retrieved.' 188 | } 189 | }, 190 | required: ['fileName'] 191 | } 192 | } 193 | }, 194 | required: ['filePathList'] 195 | } 196 | } 197 | 198 | // Task 3: Handler implementation 199 | private async handleGetFiles(args: any) { 200 | const { filePathList } = args; 201 | 202 | if (!Array.isArray(filePathList)) { 203 | throw new McpError(ErrorCode.InvalidParams, 'filePathList must be an array'); 204 | } 205 | 206 | const results: any[] = []; 207 | 208 | // Process each file, handling errors gracefully 209 | for (const fileItem of filePathList) { 210 | const filePath = fileItem.fileName; 211 | 212 | try { 213 | // PATTERN: Use existing security validation 214 | const resolvedPath = await this.validateAccess(filePath); 215 | 216 | // PATTERN: Use existing file reading methods 217 | const metadata = await this.getFileMetadata(resolvedPath); 218 | const { content } = await this.readFileWithEncoding(resolvedPath, 'utf8'); 219 | 220 | // TRANSFORM: Match required response schema 221 | results.push({ 222 | fileName: filePath, 223 | content: content, 224 | fileSize: metadata.size, 225 | lastModifiedDateTime: metadata.modifiedTime 226 | }); 227 | } catch (error) { 228 | // GOTCHA: Don't fail entire request - log error and continue 229 | console.error(`Error reading file ${filePath}:`, error); 230 | 231 | // Optional: include error info in response 232 | results.push({ 233 | fileName: filePath, 234 | content: `Error: ${error.message}`, 235 | fileSize: 0, 236 | lastModifiedDateTime: new Date().toISOString() 237 | }); 238 | } 239 | } 240 | 241 | // PATTERN: Use existing response format method 242 | return this.createJsonResponse(results); 243 | } 244 | ``` 245 | 246 | ### 
Integration Points 247 | ```yaml 248 | SECURITY: 249 | - Uses existing validateAccess() for path security validation 250 | - Leverages existing file access permission checking 251 | 252 | ERROR_HANDLING: 253 | - Uses existing FileOperationError patterns 254 | - Graceful failure - continue processing other files on individual errors 255 | 256 | RESPONSE_FORMAT: 257 | - Uses existing createJsonResponse() method 258 | - Maintains consistency with other tool responses 259 | 260 | FILE_READING: 261 | - Reuses existing readFileWithEncoding() method 262 | - Reuses existing getFileMetadata() method 263 | - Leverages existing encoding detection and caching 264 | ``` 265 | 266 | ## Validation Loop 267 | 268 | ### Level 1: Syntax & Style 269 | ```bash 270 | # Run these FIRST - fix any errors before proceeding 271 | npm run build # TypeScript compilation 272 | # Expected: No compilation errors 273 | 274 | # If errors: Read TypeScript error messages, fix type issues, re-run 275 | ``` 276 | 277 | ### Level 2: Server Functionality 278 | ```bash 279 | # Test server starts successfully 280 | npm run dev 281 | # Expected: "File Context MCP server running on stdio" message 282 | 283 | # If failing: Check console for startup errors, fix and restart 284 | ``` 285 | 286 | ### Level 3: Tool Integration Test 287 | ```typescript 288 | // Manual test using MCP client or tool like curl/postman 289 | // Test case 1: Valid files 290 | { 291 | "method": "tools/call", 292 | "params": { 293 | "name": "getFiles", 294 | "arguments": { 295 | "filePathList": [ 296 | {"fileName": "package.json"}, 297 | {"fileName": "README.md"} 298 | ] 299 | } 300 | } 301 | } 302 | 303 | // Expected response: Array with file objects containing fileName, content, fileSize, lastModifiedDateTime 304 | 305 | // Test case 2: Mix of valid and invalid files 306 | { 307 | "method": "tools/call", 308 | "params": { 309 | "name": "getFiles", 310 | "arguments": { 311 | "filePathList": [ 312 | {"fileName": "package.json"}, 313 | 
{"fileName": "nonexistent.txt"} 314 | ] 315 | } 316 | } 317 | } 318 | 319 | // Expected: Partial success - valid files returned, errors handled gracefully 320 | ``` 321 | 322 | ### Level 4: Unit Tests (if test framework available) 323 | ```bash 324 | npm test 325 | # Expected: All existing tests continue to pass 326 | 327 | # If failing: Fix issues without breaking existing functionality 328 | ``` 329 | 330 | ## Final Validation Checklist 331 | - [ ] TypeScript compiles without errors: `npm run build` 332 | - [ ] Server starts successfully: `npm run dev` 333 | - [ ] Tool appears in tools list when MCP client connects 334 | - [ ] getFiles tool accepts correct input schema 335 | - [ ] Returns response matching examples/getFiles_response_schema.json 336 | - [ ] Handles non-existent files gracefully (doesn't crash) 337 | - [ ] Uses existing security validation patterns 338 | - [ ] Response format matches other tools (JSON with content array) 339 | - [ ] All existing tests still pass: `npm test` 340 | 341 | --- 342 | 343 | ## Anti-Patterns to Avoid 344 | - ❌ Don't create new file reading methods - use existing readFileWithEncoding() 345 | - ❌ Don't skip path validation - always use validateAccess() 346 | - ❌ Don't fail entire request on single file error - handle gracefully 347 | - ❌ Don't create custom response format - use createJsonResponse() 348 | - ❌ Don't hardcode file encoding - use existing encoding detection 349 | - ❌ Don't ignore existing error handling patterns - use FileOperationError 350 | - ❌ Don't modify existing tool behavior - only add new functionality 351 | 352 | ## Confidence Score: 9/10 353 | 354 | High confidence due to: 355 | - Clear existing patterns to follow in codebase 356 | - Well-defined request/response schemas in examples 357 | - Existing file reading infrastructure to leverage 358 | - Established error handling and security patterns 359 | - Comprehensive validation steps defined 360 | 361 | Minor uncertainty on specific MCP client testing 
approach, but manual verification methods are provided. -------------------------------------------------------------------------------- /PRPs/EXAMPLE_multi_agent_prp.md: -------------------------------------------------------------------------------- 1 | name: "Multi-Agent System: Research Agent with Email Draft Sub-Agent" 2 | description: | 3 | 4 | ## Purpose 5 | Build a Pydantic AI multi-agent system where a primary Research Agent uses Brave Search API and has an Email Draft Agent (using Gmail API) as a tool. This demonstrates agent-as-tool pattern with external API integrations. 6 | 7 | ## Core Principles 8 | 1. **Context is King**: Include ALL necessary documentation, examples, and caveats 9 | 2. **Validation Loops**: Provide executable tests/lints the AI can run and fix 10 | 3. **Information Dense**: Use keywords and patterns from the codebase 11 | 4. **Progressive Success**: Start simple, validate, then enhance 12 | 13 | --- 14 | 15 | ## Goal 16 | Create a production-ready multi-agent system where users can research topics via CLI, and the Research Agent can delegate email drafting tasks to an Email Draft Agent. The system should support multiple LLM providers and handle API authentication securely. 
17 | 18 | ## Why 19 | - **Business value**: Automates research and email drafting workflows 20 | - **Integration**: Demonstrates advanced Pydantic AI multi-agent patterns 21 | - **Problems solved**: Reduces manual work for research-based email communications 22 | 23 | ## What 24 | A CLI-based application where: 25 | - Users input research queries 26 | - Research Agent searches using Brave API 27 | - Research Agent can invoke Email Draft Agent to create Gmail drafts 28 | - Results stream back to the user in real-time 29 | 30 | ### Success Criteria 31 | - [ ] Research Agent successfully searches via Brave API 32 | - [ ] Email Agent creates Gmail drafts with proper authentication 33 | - [ ] Research Agent can invoke Email Agent as a tool 34 | - [ ] CLI provides streaming responses with tool visibility 35 | - [ ] All tests pass and code meets quality standards 36 | 37 | ## All Needed Context 38 | 39 | ### Documentation & References 40 | ```yaml 41 | # MUST READ - Include these in your context window 42 | - url: https://ai.pydantic.dev/agents/ 43 | why: Core agent creation patterns 44 | 45 | - url: https://ai.pydantic.dev/multi-agent-applications/ 46 | why: Multi-agent system patterns, especially agent-as-tool 47 | 48 | - url: https://developers.google.com/gmail/api/guides/sending 49 | why: Gmail API authentication and draft creation 50 | 51 | - url: https://api-dashboard.search.brave.com/app/documentation 52 | why: Brave Search API REST endpoints 53 | 54 | - file: examples/agent/agent.py 55 | why: Pattern for agent creation, tool registration, dependencies 56 | 57 | - file: examples/agent/providers.py 58 | why: Multi-provider LLM configuration pattern 59 | 60 | - file: examples/cli.py 61 | why: CLI structure with streaming responses and tool visibility 62 | 63 | - url: https://github.com/googleworkspace/python-samples/blob/main/gmail/snippet/send%20mail/create_draft.py 64 | why: Official Gmail draft creation example 65 | ``` 66 | 67 | ### Current Codebase tree 68 | 
```bash 69 | . 70 | ├── examples/ 71 | │ ├── agent/ 72 | │ │ ├── agent.py 73 | │ │ ├── providers.py 74 | │ │ └── ... 75 | │ └── cli.py 76 | ├── PRPs/ 77 | │ └── templates/ 78 | │ └── prp_base.md 79 | ├── INITIAL.md 80 | ├── CLAUDE.md 81 | └── requirements.txt 82 | ``` 83 | 84 | ### Desired Codebase tree with files to be added 85 | ```bash 86 | . 87 | ├── agents/ 88 | │ ├── __init__.py # Package init 89 | │ ├── research_agent.py # Primary agent with Brave Search 90 | │ ├── email_agent.py # Sub-agent with Gmail capabilities 91 | │ ├── providers.py # LLM provider configuration 92 | │ └── models.py # Pydantic models for data validation 93 | ├── tools/ 94 | │ ├── __init__.py # Package init 95 | │ ├── brave_search.py # Brave Search API integration 96 | │ └── gmail_tool.py # Gmail API integration 97 | ├── config/ 98 | │ ├── __init__.py # Package init 99 | │ └── settings.py # Environment and config management 100 | ├── tests/ 101 | │ ├── __init__.py # Package init 102 | │ ├── test_research_agent.py # Research agent tests 103 | │ ├── test_email_agent.py # Email agent tests 104 | │ ├── test_brave_search.py # Brave search tool tests 105 | │ ├── test_gmail_tool.py # Gmail tool tests 106 | │ └── test_cli.py # CLI tests 107 | ├── cli.py # CLI interface 108 | ├── .env.example # Environment variables template 109 | ├── requirements.txt # Updated dependencies 110 | ├── README.md # Comprehensive documentation 111 | └── credentials/.gitkeep # Directory for Gmail credentials 112 | ``` 113 | 114 | ### Known Gotchas & Library Quirks 115 | ```python 116 | # CRITICAL: Pydantic AI requires async throughout - no sync functions in async context 117 | # CRITICAL: Gmail API requires OAuth2 flow on first run - credentials.json needed 118 | # CRITICAL: Brave API has rate limits - 2000 req/month on free tier 119 | # CRITICAL: Agent-as-tool pattern requires passing ctx.usage for token tracking 120 | # CRITICAL: Gmail drafts need base64 encoding with proper MIME formatting 121 | # CRITICAL: Always 
use absolute imports for cleaner code 122 | # CRITICAL: Store sensitive credentials in .env, never commit them 123 | ``` 124 | 125 | ## Implementation Blueprint 126 | 127 | ### Data models and structure 128 | 129 | ```python 130 | # models.py - Core data structures 131 | from pydantic import BaseModel, Field 132 | from typing import List, Optional 133 | from datetime import datetime 134 | 135 | class ResearchQuery(BaseModel): 136 | query: str = Field(..., description="Research topic to investigate") 137 | max_results: int = Field(10, ge=1, le=50) 138 | include_summary: bool = Field(True) 139 | 140 | class BraveSearchResult(BaseModel): 141 | title: str 142 | url: str 143 | description: str 144 | score: float = Field(0.0, ge=0.0, le=1.0) 145 | 146 | class EmailDraft(BaseModel): 147 | to: List[str] = Field(..., min_items=1) 148 | subject: str = Field(..., min_length=1) 149 | body: str = Field(..., min_length=1) 150 | cc: Optional[List[str]] = None 151 | bcc: Optional[List[str]] = None 152 | 153 | class ResearchEmailRequest(BaseModel): 154 | research_query: str 155 | email_context: str = Field(..., description="Context for email generation") 156 | recipient_email: str 157 | ``` 158 | 159 | ### List of tasks to be completed 160 | 161 | ```yaml 162 | Task 1: Setup Configuration and Environment 163 | CREATE config/settings.py: 164 | - PATTERN: Use pydantic-settings like examples use os.getenv 165 | - Load environment variables with defaults 166 | - Validate required API keys present 167 | 168 | CREATE .env.example: 169 | - Include all required environment variables with descriptions 170 | - Follow pattern from examples/README.md 171 | 172 | Task 2: Implement Brave Search Tool 173 | CREATE tools/brave_search.py: 174 | - PATTERN: Async functions like examples/agent/tools.py 175 | - Simple REST client using httpx (already in requirements) 176 | - Handle rate limits and errors gracefully 177 | - Return structured BraveSearchResult models 178 | 179 | Task 3: Implement Gmail 
Tool 180 | CREATE tools/gmail_tool.py: 181 | - PATTERN: Follow OAuth2 flow from Gmail quickstart 182 | - Store token.json in credentials/ directory 183 | - Create draft with proper MIME encoding 184 | - Handle authentication refresh automatically 185 | 186 | Task 4: Create Email Draft Agent 187 | CREATE agents/email_agent.py: 188 | - PATTERN: Follow examples/agent/agent.py structure 189 | - Use Agent with deps_type pattern 190 | - Register gmail_tool as @agent.tool 191 | - Return EmailDraft model 192 | 193 | Task 5: Create Research Agent 194 | CREATE agents/research_agent.py: 195 | - PATTERN: Multi-agent pattern from Pydantic AI docs 196 | - Register brave_search as tool 197 | - Register email_agent.run() as tool 198 | - Use RunContext for dependency injection 199 | 200 | Task 6: Implement CLI Interface 201 | CREATE cli.py: 202 | - PATTERN: Follow examples/cli.py streaming pattern 203 | - Color-coded output with tool visibility 204 | - Handle async properly with asyncio.run() 205 | - Session management for conversation context 206 | 207 | Task 7: Add Comprehensive Tests 208 | CREATE tests/: 209 | - PATTERN: Mirror examples test structure 210 | - Mock external API calls 211 | - Test happy path, edge cases, errors 212 | - Ensure 80%+ coverage 213 | 214 | Task 8: Create Documentation 215 | CREATE README.md: 216 | - PATTERN: Follow examples/README.md structure 217 | - Include setup, installation, usage 218 | - API key configuration steps 219 | - Architecture diagram 220 | ``` 221 | 222 | ### Per task pseudocode 223 | 224 | ```python 225 | # Task 2: Brave Search Tool 226 | async def search_brave(query: str, api_key: str, count: int = 10) -> List[BraveSearchResult]: 227 | # PATTERN: Use httpx like examples use aiohttp 228 | async with httpx.AsyncClient() as client: 229 | headers = {"X-Subscription-Token": api_key} 230 | params = {"q": query, "count": count} 231 | 232 | # GOTCHA: Brave API returns 401 if API key invalid 233 | response = await client.get( 234 | 
"https://api.search.brave.com/res/v1/web/search", 235 | headers=headers, 236 | params=params, 237 | timeout=30.0 # CRITICAL: Set timeout to avoid hanging 238 | ) 239 | 240 | # PATTERN: Structured error handling 241 | if response.status_code != 200: 242 | raise BraveAPIError(f"API returned {response.status_code}") 243 | 244 | # Parse and validate with Pydantic 245 | data = response.json() 246 | return [BraveSearchResult(**result) for result in data.get("web", {}).get("results", [])] 247 | 248 | # Task 5: Research Agent with Email Agent as Tool 249 | @research_agent.tool 250 | async def create_email_draft( 251 | ctx: RunContext[AgentDependencies], 252 | recipient: str, 253 | subject: str, 254 | context: str 255 | ) -> str: 256 | """Create email draft based on research context.""" 257 | # CRITICAL: Pass usage for token tracking 258 | result = await email_agent.run( 259 | f"Create an email to {recipient} about: {context}", 260 | deps=EmailAgentDeps(subject=subject), 261 | usage=ctx.usage # PATTERN from multi-agent docs 262 | ) 263 | 264 | return f"Draft created with ID: {result.data}" 265 | ``` 266 | 267 | ### Integration Points 268 | ```yaml 269 | ENVIRONMENT: 270 | - add to: .env 271 | - vars: | 272 | # LLM Configuration 273 | LLM_PROVIDER=openai 274 | LLM_API_KEY=sk-... 275 | LLM_MODEL=gpt-4 276 | 277 | # Brave Search 278 | BRAVE_API_KEY=BSA... 279 | 280 | # Gmail (path to credentials.json) 281 | GMAIL_CREDENTIALS_PATH=./credentials/credentials.json 282 | 283 | CONFIG: 284 | - Gmail OAuth: First run opens browser for authorization 285 | - Token storage: ./credentials/token.json (auto-created) 286 | 287 | DEPENDENCIES: 288 | - Update requirements.txt with: 289 | - google-api-python-client 290 | - google-auth-httplib2 291 | - google-auth-oauthlib 292 | ``` 293 | 294 | ## Validation Loop 295 | 296 | ### Level 1: Syntax & Style 297 | ```bash 298 | # Run these FIRST - fix any errors before proceeding 299 | ruff check . --fix # Auto-fix style issues 300 | mypy . 
# Type checking 301 | 302 | # Expected: No errors. If errors, READ and fix. 303 | ``` 304 | 305 | ### Level 2: Unit Tests 306 | ```python 307 | # test_research_agent.py 308 | async def test_research_with_brave(): 309 | """Test research agent searches correctly""" 310 | agent = create_research_agent() 311 | result = await agent.run("AI safety research") 312 | assert result.data 313 | assert len(result.data) > 0 314 | 315 | async def test_research_creates_email(): 316 | """Test research agent can invoke email agent""" 317 | agent = create_research_agent() 318 | result = await agent.run( 319 | "Research AI safety and draft email to john@example.com" 320 | ) 321 | assert "draft_id" in result.data 322 | 323 | # test_email_agent.py 324 | def test_gmail_authentication(monkeypatch): 325 | """Test Gmail OAuth flow handling""" 326 | monkeypatch.setenv("GMAIL_CREDENTIALS_PATH", "test_creds.json") 327 | tool = GmailTool() 328 | assert tool.service is not None 329 | 330 | async def test_create_draft(): 331 | """Test draft creation with proper encoding""" 332 | agent = create_email_agent() 333 | result = await agent.run( 334 | "Create email to test@example.com about AI research" 335 | ) 336 | assert result.data.get("draft_id") 337 | ``` 338 | 339 | ```bash 340 | # Run tests iteratively until passing: 341 | pytest tests/ -v --cov=agents --cov=tools --cov-report=term-missing 342 | 343 | # If failing: Debug specific test, fix code, re-run 344 | ``` 345 | 346 | ### Level 3: Integration Test 347 | ```bash 348 | # Test CLI interaction 349 | python cli.py 350 | 351 | # Expected interaction: 352 | # You: Research latest AI safety developments 353 | # 🤖 Assistant: [Streams research results] 354 | # 🛠 Tools Used: 355 | # 1. brave_search (query='AI safety developments', limit=10) 356 | # 357 | # You: Create an email draft about this to john@example.com 358 | # 🤖 Assistant: [Creates draft] 359 | # 🛠 Tools Used: 360 | # 1. create_email_draft (recipient='john@example.com', ...) 
361 | 362 | # Check Gmail drafts folder for created draft 363 | ``` 364 | 365 | ## Final Validation Checklist 366 | - [ ] All tests pass: `pytest tests/ -v` 367 | - [ ] No linting errors: `ruff check .` 368 | - [ ] No type errors: `mypy .` 369 | - [ ] Gmail OAuth flow works (browser opens, token saved) 370 | - [ ] Brave Search returns results 371 | - [ ] Research Agent invokes Email Agent successfully 372 | - [ ] CLI streams responses with tool visibility 373 | - [ ] Error cases handled gracefully 374 | - [ ] README includes clear setup instructions 375 | - [ ] .env.example has all required variables 376 | 377 | --- 378 | 379 | ## Anti-Patterns to Avoid 380 | - ❌ Don't hardcode API keys - use environment variables 381 | - ❌ Don't use sync functions in async agent context 382 | - ❌ Don't skip OAuth flow setup for Gmail 383 | - ❌ Don't ignore rate limits for APIs 384 | - ❌ Don't forget to pass ctx.usage in multi-agent calls 385 | - ❌ Don't commit credentials.json or token.json files 386 | 387 | ## Confidence Score: 9/10 388 | 389 | High confidence due to: 390 | - Clear examples to follow from the codebase 391 | - Well-documented external APIs 392 | - Established patterns for multi-agent systems 393 | - Comprehensive validation gates 394 | 395 | Minor uncertainty on Gmail OAuth first-time setup UX, but documentation provides clear guidance. 
--------------------------------------------------------------------------------
/src/services/CodeAnalysisService.ts:
--------------------------------------------------------------------------------
import { promises as fs } from 'fs';
import * as path from 'path';
import { exec } from 'child_process';
import { promisify } from 'util';
import * as parser from '@typescript-eslint/parser';
import { AST_NODE_TYPES, TSESTree } from '@typescript-eslint/types';
import { FileContent } from '../types.js';
import { LoggingService } from './LoggingService.js';

const execAsync = promisify(exec);

/** One finding reported by an external security scanner (bandit, eslint, ...). */
export interface SecurityIssue {
  type: string;
  severity: 'low' | 'medium' | 'high' | 'critical';
  description: string;
  line?: number;
  column?: number;
}

/** One style/lint violation reported by an external style tool. */
export interface StyleViolation {
  rule: string;
  message: string;
  line: number;
  column: number;
}

/** Aggregate complexity figures for a whole file. */
export interface ComplexityMetrics {
  cyclomaticComplexity: number;
  maintainabilityIndex: number;
  linesOfCode: number;
  numberOfFunctions: number;
  branchCount: number;
  returnCount: number;
  maxNestingDepth: number;
  averageFunctionComplexity: number;
  functionMetrics: FunctionMetrics[];
}

/** Complexity figures for a single function/method. */
export interface FunctionMetrics {
  name: string;
  startLine: number;
  endLine: number;
  complexity: number;
  parameterCount: number;
  returnCount: number;
  localVariables: number;
  nestingDepth: number;
}

/** Lightweight, regex-based metrics computed without running external tools. */
export interface CodeMetrics {
  complexity: number;
  lineCount: {
    total: number;
    code: number;
    comment: number;
    blank: number;
  };
  quality: {
    longLines: number;
    duplicateLines: number;
    complexFunctions: number;
  };
  dependencies: string[];
  imports: string[];
  definitions: {
    classes: string[];
    functions: string[];
    variables: string[];
  };
}

/** Result returned by {@link CodeAnalysisService.analyzeCode}. */
export interface CodeAnalysisResult {
  metrics: CodeMetrics;
  outline: string;
  language: string;
  security_issues: any[];
  style_violations: any[];
  complexity_metrics: any;
}

/** Per-language configuration: file extensions and external tool names. */
interface LanguageConfig {
  extensions: string[];
  securityTool?: string;
  styleTool?: string;
  complexityTool?: string;
  parser?: (code: string) => TSESTree.Program;
}

/**
 * Heuristic static-analysis service.
 *
 * Computes line counts, a regex-based cyclomatic-complexity estimate,
 * simple quality signals (long lines, duplicate lines, complex functions),
 * imports/definitions, and a textual outline for a source file.
 *
 * External-tool integration (bandit, eslint, radon, gocyclo, ...) and the
 * AST-based analysis via @typescript-eslint are implemented as private
 * helpers but are not yet wired into {@link analyzeCode} (see TODOs there).
 */
export class CodeAnalysisService {
  private tempDir: string;
  private languageConfigs: Record<string, LanguageConfig>;
  // Lines longer than this count as "long" in the quality metrics.
  private readonly LONG_LINE_THRESHOLD = 100;
  // Functions above this cyclomatic complexity count as "complex".
  private readonly COMPLEX_FUNCTION_THRESHOLD = 10;
  private logger?: LoggingService;

  constructor(logger?: LoggingService) {
    this.logger = logger;
    this.tempDir = path.join(process.cwd(), '.temp');
    this.languageConfigs = {
      python: {
        extensions: ['.py'],
        securityTool: 'bandit',
        styleTool: 'pylint',
        complexityTool: 'radon'
      },
      typescript: {
        extensions: ['.ts', '.tsx'],
        securityTool: 'tsc --noEmit',
        styleTool: 'eslint',
        parser: (code: string) => parser.parse(code, {
          sourceType: 'module',
          ecmaFeatures: { jsx: true }
        })
      },
      javascript: {
        extensions: ['.js', '.jsx'],
        securityTool: 'eslint',
        styleTool: 'eslint',
        parser: (code: string) => parser.parse(code, {
          sourceType: 'module',
          ecmaFeatures: { jsx: true }
        })
      },
      csharp: {
        extensions: ['.cs'],
        securityTool: 'security-code-scan',
        styleTool: 'dotnet format',
        complexityTool: 'ndepend'
      },
      go: {
        extensions: ['.go'],
        securityTool: 'gosec',
        styleTool: 'golint',
        complexityTool: 'gocyclo'
      },
      bash: {
        extensions: ['.sh', '.bash'],
        securityTool: 'shellcheck',
        styleTool: 'shellcheck',
        complexityTool: 'shellcheck'
      }
    };
  }

  /** Create the scratch directory used when invoking external tools. */
  public async initialize(): Promise<void> {
    await fs.mkdir(this.tempDir, { recursive: true });
  }

  /**
   * Analyze one file's content.
   *
   * @param content  Full source text of the file.
   * @param filePath Path used only to detect the language from its extension.
   * @returns Metrics, a human-readable outline, and (placeholder) tool results.
   */
  public async analyzeCode(content: string, filePath: string): Promise<CodeAnalysisResult> {
    const ext = path.extname(filePath).toLowerCase();
    const language = this.getLanguage(ext);

    const metrics = await this.calculateMetrics(content, language);
    // Reuse the metrics we just computed; the original implementation
    // recomputed every metric a second time inside the outline generator.
    const outline = this.generateOutline(metrics);

    // Guard the maintainability ratio against empty content (total === 0).
    const totalLines = metrics.lineCount.total;
    const qualityPenalty = totalLines > 0
      ? (metrics.quality.longLines + metrics.quality.duplicateLines) / totalLines * 100
      : 0;

    return {
      metrics,
      outline,
      language,
      security_issues: [], // TODO: wire up runSecurityAnalysis()
      style_violations: [], // TODO: wire up runStyleAnalysis()
      complexity_metrics: {
        cyclomaticComplexity: metrics.complexity,
        linesOfCode: metrics.lineCount.code,
        maintainabilityIndex: 100 - qualityPenalty
      }
    };
  }

  /** Map a file extension (with leading dot, lowercase) to a language id. */
  private getLanguage(ext: string): string {
    const map: Record<string, string> = {
      '.ts': 'typescript',
      '.tsx': 'typescript',
      '.js': 'javascript',
      '.jsx': 'javascript',
      '.py': 'python',
      '.go': 'go',
      '.java': 'java',
      '.cs': 'csharp',
      '.cpp': 'cpp',
      '.c': 'c',
      '.rb': 'ruby'
    };
    return map[ext] || 'unknown';
  }

  /** Compute all regex-based metrics for one file. */
  private async calculateMetrics(content: string, language: string): Promise<CodeMetrics> {
    const lines = content.split('\n');

    const lineCount = this.calculateLineCount(lines, language);
    const complexity = this.calculateComplexity(content, language);
    const quality = this.calculateQualityMetrics(lines);
    const { imports, dependencies } = this.extractDependencies(content, language);
    const definitions = this.extractDefinitions(content, language);

    return {
      complexity,
      lineCount,
      quality,
      dependencies,
      imports,
      definitions
    };
  }

  /**
   * Classify each line as code / comment / blank.
   * Multi-line comment tracking is line-based: a line that both opens and
   * closes a block comment mid-line is still counted as a comment line.
   */
  private calculateLineCount(lines: string[], language: string): CodeMetrics['lineCount'] {
    let code = 0;
    let comment = 0;
    let blank = 0;
    let inMultilineComment = false;

    const commentStart = this.getCommentPatterns(language);

    for (const line of lines) {
      const trimmed = line.trim();

      if (!trimmed) {
        blank++;
        continue;
      }

      if (inMultilineComment) {
        comment++;
        if (commentStart.multiEnd && trimmed.includes(commentStart.multiEnd)) {
          inMultilineComment = false;
        }
        continue;
      }

      if (commentStart.multi && trimmed.startsWith(commentStart.multi)) {
        comment++;
        inMultilineComment = true;
        continue;
      }

      if (commentStart.single.some(pattern => trimmed.startsWith(pattern))) {
        comment++;
      } else {
        code++;
      }
    }

    return {
      total: lines.length,
      code,
      comment,
      blank
    };
  }

  /**
   * Rough cyclomatic complexity: 1 + one for each branch keyword/operator.
   * Keyword patterns are language-agnostic; `language` is kept for future
   * per-language tuning.
   */
  private calculateComplexity(content: string, language: string): number {
    let complexity = 1;
    const patterns = [
      /\bif\b/g,
      /\belse\b/g,
      /\bwhile\b/g,
      /\bfor\b/g,
      /\bforeach\b/g,
      /\bcase\b/g,
      /\bcatch\b/g,
      // NOTE: the original used /\b\|\|\b/ and /\b&&\b/ — `\b` next to the
      // non-word characters '|' and '&' can never match, so logical
      // operators were silently never counted. Match them directly.
      /\|\|/g,
      /&&/g,
      /\?/g
    ];

    patterns.forEach(pattern => {
      const matches = content.match(pattern);
      if (matches) {
        complexity += matches.length;
      }
    });

    return complexity;
  }

  /** Long lines, duplicated non-blank lines, and complex-function count. */
  private calculateQualityMetrics(lines: string[]): CodeMetrics['quality'] {
    const longLines = lines.filter(line => line.length > this.LONG_LINE_THRESHOLD).length;

    // Simple duplicate line detection: every repeat of an already-seen
    // trimmed, non-blank line counts once.
    const lineSet = new Set<string>();
    let duplicateLines = 0;
    lines.forEach(line => {
      const trimmed = line.trim();
      if (trimmed && lineSet.has(trimmed)) {
        duplicateLines++;
      } else {
        lineSet.add(trimmed);
      }
    });

    // Count complex functions based on line count and complexity.
    const complexFunctions = this.countComplexFunctions(lines.join('\n'));

    return {
      longLines,
      duplicateLines,
      complexFunctions
    };
  }

  /**
   * Count `function name(...) { ... }` bodies whose complexity exceeds the
   * threshold. LIMITATION: `[^}]*` stops at the first '}', so functions with
   * nested braces are truncated (their complexity is under-estimated).
   */
  private countComplexFunctions(content: string): number {
    const functionMatches = content.match(/\bfunction\s+\w+\s*\([^)]*\)\s*{[^}]*}/g) || [];
    return functionMatches.filter(func => {
      const complexity = this.calculateComplexity(func, 'unknown');
      return complexity > this.COMPLEX_FUNCTION_THRESHOLD;
    }).length;
  }

  /**
   * Extract import targets. For JS/TS, `import ... from 'x'` goes into
   * `imports` and `require('x')` into `dependencies`; for Python both the
   * `from` module and the imported name go into `imports`.
   */
  private extractDependencies(content: string, language: string): { imports: string[], dependencies: string[] } {
    const imports: string[] = [];
    const dependencies: string[] = [];

    switch (language) {
      case 'typescript':
      case 'javascript': {
        const importMatches = content.match(/import\s+.*\s+from\s+['"]([^'"]+)['"]/g) || [];
        const requireMatches = content.match(/require\s*\(\s*['"]([^'"]+)['"]\s*\)/g) || [];

        importMatches.forEach(match => {
          const [, importPath] = match.match(/from\s+['"]([^'"]+)['"]/) || [];
          if (importPath) imports.push(importPath);
        });

        requireMatches.forEach(match => {
          const [, requirePath] = match.match(/require\s*\(\s*['"]([^'"]+)['"]\s*\)/) || [];
          if (requirePath) dependencies.push(requirePath);
        });
        break;
      }

      case 'python': {
        const pythonImports = content.match(/(?:from\s+(\S+)\s+)?import\s+(\S+)(?:\s+as\s+\S+)?/g) || [];
        pythonImports.forEach(match => {
          const [, from, module] = match.match(/(?:from\s+(\S+)\s+)?import\s+(\S+)/) || [];
          if (from) imports.push(from);
          if (module) imports.push(module);
        });
        break;
      }
    }

    return { imports, dependencies };
  }

  /** Extract class, function, and variable names via per-language regexes. */
  private extractDefinitions(content: string, language: string): CodeMetrics['definitions'] {
    const definitions: CodeMetrics['definitions'] = {
      classes: [],
      functions: [],
      variables: []
    };

    switch (language) {
      case 'typescript':
      case 'javascript': {
        // Classes
        const classMatches = content.match(/class\s+(\w+)/g) || [];
        definitions.classes = classMatches.map(match => match.split(/\s+/)[1]);

        // Functions (declarations and const/let/var = function | arrow)
        const functionMatches = content.match(/(?:function|const|let|var)\s+(\w+)\s*(?:=\s*(?:function|\([^)]*\)\s*=>)|\([^)]*\))/g) || [];
        definitions.functions = functionMatches.map(match => {
          const [, name] = match.match(/(?:function|const|let|var)\s+(\w+)/) || [];
          return name;
        }).filter(Boolean);

        // Variables
        const varMatches = content.match(/(?:const|let|var)\s+(\w+)\s*=/g) || [];
        definitions.variables = varMatches.map(match => {
          const [, name] = match.match(/(?:const|let|var)\s+(\w+)/) || [];
          return name;
        }).filter(Boolean);
        break;
      }

      case 'python': {
        // Classes
        const pyClassMatches = content.match(/class\s+(\w+)(?:\([^)]*\))?:/g) || [];
        definitions.classes = pyClassMatches.map(match => {
          const [, name] = match.match(/class\s+(\w+)/) || [];
          return name;
        }).filter(Boolean);

        // Functions
        const pyFuncMatches = content.match(/def\s+(\w+)\s*\([^)]*\):/g) || [];
        definitions.functions = pyFuncMatches.map(match => {
          const [, name] = match.match(/def\s+(\w+)/) || [];
          return name;
        }).filter(Boolean);

        // Variables (assignment but not '==')
        const pyVarMatches = content.match(/(\w+)\s*=(?!=)/g) || [];
        definitions.variables = pyVarMatches.map(match => {
          const [, name] = match.match(/(\w+)\s*=/) || [];
          return name;
        }).filter(Boolean);
        break;
      }
    }

    return definitions;
  }

  /** Comment delimiters per language; C-style is the default fallback. */
  private getCommentPatterns(language: string): { single: string[], multi?: string, multiEnd?: string } {
    switch (language) {
      case 'typescript':
      case 'javascript':
        return {
          single: ['//'],
          multi: '/*',
          multiEnd: '*/'
        };
      case 'python':
        return {
          single: ['#']
        };
      case 'ruby':
        return {
          single: ['#']
        };
      default:
        return {
          single: ['//'],
          multi: '/*',
          multiEnd: '*/'
        };
    }
  }

  /** Render a human-readable outline from already-computed metrics. */
  private generateOutline(metrics: CodeMetrics): string {
    const sections: string[] = [];

    // Imports section
    if (metrics.imports.length > 0) {
      sections.push('Imports:', ...metrics.imports.map(imp => `  - ${imp}`));
    }

    // Definitions sections
    if (metrics.definitions.classes.length > 0) {
      sections.push('\nClasses:', ...metrics.definitions.classes.map(cls => `  - ${cls}`));
    }

    if (metrics.definitions.functions.length > 0) {
      sections.push('\nFunctions:', ...metrics.definitions.functions.map(func => `  - ${func}`));
    }

    // Metrics section
    sections.push('\nMetrics:',
      `  Lines: ${metrics.lineCount.total} (${metrics.lineCount.code} code, ${metrics.lineCount.comment} comments, ${metrics.lineCount.blank} blank)`,
      `  Complexity: ${metrics.complexity}`,
      `  Quality Issues:`,
      `    - ${metrics.quality.longLines} long lines`,
      `    - ${metrics.quality.duplicateLines} duplicate lines`,
      `    - ${metrics.quality.complexFunctions} complex functions`
    );

    return sections.join('\n');
  }

  /** Walk a @typescript-eslint AST and aggregate complexity metrics. */
  private analyzeAst(ast: TSESTree.Node): ComplexityMetrics {
    const functionMetrics: FunctionMetrics[] = [];
    let totalComplexity = 0;
    let maxNestingDepth = 0;
    let branchCount = 0;
    let returnCount = 0;

    const visitNode = (node: TSESTree.Node, depth: number = 0): void => {
      maxNestingDepth = Math.max(maxNestingDepth, depth);

      switch (node.type) {
        case AST_NODE_TYPES.FunctionDeclaration:
        case AST_NODE_TYPES.FunctionExpression:
        case AST_NODE_TYPES.ArrowFunctionExpression:
        case AST_NODE_TYPES.MethodDefinition: {
          const metrics = this.analyzeFunctionNode(node, depth);
          functionMetrics.push(metrics);
          totalComplexity += metrics.complexity;
          break;
        }

        case AST_NODE_TYPES.IfStatement:
        case AST_NODE_TYPES.SwitchCase:
        case AST_NODE_TYPES.ConditionalExpression:
          branchCount++;
          break;

        case AST_NODE_TYPES.ReturnStatement:
          returnCount++;
          break;
      }

      // Recursively visit children (generic walk over all node properties).
      for (const key in node) {
        const child = (node as any)[key];
        if (child && typeof child === 'object') {
          if (Array.isArray(child)) {
            child.forEach(item => {
              if (item && typeof item === 'object' && item.type) {
                visitNode(item as TSESTree.Node, depth + 1);
              }
            });
          } else if (child.type) {
            visitNode(child as TSESTree.Node, depth + 1);
          }
        }
      }
    };

    visitNode(ast);

    const averageFunctionComplexity = functionMetrics.length > 0
      ? totalComplexity / functionMetrics.length
      : 0;

    return {
      cyclomaticComplexity: totalComplexity,
      maintainabilityIndex: this.calculateMaintainabilityIndex(totalComplexity, ast.loc?.end.line || 0),
      linesOfCode: ast.loc?.end.line || 0,
      numberOfFunctions: functionMetrics.length,
      branchCount,
      returnCount,
      maxNestingDepth,
      averageFunctionComplexity,
      functionMetrics
    };
  }

  /**
   * Per-function metrics for one function-like AST node.
   * NOTE(review): the walk also descends into nested functions, so their
   * branches inflate the enclosing function's complexity — TODO confirm
   * whether that is intended.
   */
  private analyzeFunctionNode(node: TSESTree.Node, depth: number): FunctionMetrics {
    let complexity = 1; // Base complexity
    let returnCount = 0;
    let localVariables = 0;

    const visitFunctionNode = (node: TSESTree.Node): void => {
      switch (node.type) {
        case AST_NODE_TYPES.IfStatement:
        case AST_NODE_TYPES.SwitchCase:
        case AST_NODE_TYPES.ConditionalExpression:
        case AST_NODE_TYPES.LogicalExpression:
          complexity++;
          break;

        case AST_NODE_TYPES.ReturnStatement:
          returnCount++;
          break;

        case AST_NODE_TYPES.VariableDeclaration:
          localVariables += node.declarations.length;
          break;
      }

      // Recursively visit children
      for (const key in node) {
        const child = (node as any)[key];
        if (child && typeof child === 'object') {
          if (Array.isArray(child)) {
            child.forEach(item => {
              if (item && typeof item === 'object' && item.type) {
                visitFunctionNode(item as TSESTree.Node);
              }
            });
          } else if (child.type) {
            visitFunctionNode(child as TSESTree.Node);
          }
        }
      }
    };

    visitFunctionNode(node);

    return {
      name: this.getFunctionName(node),
      startLine: node.loc?.start.line || 0,
      endLine: node.loc?.end.line || 0,
      complexity,
      parameterCount: this.getParameterCount(node),
      returnCount,
      localVariables,
      nestingDepth: depth
    };
  }

  /** Best-effort name for a function-like node ('anonymous' if unnamed). */
  private getFunctionName(node: TSESTree.Node): string {
    switch (node.type) {
      case AST_NODE_TYPES.FunctionDeclaration:
        return node.id?.name || 'anonymous';
      case AST_NODE_TYPES.MethodDefinition:
        return node.key.type === AST_NODE_TYPES.Identifier ? node.key.name : 'computed';
      default:
        return 'anonymous';
    }
  }

  /** Parameter count for a function-like node (0 for unknown node types). */
  private getParameterCount(node: TSESTree.Node): number {
    switch (node.type) {
      case AST_NODE_TYPES.FunctionDeclaration:
      case AST_NODE_TYPES.FunctionExpression:
      case AST_NODE_TYPES.ArrowFunctionExpression:
        return node.params.length;
      case AST_NODE_TYPES.MethodDefinition:
        return node.value.params.length;
      default:
        return 0;
    }
  }

  /**
   * Simplified maintainability index (Halstead volume omitted):
   *   171 - 0.23 * complexity - 16.2 * ln(LOC), clamped to [0, 100].
   */
  private calculateMaintainabilityIndex(complexity: number, linesOfCode: number): number {
    // Guard ln(0) = -Infinity for empty input.
    const mi = 171 - (0.23 * complexity) - (16.2 * Math.log(Math.max(1, linesOfCode)));
    return Math.max(0, Math.min(100, mi));
  }

  /** Run the language's security scanner on a file; empty on any failure. */
  private async runSecurityAnalysis(filePath: string, config: LanguageConfig): Promise<SecurityIssue[]> {
    if (!config.securityTool) {
      return [];
    }

    try {
      // Quote the path so spaces don't split the shell argument. The path
      // comes from our own file walker, but quoting also limits accidental
      // shell interpretation of special characters.
      const { stdout } = await execAsync(`${config.securityTool} "${filePath}"`);
      return this.parseSecurityOutput(stdout, config.securityTool);
    } catch (error) {
      await this.logger?.warning('Security analysis failed', {
        filePath,
        securityTool: config.securityTool,
        error: error instanceof Error ? error.message : String(error),
        operation: 'security_analysis'
      });
      return [];
    }
  }

  /** Run the language's style tool on a file; empty on any failure. */
  private async runStyleAnalysis(filePath: string, config: LanguageConfig): Promise<StyleViolation[]> {
    if (!config.styleTool) {
      return [];
    }

    try {
      const { stdout } = await execAsync(`${config.styleTool} "${filePath}"`);
      return this.parseStyleOutput(stdout, config.styleTool);
    } catch (error) {
      await this.logger?.warning('Style analysis failed', {
        filePath,
        styleTool: config.styleTool,
        error: error instanceof Error ? error.message : String(error),
        operation: 'style_analysis'
      });
      return [];
    }
  }

  /** Regex-based fallback when no external complexity tool is available. */
  private getDefaultComplexityMetrics(code: string): ComplexityMetrics {
    const lines = code.split('\n');
    // Word boundaries keep keywords from matching inside identifiers
    // (e.g. 'for' inside 'information'), which the original regexes did.
    const functionMatches = code.match(/\b(?:function|def|func|method)\b/g);
    const branchMatches = code.match(/\b(?:if|else|switch|case|while|for|catch)\b/g);
    const returnMatches = code.match(/\breturn\b/g);

    return {
      cyclomaticComplexity: (branchMatches?.length || 0) + 1,
      maintainabilityIndex: 100,
      linesOfCode: lines.length,
      numberOfFunctions: functionMatches?.length || 0,
      branchCount: branchMatches?.length || 0,
      returnCount: returnMatches?.length || 0,
      maxNestingDepth: 0,
      averageFunctionComplexity: 1,
      functionMetrics: []
    };
  }

  /** Dispatch security-tool stdout to the matching parser. */
  private parseSecurityOutput(output: string, tool: string): SecurityIssue[] {
    switch (tool) {
      case 'bandit':
        return this.parseBanditOutput(output);
      case 'eslint':
        return this.parseEslintOutput(output);
      default:
        return [];
    }
  }

  /** Dispatch style-tool stdout to the matching parser. */
  private parseStyleOutput(output: string, tool: string): StyleViolation[] {
    switch (tool) {
      case 'pylint':
        return this.parsePylintOutput(output);
      case 'eslint':
        return this.parseEslintOutput(output).map(issue => ({
          rule: issue.type,
          message: issue.description,
          line: issue.line || 0,
          column: issue.column || 0
        }));
      default:
        return [];
    }
  }

  /** Parse complexity-tool stdout; falls back to default metrics on error. */
  private parseComplexityOutput(output: string, tool: string): ComplexityMetrics {
    switch (tool) {
      case 'radon':
        try {
          const results = JSON.parse(output);
          const entryCount = Object.keys(results).length;
          const totalComplexity = Object.values(results).reduce((sum: number, file: any) => {
            return sum + file.complexity;
          }, 0);

          return {
            cyclomaticComplexity: totalComplexity,
            maintainabilityIndex: 100 - (totalComplexity * 5),
            linesOfCode: 0,
            numberOfFunctions: entryCount,
            branchCount: 0,
            returnCount: 0,
            maxNestingDepth: 0,
            // Guard against empty output producing NaN.
            averageFunctionComplexity: entryCount > 0 ? totalComplexity / entryCount : 0,
            functionMetrics: []
          };
        } catch {
          return this.getDefaultComplexityMetrics('');
        }
      case 'gocyclo':
        try {
          // gocyclo lines look like: "<complexity> <package> <function> <file:line>"
          const lines = output.split('\n').filter(Boolean);
          const metrics = lines.map(line => {
            const [complexity, , name] = line.split(' ');
            return {
              name,
              complexity: parseInt(complexity, 10),
              startLine: 0,
              endLine: 0,
              parameterCount: 0,
              returnCount: 0,
              localVariables: 0,
              nestingDepth: 0
            };
          });

          const totalComplexity = metrics.reduce((sum, m) => sum + m.complexity, 0);
          return {
            cyclomaticComplexity: totalComplexity,
            maintainabilityIndex: this.calculateMaintainabilityIndex(totalComplexity, 0),
            linesOfCode: 0,
            numberOfFunctions: metrics.length,
            branchCount: 0,
            returnCount: 0,
            maxNestingDepth: 0,
            // Guard against empty output producing NaN.
            averageFunctionComplexity: metrics.length > 0 ? totalComplexity / metrics.length : 0,
            functionMetrics: metrics
          };
        } catch {
          return this.getDefaultComplexityMetrics('');
        }
      default:
        return this.getDefaultComplexityMetrics('');
    }
  }

  /** Parse `bandit -f json` output; empty on malformed JSON. */
  private parseBanditOutput(output: string): SecurityIssue[] {
    try {
      const results = JSON.parse(output);
      return results.results.map((result: any) => ({
        type: result.test_id,
        severity: result.issue_severity,
        description: result.issue_text,
        line: result.line_number
      }));
    } catch {
      return [];
    }
  }

  /** Parse `pylint --output-format=json` output; empty on malformed JSON. */
  private parsePylintOutput(output: string): StyleViolation[] {
    try {
      const results = JSON.parse(output);
      return results.map((result: any) => ({
        rule: result.symbol,
        message: result.message,
        line: result.line,
        column: result.column
      }));
    } catch {
      return [];
    }
  }

  /**
   * Parse ESLint JSON output; empty on malformed JSON.
   * NOTE(review): `eslint -f json` emits an array of file results each
   * containing a `messages` array; this parser assumes a flat message
   * array — verify against the actual invocation before wiring it up.
   */
  private parseEslintOutput(output: string): SecurityIssue[] {
    try {
      const results = JSON.parse(output);
      return results.map((result: {
        ruleId: string;
        severity: number;
        message: string;
        line: number;
        column: number;
      }) => ({
        type: result.ruleId,
        // ESLint severity: 2 = error, 1 = warning.
        severity: result.severity === 2 ? 'high' : result.severity === 1 ? 'medium' : 'low',
        description: result.message,
        line: result.line,
        column: result.column
      }));
    } catch {
      return [];
    }
  }
}
--------------------------------------------------------------------------------