├── .gitignore ├── requirements.txt ├── __pycache__ └── gemini_mcp.cpython-313.pyc ├── .env.example ├── start_server.sh ├── LICENSE ├── README.md └── gemini_mcp.py /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | .env 3 | *.log 4 | .DS_Store 5 | CLAUDE.md -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | google-genai>=0.5.0 2 | mcp>=1.0.0 3 | pydantic>=2.0.0 -------------------------------------------------------------------------------- /__pycache__/gemini_mcp.cpython-313.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/peterkrueck/mcp-gemini-assistant/HEAD/__pycache__/gemini_mcp.cpython-313.pyc -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | # Copy this to .env and add your actual API key 2 | GEMINI_API_KEY=your-gemini-api-key-here 3 | 4 | # Optional: Use a different model (default: gemini-2.5-pro) 5 | # GEMINI_MODEL=gemini-2.5-pro 6 | 7 | # Optional: Custom system prompt for Gemini behavior 8 | # SYSTEM_PROMPT="Your custom system prompt here..." 
-------------------------------------------------------------------------------- /start_server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cd "$(dirname "$0")" 3 | 4 | # Load environment variables from .env file if it exists 5 | if [ -f .env ]; then 6 | export $(grep -v '^#' .env | xargs) 7 | fi 8 | 9 | # Check if GEMINI_API_KEY is set 10 | if [ -z "$GEMINI_API_KEY" ]; then 11 | echo "Error: GEMINI_API_KEY environment variable must be set" >&2 12 | echo "Please set it in .env file or as environment variable" >&2 13 | exit 1 14 | fi 15 | 16 | exec ./venv/bin/python gemini_mcp.py -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 peterkrueck 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Gemini Coding Assistant MCP Server 2 | 3 | A powerful MCP server that allows Claude Code to consult Gemini for complex coding problems with full code context and conversation persistence. 4 | 5 | > **Note**: This server works standalone but is highly recommended to use with the [Claude Code Development Kit](https://github.com/peterkrueck/Claude-Code-Development-Kit) for enhanced automation and context management. 6 | 7 | ## Key Features 8 | 9 | - **Session Management**: Maintain conversation context across multiple queries 10 | - **File Attachments**: Read and include actual code files in conversations 11 | - **Hybrid Context**: Combine text-based `code_context` with file attachments 12 | - **Follow-up Questions**: Ask follow-up questions without resending code context 13 | - **Context Caching**: Code context and file content are cached per session 14 | - **Automatic Processing**: Files are processed and formatted automatically 15 | - **Multiple Sessions**: Run multiple parallel conversations for different problems 16 | - **Session Expiry**: Automatic cleanup of inactive sessions after 1 hour 17 | - **Latest Model**: Uses Gemini 2.5 Pro (stable) by default 18 | 19 | ## Integration with Claude Code Development Kit 20 | 21 | While this MCP server works standalone, it is **highly recommended and optimized** to use with the [Claude Code Development Kit](https://github.com/peterkrueck/Claude-Code-Development-Kit). 22 | 23 | ### Enhanced Features with Development Kit 24 | 25 | The Development Kit transforms Claude Code into an orchestrated development environment that seamlessly integrates with this Gemini MCP server: 26 | 27 | 1. 
**Automated Context Injection**: The `gemini-context-injector.sh` hook automatically attaches project-specific context files (MCP-ASSISTANT-RULES.md, project-structure.md) to new Gemini sessions 28 | 2. **Multi-Agent Orchestration**: Complex commands spawn specialized agents that can consult Gemini for architectural decisions and design patterns 29 | 3. **Enhanced System Prompt**: The MCP server's system prompt is designed to work with the Development Kit's context injection system 30 | 4. **Security Scanning**: The `mcp-security-scan.sh` hook prevents sensitive data from being sent to external AI services 31 | 5. **Seamless Integration**: Commands like `/full-context` automatically leverage Gemini for complex problems 32 | 33 | ### Quick Setup with Development Kit 34 | 35 | 1. Install the [Claude Code Development Kit](https://github.com/peterkrueck/Claude-Code-Development-Kit) 36 | 2. Configure this MCP server as described in the installation section below 37 | 3. The Development Kit's hooks will automatically enhance your Gemini interactions 38 | 39 | ## Purpose 40 | 41 | When Claude Code encounters difficult problems or needs a second opinion, it can: 42 | - Send complete code files by reading them from the local filesystem 43 | - Include text-based code context alongside file attachments 44 | - Have multi-turn conversations about the same problem 45 | - Get different perspectives without repeating context 46 | - Work on multiple problems in parallel sessions 47 | - Process files locally and include content in conversations 48 | 49 | ## Installation 50 | 51 | 1. Clone this repository 52 | 2. Create a Python virtual environment: 53 | ```bash 54 | python3 -m venv venv 55 | source venv/bin/activate # On Windows: venv\Scripts\activate 56 | ``` 57 | 3. Install dependencies: 58 | ```bash 59 | pip install -r requirements.txt 60 | ``` 61 | 4. 
Copy `.env.example` to `.env` and add your Gemini API key: 62 | ```bash 63 | cp .env.example .env 64 | # Edit .env file and set your GEMINI_API_KEY 65 | ``` 66 | 5. Add to Claude Code: 67 | ```bash 68 | claude mcp add gemini-coding -s user -- /path/to/gemini-mcp/start_server.sh 69 | ``` 70 | Replace `/path/to/gemini-mcp/` with the actual path to this directory. 71 | 72 | ## Tools Available 73 | 74 | ### 1. `consult_gemini` 75 | Start or continue a conversation with Gemini about complex coding problems. 76 | 77 | **Parameters:** 78 | - `session_id` (optional): Continue a previous conversation 79 | - `problem_description`: Description of the problem (required for new sessions) 80 | - `code_context`: All relevant code (required for new sessions, cached afterward) 81 | - `attached_files` (optional): Array of file paths to read and include in the conversation 82 | - `file_descriptions` (optional): Object mapping file paths to descriptions 83 | - `specific_question`: The question you want answered 84 | - `additional_context` (optional): Updates or changes since last question 85 | - `preferred_approach`: Type of help needed (solution/review/debug/optimize/explain/follow-up) 86 | 87 | ### 2. `list_sessions` 88 | List all active Gemini consultation sessions. 89 | 90 | ### 3. `end_session` 91 | End a specific session to free up memory. 92 | 93 | ## Usage Examples 94 | 95 | ### Starting a New Conversation (with text code) 96 | ``` 97 | /consult_gemini 98 | problem_description: "I need to implement efficient caching for a React application" 99 | code_context: "[paste entire relevant codebase]" 100 | specific_question: "What's the best approach for implementing LRU cache with React Query?" 
101 | preferred_approach: "solution" 102 | ``` 103 | 104 | ### Starting a New Conversation (with file attachments) 105 | ``` 106 | /consult_gemini 107 | problem_description: "I need to optimize this React component for performance" 108 | attached_files: ["/absolute/path/to/src/components/Dashboard.jsx", "/absolute/path/to/src/hooks/useData.js", "/absolute/path/to/package.json"] 109 | file_descriptions: { 110 | "/absolute/path/to/src/components/Dashboard.jsx": "Main dashboard component with performance issues", 111 | "/absolute/path/to/src/hooks/useData.js": "Custom hook for data fetching", 112 | "/absolute/path/to/package.json": "Project dependencies" 113 | } 114 | specific_question: "How can I improve the rendering performance of this dashboard?" 115 | preferred_approach: "optimize" 116 | ``` 117 | 118 | ### Combining Both Approaches 119 | ``` 120 | /consult_gemini 121 | problem_description: "Complex authentication flow needs debugging" 122 | code_context: "// Additional context or pseudocode here" 123 | attached_files: ["/absolute/path/to/auth/login.js", "/absolute/path/to/middleware/auth.js"] 124 | specific_question: "Why is the token refresh failing?" 125 | preferred_approach: "debug" 126 | ``` 127 | 128 | Response includes a session ID for follow-ups. 129 | 130 | ### Follow-up Question 131 | ``` 132 | /consult_gemini 133 | session_id: "abc123..." 134 | specific_question: "I implemented your suggestion but getting stale data issues. How do I handle cache invalidation?" 135 | additional_context: "Added the LRU cache as suggested, but users see old data after updates" 136 | preferred_approach: "follow-up" 137 | ``` 138 | 139 | ### Managing Sessions 140 | ``` 141 | /list_sessions 142 | # Shows all active sessions with IDs and summaries 143 | 144 | /end_session 145 | session_id: "abc123..." 146 | # Frees up memory for completed conversations 147 | ``` 148 | 149 | ## Best Practices 150 | 151 | 1. 
**Initial Context**: Include ALL relevant code via `code_context` or `attached_files` 152 | 2. **File Organization**: Use `attached_files` for multiple files, `code_context` for snippets 153 | 3. **File Descriptions**: Provide clear descriptions for each attached file 154 | 4. **Follow-ups**: Use the session ID to continue conversations 155 | 5. **Additional Context**: When asking follow-ups, explain what changed 156 | 6. **Session Management**: End sessions when done to free memory and clean up files 157 | 7. **Multiple Problems**: Use different sessions for unrelated problems 158 | 8. **File Types**: Supports JavaScript, Python, TypeScript, JSON, and other text-based files 159 | 160 | ## Testing the Server 161 | 162 | You can test the server directly before adding it to Claude Code: 163 | 164 | ```bash 165 | # Make sure your .env file has a valid GEMINI_API_KEY 166 | ./start_server.sh 167 | ``` 168 | 169 | The server will start and display: 170 | ``` 171 | Gemini Coding Assistant MCP Server v3.0 running (Python) 172 | Features: Session management, file attachments, context persistence, follow-up questions 173 | Ready to help with complex coding problems! 174 | ``` 175 | 176 | ## Context Limits 177 | 178 | - Maximum combined input: ~50,000 characters per message 179 | - Maximum response: 8,192 tokens (~16,000 characters) 180 | - Session timeout: 1 hour of inactivity 181 | - Rate limiting: 1 second between requests 182 | 183 | ## How It Works 184 | 185 | 1. **First Message**: Creates a new session, caches code context 186 | 2. **Follow-ups**: Reuses cached context, maintains conversation history 187 | 3. **Session Storage**: In-memory storage (use Redis for production) 188 | 4. 
**Cleanup**: Automatic expiry after 1 hour of inactivity 189 | 190 | ## Advantages Over Stateless Design 191 | 192 | - **Efficiency**: Code context sent only once per session 193 | - **Context**: Gemini remembers previous questions and answers 194 | - **Natural Flow**: Have real conversations about complex problems 195 | - **Cost Savings**: Reduced token usage for follow-up questions 196 | 197 | ## Security 198 | 199 | - API key is never exposed 200 | - Rate limiting prevents abuse 201 | - Sessions expire automatically 202 | - No persistent storage of code 203 | - When used with Claude Code Development Kit, additional security scanning prevents sensitive data leakage 204 | 205 | ## Version History 206 | 207 | - v3.0.0: Enhanced system prompt for Claude Code Development Kit integration 208 | - v2.1.0: Added file attachment system with automatic cleanup 209 | - v2.0.0: Added session management and follow-up support 210 | - v1.0.0: Initial stateless implementation 211 | 212 | ## Connect 213 | 214 | Feel free to connect with me on [LinkedIn](https://www.linkedin.com/in/peterkrueck/) if you have questions, need clarification, or wish to provide feedback. 
-------------------------------------------------------------------------------- /gemini_mcp.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import asyncio 4 | import os 5 | import sys 6 | import time 7 | import mimetypes 8 | from pathlib import Path 9 | from typing import Dict, List, Optional, Any 10 | from dataclasses import dataclass 11 | from datetime import datetime, timedelta 12 | 13 | from google import genai 14 | from google.genai import types 15 | from mcp.server.fastmcp import FastMCP 16 | from mcp.types import TextContent 17 | import json 18 | 19 | # Configure environment 20 | if not os.getenv('GEMINI_API_KEY'): 21 | print("Error: GEMINI_API_KEY environment variable is required", file=sys.stderr) 22 | sys.exit(1) 23 | 24 | # Initialize client 25 | client = genai.Client(api_key=os.getenv('GEMINI_API_KEY')) 26 | 27 | # Configuration 28 | MODEL_NAME = os.getenv('GEMINI_MODEL', 'gemini-2.5-pro') 29 | SESSION_TTL = 3600 # 1 hour in seconds 30 | 31 | # Default system prompt for Gemini 32 | DEFAULT_SYSTEM_PROMPT = """You are an expert technical advisor helping Claude (another AI) solve complex programming problems through thoughtful analysis and genuine technical dialogue. 33 | 34 | **IMPORTANT CONTEXT CHECK**: First, examine any project-specific context files that have been attached to this session (e.g., MCP-ASSISTANT-RULES.md, project-structure.md, README.md). If such files are available, incorporate their guidelines, project standards, and architectural principles into your approach. If no project context is provided, proceed directly with the analysis. 
35 | 36 | ## Your Role as Technical Advisor 37 | You provide: 38 | - Deep analysis and architectural insights 39 | - Thoughtful discussions about implementation approaches 40 | - Clarifying questions to understand requirements fully 41 | - Constructive challenges to assumptions when you see potential issues 42 | - Context from comprehensive code analysis 43 | - Alternative solutions with clear trade-offs 44 | 45 | ## Communication Philosophy 46 | Be conversational and engaging - you're a thinking partner, not just an analyzer: 47 | - Engage in real dialogue, don't just dump analysis 48 | - Ask clarifying questions when requirements are ambiguous 49 | - Challenge ideas constructively when you see better approaches 50 | - Iterate through discussion before settling on solutions 51 | - Think deeply about problems before responding 52 | - Be genuinely curious about the problem space 53 | 54 | ## Dialogue Patterns for Productive Discussion 55 | - "Before diving into the implementation, could you clarify what the expected behavior should be when..." 56 | - "I see multiple approaches here. What's more important for this use case: [tradeoff A] or [tradeoff B]?" 57 | - "Looking at the existing pattern in [file:line], should we maintain consistency or is there a reason to diverge?" 58 | - "To provide the most relevant analysis, I need to understand: will this feature need to scale to..." 59 | - "I notice [pattern/issue] in the current implementation. Have you considered [alternative]? What constraints led to this approach?" 60 | - "This reminds me of [pattern/problem]. In that context, [approach] worked well because..." 61 | 62 | ## Structured Response Approach 63 | 1. **Initial Understanding**: Briefly confirm what you understand about the problem 64 | 2. **Clarifying Questions**: Ask what you need to know for better analysis (don't assume!) 65 | 3. **Analysis**: Provide detailed examination after gathering context 66 | 4. 
**Recommendations**: Suggest specific approaches with clear trade-offs 67 | 5. **Implementation Details**: Provide complete, working code examples when applicable 68 | 6. **Open Questions**: Continue the conversation where helpful 69 | 70 | ## Technical Analysis Focus 71 | When examining code: 72 | - Identify patterns, potential issues, and optimization opportunities 73 | - Reference specific files, functions, and line numbers (format: file.py:42) 74 | - Explain complex logic and architectural decisions 75 | - Consider security, performance, and maintainability implications 76 | - Think about edge cases, error handling, and failure modes 77 | - Check adherence to project standards (if provided in context files) 78 | - Suggest testing strategies and validation approaches 79 | 80 | ## Collaboration Capabilities 81 | - When you need current information: "I would search for: [specific query] - Claude, could you search for this?" 82 | - When you need to see specific files: "Claude, can you show me [file path]?" 83 | - When you need to run commands: "Claude, please run '[command]' to verify..." 84 | - Be explicit about uncertainty and suggest verification steps 85 | - Request specific diagnostics or logs when debugging 86 | 87 | ## Key Principles 88 | - **Think First**: Take time to understand the problem deeply before suggesting solutions 89 | - **Question Assumptions**: Don't accept requirements at face value if they seem problematic 90 | - **Consider Context**: Always think about how your suggestions fit the broader system 91 | - **Be Honest**: If an approach seems wrong, say so clearly with reasoning 92 | - **Stay Practical**: Balance ideal solutions with pragmatic constraints 93 | - **Remain Curious**: Each problem is an opportunity to learn something new 94 | 95 | Remember: The best solutions emerge from genuine technical dialogue. 
Your goal is to help achieve the best possible implementation through thoughtful analysis, engaging discussion, and collaborative problem-solving.""" 96 | 97 | SYSTEM_PROMPT = os.getenv('SYSTEM_PROMPT', DEFAULT_SYSTEM_PROMPT) 98 | 99 | @dataclass 100 | class ProcessedFile: 101 | """Information about a processed file.""" 102 | file_type: str 103 | file_uri: str 104 | mime_type: str 105 | file_name: str 106 | file_path: str 107 | gemini_file_id: str 108 | 109 | @dataclass 110 | class Session: 111 | """Chat session with Gemini.""" 112 | session_id: str 113 | chat: Any 114 | created: datetime 115 | last_used: datetime 116 | message_count: int 117 | problem_description: Optional[str] = None 118 | code_context: Optional[str] = None 119 | processed_files: Dict[str, ProcessedFile] = None 120 | requested_files: List[str] = None # Track files Gemini has requested 121 | search_queries: List[str] = None # Track searches Gemini has requested 122 | 123 | def __post_init__(self): 124 | if self.processed_files is None: 125 | self.processed_files = {} 126 | if self.requested_files is None: 127 | self.requested_files = [] 128 | if self.search_queries is None: 129 | self.search_queries = [] 130 | 131 | class GeminiMCPServer: 132 | """MCP Server for Gemini file attachment functionality.""" 133 | 134 | def __init__(self): 135 | self.sessions: Dict[str, Session] = {} 136 | self.last_request_time = 0 137 | self.min_time_between_requests = 1.0 # 1 second 138 | self._cleanup_task = None 139 | 140 | def _ensure_cleanup_task_started(self): 141 | """Start cleanup task if not already running.""" 142 | if self._cleanup_task is None or self._cleanup_task.done(): 143 | self._cleanup_task = asyncio.create_task(self._cleanup_sessions()) 144 | 145 | async def _cleanup_sessions(self): 146 | """Periodically clean up expired sessions.""" 147 | while True: 148 | await asyncio.sleep(300) # Check every 5 minutes 149 | now = datetime.now() 150 | expired_sessions = [] 151 | 152 | for session_id, session in 
self.sessions.items(): 153 | if (now - session.last_used).total_seconds() > SESSION_TTL: 154 | expired_sessions.append(session_id) 155 | 156 | for session_id in expired_sessions: 157 | await self._cleanup_session_files(session_id) 158 | del self.sessions[session_id] 159 | print(f"[{datetime.now().isoformat()}] Session {session_id} expired and removed", file=sys.stderr) 160 | 161 | async def _cleanup_session_files(self, session_id: str): 162 | """Clean up uploaded files for a session.""" 163 | if session_id not in self.sessions: 164 | return 165 | 166 | session = self.sessions[session_id] 167 | for file_path, file_info in session.processed_files.items(): 168 | try: 169 | client.files.delete(file_info.gemini_file_id) 170 | print(f"[{datetime.now().isoformat()}] Session {session_id}: Deleted file {file_info.file_name}", file=sys.stderr) 171 | except Exception as e: 172 | print(f"[{datetime.now().isoformat()}] Session {session_id}: Failed to delete file {file_info.file_name}: {e}", file=sys.stderr) 173 | 174 | async def _rate_limit(self): 175 | """Simple rate limiting.""" 176 | now = time.time() 177 | time_since_last = now - self.last_request_time 178 | if time_since_last < self.min_time_between_requests: 179 | await asyncio.sleep(self.min_time_between_requests - time_since_last) 180 | self.last_request_time = time.time() 181 | 182 | async def _process_file(self, file_path: str, session: Session) -> ProcessedFile: 183 | """Upload file to Gemini and return processed file info.""" 184 | # Check if already processed 185 | if file_path in session.processed_files: 186 | return session.processed_files[file_path] 187 | 188 | # Check if file exists 189 | if not os.path.exists(file_path): 190 | raise FileNotFoundError(f"File not found: {file_path}") 191 | 192 | # Get file info 193 | file_name = os.path.basename(file_path) 194 | mime_type, _ = mimetypes.guess_type(file_path) 195 | 196 | # Handle common file extensions that mimetypes doesn't recognize 197 | if not mime_type: 198 
| ext = os.path.splitext(file_path)[1].lower() 199 | mime_type_map = { 200 | '.jsx': 'text/javascript', 201 | '.tsx': 'text/typescript', 202 | '.ts': 'text/typescript', 203 | '.vue': 'text/html', 204 | '.svelte': 'text/html', 205 | '.md': 'text/markdown', 206 | '.json': 'application/json', 207 | '.py': 'text/x-python', 208 | '.js': 'text/javascript', 209 | '.css': 'text/css', 210 | '.html': 'text/html', 211 | '.xml': 'text/xml', 212 | '.yaml': 'text/yaml', 213 | '.yml': 'text/yaml', 214 | '.toml': 'text/plain', 215 | '.ini': 'text/plain', 216 | '.cfg': 'text/plain', 217 | '.conf': 'text/plain', 218 | '.sh': 'text/x-shellscript', 219 | '.bat': 'text/plain', 220 | '.sql': 'text/x-sql' 221 | } 222 | mime_type = mime_type_map.get(ext, 'text/plain') 223 | 224 | print(f"[{datetime.now().isoformat()}] Session {session.session_id}: Uploading file {file_name} ({mime_type})", file=sys.stderr) 225 | 226 | # Upload to Gemini 227 | try: 228 | uploaded_file = client.files.upload(file=file_path) 229 | 230 | # Wait for processing with exponential backoff 231 | wait_intervals = [0.5, 0.5, 1, 1, 2, 3, 5, 8] # Exponential backoff pattern 232 | total_wait = 0 233 | max_wait = 20 # Reduced from 30 seconds 234 | 235 | for interval in wait_intervals: 236 | if uploaded_file.state != 'PROCESSING': 237 | break 238 | 239 | print(f"[{datetime.now().isoformat()}] Session {session.session_id}: File {file_name} is processing... 
({total_wait:.1f}s)", file=sys.stderr) 240 | await asyncio.sleep(interval) 241 | total_wait += interval 242 | uploaded_file = client.files.get(name=uploaded_file.name) 243 | 244 | if total_wait >= max_wait: 245 | break 246 | 247 | if uploaded_file.state == 'PROCESSING': 248 | raise Exception(f"File processing timeout after {max_wait} seconds") 249 | 250 | if uploaded_file.state == 'FAILED': 251 | raise Exception(f"File upload failed: {getattr(uploaded_file, 'error', 'Unknown error')}") 252 | 253 | # Create processed file info 254 | processed_file = ProcessedFile( 255 | file_type='file_data', 256 | file_uri=uploaded_file.uri, 257 | mime_type=uploaded_file.mime_type, 258 | file_name=file_name, 259 | file_path=file_path, 260 | gemini_file_id=uploaded_file.name 261 | ) 262 | 263 | # Store in session 264 | session.processed_files[file_path] = processed_file 265 | 266 | print(f"[{datetime.now().isoformat()}] Session {session.session_id}: File {file_name} uploaded successfully (URI: {uploaded_file.uri})", file=sys.stderr) 267 | return processed_file 268 | 269 | except Exception as e: 270 | raise Exception(f"Failed to process file {file_path}: {e}") 271 | 272 | def _get_or_create_session(self, session_id: Optional[str] = None) -> Session: 273 | """Get existing session or create new one.""" 274 | if not session_id: 275 | import uuid 276 | session_id = str(uuid.uuid4()) 277 | 278 | if session_id in self.sessions: 279 | session = self.sessions[session_id] 280 | session.last_used = datetime.now() 281 | return session 282 | 283 | # Create new session with system prompt 284 | chat = client.chats.create( 285 | model=MODEL_NAME, 286 | config=types.GenerateContentConfig( 287 | temperature=0.2, 288 | max_output_tokens=8192, 289 | top_p=0.95, 290 | top_k=40, 291 | system_instruction=SYSTEM_PROMPT, 292 | ) 293 | ) 294 | 295 | session = Session( 296 | session_id=session_id, 297 | chat=chat, 298 | created=datetime.now(), 299 | last_used=datetime.now(), 300 | message_count=0 301 | ) 302 
| 303 | self.sessions[session_id] = session 304 | print(f"[{datetime.now().isoformat()}] New session created: {session_id}", file=sys.stderr) 305 | return session 306 | 307 | def _extract_requests_from_response(self, response_text: str, session: Session): 308 | """Extract file requests and search queries from Gemini's response.""" 309 | # Track file requests 310 | import re 311 | 312 | # Pattern for file requests 313 | file_patterns = [ 314 | r"show me (?:the )?([^\s]+\.[a-zA-Z]+)", 315 | r"share (?:the )?([^\s]+\.[a-zA-Z]+)", 316 | r"can you (?:show|share) (?:me )?([^\s]+\.[a-zA-Z]+)", 317 | r"(?:I need to see|please provide) ([^\s]+\.[a-zA-Z]+)", 318 | ] 319 | 320 | for pattern in file_patterns: 321 | matches = re.findall(pattern, response_text, re.IGNORECASE) 322 | for match in matches: 323 | if match not in session.requested_files: 324 | session.requested_files.append(match) 325 | 326 | # Pattern for search requests 327 | search_patterns = [ 328 | r"I would search for: ([^\n]+)", 329 | r"search for (?:the )?([^\n]+)", 330 | r"Let me search for ([^\n]+)", 331 | ] 332 | 333 | for pattern in search_patterns: 334 | matches = re.findall(pattern, response_text, re.IGNORECASE) 335 | for match in matches: 336 | if match not in session.search_queries: 337 | session.search_queries.append(match.strip()) 338 | 339 | # Create server instance 340 | mcp = FastMCP("gemini-coding-assistant") 341 | gemini_server = GeminiMCPServer() 342 | 343 | @mcp.tool() 344 | async def consult_gemini( 345 | specific_question: str, 346 | session_id: Optional[str] = None, 347 | problem_description: Optional[str] = None, 348 | code_context: Optional[str] = None, 349 | attached_files: Optional[List[str]] = None, 350 | file_descriptions: Optional[dict] = None, 351 | additional_context: Optional[str] = None, 352 | preferred_approach: str = "solution" 353 | ) -> str: 354 | """Start or continue a conversation with Gemini about complex coding problems. Supports follow-up questions in the same context. 
355 | 356 | Args: 357 | specific_question: The specific question you want answered 358 | session_id: Optional session ID to continue a previous conversation 359 | problem_description: Detailed description of the coding problem (required for new sessions) 360 | code_context: All relevant code - will be cached for the session (required for new sessions) 361 | attached_files: Array of file paths to upload and attach to the conversation 362 | file_descriptions: Optional object mapping file paths to descriptions 363 | additional_context: Additional context, updates, or what changed since last question 364 | preferred_approach: Type of assistance needed (solution, review, debug, optimize, explain, follow-up) 365 | """ 366 | 367 | await gemini_server._rate_limit() 368 | 369 | # Start cleanup task if needed 370 | gemini_server._ensure_cleanup_task_started() 371 | 372 | try: 373 | # Get or create session 374 | session = gemini_server._get_or_create_session(session_id) 375 | 376 | # For new sessions, require problem description and either code_context or attached_files 377 | if session.message_count == 0: 378 | if not problem_description: 379 | raise ValueError("problem_description is required for new sessions") 380 | if not code_context and not attached_files: 381 | raise ValueError("Either code_context or attached_files are required for new sessions") 382 | 383 | # Store initial context 384 | session.problem_description = problem_description 385 | session.code_context = code_context 386 | 387 | # Build initial context 388 | context_parts = [ 389 | f"I'm Claude, an AI assistant, and I need your help with a complex coding problem. 
Here's the context:\n\n**Problem Description:**\n{problem_description}" 390 | ] 391 | 392 | # Add code context if provided 393 | if code_context: 394 | context_parts.append(f"\n**Code Context:**\n{code_context}") 395 | 396 | # Handle file attachments 397 | if attached_files: 398 | context_parts.append("\n**Attached Files:**") 399 | 400 | # Create parallel upload tasks 401 | print(f"[{datetime.now().isoformat()}] Session {session.session_id}: Starting parallel upload of {len(attached_files)} files", file=sys.stderr) 402 | upload_tasks = [] 403 | for file_path in attached_files: 404 | task = gemini_server._process_file(file_path, session) 405 | upload_tasks.append(task) 406 | 407 | # Execute all uploads in parallel 408 | file_results = await asyncio.gather(*upload_tasks, return_exceptions=True) 409 | 410 | # Process results 411 | for file_path, result in zip(attached_files, file_results): 412 | if isinstance(result, Exception): 413 | print(f"[{datetime.now().isoformat()}] Session {session.session_id}: Failed to process file {file_path}: {result}", file=sys.stderr) 414 | # Continue with other files instead of failing completely 415 | context_parts.append(f"\n- {os.path.basename(file_path)} (failed to upload: {str(result)})") 416 | else: 417 | # Success - file was uploaded 418 | file_info = result 419 | print(f"[{datetime.now().isoformat()}] Session {session.session_id}: File {file_info.file_name} processed successfully", file=sys.stderr) 420 | 421 | # Add file description 422 | description = file_descriptions.get(file_path, "") if file_descriptions else "" 423 | if description: 424 | description = f" - {description}" 425 | context_parts.append(f"\n- {file_info.file_name}{description}") 426 | 427 | print(f"[{datetime.now().isoformat()}] Session {session.session_id}: Parallel upload completed", file=sys.stderr) 428 | 429 | context_parts.append("\n\nPlease help me solve this problem. 
I may have follow-up questions, so please maintain context throughout our conversation.") 430 | 431 | # Build message content - include text and uploaded file objects 432 | message_content = ["".join(context_parts)] 433 | 434 | # Add uploaded file objects for this session's new files 435 | for file_path in attached_files or []: 436 | if file_path in session.processed_files: 437 | file_info = session.processed_files[file_path] 438 | # Get the actual uploaded file object from Gemini 439 | uploaded_file = client.files.get(name=file_info.gemini_file_id) 440 | message_content.append(uploaded_file) 441 | 442 | # Send initial context 443 | response = await asyncio.get_event_loop().run_in_executor( 444 | None, session.chat.send_message, message_content 445 | ) 446 | session.message_count += 1 447 | 448 | file_count = len(session.processed_files) 449 | code_length = len(code_context) if code_context else 0 450 | print(f"[{datetime.now().isoformat()}] Session {session.session_id}: Initial context sent ({code_length} chars, {file_count} files)", file=sys.stderr) 451 | 452 | # Build the question 453 | question_parts = [f"**Question:** {specific_question}"] 454 | 455 | if additional_context: 456 | question_parts.append(f"\n\n**Additional Context/Updates:**\n{additional_context}") 457 | 458 | if preferred_approach != "follow-up": 459 | question_parts.append(f"\n\n**Type of Help Needed:** {preferred_approach}") 460 | 461 | question_prompt = "".join(question_parts) 462 | 463 | # Log request 464 | print(f"[{datetime.now().isoformat()}] Session {session.session_id}: Question #{session.message_count + 1} ({preferred_approach})", file=sys.stderr) 465 | 466 | # Send message and get response 467 | response = await asyncio.get_event_loop().run_in_executor( 468 | None, session.chat.send_message, question_prompt 469 | ) 470 | session.message_count += 1 471 | 472 | response_text = response.text 473 | 474 | # Extract any file requests or search queries from response 475 | 
@mcp.tool()
async def get_gemini_requests(session_id: str) -> str:
    """Get the files and searches that Gemini has requested in a session.

    Args:
        session_id: The session ID to check

    Returns:
        A markdown-formatted summary listing the files and web searches
        Gemini asked for during the session, or a not-found message when
        the session does not exist (or has already expired).
    """
    if session_id not in gemini_server.sessions:
        return f"Session {session_id} not found"

    session = gemini_server.sessions[session_id]

    result_parts = [f"**Session {session_id} Requests:**"]

    if session.requested_files:
        # Plain literals here — the original used f-strings with no
        # placeholders (ruff F541), which is misleading to readers.
        result_parts.append("\n\n**Files Requested:**")
        for requested_file in session.requested_files:
            result_parts.append(f"- {requested_file}")
    else:
        result_parts.append("\n\nNo files requested")

    if session.search_queries:
        result_parts.append("\n\n**Searches Requested:**")
        for query in session.search_queries:
            result_parts.append(f"- {query}")
    else:
        result_parts.append("\n\nNo searches requested")

    return "\n".join(result_parts)
@mcp.tool()
async def list_sessions() -> str:
    """List all active Gemini consultation sessions.

    Returns:
        A human-readable summary for each live session (message count,
        created/last-used timestamps, attachment count, whether code
        context was supplied, number of Gemini requests, and a truncated
        problem description), or "No active sessions" when none exist.
    """
    session_list = []
    for session_id, session in gemini_server.sessions.items():
        # Only add an ellipsis when the description was actually cut off.
        # (The original appended "..." unconditionally, falsely implying
        # truncation for short descriptions.)
        description = session.problem_description
        if not description:
            problem_summary = "No description"
        elif len(description) > 100:
            problem_summary = description[:100] + "..."
        else:
            problem_summary = description

        session_info = {
            "id": session_id,
            "created": session.created.isoformat(),
            "last_used": session.last_used.isoformat(),
            "message_count": session.message_count,
            "problem_summary": problem_summary,
            "file_count": len(session.processed_files),
            "has_code_context": bool(session.code_context),
            # Total of file requests and search requests Gemini has made.
            "requests": len(session.requested_files) + len(session.search_queries)
        }
        session_list.append(session_info)

    if session_list:
        session_text = "\n\n".join([
            f"- **{s['id']}**\n  Messages: {s['message_count']}\n  Created: {s['created']}\n  Last used: {s['last_used']}\n  Files attached: {s['file_count']}\n  Code context: {'Yes' if s['has_code_context'] else 'No'}\n  Requests made: {s['requests']}\n  Problem: {s['problem_summary']}"
            for s in session_list
        ])
        text = f"Active sessions:\n{session_text}"
    else:
        text = "No active sessions"

    return text
@mcp.tool()
async def end_session(session_id: str) -> str:
    """End a specific Gemini consultation session to free up memory."""
    # Guard clause: nothing to clean up for an unknown/expired session.
    if session_id not in gemini_server.sessions:
        return f"Session {session_id} not found or already expired"

    # Release any Gemini-side uploaded files before dropping the session.
    await gemini_server._cleanup_session_files(session_id)
    del gemini_server.sessions[session_id]
    print(f"[{datetime.now().isoformat()}] Session {session_id} ended by user", file=sys.stderr)
    return f"Session {session_id} has been ended"

if __name__ == "__main__":
    # Startup banner — like all diagnostics in this file, it goes to stderr.
    banner = (
        "Gemini Coding Assistant MCP Server v3.0.0 running (Python)",
        "Features: Session management, file attachments, context persistence, follow-up questions, request tracking",
        "Ready to help with complex coding problems!",
    )
    for line in banner:
        print(line, file=sys.stderr)

    mcp.run()