├── __init__.py ├── app ├── prompts │ └── __init__.py ├── __init__.py ├── core │ ├── __init__.py │ └── gemini_client.py ├── routers │ ├── __init__.py │ ├── dependencies.py │ └── chats.py ├── services │ ├── __init__.py │ └── chat_service.py ├── repositories │ ├── __init__.py │ └── chat_repository.py ├── config.py ├── models.py └── main.py ├── .gitignore ├── .env.example ├── requirements.txt ├── docker-compose.example.yml ├── Dockerfile ├── static ├── manage_chats.html ├── css │ └── manage_chats.css └── js │ └── manage_chats.js └── README.md /__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /app/prompts/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | *.pyc 3 | chat_sessions.db -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | Secure_1PSID=COOKIE VALUE HERE 2 | Secure_1PSIDTS=COOKIE VALUE HERE -------------------------------------------------------------------------------- /app/__init__.py: -------------------------------------------------------------------------------- 1 | # This file makes Python treat the directory 'app' as a package. -------------------------------------------------------------------------------- /app/core/__init__.py: -------------------------------------------------------------------------------- 1 | # This file makes Python treat the directory 'core' as a package. -------------------------------------------------------------------------------- /app/routers/__init__.py: -------------------------------------------------------------------------------- 1 | # This file makes Python treat the directory 'routers' as a package. -------------------------------------------------------------------------------- /app/services/__init__.py: -------------------------------------------------------------------------------- 1 | # This file makes Python treat the directory 'services' as a package. -------------------------------------------------------------------------------- /app/repositories/__init__.py: -------------------------------------------------------------------------------- 1 | # This file makes Python treat the directory 'repositories' as a package. -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | browser-cookie3==0.20.1 2 | gemini-webapi==1.10.2 3 | fastapi==0.115.12 4 | aiosqlite 5 | uvicorn -------------------------------------------------------------------------------- /docker-compose.example.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | gemini-web-wrapper: 5 | build: 6 | context: . 
7 | dockerfile: Dockerfile 8 | image: gemini-web-wrapper 9 | ports: 10 | - "8022:8022" 11 | volumes: 12 | - "browser_user:/root/.mozilla/firefox" 13 | stdin_open: true # equivalent to -i in docker run 14 | tty: true # equivalent to -t in docker run -------------------------------------------------------------------------------- /app/config.py: -------------------------------------------------------------------------------- 1 | # app/config.py 2 | from typing import Literal 3 | 4 | # --- Database --- 5 | # Using aiosqlite for async access 6 | DATABASE_URL = "sqlite+aiosqlite:///./chat_sessions.db" # Relative path 7 | 8 | # --- Gemini Settings --- 9 | GEMINI_MODEL_NAME = "gemini-2.5-exp-advanced" # Or your preferred model 10 | 11 | # --- Chat Modes --- 12 | # Defines the allowed mode names for validation purposes. 13 | # The actual system prompt text associated with these modes is handled by the ChatService, 14 | # likely by importing from the prompts module. 15 | ALLOWED_MODES = Literal["Default", "Code", "Architect", "Debug", "Ask"] -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Use an official Python runtime as a parent image 2 | FROM python:3.11-slim 3 | 4 | # Set the working directory in the container 5 | WORKDIR /app 6 | 7 | # Copy the requirements file into the container at /app 8 | COPY requirements.txt /app/ 9 | 10 | # Install any needed packages specified in requirements.txt 11 | RUN pip install --no-cache-dir -r requirements.txt 12 | 13 | # Copy the current directory contents into the container at /app 14 | COPY . /app 15 | 16 | # Make port 8022 available to the world outside this container 17 | EXPOSE 8022 18 | 19 | # Run the command to start the application using uvicorn 20 | CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8022", "--reload"] -------------------------------------------------------------------------------- /static/manage_chats.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Gerenciar Chats - Gemini API 7 | 8 | 9 | 10 |
11 |     <div class="container">
12 |         <h1>Manage Chat Sessions</h1>
13 | 
14 |         <h2>New Chat</h2>
15 |         <div id="createChat">
16 |             <label for="chatDescription">Description:</label>
17 |             <input type="text" id="chatDescription" placeholder="Optional description">
18 | 
19 |             <label for="chatMode">Mode:</label>
20 |             <select id="chatMode">
21 |                 <!-- Mode options are populated by manage_chats.js -->
22 |             </select>
23 | 
24 |             <button id="btnCreateChat" class="btn-create">Create Chat</button>
25 |         </div>
26 | 
27 |         <div id="statusMessage"></div>
28 | 
29 |         <h2>Existing Chats</h2>
30 |         <button id="btnRefreshList" class="btn-refresh">Refresh List</button>
31 | 
32 |         <div id="chatList">
33 |             <p>Loading...</p>
34 |         </div>
35 |     </div>
36 | 
37 |     <script src="/static/js/manage_chats.js"></script>
38 | 
39 | 40 | 41 | -------------------------------------------------------------------------------- /app/models.py: -------------------------------------------------------------------------------- 1 | # app/models.py 2 | from pydantic import BaseModel, Field 3 | from typing import List, Literal, Optional, Union, Dict, Any 4 | import uuid 5 | import time 6 | 7 | # Import central ALLOWED_MODES definition 8 | from app.config import ALLOWED_MODES 9 | 10 | # --- Modelos Pydantic para Conteúdo Multi-Modal --- 11 | class TextBlock(BaseModel): type: Literal["text"]; text: str 12 | class ImageUrlDetail(BaseModel): url: str 13 | class ImageUrlBlock(BaseModel): type: Literal["image_url"]; image_url: ImageUrlDetail 14 | ContentType = Union[str, List[Union[TextBlock, ImageUrlBlock]]] 15 | 16 | class OpenAIMessage(BaseModel): 17 | role: Literal["user", "assistant", "system"]; content: ContentType; name: Optional[str] = None 18 | 19 | class OriginalChatCompletionRequest(BaseModel): 20 | model: Optional[str] = None; messages: List[OpenAIMessage]; temperature: Optional[float] = None 21 | max_tokens: Optional[int] = None; stream: Optional[bool] = False 22 | 23 | # --- Modelos Pydantic de Resposta --- 24 | class Choice(BaseModel): index: int = 0; message: OpenAIMessage; finish_reason: Optional[Literal["stop", "length"]] = "stop" 25 | class Usage(BaseModel): prompt_tokens: int = 0; completion_tokens: int = 0; total_tokens: int = 0 26 | class OriginalChatCompletionResponse(BaseModel): 27 | id: str = Field(default_factory=lambda: f"chatcmpl-{uuid.uuid4()}"); object: Literal["chat.completion"] = "chat.completion" 28 | created: int = Field(default_factory=lambda: int(time.time())); model: str; choices: List[Choice] 29 | usage: Usage = Field(default_factory=Usage); system_fingerprint: Optional[str] = None 30 | 31 | class ChatCompletionResponse(OriginalChatCompletionResponse): 32 | chat_id: str = Field(..., description="ID of the chat session used for this response.") 33 | 34 | 35 | # --- Modelos Pydantic Específicos da API --- 36 | class CreateChatRequest(BaseModel): 37 | description: Optional[str] = Field(None, max_length=255) 38 | # Use imported ALLOWED_MODES for validation 39 | mode: Optional[ALLOWED_MODES] = "Default" # Default if not sent 40 | 41 | class ChatInfo(BaseModel): 42 | chat_id: str 43 | description: str | None 44 | mode: str | None # Mode can be null if not defined or if it's 'Default' conceptually 45 | 46 | class UpdateChatModeRequest(BaseModel): 47 | # Receive the new mode, validated by imported ALLOWED_MODES 48 | mode: ALLOWED_MODES -------------------------------------------------------------------------------- /static/css/manage_chats.css: -------------------------------------------------------------------------------- 1 | body { font-family: sans-serif; margin: 20px; background-color: #f4f4f4; } 2 | .container { background-color: #fff; padding: 20px; border-radius: 8px; box-shadow: 0 2px 5px rgba(0,0,0,0.1); } 3 | h1, h2 { color: #333; border-bottom: 1px solid #eee; padding-bottom: 10px; } 4 | #chatList table { width: 100%; border-collapse: collapse; margin-top: 15px; table-layout: fixed; /* Ajuda com layout */} 5 | #chatList th, #chatList td { border: 1px solid #ddd; padding: 8px; text-align: left; vertical-align: middle; word-wrap: break-word; /* Quebra palavras longas */} 6 | #chatList th { background-color: #f2f2f2; } 7 | #chatList tr:nth-child(even) { background-color: #f9f9f9; } 8 | button { padding: 8px 12px; margin: 0; border: none; border-radius: 4px; cursor: pointer; font-size: 0.9em; 
line-height: 1.2; } 9 | select { padding: 8px; margin-right: 5px; border: 1px solid #ccc; border-radius: 4px; } /* Estilo para Select */ 10 | .btn-refresh { background-color: #007bff; color: white; margin-bottom: 10px;} 11 | .btn-create { background-color: #28a745; color: white; } 12 | .btn-set-active { background-color: #ffc107; color: #333; } 13 | .btn-is-active { background-color: #28a745; color: white; font-weight: bold;} 14 | .btn-delete { background-color: #dc3545; color: white; margin-left: 5px; } 15 | button:hover { opacity: 0.9; } 16 | #createChat label { margin-right: 5px; font-weight: bold;} 17 | #createChat input[type="text"] { padding: 8px; margin-right: 5px; border: 1px solid #ccc; border-radius: 4px; width: calc(50% - 80px); /* Ajuste largura */} 18 | #createChat select { width: calc(50% - 80px); } /* Ajuste largura */ 19 | #statusMessage { margin-top: 15px; padding: 10px; border-radius: 4px; display: none; } 20 | .status-success { background-color: #d4edda; color: #155724; border: 1px solid #c3e6cb; } 21 | .status-error { background-color: #f8d7da; color: #721c24; border: 1px solid #f5c6cb; } 22 | code { background-color: #eee; padding: 2px 4px; border-radius: 3px; font-family: monospace; word-break: break-all; } 23 | td:last-child { white-space: nowrap; width: 1%;} 24 | td select { padding: 5px; border: 1px solid #ccc; border-radius: 4px; width: 95%; box-sizing: border-box; 25 | } 26 | th.col-desc { width: 30%; } /* Largura coluna descrição */ 27 | th.col-mode { width: 15%; } /* Largura coluna modo */ 28 | th.col-id { width: 35%; } /* Largura coluna ID */ 29 | th.col-actions { width: 20%;} /* Largura coluna ações */ -------------------------------------------------------------------------------- /app/routers/dependencies.py: -------------------------------------------------------------------------------- 1 | # app/routers/dependencies.py 2 | from typing import AsyncGenerator, Optional 3 | import aiosqlite 4 | from fastapi import Request, HTTPException, Depends 5 | 6 | from app.services.chat_service import ChatService 7 | from app.config import DATABASE_URL # Used to get path if needed, though pool should be managed 8 | 9 | # Database Connection Dependency 10 | # This dependency assumes a single connection 'db_conn' is managed 11 | # in app.state by the lifespan function. 12 | # For production, using a connection pool (like aiopg or asyncpg with adapters, 13 | # or a library like databases/encode) would be more robust. 14 | 15 | async def get_db(request: Request) -> aiosqlite.Connection: 16 | """ 17 | FastAPI dependency that provides the shared aiosqlite connection 18 | managed by the application's lifespan context. 19 | """ 20 | db_conn = getattr(request.app.state, "db_conn", None) 21 | if db_conn is None: 22 | print("ERROR: get_db dependency - Database connection not found in app.state!") 23 | # Raising 503 Service Unavailable is appropriate here 24 | raise HTTPException(status_code=503, detail="Database service unavailable.") 25 | # We yield the connection managed by lifespan, no explicit open/close here. 26 | # Lifespan is responsible for the connection lifecycle. 27 | # For a pool-based approach, this would acquire and release a connection. 28 | return db_conn 29 | 30 | 31 | # Service Dependency 32 | def get_chat_service(request: Request) -> ChatService: 33 | """ 34 | FastAPI dependency that retrieves the singleton ChatService instance 35 | stored in the application's state by the lifespan function. 
36 | """ 37 | chat_service = getattr(request.app.state, "chat_service", None) 38 | if chat_service is None: 39 | print("ERROR: get_chat_service dependency - ChatService not found in app.state!") 40 | # Raising 503 Service Unavailable is appropriate 41 | raise HTTPException(status_code=503, detail="Chat service unavailable.") 42 | return chat_service 43 | 44 | # Convenience dependency combining DB and Service 45 | # Not strictly necessary but can simplify endpoint signatures 46 | # class CommonDeps: 47 | # def __init__( 48 | # self, 49 | # service: ChatService = Depends(get_chat_service), 50 | # db: aiosqlite.Connection = Depends(get_db), 51 | # ): 52 | # self.service = service 53 | # self.db = db -------------------------------------------------------------------------------- /app/core/gemini_client.py: -------------------------------------------------------------------------------- 1 | # app/core/gemini_client.py 2 | import os 3 | import traceback 4 | from typing import Optional, List, Dict, Any 5 | 6 | from gemini_webapi import GeminiClient, ChatSession 7 | from fastapi import HTTPException 8 | 9 | from app.config import GEMINI_MODEL_NAME 10 | 11 | class GeminiClientWrapper: 12 | """Manages the GeminiClient instance and interactions.""" 13 | 14 | def __init__(self): 15 | self._client: Optional[GeminiClient] = None 16 | 17 | async def init_client(self, timeout: int = 180): 18 | """Initializes the GeminiClient.""" 19 | if self._client: 20 | print("Gemini client already initialized.") 21 | return 22 | 23 | print(f"Initializing Gemini Client (Timeout: {timeout}s)...") 24 | try: 25 | # Consider proxy settings if needed from config 26 | temp_client = GeminiClient( 27 | proxy=None 28 | ) 29 | # Use specified timeout, auto_close=False, auto_refresh=True 30 | await temp_client.init(timeout=timeout, auto_close=False, auto_refresh=True) 31 | self._client = temp_client 32 | print("Gemini Client initialized successfully.") 33 | except Exception as e: 34 | self._client = None # Ensure client is None if init fails 35 | print(f"!!!!!!!! FAILED TO INITIALIZE GEMINI CLIENT !!!!!!!! 
Error: {e}") 36 | traceback.print_exc() 37 | # Depending on requirements, could raise an exception here to halt startup 38 | # raise RuntimeError(f"Failed to initialize Gemini Client: {e}") from e 39 | 40 | async def close_client(self): 41 | """Closes the GeminiClient connection.""" 42 | if self._client: 43 | print("Closing Gemini Client...") 44 | try: 45 | await self._client.close() 46 | print("Gemini Client closed.") 47 | except Exception as e: 48 | print(f"Error closing Gemini Client: {e}") 49 | finally: 50 | self._client = None 51 | else: 52 | print("Gemini Client was not initialized or already closed.") 53 | 54 | def get_client(self) -> GeminiClient: 55 | """Returns the initialized GeminiClient instance, raising an error if not ready.""" 56 | if not self._client: 57 | # This indicates a programming error or failed startup, internal server error is appropriate 58 | raise HTTPException(status_code=503, detail="Service Unavailable: Gemini client not initialized.") 59 | return self._client 60 | 61 | def start_new_chat(self, model: str = GEMINI_MODEL_NAME) -> ChatSession: 62 | """Starts a new chat session using the underlying client.""" 63 | client = self.get_client() 64 | # Let exceptions from start_chat propagate up 65 | return client.start_chat(model=model) 66 | 67 | def load_chat_from_metadata(self, metadata: Dict[str, Any], model: str = GEMINI_MODEL_NAME) -> ChatSession: 68 | """Loads an existing chat session from metadata.""" 69 | client = self.get_client() 70 | try: 71 | # Recreate the session object from metadata 72 | chat_session = client.start_chat(metadata=metadata, model=model) 73 | return chat_session 74 | except Exception as e: 75 | print(f"Error loading chat session from metadata: {e}") 76 | # Raise HTTPException here so the service layer can catch it 77 | raise HTTPException(status_code=500, detail=f"Failed to load chat session from metadata: {e}") from e 78 | 79 | async def send_message( 80 | self, 81 | chat_session: ChatSession, 82 | prompt: str, 83 | files: Optional[List[str]] = None 84 | ) -> Any: # Return type depends on gemini_webapi response object, using Any for now 85 | """Sends a message using the provided ChatSession.""" 86 | # The ChatSession object should belong to the initialized client 87 | # No need to call get_client() here if chat_session is managed correctly 88 | if not chat_session: 89 | raise ValueError("Invalid ChatSession provided to send_message.") 90 | print(f"Sending message via GeminiClientWrapper (Files: {len(files or [])})...") 91 | try: 92 | # Let exceptions propagate 93 | response = await chat_session.send_message(prompt, files=files) 94 | print(f"Response received via GeminiClientWrapper.") 95 | return response 96 | except Exception as e: 97 | print(f"Error sending message via Gemini: {e}") 98 | traceback.print_exc() 99 | # Raise a specific exception or HTTPException for the service to handle 100 | raise HTTPException(status_code=500, detail=f"Error communicating with Gemini API: {e}") from e -------------------------------------------------------------------------------- /app/main.py: -------------------------------------------------------------------------------- 1 | # app/main.py 2 | import aiosqlite 3 | from contextlib import asynccontextmanager 4 | from pathlib import Path 5 | from fastapi import FastAPI, HTTPException, status # Added status 6 | from fastapi.staticfiles import StaticFiles 7 | from fastapi.responses import HTMLResponse 8 | 9 | # Import components from the app package 10 | from app.config import DATABASE_URL 11 | from 
app.core.gemini_client import GeminiClientWrapper 12 | from app.repositories.chat_repository import SqliteChatRepository 13 | from app.services.chat_service import ChatService 14 | from app.routers.chats import router as chats_router 15 | 16 | # Determine paths relative to this main.py file 17 | current_script_dir = Path(__file__).parent 18 | # Static files are one level up from 'app' directory 19 | static_dir_path = (current_script_dir.parent / "static").resolve() 20 | # Extract DB path from config URL 21 | db_path = DATABASE_URL.split("///")[-1] 22 | 23 | @asynccontextmanager 24 | async def lifespan(app: FastAPI): 25 | """ 26 | Manage application lifespan events: startup and shutdown. 27 | Initializes and closes resources like DB connections and external clients. 28 | Stores shared instances on app.state. 29 | """ 30 | print("--- Application Lifespan: Startup Initiated ---") 31 | app.state.db_conn = None # Ensure state attributes exist even if setup fails 32 | app.state.gemini_wrapper = None 33 | app.state.repository = None 34 | app.state.chat_service = None 35 | 36 | # 1. Initialize Database Table (creates if not exists) 37 | try: 38 | await SqliteChatRepository.initialize_db() 39 | except Exception as init_db_e: 40 | print(f"FATAL: Database table initialization failed: {init_db_e}") 41 | # Raising prevents app from starting if DB table init fails 42 | raise RuntimeError("Failed to initialize database table") from init_db_e 43 | 44 | # 2. Establish Shared Database Connection 45 | db_conn = None 46 | try: 47 | print(f"Connecting to database at: {db_path}") 48 | db_conn = await aiosqlite.connect(db_path) 49 | # Set WAL mode for better concurrency 50 | await db_conn.execute("PRAGMA journal_mode=WAL;") 51 | await db_conn.commit() # Commit journal mode change 52 | app.state.db_conn = db_conn # Store connection on app state 53 | print("Database connection established (WAL mode enabled).") 54 | except Exception as conn_db_e: 55 | print(f"FATAL: Database connection failed: {conn_db_e}") 56 | if db_conn: await db_conn.close() # Attempt close if connection object exists 57 | raise RuntimeError("Failed to connect to database") from conn_db_e 58 | 59 | # 3. Initialize Gemini Client Wrapper 60 | gemini_wrapper = GeminiClientWrapper() 61 | try: 62 | await gemini_wrapper.init_client() # Uses default timeout 63 | if not gemini_wrapper._client: # Verify initialization success internally 64 | raise RuntimeError("Gemini client initialization method completed but client instance is still None.") 65 | app.state.gemini_wrapper = gemini_wrapper # Store wrapper on app state 66 | print("Gemini Client Wrapper initialized successfully.") 67 | except Exception as gemini_e: 68 | print(f"FATAL: Gemini Client initialization failed: {gemini_e}") 69 | # Close DB connection before raising error, as Gemini client is essential 70 | if db_conn: await db_conn.close() 71 | raise RuntimeError("Failed to initialize Gemini client") from gemini_e 72 | 73 | # 4. Create Repository Instance (stateless, just needs creation) 74 | repository = SqliteChatRepository() 75 | app.state.repository = repository 76 | print("Chat Repository instance created.") 77 | 78 | # 5. Create Service Instance (injecting repository and client wrapper) 79 | # Service instance holds in-memory state (cache, active_id) 80 | chat_service = ChatService(repository=repository, gemini_wrapper=gemini_wrapper) 81 | app.state.chat_service = chat_service 82 | print("Chat Service instance created.") 83 | 84 | # 6. 
Load Initial Service Cache from DB 85 | try: 86 | print("Attempting to load initial chat service cache...") 87 | await chat_service.load_initial_cache(db_conn) # Pass the established connection 88 | print("Initial chat service cache loading process completed.") 89 | except Exception as cache_e: 90 | # Log error but allow app to continue, cache will be empty/partially loaded 91 | print(f"WARNING: Failed to load initial cache during startup: {cache_e}") 92 | 93 | print("--- Application Startup Successfully Completed ---") 94 | yield # Application runs here... 95 | print("--- Application Lifespan: Shutdown Initiated ---") 96 | 97 | # Cleanup: Close resources in reverse order of creation 98 | 99 | # 1. Close Gemini Client (via wrapper) 100 | if hasattr(app.state, 'gemini_wrapper') and app.state.gemini_wrapper: 101 | print("Closing Gemini Client...") 102 | await app.state.gemini_wrapper.close_client() 103 | else: 104 | print("Gemini Wrapper not found in state or already closed.") 105 | 106 | # 2. Close Database Connection 107 | if hasattr(app.state, 'db_conn') and app.state.db_conn: 108 | print("Closing database connection...") 109 | await app.state.db_conn.close() 110 | print("Database connection closed.") 111 | else: 112 | print("Database connection not found in state or already closed.") 113 | 114 | print("--- Application Shutdown Complete ---") 115 | 116 | 117 | # Create FastAPI app instance with title, description, version, and lifespan 118 | app = FastAPI( 119 | title="Gemini FastAPI Wrapper", 120 | description="A refactored API wrapper for Google Gemini with session management.", 121 | version="1.1.0", # Incremented version after refactor 122 | lifespan=lifespan 123 | ) 124 | 125 | # Include the API router defined in app/routers/chats.py 126 | app.include_router(chats_router) 127 | 128 | # Mount static files directory (for the frontend) 129 | if static_dir_path.is_dir(): 130 | print(f"Mounting static directory: {static_dir_path} at /static") 131 | app.mount("/static", StaticFiles(directory=static_dir_path), name="static") 132 | else: 133 | # Log a warning if the static directory doesn't exist 134 | print(f"WARNING: Static directory not found at '{static_dir_path}'. 
Frontend will not be served.") 135 | 136 | # Serve the main frontend HTML page from the static directory at the root URL 137 | @app.get("/", response_class=HTMLResponse, include_in_schema=False) 138 | async def serve_index_page(): 139 | """Serves the main HTML frontend page (manage_chats.html).""" 140 | index_path = static_dir_path / "manage_chats.html" 141 | if not index_path.is_file(): 142 | print(f"ERROR: Frontend entry point file '{index_path}' not found!") 143 | # Return 404 if the main HTML file is missing 144 | raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Frontend entry point not found.") 145 | try: 146 | # Read and return the content of the HTML file 147 | with open(index_path, "r", encoding="utf-8") as f: 148 | content = f.read() 149 | return HTMLResponse(content=content) 150 | except Exception as e: 151 | print(f"ERROR reading frontend file '{index_path}': {e}") 152 | # Return 500 if there's an error reading the file 153 | raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Error serving frontend.") 154 | 155 | # Basic health check endpoint 156 | @app.get("/health", tags=["Health"], status_code=status.HTTP_200_OK) 157 | async def health_check(): 158 | """Simple health check endpoint to confirm the API is running.""" 159 | return {"status": "ok"} 160 | 161 | # Reminder for running the application: 162 | # Use a command like: uvicorn app.main:app --reload --host 0.0.0.0 --port 8000 -------------------------------------------------------------------------------- /app/routers/chats.py: -------------------------------------------------------------------------------- 1 | # app/routers/chats.py 2 | from typing import List, Dict, Optional 3 | import aiosqlite 4 | from fastapi import APIRouter, Depends, HTTPException, Path as FastApiPath, Request, status 5 | from pydantic import BaseModel # For local request/response models if needed 6 | 7 | # Import application models 8 | from app.models import ( 9 | ChatInfo, CreateChatRequest, UpdateChatModeRequest, 10 | OriginalChatCompletionRequest, ChatCompletionResponse, OpenAIMessage, ALLOWED_MODES 11 | ) 12 | # Import service and dependencies 13 | from app.services.chat_service import ChatService 14 | from app.routers.dependencies import get_chat_service, get_db 15 | 16 | # Define router 17 | router = APIRouter( 18 | prefix="/v1", 19 | tags=["Chat Sessions"], # Group endpoints in OpenAPI docs 20 | ) 21 | 22 | # --- Router-specific Models --- 23 | # These models were implicitly defined or used in the old gemini_api.py but not in models.py 24 | class SetActiveChatRequest(BaseModel): 25 | """Request body for setting the active chat.""" 26 | chat_id: Optional[str] = None # Allow null to deactivate 27 | 28 | class GetActiveChatResponse(BaseModel): 29 | """Response body for getting the active chat.""" 30 | active_chat_id: Optional[str] 31 | 32 | 33 | # --- Chat Session Endpoints --- 34 | 35 | @router.get("/chats", response_model=List[ChatInfo]) 36 | async def list_chats_endpoint( 37 | service: ChatService = Depends(get_chat_service), 38 | db: aiosqlite.Connection = Depends(get_db) 39 | ): 40 | """ 41 | Retrieve a list of all existing chat sessions, including their ID, 42 | description, and mode. 43 | """ 44 | print("Router: GET /v1/chats") 45 | # The service method handles fetching from the repository 46 | # Exceptions (like DB errors wrapped in HTTPException) from the service layer 47 | # will be automatically handled by FastAPI. 
48 | return await service.list_chats(db) 49 | 50 | 51 | @router.post("/chats", response_model=str, status_code=status.HTTP_201_CREATED) 52 | async def create_chat_endpoint( 53 | payload: CreateChatRequest, 54 | service: ChatService = Depends(get_chat_service), 55 | db: aiosqlite.Connection = Depends(get_db) 56 | ): 57 | """ 58 | Create a new chat session. Requires an optional description and mode 59 | (defaults to 'Default'). Returns the ID of the newly created chat session. 60 | """ 61 | print(f"Router: POST /v1/chats (Desc: {payload.description or 'N/A'}, Mode: {payload.mode or 'Default'})") 62 | # The service handles mode validation, interaction with Gemini client (via wrapper) 63 | # for initial metadata, saving to DB (via repository), and updating cache. 64 | new_chat_id = await service.create_chat( 65 | db=db, 66 | description=payload.description, 67 | mode=payload.mode # Pass the mode (Optional[ALLOWED_MODES]) 68 | ) 69 | return new_chat_id # Return the generated chat_id string 70 | 71 | 72 | @router.post("/chats/active", response_model=Dict[str, str]) 73 | async def set_active_chat_endpoint( 74 | payload: SetActiveChatRequest, # Use the locally defined model here 75 | service: ChatService = Depends(get_chat_service), 76 | db: aiosqlite.Connection = Depends(get_db) # Add DB dependency 77 | ): 78 | """ 79 | Set the globally active chat session ID. This ID will be used for all 80 | subsequent requests to `/v1/chat/completions`. 81 | Send `{"chat_id": null}` or `{}` in the body to deactivate the active chat. 82 | """ 83 | print(f"Router: POST /v1/chats/active (Setting ID to: {payload.chat_id})") 84 | # Service method validates the chat_id against its cache and updates state. 85 | # Service handles validation, prompt sending (if needed), and state updates 86 | await service.set_active_chat(db=db, chat_id=payload.chat_id) # Await the async call and pass db 87 | 88 | if payload.chat_id: 89 | return {"message": f"Active chat session set to {payload.chat_id}"} 90 | else: 91 | return {"message": "Active chat session deactivated."} 92 | 93 | 94 | @router.get("/chats/active", response_model=GetActiveChatResponse) 95 | async def get_active_chat_endpoint( 96 | service: ChatService = Depends(get_chat_service) 97 | # No DB connection needed 98 | ): 99 | """ 100 | Retrieve the ID of the currently active chat session. 101 | Returns `null` if no chat session is currently active. 102 | """ 103 | print("Router: GET /v1/chats/active") 104 | active_id = service.get_active_chat() 105 | return GetActiveChatResponse(active_chat_id=active_id) 106 | 107 | 108 | @router.put("/chats/{chat_id}/mode", response_model=Dict[str, str]) 109 | async def update_chat_mode_endpoint( 110 | payload: UpdateChatModeRequest, 111 | chat_id: str = FastApiPath(..., title="Chat ID", description="The unique identifier of the chat session to update."), 112 | service: ChatService = Depends(get_chat_service), 113 | db: aiosqlite.Connection = Depends(get_db) 114 | ): 115 | """ 116 | Update the mode (e.g., 'Code', 'Architect') for a specific chat session. 117 | Changing the mode also resets the flag indicating whether the system prompt 118 | has been sent, ensuring it's included in the next completion request. 119 | """ 120 | print(f"Router: PUT /v1/chats/{chat_id}/mode (New Mode: {payload.mode})") 121 | # Service validates chat existence in cache, calls repo to update DB, updates cache. 122 | await service.update_chat_mode(db=db, chat_id=chat_id, new_mode=payload.mode) 123 | # If successful, return confirmation message. 
Service raises HTTPException on error. 124 | return {"message": f"Mode for Chat {chat_id} updated to '{payload.mode}'. System prompt will be resent."} 125 | 126 | 127 | @router.delete("/chats/{chat_id}", status_code=status.HTTP_204_NO_CONTENT) 128 | async def delete_chat_endpoint( 129 | chat_id: str = FastApiPath(..., title="Chat ID", description="The unique identifier of the chat session to delete."), 130 | service: ChatService = Depends(get_chat_service), 131 | db: aiosqlite.Connection = Depends(get_db) 132 | ): 133 | """ 134 | Delete a specific chat session permanently from the database and cache. 135 | If the deleted chat was the active one, the active chat will be deactivated. 136 | """ 137 | print(f"Router: DELETE /v1/chats/{chat_id}") 138 | # Service validates existence in cache, calls repo to delete from DB, removes from cache, 139 | # and updates active ID state if necessary. Raises HTTPException on errors. 140 | await service.delete_chat(db=db, chat_id=chat_id) 141 | # No response body is sent for 204 No Content status code. 142 | 143 | 144 | @router.post("/chat/completions", response_model=ChatCompletionResponse) 145 | async def chat_completions_endpoint( 146 | # FastAPI automatically validates the incoming body against this Pydantic model 147 | request_body: OriginalChatCompletionRequest, 148 | service: ChatService = Depends(get_chat_service), 149 | db: aiosqlite.Connection = Depends(get_db) 150 | ): 151 | """ 152 | Processes a chat completion request using the currently **active** chat session. 153 | This endpoint handles message history context implicitly (managed by the backend), 154 | prepends system prompts based on the active chat's mode (if not already sent), 155 | processes included images (base64 data URIs), interacts with the Gemini API, 156 | and updates the chat session's state. 157 | 158 | **Requires an active chat session to be set via `POST /v1/chats/active` first.** 159 | """ 160 | print("Router: POST /v1/chat/completions received") 161 | # The service's handle_completion method contains the core complex logic. 162 | # We pass only the list of messages from the validated request body. 163 | response = await service.handle_completion(db=db, user_messages=request_body.messages) 164 | # Service method returns the fully formed ChatCompletionResponse or raises HTTPException. 165 | return response -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Gemini WebAPI to OpenAI API Bridge (Multi-Session & Modes) 2 | 3 | This FastAPI application acts as a bridge, exposing an OpenAI-compatible `/v1/chat/completions` endpoint that internally uses the unofficial [gemini-webap](https://github.com/HanaokaYuzu/Gemini-API) library to interact with Google Gemini Web. This allows tools configured for the OpenAI API (like Roo Code, configured NOT to use streaming) to potentially use Gemini as the backend model. 4 | 5 | **Disclaimer:** This uses the unofficial `gemini-webapi` library, which relies on browser cookies for authentication. Changes to Google's web interface or authentication methods may break this library and, consequently, this bridge. Use at your own risk. 6 | 7 | ## Key Features 8 | 9 | * **OpenAI-Compatible API:** Provides `/v1/chat/completions` endpoint. 10 | * **Multiple Chat Sessions:** Manage multiple independent chat conversations via API. 
11 | * **Session Persistence:** Chat history and metadata are stored in a local SQLite database (`chat_sessions.db`). 12 | * **Chat Modes:** Supports different modes (e.g., `Code`, `Architect`, `Debug`, `Ask`, `Default`) with distinct system prompts defined in `prompts.py`. 13 | * **System Prompt Handling:** Automatically sends the appropriate system prompt when a chat session is first activated or when the mode of the active chat is changed. 14 | * **Image Support:** Accepts Base64-encoded images within requests using the OpenAI vision format. 15 | * **Simple Web UI:** Includes a basic web interface (served at `/`) for viewing, creating, deleting, activating, and changing the mode of chat sessions. 16 | 17 | ## Limitations 18 | 19 | * Relies on the unofficial `gemini-webapi` and browser cookie authentication. 20 | * Currently, requires the `prompts.py` file to be present at the project root for defining mode behaviors. 21 | * Error handling and stability, particularly regarding `gemini-webapi` interactions, might require further testing and refinement. 22 | * You need to manually create/change the chats through the provided frontend. 23 | 24 | ## Dependencies 25 | 26 | * **Python 3.8+** (Tested primarily with Python 3.13) 27 | * **FastAPI:** For the web server framework. 28 | * **Uvicorn:** ASGI server to run FastAPI. 29 | * **gemini-webapi:** The core library interacting with Google Gemini Web. 30 | * **browser-cookie3:** Used by `gemini-webapi` for cookie access. 31 | * **aiosqlite:** For asynchronous SQLite database access. 32 | 33 | Install dependencies using: 34 | ```bash 35 | pip install -r requirements.txt 36 | ``` 37 | 38 | ## Authentication 39 | 40 | `gemini-webapi` authenticates by accessing the cookies stored by your web browser. Therefore, **you must be logged into the Google Gemini website** (e.g., [https://gemini.google.com/app](https://gemini.google.com/app)) in your **default web browser** on the machine where you run this script. Ensure the browser is closed before running the script if you encounter cookie loading issues. 41 | 42 | ## Project Structure 43 | 44 | The application code is organized within the `app/` directory: 45 | 46 | * `app/main.py`: Main FastAPI application instance, lifespan management, static file serving. 47 | * `app/config.py`: Static configuration (DB URL, Gemini Model, Allowed Modes). 48 | * `app/models.py`: Pydantic models for API requests/responses. 49 | * `app/routers/`: Defines API endpoints (e.g., `chats.py`). 50 | * `dependencies.py`: Reusable FastAPI dependencies. 51 | * `app/services/`: Contains business logic (`chat_service.py`). Manages state (cache, active chat). 52 | * `app/repositories/`: Handles data access (`chat_repository.py` for SQLite). 53 | * `app/core/`: Core components like the Gemini client wrapper (`gemini_client.py`). 54 | * `prompts/prompts.py`: Defines the system prompts for different modes. 55 | * `static/`: Contains frontend HTML, CSS, and JavaScript files. 56 | 57 | ## Configuration 58 | 59 | * **Gemini Model:** Set the `GEMINI_MODEL_NAME` in `app/config.py`. 60 | * **Database:** The database file path (`chat_sessions.db`) is configured via `DATABASE_URL` in `app/config.py`. 61 | * **System Prompts:** The text for system prompts used by different modes is defined in `prompts.py` at the project root. 62 | 63 | ## Running the Server 64 | 65 | Ensure you are in the project root directory (the one containing the `app/` directory and `requirements.txt`). 
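If you prefer to run the app in a container rather than directly with Uvicorn (shown below), the repository also ships a `Dockerfile` and a `docker-compose.example.yml`. A minimal sketch, assuming you first copy the example compose file to `docker-compose.yml`; note that cookie-based authentication still needs a logged-in browser profile available to the container, which the example compose file maps via the `browser_user` volume:

```bash
# Using Docker Compose (builds the image defined by the Dockerfile)
cp docker-compose.example.yml docker-compose.yml
docker compose up --build

# Or with plain Docker, mirroring the compose settings
# (-it corresponds to stdin_open/tty in the compose file)
docker build -t gemini-web-wrapper .
docker run -it -p 8022:8022 gemini-web-wrapper
```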
66 | 67 | Run using Uvicorn: 68 | ```bash 69 | uvicorn app.main:app --host 0.0.0.0 --port 8022 --reload 70 | ``` 71 | * Replace `8099` with your desired port. 72 | * The `--reload` flag enables auto-reloading during development. Remove it for production. 73 | * The server will listen on the specified port on all network interfaces. Check console output for startup confirmation and potential errors. 74 | 75 | ## API Usage / Session Management 76 | 77 | This application manages multiple chat sessions. The `/v1/chat/completions` endpoint always operates on the currently **active** session. 78 | 79 | ## The below is only valid if you prefer to manage the chats through requests (E.g.: Postman, curl etc) 80 | 81 | **Workflow:** 82 | 83 | 1. **List Existing Chats (Optional):** `GET /v1/chats` 84 | * Returns a list of `ChatInfo` objects (`chat_id`, `description`, `mode`). 85 | 2. **Create a New Chat:** `POST /v1/chats` 86 | * Body: `{ "description": "Optional description", "mode": "Optional ModeName" }` (Mode defaults to "Default"). 87 | * Returns the `chat_id` (string) of the newly created session. 88 | 3. **Activate a Chat:** `POST /v1/chats/active` 89 | * Body: `{ "chat_id": "your-chat-id" }` 90 | * Sets the specified chat as the active one for subsequent `/completions` requests. 91 | * **Important:** This step also triggers sending the appropriate system prompt to the Gemini session if it hasn't been sent yet for this chat's current mode (e.g., on first activation or after a mode change while inactive). 92 | * To deactivate, send `{ "chat_id": null }`. 93 | 4. **Get Active Chat (Optional):** `GET /v1/chats/active` 94 | * Returns `{ "active_chat_id": "current-active-id" }` or `{ "active_chat_id": null }`. 95 | 5. **Change Chat Mode:** `PUT /v1/chats/{chat_id}/mode` 96 | * Body: `{ "mode": "NewModeName" }` (e.g., "Code", "Ask"). 97 | * Updates the mode for the specified chat. 98 | * **Important:** If the specified `chat_id` is the *currently active* chat, this endpoint immediately sends the system prompt for the *new* mode to the Gemini session. If the chat is inactive, the prompt is sent the next time it's activated via `POST /v1/chats/active`. 99 | 6. **Send Message / Get Completion:** `POST /v1/chat/completions` 100 | * Uses the currently **active** chat session (set via step 3). 101 | * Body: Standard OpenAI format `{ "messages": [{"role": "user", "content": "Your message" or [{"type":"text",...},{"type":"image_url",...}] }], ... }`. 102 | * Sends *only* the user message content to the active Gemini session. 103 | * Returns an OpenAI-compatible `ChatCompletionResponse`. 104 | 7. **Delete a Chat:** `DELETE /v1/chats/{chat_id}` 105 | * Permanently deletes the specified chat session. 106 | 107 | **Roo Code Configuration:** 108 | * Point Roo Code to use the API Base URL: `http://:/v1` (e.g., `http://localhost:8022/v1`). 109 | * Ensure that Roo is **NOT** configured to use streaming responses. 110 | 111 | ## Web UI 112 | 113 | A simple web interface is available at the root URL (`http://:/`) for basic chat management: 114 | * View existing chats. 115 | * Create new chats with descriptions and modes. 116 | * Delete chats. 117 | * Set the active chat. 118 | * Change the mode (prompt) of existing chats. 119 | 120 | ## Image Handling 121 | 122 | - The server expects images encoded as Base64 within `data:` URIs, following the OpenAI vision format. 123 | - It decodes these images and saves them to temporary files on the server filesystem for processing by `gemini-webapi`. 
124 | - These temporary files are automatically deleted after the API call completes. Ensure the server process has permissions to write to the system's temporary directory. 125 | - Direct `http`/`https` image URLs sent by the client are currently _ignored_. 126 | 127 | ## Error Handling 128 | 129 | - Check the console output of the FastAPI server for errors during initialization, request handling, or interactions with the Gemini API or database. 130 | - Common HTTP status codes include 404 (Not Found), 422 (Validation Error), 500 (Internal Server Error), 503 (Service Unavailable - e.g., DB/Gemini client init failure). 131 | 132 | This is a PERSONAL project for study purposes only. USE AT YOUR OWN RISK. 133 | -------------------------------------------------------------------------------- /app/repositories/chat_repository.py: -------------------------------------------------------------------------------- 1 | # app/repositories/chat_repository.py 2 | import aiosqlite 3 | import json 4 | from typing import List, Optional, Dict, Any, Tuple 5 | from app.models import ChatInfo # Assuming ChatInfo is defined in app.models 6 | from app.config import DATABASE_URL # Needed for initialization connection 7 | 8 | # Could define a Protocol for the interface here for better type hinting and testing 9 | 10 | class SqliteChatRepository: 11 | """Repository for chat session data using aiosqlite.""" 12 | 13 | # Store the path extracted from the DATABASE_URL 14 | db_path = DATABASE_URL.split("///")[-1] 15 | 16 | @staticmethod 17 | async def initialize_db(): 18 | """Creates the sessions table if it doesn't exist. Should be called during app lifespan startup.""" 19 | print(f"Initializing database table 'sessions' at: {SqliteChatRepository.db_path}") 20 | try: 21 | async with aiosqlite.connect(SqliteChatRepository.db_path) as db: 22 | # Enable Write-Ahead Logging for better concurrency with reads/writes 23 | await db.execute("PRAGMA journal_mode=WAL;") 24 | await db.execute(""" 25 | CREATE TABLE IF NOT EXISTS sessions ( 26 | chat_id TEXT PRIMARY KEY, 27 | metadata_json TEXT NOT NULL, 28 | description TEXT, 29 | mode TEXT, 30 | system_prompt_sent BOOLEAN DEFAULT FALSE NOT NULL, 31 | last_updated TIMESTAMP DEFAULT CURRENT_TIMESTAMP 32 | ) 33 | """) 34 | await db.execute("CREATE INDEX IF NOT EXISTS idx_sessions_chat_id ON sessions(chat_id)") 35 | # Trigger to update last_updated automatically on UPDATE 36 | await db.execute(""" 37 | CREATE TRIGGER IF NOT EXISTS update_last_updated_after_update 38 | AFTER UPDATE ON sessions FOR EACH ROW 39 | WHEN OLD.metadata_json <> NEW.metadata_json OR OLD.description <> NEW.description OR OLD.mode <> NEW.mode OR OLD.system_prompt_sent <> NEW.system_prompt_sent 40 | BEGIN 41 | UPDATE sessions SET last_updated = CURRENT_TIMESTAMP WHERE chat_id = OLD.chat_id; 42 | END; 43 | """) 44 | await db.commit() 45 | print("Database table 'sessions' initialized successfully.") 46 | except Exception as e: 47 | print(f"!!!!!!!! DATABASE INITIALIZATION FAILED !!!!!!!! Error: {e}") 48 | # Depending on requirements, might want to raise this to stop app startup 49 | raise RuntimeError(f"Failed to initialize database: {e}") from e 50 | 51 | # Note: Methods below assume an active aiosqlite.Connection 'db' is passed in. 52 | # This connection should be managed externally (e.g., via lifespan and dependency injection). 
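    # Illustrative usage sketch (assumed from the method signatures below): a caller
    # such as ChatService passes the shared connection from the get_db() dependency
    # into each method, e.g.:
    #
    #     repo = SqliteChatRepository()
    #     chats = await repo.get_chat_info_list(db)            # -> List[ChatInfo]
    #     data = await repo.get_session_data(db, chat_id)      # -> dict or None
    #     updated = await repo.update_mode_and_reset_flag(db, chat_id, "Code")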
53 | 54 | async def get_chat_info_list(self, db: aiosqlite.Connection) -> List[ChatInfo]: 55 | """Fetches basic info (id, description, mode) for all chats.""" 56 | chats = [] 57 | try: 58 | db.row_factory = aiosqlite.Row # Access columns by name 59 | async with db.execute("SELECT chat_id, description, mode FROM sessions ORDER BY last_updated DESC") as cursor: 60 | rows = await cursor.fetchall() 61 | chats = [ChatInfo(chat_id=row["chat_id"], description=row["description"], mode=row["mode"]) for row in rows] 62 | except Exception as e: 63 | print(f"Repository Error in get_chat_info_list: {e}") 64 | # Return empty list, let service layer decide how to handle 65 | return chats 66 | 67 | async def get_all_session_data(self, db: aiosqlite.Connection) -> Dict[str, Dict[str, Any]]: 68 | """Loads metadata, mode, and prompt flag for all sessions (intended for cache hydration).""" 69 | sessions_cache: Dict[str, Dict[str, Any]] = {} 70 | try: 71 | db.row_factory = aiosqlite.Row 72 | async with db.execute("SELECT chat_id, metadata_json, mode, system_prompt_sent FROM sessions") as cursor: 73 | rows = await cursor.fetchall() 74 | for row in rows: 75 | chat_id = row["chat_id"] 76 | try: 77 | metadata = json.loads(row["metadata_json"]) 78 | prompt_sent = bool(row["system_prompt_sent"]) # Convert DB 0/1 to bool 79 | sessions_cache[chat_id] = {"metadata": metadata, "mode": row["mode"], "prompt_sent": prompt_sent} 80 | except json.JSONDecodeError: 81 | print(f"Warning: Bad JSON metadata for chat_id '{chat_id}' in get_all_session_data. Skipping.") 82 | except Exception as inner_e: 83 | print(f"Warning: Error processing row for chat_id '{chat_id}' in get_all_session_data: {inner_e}. Skipping.") 84 | 85 | except Exception as e: 86 | print(f"Repository Error in get_all_session_data: {e}") 87 | # Return empty dict, let service layer decide how to handle 88 | return sessions_cache 89 | 90 | async def get_session_data(self, db: aiosqlite.Connection, chat_id: str) -> Optional[Dict[str, Any]]: 91 | """Loads metadata, mode, and prompt flag for a single session by ID.""" 92 | try: 93 | db.row_factory = aiosqlite.Row 94 | async with db.execute("SELECT metadata_json, mode, system_prompt_sent FROM sessions WHERE chat_id = ?", (chat_id,)) as cursor: 95 | row = await cursor.fetchone() 96 | if row: 97 | try: 98 | metadata = json.loads(row["metadata_json"]) 99 | prompt_sent = bool(row["system_prompt_sent"]) 100 | return {"metadata": metadata, "mode": row["mode"], "prompt_sent": prompt_sent} 101 | except json.JSONDecodeError: 102 | print(f"Warning: Bad JSON metadata for chat_id '{chat_id}' in get_session_data. 
Returning None.") 103 | return None 104 | else: 105 | return None # Chat ID not found 106 | except Exception as e: 107 | print(f"Repository Error in get_session_data for chat_id '{chat_id}': {e}") 108 | return None # Return None on error 109 | 110 | async def create_chat(self, db: aiosqlite.Connection, chat_id: str, metadata: dict, description: str | None, mode: str | None) -> bool: 111 | """Creates a new chat session record.""" 112 | success = False 113 | try: 114 | metadata_json = json.dumps(metadata) 115 | # system_prompt_sent defaults to FALSE in schema definition 116 | await db.execute( 117 | "INSERT INTO sessions (chat_id, metadata_json, description, mode) VALUES (?, ?, ?, ?)", 118 | (chat_id, metadata_json, description, mode) 119 | ) 120 | await db.commit() 121 | success = True 122 | print(f"Repository: Session CREATED in DB: {chat_id}") 123 | except aiosqlite.IntegrityError: 124 | # This is an expected error if the chat_id already exists 125 | print(f"Repository Warning: Session '{chat_id}' already exists (IntegrityError).") 126 | # Consider if this should return True or False, or raise a specific exception 127 | # Returning False indicates it wasn't newly created. 128 | pass # Keep success = False 129 | except Exception as e: 130 | print(f"Repository Error CREATING session '{chat_id}': {e}") 131 | try: await db.rollback() 132 | except Exception as rb_e: print(f"Rollback failed after create_chat error: {rb_e}") 133 | return success 134 | 135 | async def update_metadata(self, db: aiosqlite.Connection, chat_id: str, metadata: dict) -> bool: 136 | """Updates only the metadata for a specific chat session.""" 137 | success = False 138 | try: 139 | metadata_json = json.dumps(metadata) 140 | # The trigger should handle last_updated 141 | cursor = await db.execute( 142 | "UPDATE sessions SET metadata_json = ? 
WHERE chat_id = ?", 143 | (metadata_json, chat_id) 144 | ) 145 | await db.commit() 146 | success = cursor.rowcount > 0 147 | await cursor.close() 148 | if not success: 149 | print(f"Repository Warning: update_metadata - No rows updated for chat_id '{chat_id}'.") 150 | except Exception as e: 151 | print(f"Repository Error UPDATING metadata for '{chat_id}': {e}") 152 | try: await db.rollback() 153 | except Exception as rb_e: print(f"Rollback failed after update_metadata error: {rb_e}") 154 | return success 155 | 156 | async def mark_prompt_sent(self, db: aiosqlite.Connection, chat_id: str) -> bool: 157 | """Sets the system_prompt_sent flag to TRUE for a specific chat session.""" 158 | success = False 159 | try: 160 | cursor = await db.execute( 161 | "UPDATE sessions SET system_prompt_sent = TRUE WHERE chat_id = ?", 162 | (chat_id,) 163 | ) 164 | await db.commit() 165 | success = cursor.rowcount > 0 166 | await cursor.close() 167 | if not success: 168 | print(f"Repository Warning: mark_prompt_sent - No rows updated for chat_id '{chat_id}'.") 169 | except Exception as e: 170 | print(f"Repository Error marking prompt sent for '{chat_id}': {e}") 171 | try: await db.rollback() 172 | except Exception as rb_e: print(f"Rollback failed after mark_prompt_sent error: {rb_e}") 173 | return success 174 | 175 | async def update_mode_and_reset_flag(self, db: aiosqlite.Connection, chat_id: str, new_mode: str | None) -> bool: 176 | """Updates the mode and resets the system_prompt_sent flag to FALSE.""" 177 | success = False 178 | try: 179 | cursor = await db.execute( 180 | "UPDATE sessions SET mode = ?, system_prompt_sent = FALSE WHERE chat_id = ?", 181 | (new_mode, chat_id) 182 | ) 183 | await db.commit() 184 | success = cursor.rowcount > 0 185 | await cursor.close() 186 | if not success: 187 | print(f"Repository Warning: update_mode_and_reset_flag - No rows updated for chat_id '{chat_id}'.") 188 | except Exception as e: 189 | print(f"Repository Error updating mode/resetting flag for '{chat_id}': {e}") 190 | try: await db.rollback() 191 | except Exception as rb_e: print(f"Rollback failed after update_mode_and_reset_flag error: {rb_e}") 192 | return success 193 | 194 | async def delete_chat(self, db: aiosqlite.Connection, chat_id: str) -> bool: 195 | """Deletes a chat session by ID.""" 196 | success = False 197 | try: 198 | cursor = await db.execute("DELETE FROM sessions WHERE chat_id = ?", (chat_id,)) 199 | await db.commit() 200 | success = cursor.rowcount > 0 201 | await cursor.close() 202 | if not success: 203 | print(f"Repository Warning: delete_chat - No rows deleted for chat_id '{chat_id}'.") 204 | else: 205 | print(f"Repository: Session DELETED from DB: {chat_id}") 206 | except Exception as e: 207 | print(f"Repository Error deleting session '{chat_id}': {e}") 208 | try: await db.rollback() 209 | except Exception as rb_e: print(f"Rollback failed after delete_chat error: {rb_e}") 210 | return success -------------------------------------------------------------------------------- /static/js/manage_chats.js: -------------------------------------------------------------------------------- 1 | // static/js/manage_chats.js 2 | 3 | document.addEventListener('DOMContentLoaded', () => { 4 | // --- Configuration --- 5 | const API_BASE_URL = 'http://localhost:8022'; 6 | const AVAILABLE_MODES = ["Default", "Code", "Architect", "Debug", "Ask"]; // Use const 7 | 8 | // --- State --- 9 | const state = { 10 | currentActiveChatId: null, 11 | currentChatList: [], 12 | statusTimeout: null, 13 | }; 14 | 15 | // --- DOM 
Elements --- 16 | const elements = { 17 | chatListDiv: document.getElementById('chatList'), 18 | chatDescriptionInput: document.getElementById('chatDescription'), 19 | chatModeSelectCreate: document.getElementById('chatMode'), 20 | btnCreateChat: document.getElementById('btnCreateChat'), 21 | btnRefreshList: document.getElementById('btnRefreshList'), 22 | statusMessageDiv: document.getElementById('statusMessage'), 23 | }; 24 | 25 | // Basic validation that essential elements exist 26 | for (const key in elements) { 27 | if (!elements[key]) { 28 | console.error(`ERROR: UI Element '${key}' not found! Check HTML ID.`); 29 | // Optional: Halt execution if critical elements missing 30 | if (key === 'chatListDiv' || key === 'statusMessageDiv') { 31 | document.body.innerHTML = `

<p>Error: Critical UI element missing (${key}). Cannot initialize application.</p>

`; 32 | return; 33 | } 34 | } 35 | } 36 | 37 | // --- API Service --- 38 | /** 39 | * Generic fetch wrapper for API calls. Handles common error checking. 40 | * @param {string} endpoint - API endpoint (e.g., '/v1/chats') 41 | * @param {object} options - Fetch options (method, headers, body) 42 | * @returns {Promise} - Promise resolving with JSON data or null for 204 43 | * @throws {Error} - Throws an error for network issues or non-ok responses 44 | */ 45 | async function _fetchApi(endpoint, options = {}) { 46 | const url = `${API_BASE_URL}${endpoint}`; 47 | try { 48 | const response = await fetch(url, options); 49 | 50 | if (!response.ok) { 51 | let errorDetail = `HTTP error ${response.status}`; 52 | try { 53 | const errorData = await response.json(); 54 | errorDetail = errorData.detail || JSON.stringify(errorData) || errorDetail; 55 | } catch (e) { 56 | errorDetail = response.statusText || errorDetail; 57 | } 58 | throw new Error(`API Error (${response.status}): ${errorDetail}`); 59 | } 60 | 61 | if (response.status === 204) { 62 | return null; // Handle No Content 63 | } 64 | return await response.json(); // Assume JSON for other success cases 65 | 66 | } catch (error) { 67 | console.error(`Workspace failed for ${url}:`, error); 68 | throw error; // Re-throw for the caller to handle UI feedback 69 | } 70 | } 71 | 72 | // Namespaced API functions using the helper 73 | const api = { 74 | getActiveChatId: () => _fetchApi('/v1/chats/active'), 75 | getChats: () => _fetchApi('/v1/chats'), 76 | createChat: (description, mode) => _fetchApi('/v1/chats', { 77 | method: 'POST', 78 | headers: { 'Content-Type': 'application/json' }, 79 | body: JSON.stringify({ description: description || null, mode: mode }), 80 | }), 81 | updateChatMode: (chatId, newMode) => _fetchApi(`/v1/chats/${chatId}/mode`, { 82 | method: 'PUT', 83 | headers: { 'Content-Type': 'application/json' }, 84 | body: JSON.stringify({ mode: newMode }), 85 | }), 86 | setActiveChat: (chatId) => _fetchApi('/v1/chats/active', { 87 | method: 'POST', 88 | headers: { 'Content-Type': 'application/json' }, 89 | body: JSON.stringify({ chat_id: chatId }), 90 | }), 91 | deactivateChat: () => _fetchApi('/v1/chats/active', { 92 | method: 'POST', 93 | headers: { 'Content-Type': 'application/json' }, 94 | body: JSON.stringify({ chat_id: null }), 95 | }), 96 | deleteChat: (chatId) => _fetchApi(`/v1/chats/${chatId}`, { method: 'DELETE' }), 97 | }; 98 | 99 | 100 | // --- UI Manager --- 101 | const ui = { 102 | /** Shows a status message to the user. */ 103 | showMessage: (message, isError = false) => { 104 | if (!elements.statusMessageDiv) return; 105 | elements.statusMessageDiv.textContent = message; 106 | elements.statusMessageDiv.className = isError ? 'status-error' : 'status-success'; 107 | elements.statusMessageDiv.style.display = 'block'; 108 | 109 | clearTimeout(state.statusTimeout); 110 | state.statusTimeout = setTimeout(() => { 111 | if (elements.statusMessageDiv) elements.statusMessageDiv.style.display = 'none'; 112 | }, 5000); 113 | }, 114 | 115 | /** Renders the list of chats in the table. */ 116 | renderChatList: () => { 117 | const chats = state.currentChatList; 118 | const activeId = state.currentActiveChatId; 119 | 120 | if (!elements.chatListDiv) return; // Guard against missing element 121 | 122 | // Ensure the table structure exists, create if not 123 | let tbody = elements.chatListDiv.querySelector('tbody'); 124 | if (!tbody) { 125 | elements.chatListDiv.innerHTML = ` 126 | 127 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 |
<th>Description</th> <th>Mode</th> <th>Chat ID</th> <th>Actions</th>
`; 137 | tbody = elements.chatListDiv.querySelector('tbody'); // Get the new tbody 138 | } 139 | 140 | // Clear only the tbody content 141 | tbody.innerHTML = ''; 142 | 143 | if (!chats || chats.length === 0) { 144 | // If table exists but no chats, show message inside div, replacing table 145 | elements.chatListDiv.innerHTML = '

<p>Nenhum chat encontrado.</p>
'; 146 | return; 147 | } 148 | 149 | // Use DocumentFragment for efficient bulk appending 150 | const fragment = document.createDocumentFragment(); 151 | chats.forEach(chat => { 152 | const isActive = (chat.chat_id === activeId); 153 | const tr = document.createElement('tr'); 154 | // Store chat data directly on the row for delegation handlers 155 | tr.dataset.chatId = chat.chat_id; 156 | tr.dataset.chatDesc = chat.description || chat.chat_id; 157 | 158 | tr.innerHTML = ` 159 | ${chat.description || 'Sem descrição'} 160 | 161 | 168 | 169 | ${chat.chat_id} 170 | 171 | 178 | 185 | 186 | `; 187 | fragment.appendChild(tr); 188 | }); 189 | tbody.appendChild(fragment); 190 | }, 191 | 192 | /** Populates the 'Create Chat' mode select dropdown. */ 193 | populateCreateModeSelect: () => { 194 | if (!elements.chatModeSelectCreate) return; 195 | elements.chatModeSelectCreate.innerHTML = ''; // Clear existing options 196 | AVAILABLE_MODES.forEach(mode => { 197 | const option = document.createElement('option'); 198 | option.value = mode; 199 | option.textContent = mode; 200 | elements.chatModeSelectCreate.appendChild(option); 201 | }); 202 | elements.chatModeSelectCreate.value = "Default"; // Set default 203 | }, 204 | 205 | /** Clears the create chat form inputs. */ 206 | clearCreateForm: () => { 207 | if (elements.chatDescriptionInput) elements.chatDescriptionInput.value = ''; 208 | if (elements.chatModeSelectCreate) elements.chatModeSelectCreate.value = "Default"; 209 | } 210 | }; 211 | 212 | // --- Event Handlers --- 213 | 214 | /** Main function to refresh chat list and active status from API. */ 215 | async function refreshChatData() { 216 | ui.showMessage('Buscando chats e status ativo...', false); 217 | if(elements.chatListDiv) elements.chatListDiv.innerHTML = '

Loading chats list...
'; 218 | 219 | try { 220 | const [chatsData, activeData] = await Promise.all([ 221 | api.getChats(), 222 | api.getActiveChatId() 223 | ]); 224 | 225 | state.currentChatList = chatsData || []; 226 | state.currentActiveChatId = activeData?.active_chat_id ?? null; 227 | 228 | ui.renderChatList(); // Render based on updated state 229 | // Only show success if not initially empty, prevent message flashing on load 230 | if (state.currentChatList.length > 0) { 231 | ui.showMessage('Chats list updated.', false); 232 | } else if (!elements.chatListDiv.querySelector('p')) { 233 | // If the list is empty but no "Nenhum chat" message is shown yet, show status briefly. 234 | ui.showMessage('No chat found.', false); 235 | } 236 | console.log("Active Chat ID:", state.currentActiveChatId); 237 | 238 | } catch (error) { 239 | state.currentChatList = []; 240 | state.currentActiveChatId = null; 241 | ui.renderChatList(); // Render empty state 242 | ui.showMessage(`Error retrieving data: ${error.message}`, true); 243 | } 244 | } 245 | 246 | /** Handles creating a new chat. */ 247 | async function handleCreateChat() { 248 | const description = elements.chatDescriptionInput?.value.trim(); // Use optional chaining 249 | const mode = elements.chatModeSelectCreate?.value; 250 | if (mode === undefined) { // Check if element exists via value 251 | ui.showMessage("Erro: Elemento de modo não encontrado.", true); return; 252 | } 253 | ui.showMessage(`Creating chat (Mode: ${mode})...`, false); 254 | try { 255 | const newChatData = await api.createChat(description, mode); 256 | ui.showMessage(`Chat "${description || newChatData.chat_id}" (Mode: ${mode}) created. ID: ${newChatData.chat_id}`, false); 257 | ui.clearCreateForm(); 258 | await refreshChatData(); 259 | } catch (error) { 260 | ui.showMessage(`Erro ao criar chat: ${error.message}`, true); 261 | } 262 | } 263 | 264 | /** Handles clicks within the chat list table body (Event Delegation). 
*/ 265 | async function handleTableClick(event) { 266 | const target = event.target; 267 | const actionButton = target.closest('button[data-action]'); 268 | 269 | if (!actionButton) return; 270 | 271 | const row = actionButton.closest('tr'); 272 | const chatId = row?.dataset.chatId; // Get ID from row 273 | const action = actionButton.dataset.action; 274 | 275 | if (!chatId || !action) return; 276 | 277 | console.log(`Table Click - Action: ${action}, Chat ID: ${chatId}`); 278 | 279 | if (action === 'activate') { 280 | ui.showMessage(`Definindo chat ${chatId} como ativo...`, false); 281 | try { 282 | const result = await api.setActiveChat(chatId); 283 | ui.showMessage(result.message || `Chat ${chatId} agora está ativo.`, false); 284 | await refreshChatData(); 285 | } catch (error) { 286 | ui.showMessage(`Erro ao ativar chat ${chatId}: ${error.message}`, true); 287 | } 288 | } else if (action === 'deactivate') { 289 | ui.showMessage(`Desativando chat ativo...`, false); 290 | try { 291 | const result = await api.deactivateChat(); 292 | ui.showMessage(result.message || `Chat ativo desativado.`, false); 293 | await refreshChatData(); 294 | } catch (error) { 295 | ui.showMessage(`Erro ao desativar chat: ${error.message}`, true); 296 | } 297 | } else if (action === 'delete') { 298 | const chatDesc = row.dataset.chatDesc || chatId; // Get desc from row 299 | if (!confirm(`Deletar chat "${chatDesc}" (ID: ${chatId})?`)) return; 300 | ui.showMessage(`Deletando chat ${chatId}...`, false); 301 | try { 302 | await api.deleteChat(chatId); 303 | ui.showMessage(`Chat ${chatId} deletado.`, false); 304 | await refreshChatData(); 305 | } catch (error) { 306 | ui.showMessage(`Erro ao deletar chat ${chatId}: ${error.message}`, true); 307 | } 308 | } 309 | } 310 | 311 | /** Handles mode changes within the chat list table body (Event Delegation). */ 312 | async function handleTableModeChange(event) { 313 | const target = event.target; 314 | 315 | if (!target.matches('select.mode-select')) return; // Target the select directly 316 | 317 | const selectElement = target; 318 | const row = selectElement.closest('tr'); 319 | const chatId = row?.dataset.chatId; // Get ID from row 320 | const newMode = selectElement.value; 321 | 322 | if (!chatId) return; 323 | 324 | console.log(`Table Mode Change - Chat ID: ${chatId}, New Mode: ${newMode}`); 325 | 326 | ui.showMessage(`Atualizando modo do chat ${chatId} para ${newMode}...`, false); 327 | try { 328 | const result = await api.updateChatMode(chatId, newMode); 329 | ui.showMessage(result.message || `Modo do chat ${chatId} atualizado para ${newMode}.`, false); 330 | await refreshChatData(); // Refresh to ensure UI consistency 331 | } catch (error) { 332 | ui.showMessage(`Erro ao atualizar modo do chat ${chatId}: ${error.message}`, true); 333 | // Revert UI optimistically on error? Or let refresh handle it? 334 | // For simplicity, let refresh handle visual state. 
335 | await refreshChatData(); // Refresh even on error to show actual state 336 | } 337 | } 338 | 339 | 340 | // --- Event Listeners Setup --- 341 | function setupEventListeners() { 342 | // Static element listeners 343 | elements.btnRefreshList?.addEventListener('click', refreshChatData); 344 | elements.btnCreateChat?.addEventListener('click', handleCreateChat); 345 | 346 | // Event Delegation listeners on the container div 347 | elements.chatListDiv?.addEventListener('click', handleTableClick); 348 | elements.chatListDiv?.addEventListener('change', handleTableModeChange); 349 | } 350 | 351 | // --- Initialization --- 352 | function initialize() { 353 | console.log("Initializing Chat Manager..."); 354 | ui.populateCreateModeSelect(); 355 | setupEventListeners(); 356 | refreshChatData(); // Load initial data 357 | console.log("Chat Manager Initialized."); 358 | } 359 | 360 | initialize(); // Start 361 | 362 | }); // End DOMContentLoaded -------------------------------------------------------------------------------- /app/services/chat_service.py: -------------------------------------------------------------------------------- 1 | # app/services/chat_service.py 2 | import uuid 3 | import base64 4 | import tempfile 5 | import os 6 | import re 7 | import mimetypes 8 | import traceback 9 | from typing import List, Optional, Dict, Any 10 | 11 | import aiosqlite # Needed for type hinting db parameter 12 | from fastapi import HTTPException 13 | 14 | # Assuming prompts.py is accessible in the top-level directory or moved to app/ 15 | try: 16 | from ..prompts import prompts 17 | PROMPTS_LOADED = True 18 | except ImportError: 19 | print("WARNING: services.chat_service - prompts.py not found or import failed. Using placeholder prompts.") 20 | class MockPrompts: # Minimal class definition 21 | code = "Placeholder Code Prompt - prompts.py not loaded" 22 | architect = "Placeholder Architect Prompt - prompts.py not loaded" 23 | debug = "Placeholder Debug Prompt - prompts.py not loaded" 24 | ask = "Placeholder Ask Prompt - prompts.py not loaded" 25 | prompts = MockPrompts() 26 | PROMPTS_LOADED = False 27 | 28 | 29 | from app.repositories.chat_repository import SqliteChatRepository 30 | from app.core.gemini_client import GeminiClientWrapper 31 | from app.models import ChatInfo, OpenAIMessage, TextBlock, ImageUrlBlock, ChatCompletionResponse, Choice, Usage 32 | from app.config import ALLOWED_MODES, GEMINI_MODEL_NAME 33 | 34 | # Mapping from mode names to the actual prompt variables/text 35 | MODE_PROMPT_TEXTS: Dict[ALLOWED_MODES, Optional[str]] = { 36 | "Code": getattr(prompts, 'code', None), 37 | "Architect": getattr(prompts, 'architect', None), 38 | "Debug": getattr(prompts, 'debug', None), 39 | "Ask": getattr(prompts, 'ask', None), 40 | "Default": None 41 | } 42 | # Check if any actual prompts failed to load *if* the import was expected to succeed 43 | if PROMPTS_LOADED and None in [MODE_PROMPT_TEXTS.get(m) for m in ["Code", "Architect", "Debug", "Ask"]]: 44 | print("WARNING: services.chat_service - prompts.py loaded, but one or more specific prompt variables (code, architect, etc.) 
are missing!") 45 | 46 | 47 | class ChatService: 48 | """Orchestrates chat operations, managing state, repository, and Gemini client interactions.""" 49 | 50 | def __init__(self, repository: SqliteChatRepository, gemini_wrapper: GeminiClientWrapper): 51 | self.repository = repository 52 | self.gemini_wrapper = gemini_wrapper 53 | self._cache: Dict[str, Dict[str, Any]] = {} 54 | self._active_chat_id: Optional[str] = None 55 | print("ChatService initialized.") 56 | 57 | async def load_initial_cache(self, db: aiosqlite.Connection): 58 | """Loads all session data from DB into the cache.""" 59 | print("ChatService: Loading initial cache from database...") 60 | try: 61 | self._cache = await self.repository.get_all_session_data(db) 62 | print(f"ChatService: Initial cache loaded with {len(self._cache)} sessions.") 63 | except Exception as e: 64 | print(f"ChatService CRITICAL ERROR: Failed to load initial cache: {e}") 65 | self._cache = {} 66 | 67 | async def list_chats(self, db: aiosqlite.Connection) -> List[ChatInfo]: 68 | """Lists all available chat sessions.""" 69 | return await self.repository.get_chat_info_list(db) 70 | 71 | async def create_chat(self, db: aiosqlite.Connection, description: Optional[str], mode: Optional[ALLOWED_MODES]) -> str: 72 | """Creates a new chat session, saves it, and updates the cache.""" 73 | new_chat_id = str(uuid.uuid4()) 74 | final_mode = mode or "Default" 75 | if final_mode not in MODE_PROMPT_TEXTS: 76 | print(f"Service Warning: Invalid mode '{final_mode}' provided during chat creation. Forcing 'Default'.") 77 | final_mode = "Default" 78 | print(f"Service: Creating chat - ID: {new_chat_id}, Desc: '{description or 'N/A'}', Mode: '{final_mode}'") 79 | try: 80 | chat_session = self.gemini_wrapper.start_new_chat() 81 | initial_metadata = chat_session.metadata 82 | success_db = await self.repository.create_chat(db, new_chat_id, initial_metadata, description, final_mode) 83 | if not success_db: 84 | raise HTTPException(status_code=500, detail="Failed to save new chat session to database (likely already exists or DB error).") 85 | self._cache[new_chat_id] = { 86 | "metadata": initial_metadata, 87 | "mode": final_mode, 88 | "prompt_sent": False # System prompt NOT sent on creation 89 | } 90 | print(f"Service: Chat {new_chat_id} created and added to cache.") 91 | return new_chat_id 92 | except Exception as e: 93 | print(f"Service Error creating chat: {e}") 94 | traceback.print_exc() 95 | if isinstance(e, HTTPException): raise e 96 | raise HTTPException(status_code=500, detail=f"Unexpected error creating chat session: {e}") 97 | 98 | async def set_active_chat(self, db: aiosqlite.Connection, chat_id: Optional[str]): 99 | """ 100 | Sets the globally active chat ID. If activating a chat, sends the 101 | system prompt via Gemini if it hasn't been sent for the current mode yet, 102 | then updates state in DB and cache. 103 | """ 104 | # --- Deactivation --- 105 | if chat_id is None: 106 | if self._active_chat_id is not None: 107 | print(f"Service: Deactivating active chat {self._active_chat_id}.") 108 | self._active_chat_id = None 109 | return 110 | 111 | # --- Activation --- 112 | print(f"Service: Attempting to activate chat: {chat_id}") 113 | 114 | # 1. 
Validate chat exists in cache 115 | if chat_id not in self._cache: 116 | print(f"Service ERROR: Cannot activate chat - ID '{chat_id}' not found in cache.") 117 | raise HTTPException(status_code=404, detail=f"Chat session not found in active cache: {chat_id}") 118 | 119 | session_data = self._cache[chat_id] 120 | metadata = session_data.get("metadata") 121 | mode = session_data.get("mode", "Default") 122 | prompt_sent = session_data.get("prompt_sent", False) # Default to False if missing 123 | system_prompt = MODE_PROMPT_TEXTS.get(mode) 124 | 125 | # Check if metadata exists (essential for sending prompt) 126 | if metadata is None: 127 | print(f"Service CRITICAL ERROR: Metadata missing in cache for chat {chat_id} during activation!") 128 | raise HTTPException(status_code=500, detail="Internal Error: Cannot activate chat, state corrupted.") 129 | 130 | # 2. Send System Prompt if Needed 131 | prompt_send_error = False 132 | prompt_sent_this_activation = False # Track if we attempted send in this call 133 | if system_prompt and not prompt_sent: 134 | print(f"Service: Activating chat {chat_id}: System prompt needed (Mode: {mode}). Sending...") 135 | prompt_sent_this_activation = True 136 | try: 137 | # Load session, send prompt, get updated metadata 138 | chat_session = self.gemini_wrapper.load_chat_from_metadata(metadata=metadata) 139 | await self.gemini_wrapper.send_message(chat_session, system_prompt) 140 | updated_metadata = chat_session.metadata 141 | print(f"Service: System prompt sent successfully for {chat_id}.") 142 | 143 | # Update DB: metadata and mark prompt as sent 144 | print(f"Service: Updating DB for chat {chat_id} post-prompt send...") 145 | meta_ok = await self.repository.update_metadata(db, chat_id, updated_metadata) 146 | flag_ok = await self.repository.mark_prompt_sent(db, chat_id) 147 | 148 | # Update cache based on DB success 149 | if meta_ok: 150 | self._cache[chat_id]["metadata"] = updated_metadata 151 | print("Service: Metadata cache updated.") 152 | else: 153 | print(f"Service ERROR: Failed to update metadata in DB for {chat_id} post-prompt send. Cache metadata may be stale.") 154 | prompt_send_error = True 155 | 156 | if flag_ok: 157 | self._cache[chat_id]["prompt_sent"] = True 158 | print("Service: prompt_sent flag cache updated.") 159 | else: 160 | print(f"Service ERROR: Failed to mark prompt sent flag in DB for {chat_id}. Cache flag not updated.") 161 | prompt_send_error = True 162 | 163 | except Exception as send_error: 164 | print(f"Service ERROR sending system prompt during activation for {chat_id}: {send_error}") 165 | traceback.print_exc() 166 | prompt_send_error = True 167 | 168 | # 3. 
Set Active ID in memory 169 | self._active_chat_id = chat_id 170 | print(f"Service: Active chat set to {self._active_chat_id}") 171 | 172 | if prompt_sent_this_activation and prompt_send_error: 173 | print(f"Service WARNING: Chat {chat_id} activated, but there was an error sending/confirming the system prompt send state.") 174 | elif prompt_sent_this_activation: 175 | print(f"Service: System prompt sending process completed for chat {chat_id} activation.") 176 | 177 | def get_active_chat(self) -> Optional[str]: 178 | """Gets the currently active chat ID.""" 179 | return self._active_chat_id 180 | 181 | # --- MODIFIED update_chat_mode --- 182 | async def update_chat_mode(self, db: aiosqlite.Connection, chat_id: str, new_mode: ALLOWED_MODES): 183 | """ 184 | Updates the mode for a chat, resets the prompt flag in DB/cache, AND 185 | sends the new system prompt immediately if the updated chat is the active one. 186 | """ 187 | print(f"Service: Updating mode for chat {chat_id} to '{new_mode}'") 188 | # Validate mode 189 | if new_mode not in MODE_PROMPT_TEXTS: 190 | print(f"Service Warning: Invalid mode '{new_mode}' passed to update_chat_mode.") 191 | raise HTTPException(status_code=422, detail=f"Invalid mode provided: {new_mode}") 192 | # Validate chat exists 193 | if chat_id not in self._cache: 194 | print(f"Service ERROR: Chat {chat_id} not found in cache for mode update.") 195 | raise HTTPException(status_code=404, detail="Chat session not found.") 196 | 197 | # 1. Update DB (mode and resets prompt_sent flag) via repository 198 | success_db = await self.repository.update_mode_and_reset_flag(db, chat_id, new_mode) 199 | if not success_db: 200 | print(f"Service Error: DB update failed for mode change on chat {chat_id}.") 201 | raise HTTPException(status_code=500, detail=f"Failed to update chat mode in database for {chat_id}.") 202 | 203 | # 2. Update cache (mode and prompt_sent flag) 204 | self._cache[chat_id]["mode"] = new_mode 205 | self._cache[chat_id]["prompt_sent"] = False # Reset flag first in cache 206 | print(f"Service: Cache updated for chat {chat_id}: Mode='{new_mode}', prompt_sent=False (reset)") 207 | 208 | # 3. Send new system prompt IF this is the active chat 209 | if chat_id == self._active_chat_id: 210 | print(f"Service: Chat {chat_id} is active. 
Sending system prompt for new mode '{new_mode}'...") 211 | new_system_prompt = MODE_PROMPT_TEXTS.get(new_mode) 212 | 213 | if new_system_prompt: 214 | prompt_send_error = False 215 | try: 216 | metadata = self._cache[chat_id].get("metadata") 217 | if not metadata: 218 | # This should ideally not happen if cache is consistent 219 | print(f"Service CRITICAL ERROR: Metadata missing for active chat {chat_id} during mode change prompt send!") 220 | raise Exception("Cannot send prompt, metadata missing in cache.") 221 | 222 | # Load session, send prompt, get updated metadata 223 | chat_session = self.gemini_wrapper.load_chat_from_metadata(metadata=metadata) 224 | await self.gemini_wrapper.send_message(chat_session, new_system_prompt) 225 | updated_metadata = chat_session.metadata 226 | print(f"Service: System prompt for new mode '{new_mode}' sent successfully for active chat {chat_id}.") 227 | 228 | # Update DB: metadata and mark prompt as sent (for the NEW mode) 229 | print(f"Service: Updating DB state for chat {chat_id} after immediate prompt send...") 230 | meta_ok = await self.repository.update_metadata(db, chat_id, updated_metadata) 231 | # Use mark_prompt_sent to set the flag to TRUE now 232 | flag_ok = await self.repository.mark_prompt_sent(db, chat_id) 233 | 234 | # Update cache based on DB success 235 | if meta_ok: 236 | self._cache[chat_id]["metadata"] = updated_metadata 237 | print("Service: Metadata cache updated.") 238 | else: 239 | prompt_send_error = True 240 | print(f"Service ERROR: Failed to update metadata post-mode-change prompt send for {chat_id}.") 241 | 242 | if flag_ok: 243 | self._cache[chat_id]["prompt_sent"] = True # Mark as sent in cache 244 | print("Service: prompt_sent flag cache updated to True.") 245 | else: 246 | prompt_send_error = True 247 | # If flag update failed, cache remains False (conservative) 248 | print(f"Service ERROR: Failed to mark prompt sent flag post-mode-change prompt send for {chat_id}. Cache flag remains False.") 249 | 250 | if not prompt_send_error: 251 | print(f"Service: DB and cache updated successfully after mode change prompt send for {chat_id}.") 252 | 253 | except Exception as send_error: 254 | print(f"Service ERROR sending system prompt during mode change for active chat {chat_id}: {send_error}") 255 | traceback.print_exc() 256 | prompt_send_error = True 257 | # Log the error, but don't raise HTTPException to keep API responsive 258 | finally: 259 | if prompt_send_error: 260 | print(f"Service WARNING: Failed to fully send/update state for system prompt after mode change on active chat {chat_id}.") 261 | else: 262 | # Case where the new mode is "Default" or has no prompt 263 | print(f"Service: New mode '{new_mode}' has no system prompt. Nothing to send for active chat {chat_id}. Prompt flag remains reset (False).") 264 | else: 265 | # Case where the updated chat was not the active one 266 | print(f"Service: Chat {chat_id} (updated to mode '{new_mode}') is not the active chat. 
Prompt will be sent upon its next activation.") 267 | 268 | 269 | async def delete_chat(self, db: aiosqlite.Connection, chat_id: str): 270 | """Deletes a chat session from DB and cache.""" 271 | print(f"Service: Attempting to delete chat {chat_id}") 272 | if chat_id not in self._cache: 273 | print(f"Service Warning: Chat {chat_id} not found in cache during deletion request.") 274 | raise HTTPException(status_code=404, detail="Chat session not found.") 275 | success_db = await self.repository.delete_chat(db, chat_id) 276 | if not success_db: 277 | print(f"Service Error: DB deletion failed for chat {chat_id} (it was present in cache).") 278 | raise HTTPException(status_code=500, detail=f"Failed to delete chat session from database for {chat_id}.") 279 | del self._cache[chat_id] 280 | print(f"Service: Chat {chat_id} removed from cache.") 281 | if self._active_chat_id == chat_id: 282 | self._active_chat_id = None 283 | print(f"Service: Deactivated chat {chat_id} because it was deleted.") 284 | 285 | # --- Method CORRECTED to REMOVE system prompt logic --- 286 | async def handle_completion(self, db: aiosqlite.Connection, user_messages: List[OpenAIMessage]) -> ChatCompletionResponse: 287 | """ 288 | Handles sending ONLY the user's message to the active chat's Gemini session. 289 | Updates metadata in DB/cache afterwards. System prompt logic is handled by set_active_chat or update_chat_mode. 290 | """ 291 | if not self._active_chat_id: 292 | raise HTTPException(status_code=400, detail="No active chat session set. Use POST /v1/chats/active.") 293 | 294 | current_chat_id = self._active_chat_id 295 | print(f"Service: Handling completion for active chat: {current_chat_id}") 296 | 297 | # 1. Get session data from cache 298 | session_data = self._cache.get(current_chat_id) 299 | if not session_data: 300 | print(f"Service CRITICAL ERROR: Active chat ID '{current_chat_id}' is set but not found in cache!") 301 | self._active_chat_id = None 302 | raise HTTPException(status_code=404, detail=f"Active chat session '{current_chat_id}' state not found. Please set active chat again.") 303 | 304 | metadata = session_data.get("metadata") 305 | if metadata is None: 306 | print(f"Service CRITICAL ERROR: Metadata missing in cache for active chat {current_chat_id}!") 307 | raise HTTPException(status_code=500, detail="Internal error: Corrupted state for active chat.") 308 | 309 | # 2. Load Gemini ChatSession object 310 | try: 311 | chat_session = self.gemini_wrapper.load_chat_from_metadata(metadata=metadata) 312 | print(f"Service: Loaded Gemini ChatSession object for {current_chat_id}") 313 | except HTTPException as e: raise e 314 | except Exception as e: 315 | print(f"Service Error loading chat session from metadata: {e}") 316 | raise HTTPException(status_code=500, detail=f"Failed to load active chat session state: {e}") 317 | 318 | # 3. 
Process User Input (Text & Images) 319 | last_user_message = next((msg for msg in reversed(user_messages) if msg.role == "user"), None) 320 | 321 | if not last_user_message: raise HTTPException(status_code=400, detail="No user message found in the request.") 322 | user_message_text = "" 323 | image_urls_to_process = [] 324 | temp_file_paths = [] 325 | try: 326 | content = last_user_message.content 327 | if isinstance(content, str): user_message_text = content 328 | elif isinstance(content, list): 329 | for block in content: 330 | if isinstance(block, TextBlock): user_message_text += block.text + "\n" 331 | elif isinstance(block, ImageUrlBlock) and block.image_url.url.startswith("data:image"): image_urls_to_process.append(block.image_url.url) 332 | user_message_text = user_message_text.strip() 333 | for img_url in image_urls_to_process: 334 | try: 335 | header, encoded = img_url.split(",", 1); img_data = base64.b64decode(encoded) 336 | mime_type = header.split(";")[0].split(":")[1] if ':' in header else 'application/octet-stream'; ext = mimetypes.guess_extension(mime_type) or "" 337 | safe_extensions = ['.png', '.jpg', '.jpeg', '.webp', '.gif', '.heic', '.heif'] 338 | if ext.lower() in safe_extensions: 339 | fd, temp_path = tempfile.mkstemp(suffix=ext); os.write(fd, img_data); os.close(fd); temp_file_paths.append(temp_path) 340 | print(f"Service: Saved image data URI ({mime_type}) to temp file: {temp_path}") 341 | else: print(f"Service Warning: Skipping image with potentially unsafe extension '{ext or 'unknown'}' from mime type '{mime_type}'") 342 | except Exception as img_e: print(f"Service Error processing data URI: {img_e}. Skipping image.") 343 | if not user_message_text and not temp_file_paths: raise HTTPException(status_code=400, detail="No processable content found.") 344 | except Exception as proc_e: 345 | self._cleanup_temp_files(temp_file_paths); raise HTTPException(status_code=400, detail=f"Error processing user message content: {proc_e}") 346 | 347 | mode_switch_match = re.search(r"\[switch_mode to '(.*?)' because:.*?\]", user_message_text, re.IGNORECASE | re.DOTALL) 348 | 349 | if mode_switch_match: 350 | extracted_mode = mode_switch_match.group(1) 351 | extracted_mode = extracted_mode.title() 352 | new_mode_prompt = MODE_PROMPT_TEXTS.get(extracted_mode) 353 | final_prompt_to_send = f"Now you are in {extracted_mode} mode. Use the following prompt:\n {new_mode_prompt}\n\n{user_message_text}" 354 | 355 | else: 356 | final_prompt_to_send = user_message_text 357 | 358 | # 4. Prepare Final Prompt (User message ONLY) 359 | print("Service: Preparing user message only for completion endpoint.") 360 | 361 | # 5. 
Send to Gemini & Handle Response/State Update 362 | try: 363 | print(f"Service: Sending message to Gemini for chat {current_chat_id}...") 364 | api_response = await self.gemini_wrapper.send_message( 365 | chat_session=chat_session, 366 | prompt=final_prompt_to_send, 367 | files=temp_file_paths 368 | ) 369 | response_text = getattr(api_response, 'text', "[No text in response]") 370 | print(f"Service: Response received from Gemini for chat {current_chat_id}.") 371 | 372 | # --- Update State Post-Gemini Call (Metadata ONLY) --- 373 | updated_metadata = chat_session.metadata 374 | print(f"Service: Updating metadata in DB for chat {current_chat_id}...") 375 | meta_update_ok = await self.repository.update_metadata(db, current_chat_id, updated_metadata) 376 | 377 | # Update cache metadata based on DB success 378 | if meta_update_ok: 379 | self._cache[current_chat_id]["metadata"] = updated_metadata 380 | # prompt_sent flag is NOT touched here 381 | print(f"Service: Metadata cache updated for chat {current_chat_id}.") 382 | else: 383 | print(f"Service ERROR: Failed to update metadata in DB for {current_chat_id}. Cache may be stale.") 384 | 385 | # 6. Format Final API Response 386 | assistant_message = OpenAIMessage(role="assistant", content=response_text) 387 | choice = Choice(message=assistant_message) 388 | usage = Usage() 389 | openai_response = ChatCompletionResponse( 390 | model=GEMINI_MODEL_NAME, choices=[choice], usage=usage, chat_id=current_chat_id 391 | ) 392 | return openai_response 393 | 394 | except HTTPException as e: 395 | print(f"Service Error (HTTPException) during completion for {current_chat_id}: {e.detail}") 396 | raise e 397 | except Exception as e: 398 | print(f"Service Error (General Exception) during completion for {current_chat_id}: {e}") 399 | traceback.print_exc() 400 | raise HTTPException(status_code=500, detail=f"Unexpected server error during chat completion: {e}") 401 | finally: 402 | # 7. Cleanup Temp Files 403 | self._cleanup_temp_files(temp_file_paths) 404 | 405 | def _cleanup_temp_files(self, file_paths: List[str]): 406 | """Safely removes temporary files created for image uploads.""" 407 | if file_paths: 408 | print(f"Service: Cleaning up {len(file_paths)} temporary image files...") 409 | for path in file_paths: 410 | try: 411 | if path and os.path.exists(path): 412 | os.remove(path) 413 | except OSError as cleanup_e: 414 | print(f"Service Error removing temp file '{path}': {cleanup_e}") 415 | except Exception as general_e: 416 | print(f"Service Error during temp file '{path}' cleanup: {general_e}") --------------------------------------------------------------------------------
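
Note (illustrative, not part of the repository): the chat-management routes that static/manage_chats.js calls from the browser can also be exercised from a small script, which is handy for smoke-testing ChatService behavior such as the system prompt being sent on first activation and immediately after a mode change on the active chat. The sketch below is a minimal example under stated assumptions: it uses the third-party requests library, a placeholder BASE_URL you must point at your running instance, and only the routes visible in the JavaScript above (/v1/chats, /v1/chats/active, /v1/chats/{chat_id}/mode, /v1/chats/{chat_id}); the completion endpoint is defined in app/routers/chats.py and is not shown here.

# Hypothetical smoke-test script; not part of this repository.
# Assumes the FastAPI app is already running and reachable at BASE_URL.
import requests

BASE_URL = "http://localhost:8022"  # assumption: adjust to wherever the API is served

def main() -> None:
    # Create a chat in "Code" mode (same JSON payload manage_chats.js sends).
    resp = requests.post(
        f"{BASE_URL}/v1/chats",
        json={"description": "scripted test chat", "mode": "Code"},
        timeout=30,
    )
    resp.raise_for_status()
    chat_id = resp.json()["chat_id"]
    print("created:", chat_id)

    # Activate it; ChatService sends the mode's system prompt on first activation.
    requests.post(f"{BASE_URL}/v1/chats/active", json={"chat_id": chat_id}, timeout=60).raise_for_status()

    # Confirm which chat is active.
    active = requests.get(f"{BASE_URL}/v1/chats/active", timeout=30).json()
    print("active chat:", active.get("active_chat_id"))

    # Switch the mode; for the active chat the new system prompt is sent immediately.
    requests.put(f"{BASE_URL}/v1/chats/{chat_id}/mode", json={"mode": "Ask"}, timeout=60).raise_for_status()

    # List all chats, then clean up: deactivate and delete the test chat.
    for chat in requests.get(f"{BASE_URL}/v1/chats", timeout=30).json():
        print(chat.get("chat_id"), chat.get("mode"), chat.get("description"))
    requests.post(f"{BASE_URL}/v1/chats/active", json={"chat_id": None}, timeout=30)
    requests.delete(f"{BASE_URL}/v1/chats/{chat_id}", timeout=30)

if __name__ == "__main__":
    main()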