├── .gitignore
├── .env.example
├── docker-compose.yml
├── Dockerfile
├── render.yaml
├── pyproject.toml
├── LICENSE
├── README.md
├── CLAUDE.md
├── .github
└── workflows
│ └── update-upstream.yml
└── main.py
/.gitignore:
--------------------------------------------------------------------------------
1 | .python-version
2 | .idea
3 | .venv
4 | uv.lock
5 | .env
6 | __pycache__
7 | .cursor
8 | .ruff_cache
9 |
--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
1 | SECURE_1PSID=your_psid_value_here
2 | SECURE_1PSIDTS=your_psidts_value_here
3 | API_KEY=your_api_key_here
4 | ENABLE_THINKING=false
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "3"
2 |
3 | services:
4 | gemini-api:
5 | build: .
6 | ports:
7 | - "8000:8000"
8 | volumes:
9 | - ./main.py:/app/main.py
10 | - ./pyproject.toml:/app/pyproject.toml
11 | env_file:
12 | - .env
13 | restart: unless-stopped
14 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim
2 |
3 | WORKDIR /app
4 |
5 | # Install dependencies
6 | COPY pyproject.toml .
7 | RUN uv sync
8 |
9 | # Copy application code
10 | COPY main.py .
11 |
12 | # Expose the port the app runs on
13 | EXPOSE 8000
14 |
15 | # Command to run the application
16 | CMD ["uv", "run", "uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
--------------------------------------------------------------------------------
/render.yaml:
--------------------------------------------------------------------------------
1 | services:
2 | - type: web
3 | name: gemi2api-server
4 | env: docker
5 | plan: free
6 | region: oregon
7 | dockerfilePath: ./Dockerfile
8 | repo: https://github.com/zhiyu1998/Gemi2Api-Server
9 | branch: main
10 | envVars:
11 | - key: SECURE_1PSID
12 | sync: false
13 | - key: SECURE_1PSIDTS
14 | sync: false
15 | - key: API_KEY
16 | sync: false
17 | healthCheckPath: /
18 | autoDeploy: true
19 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "gemi2api-server"
3 | version = "0.1.3"
4 | license = "MIT"
 5 | description = "OpenAI-compatible FastAPI server for Google Gemini (via gemini-webapi)"
6 | readme = "README.md"
7 | requires-python = ">=3.11"
8 | dependencies = [
9 | "browser-cookie3>=0.20.1",
10 | "fastapi>=0.115.12",
11 | "gemini-webapi>=1.17.3",
12 | "uvicorn[standard]>=0.34.1",
13 | ]
14 | [[tool.uv.index]]
15 | url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple"
16 | default = true
17 |
18 | [dependency-groups]
19 | dev = [
20 | "ruff>=0.11.7",
21 | ]
22 |
23 | [tool.ruff]
24 | line-length = 150 # 设置最大行长度
25 |
26 | [tool.ruff.lint]
27 | select = ["E", "F", "W", "I"] # 启用的规则(E: pycodestyle, F: pyflakes, W: pycodestyle warnings, I: isort)
28 | ignore = ["E501", "W191"] # 忽略特定规则(行长度警告和tab缩进警告)
29 |
30 | [tool.ruff.format]
31 | quote-style = "double" # 使用双引号
32 | indent-style = "tab" # 使用tab缩进
33 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2025 RrOrange
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Gemi2Api-Server
2 | [HanaokaYuzu / Gemini-API](https://github.com/HanaokaYuzu/Gemini-API) 的服务端简单实现
3 |
 4 | [预览图](https://imgse.com/i/pE79pPf)
5 |
6 | ## 快捷部署
7 |
8 | ### Render
9 |
10 | [Deploy to Render](https://render.com/deploy?repo=https://github.com/zhiyu1998/Gemi2Api-Server)
11 |
12 | ### HuggingFace(由佬友@qqrr部署)
13 |
14 | [HuggingFace Space](https://huggingface.co/spaces/ykl45/gmn2a)
15 |
16 | ## 直接运行
17 |
18 | 0. 填入 `SECURE_1PSID` 和 `SECURE_1PSIDTS`(登录 Gemini 在浏览器开发工具中查找 Cookie),有必要的话可以填写 `API_KEY`
19 | ```properties
20 | SECURE_1PSID = "COOKIE VALUE HERE"
21 | SECURE_1PSIDTS = "COOKIE VALUE HERE"
22 | API_KEY= "API_KEY VALUE HERE"
23 | ```
24 | 1. `uv` 安装一下依赖
25 | > uv init
26 | >
27 | > uv add fastapi uvicorn gemini-webapi
28 |
29 | > [!NOTE]
30 | > 如果存在`pyproject.toml` 那么就使用下面的命令:
31 | > uv sync
32 |
33 | 或者 `pip` 也可以
34 |
35 | > pip install fastapi uvicorn gemini-webapi
36 |
37 | 2. 激活一下环境
38 | > source .venv/bin/activate
39 |
40 | 3. 启动
41 | > uvicorn main:app --reload --host 127.0.0.1 --port 8000
42 |
43 | > [!WARNING]
44 | > tips: 如果不填写 API_KEY,服务端将不进行鉴权,所有请求都可以直接调用接口
45 |
46 | ## 使用Docker运行(推荐)
47 |
48 | ### 快速开始
49 |
50 | 1. 克隆本项目
51 | ```bash
52 | git clone https://github.com/zhiyu1998/Gemi2Api-Server.git
53 | ```
54 |
55 | 2. 创建 `.env` 文件并填入你的 Gemini Cookie 凭据:
56 | ```bash
57 | cp .env.example .env
58 | # 用编辑器打开 .env 文件,填入你的 Cookie 值
59 | ```
60 |
61 | 3. 启动服务:
62 | ```bash
63 | docker-compose up -d
64 | ```
65 |
66 | 4. 服务将在 http://localhost:8000 上运行(容器内监听 0.0.0.0:8000)
67 |
68 | ### 其他 Docker 命令
69 |
70 | ```bash
71 | # 查看日志
72 | docker-compose logs
73 |
74 | # 重启服务
75 | docker-compose restart
76 |
77 | # 停止服务
78 | docker-compose down
79 |
80 | # 重新构建并启动
81 | docker-compose up -d --build
82 | ```
83 |
84 | ## API端点
85 |
86 | - `GET /`: 服务状态检查
87 | - `GET /v1/models`: 获取可用模型列表
88 | - `POST /v1/chat/completions`: 与模型聊天 (类似OpenAI接口)
89 |
90 | ## 常见问题
91 |
92 | ### 服务器报 500 问题解决方案
93 |
94 | 500 的问题一般是 IP 不太行 或者 请求太频繁(后者等待一段时间或者重新新建一个隐身标签登录一下重新给 Secure_1PSID 和 Secure_1PSIDTS 即可),见 issue:
95 | - [__Secure-1PSIDTS · Issue #6 · HanaokaYuzu/Gemini-API](https://github.com/HanaokaYuzu/Gemini-API/issues/6)
96 | - [Failed to initialize client. SECURE_1PSIDTS could get expired frequently · Issue #72 · HanaokaYuzu/Gemini-API](https://github.com/HanaokaYuzu/Gemini-API/issues/72)
97 |
98 | 解决步骤:
99 | 1. 使用隐身标签访问 [Google Gemini](https://gemini.google.com/) 并登录
100 | 2. 打开浏览器开发工具 (F12)
101 | 3. 切换到 "Application" 或 "应用程序" 标签
102 | 4. 在左侧找到 "Cookies" > "gemini.google.com"
103 | 5. 复制 `__Secure-1PSID` 和 `__Secure-1PSIDTS` 的值
104 | 6. 更新 `.env` 文件
105 | 7. 重新构建并启动: `docker-compose up -d --build`
106 |
107 | ## 贡献
108 |
109 | 同时感谢以下开发者对 `Gemi2Api-Server` 作出的贡献:
110 |
111 |
112 |
113 |
--------------------------------------------------------------------------------
/CLAUDE.md:
--------------------------------------------------------------------------------
1 | # CLAUDE.md
2 |
3 | This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
4 |
5 | ## Architecture Overview
6 |
7 | This is a FastAPI-based server that provides OpenAI-compatible API endpoints for Google's Gemini AI model via the `gemini-webapi` library. The server acts as a bridge, translating OpenAI API requests to Gemini API calls.
8 |
9 | ### Core Components
10 |
11 | - **main.py**: Single-file application containing all API endpoints, authentication, and request handling
12 | - **Authentication**: Uses Gemini cookies (`SECURE_1PSID`, `SECURE_1PSIDTS`) for Gemini API access and optional `API_KEY` for server authentication
13 | - **API Endpoints**:
14 | - `GET /`: Health check endpoint
15 | - `GET /v1/models`: Lists available Gemini models in OpenAI format
16 | - `POST /v1/chat/completions`: Main chat completion endpoint (supports streaming)
17 |
18 | ### Key Features
19 |
20 | - OpenAI-compatible chat completions API
21 | - Streaming response support
22 | - Image processing (base64 encoded images via temporary files)
23 | - Markdown link correction for Google search results
24 | - CORS enabled for web clients
25 | - Docker containerization with uv package manager
26 |
27 | ## Coding Conventions
28 |
29 | When making changes to this codebase, please adhere to the following principles:
30 |
31 | - **Keep It Simple, Stupid (KISS):** Write code that is simple, straightforward, and easy to understand. Avoid introducing unnecessary complexity.
32 | - **Don't Repeat Yourself (DRY):** Instead of duplicating code for similar functionalities, create generic, reusable functions. A good example is the `map_model_name()` function in `main.py`, which centralizes all model-name resolution (direct matching plus keyword fallbacks) in one place.
33 | - **Centralize Configuration:** Group related configurations together to make the code easier to maintain and extend. For instance, all environment-driven settings (`SECURE_1PSID`, `SECURE_1PSIDTS`, `API_KEY`, `ENABLE_THINKING`) are read once at the top of `main.py`, making it easy to add new ones in the future.
34 |
35 | ## Development Commands
36 |
37 | ### Environment Setup
38 | ```bash
39 | # Install dependencies with uv (recommended)
40 | uv sync
41 |
42 | # Or with pip
43 | pip install fastapi uvicorn gemini-webapi
44 |
45 | # Set up environment variables (copy from example)
46 | cp .env.example .env
47 | # Edit .env with your Gemini credentials
48 | ```
49 |
50 | ### Running the Server
51 | ```bash
52 | # Development server with auto-reload
53 | uvicorn main:app --reload --host 127.0.0.1 --port 8000
54 |
55 | # Production server
56 | uvicorn main:app --host 0.0.0.0 --port 8000
57 |
58 | # Using uv
59 | uv run uvicorn main:app --reload --host 127.0.0.1 --port 8000
60 | ```
61 |
62 | ### Code Quality
63 | ```bash
64 | # Lint and format with ruff
65 | ruff check .
66 | ruff format .
67 | ```
68 |
69 | ### Docker Commands
70 | ```bash
71 | # Build and run with docker-compose
72 | docker-compose up -d
73 |
74 | # View logs
75 | docker-compose logs
76 |
77 | # Rebuild and restart
78 | docker-compose up -d --build
79 |
80 | # Stop services
81 | docker-compose down
82 | ```
83 |
84 | ## Configuration
85 |
86 | ### Required Environment Variables
87 | - `SECURE_1PSID`: Gemini cookie for authentication (obtained from browser dev tools)
88 | - `SECURE_1PSIDTS`: Gemini cookie timestamp for authentication
89 | - `API_KEY`: Optional server authentication key
90 | - `ENABLE_THINKING`: Optional boolean to enable thinking content in responses (default: false)
91 |
92 | ### Code Style
93 | - Uses ruff for linting and formatting
94 | - Line length: 150 characters
95 | - Tab-based indentation
96 | - Double quotes for strings
97 | - Ignores E501 (line length warnings due to custom 150 char limit)
98 |
99 | ## Model Mapping
100 |
101 | The server maps OpenAI model names to Gemini models through `map_model_name()` function. It supports fuzzy matching and falls back to sensible defaults based on keywords (pro, flash, vision, etc.).
102 |
103 | ## Request Flow
104 |
105 | 1. Client sends OpenAI-compatible request to `/v1/chat/completions`
106 | 2. Server authenticates using optional API_KEY
107 | 3. Messages are converted from OpenAI format to conversation string
108 | 4. Images are decoded from base64 and saved to temporary files
109 | 5. Request is sent to Gemini via `gemini-webapi`
110 | 6. Response is processed, markdown corrected, and returned in OpenAI format
111 | 7. Temporary files are cleaned up
--------------------------------------------------------------------------------
/.github/workflows/update-upstream.yml:
--------------------------------------------------------------------------------
1 | name: Update Upstream Dependencies
2 |
3 | on:
4 | schedule:
5 | # 每天 UTC 02:00 检查更新 (北京时间上午10点)
6 | - cron: '0 2 * * *'
7 | workflow_dispatch: # 允许手动触发
8 |
9 | jobs:
10 | check-updates:
11 | runs-on: ubuntu-latest
12 | permissions:
13 | contents: write
14 | pull-requests: write
15 |
16 | steps:
17 | - name: Checkout repository
18 | uses: actions/checkout@v4
19 |
20 | - name: Set up Python
21 | uses: actions/setup-python@v4
22 | with:
23 | python-version: '3.11'
24 |
25 | - name: Install uv
26 | uses: astral-sh/setup-uv@v4
27 |
28 | - name: Get current gemini-webapi version
29 | id: current_version
30 | run: |
31 | current_version=$(grep -o 'gemini-webapi>=[0-9]\+\.[0-9]\+\.[0-9]\+' pyproject.toml | sed 's/gemini-webapi>=//')
32 | echo "current=$current_version" >> $GITHUB_OUTPUT
33 | echo "Current version: $current_version"
34 |
35 | - name: Check latest upstream release
36 | id: latest_version
37 | run: |
38 | latest_version=$(curl -s "https://api.github.com/repos/HanaokaYuzu/Gemini-API/releases/latest" | python3 -c "import sys, json; print(json.load(sys.stdin)['tag_name'].lstrip('v'))")
39 | echo "latest=$latest_version" >> $GITHUB_OUTPUT
40 | echo "Latest version: $latest_version"
41 |
42 | - name: Compare versions
43 | id: version_check
44 | run: |
45 | current="${{ steps.current_version.outputs.current }}"
46 | latest="${{ steps.latest_version.outputs.latest }}"
47 |
48 | if [ "$current" != "$latest" ]; then
49 | echo "needs_update=true" >> $GITHUB_OUTPUT
50 | echo "Version update needed: $current -> $latest"
51 | else
52 | echo "needs_update=false" >> $GITHUB_OUTPUT
53 | echo "Already up to date: $current"
54 | fi
55 |
56 | - name: Update pyproject.toml
57 | if: steps.version_check.outputs.needs_update == 'true'
58 | run: |
59 | current="${{ steps.current_version.outputs.current }}"
60 | latest="${{ steps.latest_version.outputs.latest }}"
61 |
62 | # 更新 pyproject.toml 中的版本号
63 | sed -i "s/gemini-webapi>=$current/gemini-webapi>=$latest/g" pyproject.toml
64 |
65 | echo "Updated gemini-webapi version from $current to $latest"
66 |
67 | - name: Update lock file
68 | if: steps.version_check.outputs.needs_update == 'true'
69 | run: |
70 | uv lock --upgrade-package gemini-webapi
71 |
72 | - name: Test installation
73 | if: steps.version_check.outputs.needs_update == 'true'
74 | run: |
75 | uv sync
76 | uv run python -c "import gemini_webapi; print('gemini-webapi imported successfully')"
77 |
78 | - name: Run linting
79 | if: steps.version_check.outputs.needs_update == 'true'
80 | run: |
81 | uv run ruff check .
82 | uv run ruff format --check .
83 |
84 | - name: Create Pull Request
85 | if: steps.version_check.outputs.needs_update == 'true'
86 | uses: peter-evans/create-pull-request@v6
87 | with:
88 | token: ${{ secrets.GITHUB_TOKEN }}
89 | commit-message: |
90 | ✨ feat: 升级上游版本 gemini-webapi 至 v${{ steps.latest_version.outputs.latest }}
91 |
92 | - 自动更新 gemini-webapi 从 v${{ steps.current_version.outputs.current }} 到 v${{ steps.latest_version.outputs.latest }}
93 | - 更新 uv.lock 文件
94 | - 验证安装和代码格式
95 | title: '⬆️ 自动更新上游依赖: gemini-webapi v${{ steps.latest_version.outputs.latest }}'
96 | body: |
97 | ## 🔄 自动上游版本更新
98 |
99 | 此 PR 自动更新了上游依赖版本:
100 |
101 | - **gemini-webapi**: `${{ steps.current_version.outputs.current }}` → `${{ steps.latest_version.outputs.latest }}`
102 | - **上游发布页面**: https://github.com/HanaokaYuzu/Gemini-API/releases/tag/v${{ steps.latest_version.outputs.latest }}
103 |
104 | ### ✅ 自动验证完成
105 |
106 | - [x] 依赖安装测试通过
107 | - [x] 代码格式检查通过
108 | - [x] uv.lock 文件已更新
109 |
110 | ### 📋 手动检查清单
111 |
112 | 在合并此 PR 前,请确认:
113 |
114 | - [ ] 查看上游更改日志,确认无破坏性变更
115 | - [ ] 本地测试 API 功能正常
116 | - [ ] 确认新版本兼容现有功能
117 |
118 | ---
119 |
120 | 🤖 此 PR 由 GitHub Actions 自动生成
121 | branch: auto-update/gemini-webapi-v${{ steps.latest_version.outputs.latest }}
122 | delete-branch: true
123 | draft: false
124 | labels: |
125 | dependencies
126 | enhancement
127 | automated
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import base64
3 | import json
4 | import logging
5 | import os
6 | import re
7 | import tempfile
8 | import time
9 | import uuid
10 | from datetime import datetime, timezone
11 | from typing import Dict, List, Optional, Union
12 |
13 | from fastapi import Depends, FastAPI, Header, HTTPException, Request
14 | from fastapi.middleware.cors import CORSMiddleware
15 | from fastapi.responses import JSONResponse, StreamingResponse
16 | from gemini_webapi import GeminiClient, set_log_level
17 | from gemini_webapi.constants import Model
18 | from pydantic import BaseModel
19 |
20 | # Configure logging
21 | logging.basicConfig(level=logging.INFO)
22 | logger = logging.getLogger(__name__)
23 | set_log_level("INFO")  # also sets gemini_webapi's own logger level
24 |
25 | app = FastAPI(title="Gemini API FastAPI Server")
26 |
27 | # Add CORS middleware
28 | app.add_middleware(
29 | 	CORSMiddleware,
30 | 	allow_origins=["*"],
31 | 	allow_credentials=True,
32 | 	allow_methods=["*"],
33 | 	allow_headers=["*"],
34 | )
35 |
36 | # Global client
37 | gemini_client = None  # lazily initialized on first request (see get_gemini_client / create_chat_completion)
38 |
39 | # Authentication credentials
40 | SECURE_1PSID = os.environ.get("SECURE_1PSID", "")
41 | SECURE_1PSIDTS = os.environ.get("SECURE_1PSIDTS", "")
42 | API_KEY = os.environ.get("API_KEY", "")
43 | ENABLE_THINKING = os.environ.get("ENABLE_THINKING", "false").lower() == "true"  # opt-in: prepend model "thoughts" to replies
44 |
45 | # Print debug info at startup
46 | if not SECURE_1PSID or not SECURE_1PSIDTS:
47 | 	logger.warning("⚠️ Gemini API credentials are not set or empty! Please check your environment variables.")
48 | 	logger.warning("Make sure SECURE_1PSID and SECURE_1PSIDTS are correctly set in your .env file or environment.")
49 | 	logger.warning("If using Docker, ensure the .env file is correctly mounted and formatted.")
50 | 	logger.warning("Example format in .env file (no quotes):")
51 | 	logger.warning("SECURE_1PSID=your_secure_1psid_value_here")
52 | 	logger.warning("SECURE_1PSIDTS=your_secure_1psidts_value_here")
53 | else:
54 | 	# Only log the first few characters for security
55 | 	logger.info(f"Credentials found. SECURE_1PSID starts with: {SECURE_1PSID[:5]}...")
56 | 	logger.info(f"Credentials found. SECURE_1PSIDTS starts with: {SECURE_1PSIDTS[:5]}...")
57 |
58 | if not API_KEY:
59 | 	logger.warning("⚠️ API_KEY is not set or empty! API authentication will not work.")
60 | 	logger.warning("Make sure API_KEY is correctly set in your .env file or environment.")
61 | else:
62 | 	logger.info(f"API_KEY found. API_KEY starts with: {API_KEY[:5]}...")
63 |
64 |
65 | def correct_markdown(md_text: str) -> str:
66 | """
67 | 修正Markdown文本,移除Google搜索链接包装器,并根据显示文本简化目标URL。
68 | """
69 |
70 | def simplify_link_target(text_content: str) -> str:
71 | match_colon_num = re.match(r"([^:]+:\d+)", text_content)
72 | if match_colon_num:
73 | return match_colon_num.group(1)
74 | return text_content
75 |
76 | def replacer(match: re.Match) -> str:
77 | outer_open_paren = match.group(1)
78 | display_text = match.group(2)
79 |
80 | new_target_url = simplify_link_target(display_text)
81 | new_link_segment = f"[`{display_text}`]({new_target_url})"
82 |
83 | if outer_open_paren:
84 | return f"{outer_open_paren}{new_link_segment})"
85 | else:
86 | return new_link_segment
87 |
88 | pattern = r"(\()?\[`([^`]+?)`\]\((https://www.google.com/search\?q=)(.*?)(? Model:
208 | 	"""Find the Model enum value matching an OpenAI-style model-name string."""
209 | 	# Log all available models to aid debugging
210 | 	all_models = [m.model_name if hasattr(m, "model_name") else str(m) for m in Model]
211 | 	logger.info(f"Available models: {all_models}")
212 |
213 | 	# First, try a direct substring match against the known model names
214 | 	for m in Model:
215 | 		model_name = m.model_name if hasattr(m, "model_name") else str(m)
216 | 		if openai_model_name.lower() in model_name.lower():
217 | 			return m
218 |
219 | 	# If no direct match, fall back to a keyword-based mapping
220 | 	model_keywords = {
221 | 		"gemini-pro": ["pro", "2.0"],
222 | 		"gemini-pro-vision": ["vision", "pro"],
223 | 		"gemini-flash": ["flash", "2.0"],
224 | 		"gemini-1.5-pro": ["1.5", "pro"],
225 | 		"gemini-1.5-flash": ["1.5", "flash"],
226 | 	}
227 |
228 | 	# Match by keywords
229 | 	keywords = model_keywords.get(openai_model_name, ["pro"])  # default to a "pro" model
230 |
231 | 	for m in Model:
232 | 		model_name = m.model_name if hasattr(m, "model_name") else str(m)
233 | 		if all(kw.lower() in model_name.lower() for kw in keywords):
234 | 			return m
235 |
236 | 	# As a last resort, return the first model in the enum
237 | 	return next(iter(Model))
238 |
239 |
240 | # Prepare conversation history from OpenAI messages format
241 | def prepare_conversation(messages: List[Message]) -> tuple:  # returns (conversation string, list of temp image file paths)
242 | 	conversation = ""
243 | 	temp_files = []
244 |
245 | 	for msg in messages:
246 | 		if isinstance(msg.content, str):
247 | 			# String content handling
248 | 			if msg.role == "system":
249 | 				conversation += f"System: {msg.content}\n\n"
250 | 			elif msg.role == "user":
251 | 				conversation += f"Human: {msg.content}\n\n"
252 | 			elif msg.role == "assistant":
253 | 				conversation += f"Assistant: {msg.content}\n\n"
254 | 		else:
255 | 			# Mixed content handling (list of text / image_url parts)
256 | 			if msg.role == "user":
257 | 				conversation += "Human: "
258 | 			elif msg.role == "system":
259 | 				conversation += "System: "
260 | 			elif msg.role == "assistant":
261 | 				conversation += "Assistant: "
262 |
263 | 			for item in msg.content:
264 | 				if item.type == "text":
265 | 					conversation += item.text or ""
266 | 				elif item.type == "image_url" and item.image_url:
267 | 					# Handle image
268 | 					image_url = item.image_url.get("url", "")
269 | 					if image_url.startswith("data:image/"):  # only data-URI images are handled; plain http(s) URLs are ignored
270 | 						# Process base64 encoded image
271 | 						try:
272 | 							# Extract the base64 part
273 | 							base64_data = image_url.split(",")[1]
274 | 							image_data = base64.b64decode(base64_data)
275 |
276 | 							# Create temporary file to hold the image
277 | 							with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp:  # assumes PNG; actual mime type not inspected — TODO confirm
278 | 								tmp.write(image_data)
279 | 								temp_files.append(tmp.name)  # caller is responsible for deleting these files
280 | 						except Exception as e:
281 | 							logger.error(f"Error processing base64 image: {str(e)}")
282 |
283 | 			conversation += "\n\n"
284 |
285 | 	# Add a final prompt for the assistant to respond to
286 | 	conversation += "Assistant: "
287 |
288 | 	return conversation, temp_files
289 |
290 |
291 | # Dependency to get the initialized Gemini client
292 | async def get_gemini_client():  # FastAPI dependency: lazily initializes and caches the module-global GeminiClient
293 | 	global gemini_client
294 | 	if gemini_client is None:
295 | 		try:
296 | 			gemini_client = GeminiClient(SECURE_1PSID, SECURE_1PSIDTS)
297 | 			await gemini_client.init(timeout=300)
298 | 		except Exception as e:
299 | 			logger.error(f"Failed to initialize Gemini client: {str(e)}")
300 | 			raise HTTPException(status_code=500, detail=f"Failed to initialize Gemini client: {str(e)}")
301 | 	return gemini_client
302 |
303 |
304 | @app.post("/v1/chat/completions")
305 | async def create_chat_completion(request: ChatCompletionRequest, api_key: str = Depends(verify_api_key)):
306 | try:
307 | # 确保客户端已初始化
308 | global gemini_client
309 | if gemini_client is None:
310 | gemini_client = GeminiClient(SECURE_1PSID, SECURE_1PSIDTS)
311 | await gemini_client.init(timeout=300)
312 | logger.info("Gemini client initialized successfully")
313 |
314 | # 转换消息为对话格式
315 | conversation, temp_files = prepare_conversation(request.messages)
316 | logger.info(f"Prepared conversation: {conversation}")
317 | logger.info(f"Temp files: {temp_files}")
318 |
319 | # 获取适当的模型
320 | model = map_model_name(request.model)
321 | logger.info(f"Using model: {model}")
322 |
323 | # 生成响应
324 | logger.info("Sending request to Gemini...")
325 | if temp_files:
326 | # With files
327 | response = await gemini_client.generate_content(conversation, files=temp_files, model=model)
328 | else:
329 | # Text only
330 | response = await gemini_client.generate_content(conversation, model=model)
331 |
332 | # 清理临时文件
333 | for temp_file in temp_files:
334 | try:
335 | os.unlink(temp_file)
336 | except Exception as e:
337 | logger.warning(f"Failed to delete temp file {temp_file}: {str(e)}")
338 |
339 | # 提取文本响应
340 | reply_text = ""
341 | # 提取思考内容
342 | if ENABLE_THINKING and hasattr(response, "thoughts"):
343 | reply_text += f"{response.thoughts}"
344 | if hasattr(response, "text"):
345 | reply_text += response.text
346 | else:
347 | reply_text += str(response)
348 | reply_text = reply_text.replace("<", "<").replace("\\<", "<").replace("\\_", "_").replace("\\>", ">")
349 | reply_text = correct_markdown(reply_text)
350 |
351 | logger.info(f"Response: {reply_text}")
352 |
353 | if not reply_text or reply_text.strip() == "":
354 | logger.warning("Empty response received from Gemini")
355 | reply_text = "服务器返回了空响应。请检查 Gemini API 凭据是否有效。"
356 |
357 | # 创建响应对象
358 | completion_id = f"chatcmpl-{uuid.uuid4()}"
359 | created_time = int(time.time())
360 |
361 | # 检查客户端是否请求流式响应
362 | if request.stream:
363 | # 实现流式响应
364 | async def generate_stream():
365 | # 创建 SSE 格式的流式响应
366 | # 先发送开始事件
367 | data = {
368 | "id": completion_id,
369 | "object": "chat.completion.chunk",
370 | "created": created_time,
371 | "model": request.model,
372 | "choices": [{"index": 0, "delta": {"role": "assistant"}, "finish_reason": None}],
373 | }
374 | yield f"data: {json.dumps(data)}\n\n"
375 |
376 | # 模拟流式输出 - 将文本按字符分割发送
377 | for char in reply_text:
378 | data = {
379 | "id": completion_id,
380 | "object": "chat.completion.chunk",
381 | "created": created_time,
382 | "model": request.model,
383 | "choices": [{"index": 0, "delta": {"content": char}, "finish_reason": None}],
384 | }
385 | yield f"data: {json.dumps(data)}\n\n"
386 | # 可选:添加短暂延迟以模拟真实的流式输出
387 | await asyncio.sleep(0.01)
388 |
389 | # 发送结束事件
390 | data = {
391 | "id": completion_id,
392 | "object": "chat.completion.chunk",
393 | "created": created_time,
394 | "model": request.model,
395 | "choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}],
396 | }
397 | yield f"data: {json.dumps(data)}\n\n"
398 | yield "data: [DONE]\n\n"
399 |
400 | return StreamingResponse(generate_stream(), media_type="text/event-stream")
401 | else:
402 | # 非流式响应(原来的逻辑)
403 | result = {
404 | "id": completion_id,
405 | "object": "chat.completion",
406 | "created": created_time,
407 | "model": request.model,
408 | "choices": [{"index": 0, "message": {"role": "assistant", "content": reply_text}, "finish_reason": "stop"}],
409 | "usage": {
410 | "prompt_tokens": len(conversation.split()),
411 | "completion_tokens": len(reply_text.split()),
412 | "total_tokens": len(conversation.split()) + len(reply_text.split()),
413 | },
414 | }
415 |
416 | logger.info(f"Returning response: {result}")
417 | return result
418 |
419 | except Exception as e:
420 | logger.error(f"Error generating completion: {str(e)}", exc_info=True)
421 | raise HTTPException(status_code=500, detail=f"Error generating completion: {str(e)}")
422 |
423 |
424 | @app.get("/")
425 | async def root():  # health-check endpoint; render.yaml points healthCheckPath at "/"
426 | 	return {"status": "online", "message": "Gemini API FastAPI Server is running"}
427 |
428 |
429 | if __name__ == "__main__":
430 | 	import uvicorn
431 |
432 | 	uvicorn.run("main:app", host="0.0.0.0", port=8000, log_level="info")  # binds all interfaces; port matches Dockerfile EXPOSE 8000
433 |
--------------------------------------------------------------------------------