├── .python-version
├── app
├── meeting
│ ├── __init__.py
│ ├── agents
│ │ ├── __init__.py
│ │ ├── agent_factory.py
│ │ └── human_agent.py
│ ├── utils
│ │ ├── __init__.py
│ │ └── summary_generator.py
│ ├── meeting_modes
│ │ ├── __init__.py
│ │ ├── base_mode.py
│ │ ├── role_playing.py
│ │ ├── brainstorming.py
│ │ ├── swot_analysis.py
│ │ ├── six_thinking_hats.py
│ │ ├── discussion.py
│ │ └── debate.py
│ ├── logger
│ │ └── meeting_logger.py
│ └── meeting
│ │ └── meeting.py
├── static
│ ├── favicon.ico
│ ├── css
│ │ └── custom-speaking-order.css
│ ├── js
│ │ ├── thinking.js
│ │ └── custom-speaking-order.js
│ ├── login.js
│ └── login.html
├── models
│ ├── __init__.py
│ ├── schemas.py
│ ├── database.py
│ └── multi_step_collaboration.py
├── routes
│ ├── __init__.py
│ ├── auth.py
│ ├── api_key.py
│ ├── model.py
│ └── configuration.py
├── clients
│ ├── __init__.py
│ ├── base_client.py
│ ├── openai_client.py
│ ├── deepseek_client.py
│ ├── grok3_client.py
│ ├── claude_client.py
│ └── gemini_client.py
├── migration
│ ├── add_topic_column.py
│ └── add_summary_columns.py
├── meeting_modes
│ └── base_mode.py
├── routers
│ ├── models.py
│ ├── roles.py
│ ├── discussion_groups.py
│ └── discussions.py
└── utils
│ ├── logger.py
│ └── auth.py
├── .dockerignore
├── docker-compose.yml
├── .gitignore
├── Dockerfile
├── .env.example
├── pyproject.toml
├── migrate.py
├── alembic
├── script.py.mako
├── versions
│ ├── xxxx_add_custom_parameters.py
│ ├── xxxx_update_custom_parameters.py
│ ├── 7b27956007a6_initial.py
│ └── xxxx_update_model_defaults.py
└── env.py
├── alembic.ini
├── README_zh.md
└── README.md
/.python-version:
--------------------------------------------------------------------------------
1 | 3.11
2 |
--------------------------------------------------------------------------------
/app/meeting/__init__.py:
--------------------------------------------------------------------------------
1 | # 会议模块初始化
--------------------------------------------------------------------------------
/app/meeting/agents/__init__.py:
--------------------------------------------------------------------------------
1 | # 智能体模块初始化
--------------------------------------------------------------------------------
/app/meeting/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # 工具模块初始化
--------------------------------------------------------------------------------
/app/meeting/meeting_modes/__init__.py:
--------------------------------------------------------------------------------
1 | # 会议模式模块初始化
2 |
--------------------------------------------------------------------------------
/app/static/favicon.ico:
--------------------------------------------------------------------------------
1 | [二进制文件内容 - 这是一个 16x16 和 32x32 像素的 ICO 格式图标文件]
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | .git
2 | .gitignore
3 | .env
4 | *.db
5 | __pycache__
6 | *.pyc
7 | .pytest_cache
8 | .coverage
9 | htmlcov
10 | .venv
11 | venv
--------------------------------------------------------------------------------
/app/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .collaboration import ModelCollaboration
2 | from .multi_step_collaboration import MultiStepModelCollaboration
3 |
4 | __all__ = ['ModelCollaboration', 'MultiStepModelCollaboration']
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.8'
2 |
3 | services:
4 | deepgemini:
5 | image: bradleylzh/deepgemini:latest
6 | ports:
7 | - "8000:8000"
8 | volumes:
9 | - ./.env:/app/.env
10 | - ./deepgemini.db:/app/deepgemini.db
11 | restart: unless-stopped
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Python-generated files
2 | __pycache__/
3 | *.py[oc]
4 | build/
5 | dist/
6 | wheels/
7 | *.egg-info
8 | *.db
9 | # Environment
10 | .env
11 | .env.*
12 | !.env.example
13 | scripts/
14 | # Virtual environments
15 | .venv
16 | logs/
17 | logger/
18 | backups/
19 | data/
20 |
21 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # 使用 Python 3.11 作为基础镜像
2 | FROM python:3.11-slim
3 |
4 | # 设置工作目录
5 | WORKDIR /app
6 |
7 | # 安装 uv
8 | RUN pip install uv
9 |
10 | # 复制项目文件
11 | COPY . .
12 |
13 | # 使用 uv 安装依赖
14 | RUN uv sync
15 |
16 | # 暴露端口
17 | EXPOSE 8000
18 |
19 | # 启动命令
20 | CMD ["uv", "run", "-m", "app.main"]
--------------------------------------------------------------------------------
/app/routes/__init__.py:
--------------------------------------------------------------------------------
1 | from .model import router as model_router
2 | from .configuration import router as configuration_router
3 | from .api_key import router as api_key_router
4 | from .auth import router as auth_router
5 |
6 | __all__ = ['model_router', 'configuration_router', 'api_key_router', 'auth_router']
--------------------------------------------------------------------------------
/app/clients/__init__.py:
--------------------------------------------------------------------------------
1 | """客户端模块"""
2 | from .base_client import BaseClient
3 | from .claude_client import ClaudeClient
4 | from .deepseek_client import DeepSeekClient
5 | from .gemini_client import GeminiClient
6 | from .openai_client import OpenAIClient
7 | from .grok3_client import Grok3Client
8 |
9 | __all__ = [
10 | 'BaseClient',
11 | 'ClaudeClient',
12 | 'DeepSeekClient',
13 | 'GeminiClient',
14 | 'OpenAIClient',
15 | 'Grok3Client'
16 | ]
17 |
--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
1 | # 客户端请求时允许通过请求的 API KEY,无需在当前环境变量当中手动添加 Bearer
2 | ALLOW_API_KEY=[{"id":1,"key":"sk-api-xxxxxxxxxxxxxxxx","description":"Default API Key"}]
3 |
4 | # 服务端跨域配置
5 | # 允许访问的域名,多个域名使用逗号分隔(中间不能有空格),例如:http://localhost:3000,https://chat.example.com
6 | # 如果允许所有域名访问,则填写 *
7 | ALLOW_ORIGINS=*
8 |
9 | # 日志配置
10 | # 可选值:DEBUG, INFO, WARNING, ERROR, CRITICAL
11 | LOG_LEVEL=INFO
12 |
13 | # 管理员账户配置
14 | ADMIN_USERNAME=admin
15 | ADMIN_PASSWORD=admin123
16 |
17 | # JWT密钥
18 | JWT_SECRET=your-secret-key
19 |
20 |
21 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "deepgemini"
3 | version = "0.1.2"
4 | description = "Add your description here"
5 | readme = "README.md"
6 | requires-python = ">=3.11"
7 | dependencies = [
8 | "aiohttp>=3.11.12",
9 | "colorlog>=6.9.0",
10 | "fastapi>=0.115.8",
11 | "httpx>=0.28.1",
12 | "langchain>=0.3.20",
13 | "langchain-openai>=0.3.8",
14 | "passlib>=1.7.4",
15 | "pyjwt>=2.10.1",
16 | "python-dotenv>=1.0.1",
17 | "sqlalchemy>=2.0.38",
18 | "uvicorn>=0.34.0",
19 | ]
20 |
--------------------------------------------------------------------------------
/migrate.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | from alembic.config import Config
4 | from alembic import command
5 |
6 | def run_migrations():
7 | # 获取当前目录
8 | current_dir = os.path.dirname(os.path.abspath(__file__))
9 |
10 | # 创建 Alembic 配置
11 | alembic_cfg = Config(os.path.join(current_dir, "alembic.ini"))
12 |
13 | try:
14 | # 运行迁移
15 | command.upgrade(alembic_cfg, "head")
16 | print("数据库迁移成功完成!")
17 | except Exception as e:
18 | print(f"迁移过程中发生错误: {e}")
19 | sys.exit(1)
20 |
21 | if __name__ == "__main__":
22 | run_migrations()
--------------------------------------------------------------------------------
/alembic/script.py.mako:
--------------------------------------------------------------------------------
1 | """${message}
2 |
3 | Revision ID: ${up_revision}
4 | Revises: ${down_revision | comma,n}
5 | Create Date: ${create_date}
6 |
7 | """
8 | from typing import Sequence, Union
9 |
10 | from alembic import op
11 | import sqlalchemy as sa
12 | ${imports if imports else ""}
13 |
14 | # revision identifiers, used by Alembic.
15 | revision: str = ${repr(up_revision)}
16 | down_revision: Union[str, None] = ${repr(down_revision)}
17 | branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
18 | depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
19 |
20 |
21 | def upgrade() -> None:
22 | ${upgrades if upgrades else "pass"}
23 |
24 |
25 | def downgrade() -> None:
26 | ${downgrades if downgrades else "pass"}
--------------------------------------------------------------------------------
/app/migration/add_topic_column.py:
--------------------------------------------------------------------------------
1 | # 创建迁移脚本来添加缺失的列
2 |
3 | from sqlalchemy import create_engine, text
4 | from sqlalchemy.orm import sessionmaker
5 | import os
6 |
7 | # 获取数据库URL
8 | DATABASE_URL = os.getenv("DATABASE_URL", "sqlite:///./deepgemini.db")
9 |
10 | # 创建SQLAlchemy引擎和会话
11 | engine = create_engine(DATABASE_URL)
12 | SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
13 |
14 | # 创建会话
15 | db = SessionLocal()
16 |
17 | # 执行原始SQL命令添加列
18 | try:
19 | # 使用text()函数包装SQL语句
20 | sql = text("ALTER TABLE discussion_groups ADD COLUMN topic TEXT")
21 | db.execute(sql)
22 | db.commit()
23 | print("成功添加topic列到discussion_groups表")
24 | except Exception as e:
25 | db.rollback()
26 | print(f"添加列时出错: {str(e)}")
27 | finally:
28 | db.close()
--------------------------------------------------------------------------------
/alembic/versions/xxxx_add_custom_parameters.py:
--------------------------------------------------------------------------------
1 | """add custom parameters
2 |
3 | Revision ID: xxxx
4 | Revises: previous_revision
5 | Create Date: 2024-03-21 xx:xx:xx.xxxxxx
6 | """
7 | from alembic import op
8 | import sqlalchemy as sa
9 | from sqlalchemy.dialects import sqlite
10 |
11 | revision = 'xxxx'
12 | down_revision = 'previous_revision'
13 | branch_labels = None
14 | depends_on = None
15 |
16 | def upgrade() -> None:
17 | with op.batch_alter_table('models') as batch_op:
18 | batch_op.add_column(sa.Column('custom_parameters', sa.JSON(), nullable=True))
19 |
20 | # 为现有记录设置默认值
21 | op.execute("UPDATE models SET custom_parameters = '{}' WHERE custom_parameters IS NULL")
22 |
23 | def downgrade() -> None:
24 | with op.batch_alter_table('models') as batch_op:
25 | batch_op.drop_column('custom_parameters')
--------------------------------------------------------------------------------
/alembic.ini:
--------------------------------------------------------------------------------
1 | [alembic]
2 | # path to migration scripts
3 | script_location = alembic
4 |
5 | # SQLite database URL
6 | sqlalchemy.url = sqlite:///./deepgemini.db
7 |
8 | # Add Python path
9 | prepend_sys_path = .
10 |
11 | # template used to generate migration files
12 | # file_template = %%(rev)s_%%(slug)s
13 |
14 | [loggers]
15 | keys = root,sqlalchemy,alembic
16 |
17 | [handlers]
18 | keys = console
19 |
20 | [formatters]
21 | keys = generic
22 |
23 | [logger_root]
24 | level = WARN
25 | handlers = console
26 | qualname =
27 |
28 | [logger_sqlalchemy]
29 | level = WARN
30 | handlers =
31 | qualname = sqlalchemy.engine
32 |
33 | [logger_alembic]
34 | level = INFO
35 | handlers =
36 | qualname = alembic
37 |
38 | [handler_console]
39 | class = StreamHandler
40 | args = (sys.stderr,)
41 | level = NOTSET
42 | formatter = generic
43 |
44 | [formatter_generic]
45 | format = %(levelname)-5.5s [%(name)s] %(message)s
46 | datefmt = %H:%M:%S
--------------------------------------------------------------------------------
/app/migration/add_summary_columns.py:
--------------------------------------------------------------------------------
1 | from sqlalchemy import create_engine, text
2 | from sqlalchemy.orm import sessionmaker
3 | import os
4 |
5 | # 获取数据库URL
6 | DATABASE_URL = os.getenv("DATABASE_URL", "sqlite:///./deepgemini.db")
7 |
8 | # 创建SQLAlchemy引擎和会话
9 | engine = create_engine(DATABASE_URL)
10 | SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
11 |
12 | # 创建会话
13 | db = SessionLocal()
14 |
15 | # 执行原始SQL命令添加列
16 | try:
17 | # 使用text()函数包装SQL语句
18 | statements = [
19 | text("ALTER TABLE discussion_groups ADD COLUMN summary_model_id INTEGER REFERENCES models(id)"),
20 | text("ALTER TABLE discussion_groups ADD COLUMN summary_prompt TEXT")
21 | ]
22 |
23 | for sql in statements:
24 | try:
25 | db.execute(sql)
26 | print(f"成功执行: {sql}")
27 | except Exception as e:
28 | print(f"执行 {sql} 时出错: {str(e)}")
29 |
30 | db.commit()
31 | print("成功添加总结相关列到discussion_groups表")
32 | except Exception as e:
33 | db.rollback()
34 | print(f"添加列时出错: {str(e)}")
35 | finally:
36 | db.close()
--------------------------------------------------------------------------------
/alembic/versions/xxxx_update_custom_parameters.py:
--------------------------------------------------------------------------------
1 | """update custom parameters default
2 |
3 | Revision ID: xxxx
4 | Revises: previous_revision
5 | Create Date: 2024-03-21 xx:xx:xx.xxxxxx
6 | """
7 | from alembic import op
8 | import sqlalchemy as sa
9 | from sqlalchemy.dialects import sqlite
10 |
11 | revision = 'xxxx'
12 | down_revision = 'previous_revision'
13 | branch_labels = None
14 | depends_on = None
15 |
16 | def upgrade() -> None:
17 | # SQLite 不支持修改列,所以需要重建表
18 | with op.batch_alter_table('models') as batch_op:
19 | batch_op.alter_column('custom_parameters',
20 | existing_type=sa.JSON(),
21 | nullable=False,
22 | server_default='{}')
23 |
24 | # 更新现有记录
25 | op.execute("UPDATE models SET custom_parameters = '{}' WHERE custom_parameters IS NULL")
26 |
27 | def downgrade() -> None:
28 | with op.batch_alter_table('models') as batch_op:
29 | batch_op.alter_column('custom_parameters',
30 | existing_type=sa.JSON(),
31 | nullable=True,
32 | server_default=None)
--------------------------------------------------------------------------------
/app/meeting_modes/base_mode.py:
--------------------------------------------------------------------------------
1 | from typing import List, Dict, Any
2 |
3 | class BaseMeetingMode:
4 | """会议模式基类"""
5 |
6 | def __init__(self, name: str, description: str):
7 | self.name = name
8 | self.description = description
9 | self.max_rounds = 3 # 默认最大轮数
10 |
11 | def get_agent_prompt(self, agent_name: str, agent_role: str,
12 | meeting_topic: str, current_round: int) -> str:
13 | """获取智能体提示"""
14 | raise NotImplementedError("子类必须实现此方法")
15 |
16 | def determine_speaking_order(self, agents: List[Dict[str, Any]],
17 | current_round: int) -> List[str]:
18 | """确定发言顺序"""
19 | raise NotImplementedError("子类必须实现此方法")
20 |
21 | def should_end_meeting(self, rounds_completed: int,
22 | meeting_history: List[Dict[str, Any]]) -> bool:
23 | """判断会议是否应该结束"""
24 | raise NotImplementedError("子类必须实现此方法")
25 |
26 | def summarize_meeting(self, meeting_topic: str,
27 | meeting_history: List[Dict[str, Any]]) -> str:
28 | """汇总会议结果"""
29 | raise NotImplementedError("子类必须实现此方法")
30 |
31 | def get_summary_prompt_template(self) -> str:
32 | """获取总结提示模板"""
33 | return """
34 | 你是一个会议总结专家。请对以下关于"{meeting_topic}"的会议进行总结。
35 | 会议记录如下:
36 |
37 | {history_text}
38 |
39 | 请提供以下内容:
40 | 1. 讨论的主要主题和观点概述
41 | 2. 讨论中达成的主要共识
42 | 3. 存在的主要分歧或不同视角
43 | 4. 提出的解决方案或行动建议
44 | 5. 可能需要进一步讨论或研究的问题
45 |
46 | 请以清晰、结构化的方式呈现总结。
47 | """
--------------------------------------------------------------------------------
/alembic/versions/7b27956007a6_initial.py:
--------------------------------------------------------------------------------
1 | """initial
2 |
3 | Revision ID: 7b27956007a6
4 | Revises:
5 | Create Date: 2025-02-27 18:20:11.715233
6 |
7 | """
8 | from typing import Sequence, Union
9 |
10 | from alembic import op
11 | import sqlalchemy as sa
12 |
13 |
14 | # revision identifiers, used by Alembic.
15 | revision: str = '7b27956007a6'
16 | down_revision: Union[str, None] = None
17 | branch_labels: Union[str, Sequence[str], None] = None
18 | depends_on: Union[str, Sequence[str], None] = None
19 |
20 |
21 | def upgrade() -> None:
22 | # ### commands auto generated by Alembic - please adjust! ###
23 | with op.batch_alter_table('models', schema=None) as batch_op:
24 | batch_op.add_column(sa.Column('enable_tools', sa.Boolean(), server_default='0', nullable=False))
25 | batch_op.add_column(sa.Column('tools', sa.JSON(), nullable=True))
26 | batch_op.add_column(sa.Column('tool_choice', sa.JSON(), nullable=True))
27 | batch_op.add_column(sa.Column('enable_thinking', sa.Boolean(), server_default='0', nullable=False))
28 | batch_op.add_column(sa.Column('thinking_budget_tokens', sa.Integer(), server_default='16000', nullable=False))
29 |
30 | # ### end Alembic commands ###
31 |
32 |
33 | def downgrade() -> None:
34 | # ### commands auto generated by Alembic - please adjust! ###
35 | with op.batch_alter_table('models', schema=None) as batch_op:
36 | batch_op.drop_column('thinking_budget_tokens')
37 | batch_op.drop_column('enable_thinking')
38 | batch_op.drop_column('tool_choice')
39 | batch_op.drop_column('tools')
40 | batch_op.drop_column('enable_tools')
41 |
42 | # ### end Alembic commands ###
--------------------------------------------------------------------------------
/app/routes/auth.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, HTTPException, Depends
2 | from pydantic import BaseModel
3 | import os
4 | from app.utils.auth import create_access_token, verify_token, update_admin_credentials
5 | from typing import Optional
6 |
7 | router = APIRouter()
8 |
9 | class LoginRequest(BaseModel):
10 | username: str
11 | password: str
12 |
13 | class CredentialsUpdate(BaseModel):
14 | current_password: str
15 | new_username: Optional[str] = None
16 | new_password: Optional[str] = None
17 |
18 | @router.post("/login")
19 | async def login(request: LoginRequest):
20 | # 验证用户名和密码
21 | if (request.username == os.getenv("ADMIN_USERNAME") and
22 | request.password == os.getenv("ADMIN_PASSWORD")):
23 | access_token = create_access_token(data={"sub": request.username})
24 | return {"access_token": access_token, "token_type": "bearer"}
25 | raise HTTPException(status_code=401, detail="Incorrect username or password")
26 |
27 | @router.post("/update-credentials")
28 | async def update_credentials(
29 | request: CredentialsUpdate,
30 | username: str = Depends(verify_token)
31 | ):
32 | # 验证当前密码
33 | if request.current_password != os.getenv("ADMIN_PASSWORD"):
34 | raise HTTPException(status_code=401, detail="Current password is incorrect")
35 |
36 | # 更新凭据
37 | new_username = request.new_username or os.getenv("ADMIN_USERNAME")
38 | new_password = request.new_password or os.getenv("ADMIN_PASSWORD")
39 |
40 | try:
41 | update_admin_credentials(new_username, new_password)
42 | return {"message": "Credentials updated successfully"}
43 | except Exception as e:
44 | raise HTTPException(status_code=500, detail=str(e))
--------------------------------------------------------------------------------
/app/routers/models.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, Depends, HTTPException, Body
2 | from sqlalchemy.orm import Session
3 | from typing import List, Dict, Any, Optional
4 | import logging
5 |
6 | from app.models.database import get_db, Model, Role, DiscussionGroup
7 | from app.models.schemas import Model as ModelSchema, ModelCreate
8 | from app.processors.role_processor import RoleProcessor
9 | from app.processors.discussion_processor import DiscussionProcessor
10 |
11 | router = APIRouter(
12 | prefix="/models",
13 | tags=["models"],
14 | responses={404: {"description": "Not found"}},
15 | )
16 |
17 | logger = logging.getLogger(__name__)
18 |
19 | @router.get("/", response_model=List[Dict[str, Any]])
20 | def get_all_models(db: Session = Depends(get_db)):
21 | """获取所有模型,包括角色和讨论组"""
22 | # 获取常规模型
23 | models = db.query(Model).all()
24 | result = []
25 |
26 | for model in models:
27 | result.append({
28 | "id": model.id,
29 | "name": model.name,
30 | "type": model.type,
31 | "provider": model.provider,
32 | "model_type": "model" # 标记为常规模型
33 | })
34 |
35 | # 获取角色
36 | roles = db.query(Role).all()
37 | for role in roles:
38 | result.append({
39 | "id": f"role_{role.id}", # 添加前缀以区分
40 | "name": f"角色: {role.name}",
41 | "type": "both", # 角色可以用于推理和执行
42 | "provider": "deepgemini",
43 | "model_type": "role" # 标记为角色
44 | })
45 |
46 | # 获取讨论组
47 | groups = db.query(DiscussionGroup).all()
48 | for group in groups:
49 | result.append({
50 | "id": f"group_{group.id}", # 添加前缀以区分
51 | "name": f"讨论组: {group.name}",
52 | "type": "both", # 讨论组可以用于推理和执行
53 | "provider": "deepgemini",
54 | "model_type": "discussion_group" # 标记为讨论组
55 | })
56 |
57 | return result
58 |
59 | # 其他现有的模型路由...
--------------------------------------------------------------------------------
/app/utils/logger.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import colorlog
3 | import sys
4 | import os
5 | from dotenv import load_dotenv
6 | from datetime import datetime
7 |
8 | # 确保环境变量被加载
9 | load_dotenv()
10 |
11 | def get_log_level() -> int:
12 | """从环境变量获取日志级别
13 |
14 | Returns:
15 | int: logging 模块定义的日志级别
16 | """
17 | level_map = {
18 | 'DEBUG': logging.DEBUG,
19 | 'INFO': logging.INFO,
20 | 'WARNING': logging.WARNING,
21 | 'ERROR': logging.ERROR,
22 | 'CRITICAL': logging.CRITICAL
23 | }
24 |
25 | level = os.getenv('LOG_LEVEL', 'INFO').upper()
26 | return level_map.get(level, logging.INFO)
27 |
28 | def setup_logger(name: str = "DeepGemini") -> logging.Logger:
29 | """设置一个彩色的logger
30 |
31 | Args:
32 | name (str, optional): logger的名称. Defaults to "DeepClaude".
33 |
34 | Returns:
35 | logging.Logger: 配置好的logger实例
36 | """
37 | # 获取logger实例
38 | logger = colorlog.getLogger(name)
39 |
40 | # 清除现有处理器,避免重复
41 | if logger.handlers:
42 | for handler in logger.handlers:
43 | logger.removeHandler(handler)
44 |
45 | # 从环境变量获取日志级别
46 | log_level = get_log_level()
47 |
48 | # 设置日志级别
49 | logger.setLevel(log_level)
50 |
51 | # 创建控制台处理器
52 | console_handler = logging.StreamHandler(sys.stdout)
53 | console_handler.setLevel(log_level)
54 |
55 | # 设置彩色日志格式
56 | formatter = colorlog.ColoredFormatter(
57 | "%(log_color)s%(asctime)s - %(name)s - %(levelname)s - %(message)s",
58 | datefmt="%Y-%m-%d %H:%M:%S",
59 | log_colors={
60 | 'DEBUG': 'cyan',
61 | 'INFO': 'green',
62 | 'WARNING': 'yellow',
63 | 'ERROR': 'red',
64 | 'CRITICAL': 'red,bg_white',
65 | }
66 | )
67 |
68 | console_handler.setFormatter(formatter)
69 | logger.addHandler(console_handler)
70 |
71 | # 防止日志传播到父logger,这可以减少重复日志
72 | logger.propagate = False
73 |
74 | return logger
75 |
76 | # 创建一个默认的logger实例
77 | logger = setup_logger()
78 |
--------------------------------------------------------------------------------
/alembic/versions/xxxx_update_model_defaults.py:
--------------------------------------------------------------------------------
1 | """update model defaults
2 |
3 | Revision ID: xxxx
4 | Revises: previous_revision
5 | Create Date: 2024-03-21 xx:xx:xx.xxxxxx
6 |
7 | """
8 | from typing import Sequence, Union
9 |
10 | from alembic import op
11 | import sqlalchemy as sa
12 |
13 | # revision identifiers, used by Alembic.
14 | revision: str = 'new_revision'
15 | down_revision: Union[str, None] = '7b27956007a6'
16 | branch_labels: Union[str, Sequence[str], None] = None
17 | depends_on: Union[str, Sequence[str], None] = None
18 |
19 | def upgrade() -> None:
20 | # 更新现有记录的默认值
21 | with op.batch_alter_table('models') as batch_op:
22 | # 先设置现有记录的默认值
23 | op.execute("UPDATE models SET enable_tools = '0' WHERE enable_tools IS NULL")
24 | op.execute("UPDATE models SET enable_thinking = '0' WHERE enable_thinking IS NULL")
25 | op.execute("UPDATE models SET thinking_budget_tokens = 16000 WHERE thinking_budget_tokens IS NULL")
26 |
27 | # 修改列定义
28 | batch_op.alter_column('enable_tools',
29 | existing_type=sa.Boolean(),
30 | nullable=False,
31 | server_default='0'
32 | )
33 | batch_op.alter_column('enable_thinking',
34 | existing_type=sa.Boolean(),
35 | nullable=False,
36 | server_default='0'
37 | )
38 | batch_op.alter_column('thinking_budget_tokens',
39 | existing_type=sa.Integer(),
40 | nullable=False,
41 | server_default='16000'
42 | )
43 |
44 | def downgrade() -> None:
45 | with op.batch_alter_table('models') as batch_op:
46 | batch_op.alter_column('enable_tools',
47 | existing_type=sa.Boolean(),
48 | nullable=True,
49 | server_default=None
50 | )
51 | batch_op.alter_column('enable_thinking',
52 | existing_type=sa.Boolean(),
53 | nullable=True,
54 | server_default=None
55 | )
56 | batch_op.alter_column('thinking_budget_tokens',
57 | existing_type=sa.Integer(),
58 | nullable=True,
59 | server_default=None
60 | )
--------------------------------------------------------------------------------
/alembic/env.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | from logging.config import fileConfig
4 | import codecs
5 |
6 | from sqlalchemy import engine_from_config
7 | from sqlalchemy import pool
8 |
9 | from alembic import context
10 |
11 | # Add project root directory to Python path
12 | sys.path.append(os.path.dirname(os.path.dirname(__file__)))
13 |
14 | # Set default encoding to UTF-8
15 | if sys.getdefaultencoding() != 'utf-8':
16 | reload(sys)
17 | sys.setdefaultencoding('utf-8')
18 |
19 | # this is the Alembic Config object, which provides
20 | # access to the values within the .ini file in use.
21 | config = context.config
22 |
23 | # Interpret the config file for Python logging.
24 | # This line sets up loggers basically.
25 | if config.config_file_name is not None:
26 | fileConfig(config.config_file_name)
27 |
28 | # 只导入必要的数据库模型,避免导入其他模块
29 | import importlib.util
30 | spec = importlib.util.spec_from_file_location(
31 | "database",
32 | os.path.join(os.path.dirname(os.path.dirname(__file__)), "app", "models", "database.py")
33 | )
34 | database = importlib.util.module_from_spec(spec)
35 | spec.loader.exec_module(database)
36 |
37 | target_metadata = database.Base.metadata
38 | DATABASE_URL = database.DATABASE_URL
39 |
40 | # other values from the config, defined by the needs of env.py,
41 | # can be acquired:
42 | # my_important_option = config.get_main_option("my_important_option")
43 | # ... etc.
44 |
45 | def run_migrations_offline() -> None:
46 | """Run migrations in 'offline' mode."""
47 | url = DATABASE_URL
48 | context.configure(
49 | url=url,
50 | target_metadata=target_metadata,
51 | literal_binds=True,
52 | dialect_opts={"paramstyle": "named"},
53 | )
54 |
55 | with context.begin_transaction():
56 | context.run_migrations()
57 |
58 | def run_migrations_online() -> None:
59 | """Run migrations in 'online' mode."""
60 | configuration = config.get_section(config.config_ini_section)
61 | configuration["sqlalchemy.url"] = DATABASE_URL
62 |
63 | connectable = engine_from_config(
64 | configuration,
65 | prefix="sqlalchemy.",
66 | poolclass=pool.NullPool,
67 | )
68 |
69 | with connectable.connect() as connection:
70 | context.configure(
71 | connection=connection,
72 | target_metadata=target_metadata,
73 | # 添加这些选项以支持 SQLite
74 | render_as_batch=True,
75 | compare_type=True
76 | )
77 |
78 | with context.begin_transaction():
79 | context.run_migrations()
80 |
81 | if context.is_offline_mode():
82 | run_migrations_offline()
83 | else:
84 | run_migrations_online()
--------------------------------------------------------------------------------
/app/meeting/meeting_modes/base_mode.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from typing import List, Dict, Any
3 | from app.meeting.utils.summary_generator import SummaryGenerator
4 |
5 | class BaseMeetingMode(ABC):
6 | """会议模式基类"""
7 |
8 | def __init__(self, name: str, description: str, max_rounds: int = 3):
9 | self.name = name
10 | self.description = description
11 | self.max_rounds = max_rounds # 默认最大轮数,但允许在初始化时设置
12 | self.custom_speaking_order = None # 自定义发言顺序
13 |
14 | def set_max_rounds(self, max_rounds: int):
15 | """设置最大轮数"""
16 | if max_rounds > 0:
17 | self.max_rounds = max_rounds
18 | return True
19 | return False
20 |
21 | def set_custom_speaking_order(self, custom_speaking_order: List[str]):
22 | """设置自定义发言顺序"""
23 | if custom_speaking_order and isinstance(custom_speaking_order, list):
24 | self.custom_speaking_order = custom_speaking_order
25 | return True
26 | return False
27 |
28 | @abstractmethod
29 | def get_agent_prompt(self, agent_name: str, agent_role: str,
30 | meeting_topic: str, current_round: int) -> str:
31 | """获取智能体提示"""
32 | pass
33 |
34 | @abstractmethod
35 | def determine_speaking_order(self, agents: List[Dict[str, Any]],
36 | current_round: int) -> List[str]:
37 | """确定发言顺序"""
38 | pass
39 |
40 | @abstractmethod
41 | def should_end_meeting(self, rounds_completed: int,
42 | meeting_history: List[Dict[str, Any]]) -> bool:
43 | """
44 | 判断会议是否应该结束
45 | 基类实现始终返回False,让Meeting类负责基于max_rounds终止会议
46 | """
47 | # 让Meeting类基于self.max_rounds决定会议结束
48 | return False
49 |
50 | def get_summary_prompt_template(self) -> str:
51 | """获取总结提示模板"""
52 | return """
53 | 你是一个会议总结专家。请对以下关于"{meeting_topic}"的会议进行总结。
54 | 会议记录如下:
55 |
56 | {history_text}
57 |
58 | 请提供以下内容:
59 | 1. 讨论的主要主题和观点概述
60 | 2. 讨论中达成的主要共识
61 | 3. 存在的主要分歧或不同视角
62 | 4. 提出的解决方案或行动建议
63 | 5. 可能需要进一步讨论或研究的问题
64 |
65 | 请以清晰、结构化的方式呈现总结。
66 | """
67 |
68 | def summarize_meeting(self, meeting_topic: str,
69 | meeting_history: List[Dict[str, Any]]) -> str:
70 | """汇总会议结果"""
71 | # 获取当前模式的总结提示模板
72 | prompt_template = self.get_summary_prompt_template()
73 |
74 | # 使用总结生成器生成总结
75 | return SummaryGenerator.generate_summary(
76 | meeting_topic=meeting_topic,
77 | meeting_history=meeting_history,
78 | summary_prompt_template=prompt_template
79 | )
--------------------------------------------------------------------------------
/app/static/css/custom-speaking-order.css:
--------------------------------------------------------------------------------
/* Custom speaking-order styles */
.custom-speaking-order-container {
    margin: 15px 0;
}

.sortable-list {
    border: 1px solid #e0e0e0;
    border-radius: 5px;
    padding: 10px;
    background-color: #f9f9f9;
    min-height: 50px;
}

.role-item {
    display: flex;
    align-items: center;
    padding: 8px 12px;
    margin-bottom: 8px;
    background-color: white;
    border: 1px solid #e0e0e0;
    border-radius: 4px;
    cursor: grab;
    transition: all 0.2s ease;
    box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
}

.role-item:last-child {
    margin-bottom: 0;
}

.role-item .handle {
    margin-right: 10px;
    color: #999;
    cursor: grab;
}

.role-item .role-name {
    flex-grow: 1;
    font-weight: 500;
}

/* Appearance while an item is being dragged */
.role-item.dragging {
    opacity: 0.5;
    background-color: #f0f0f0;
}

/* Appearance of the current drop target */
.role-item.drag-over {
    border: 2px dashed #3498db;
    background-color: #ecf0f1;
}

/* Highlight animation after two items swap positions */
.role-item.swapped {
    animation: swap-highlight 0.3s ease;
}

@keyframes swap-highlight {
    0% {
        background-color: #fff;
        transform: scale(1);
    }
    50% {
        background-color: #e3f2fd;
        transform: scale(1.05);
    }
    100% {
        background-color: #fff;
        transform: scale(1);
    }
}

/* Dark-theme overrides */
.dark-theme .sortable-list {
    border-color: #404040;
    background-color: #252525;
}

.dark-theme .role-item {
    background-color: #2d2d2d;
    border-color: #404040;
    color: #ecf0f1;
    box-shadow: 0 1px 3px rgba(0, 0, 0, 0.3);
}

.dark-theme .role-item .handle {
    color: #aaa;
}

.dark-theme .role-item.dragging {
    opacity: 0.5;
    background-color: #3d3d3d;
}

.dark-theme .role-item.drag-over {
    border: 2px dashed #3498db;
    background-color: #353535;
}

.dark-theme .role-item.swapped {
    animation: dark-swap-highlight 0.3s ease;
}

@keyframes dark-swap-highlight {
    0% {
        background-color: #2d2d2d;
        transform: scale(1);
    }
    50% {
        background-color: #1a4971;
        transform: scale(1.05);
    }
    100% {
        background-color: #2d2d2d;
        transform: scale(1);
    }
}
--------------------------------------------------------------------------------
/app/routes/api_key.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, HTTPException
2 | from pydantic import BaseModel
3 | import os
4 | from typing import List, Optional
5 | from dotenv import load_dotenv
6 | import json
7 | from app.utils.logger import logger
8 |
9 | router = APIRouter()
10 |
class ApiKey(BaseModel):
    """Request payload for creating an API key."""
    # Raw key value clients must present.
    api_key: str
    # Optional human-readable label for the key.
    description: Optional[str] = None
14 |
class ApiKeyInDB(ApiKey):
    """Stored form of an API key, extended with its numeric id."""
    id: int
17 |
def update_env_api_keys(api_keys_list: List[ApiKeyInDB]):
    """Persist the given API keys to the .env file and the process env.

    Serializes the keys to JSON, rewrites the ALLOW_API_KEY entry in .env
    (appending it when the line — or the whole file — does not exist yet),
    then reloads the environment so the running process sees the new value.

    Args:
        api_keys_list: Complete list of keys to persist; replaces any
            previously stored value.
    """
    env_path = '.env'
    try:
        with open(env_path, 'r', encoding='utf-8') as file:
            lines = file.readlines()
    except FileNotFoundError:
        # No .env yet — it will be created by the write below.
        lines = []

    # Serialize the key list as JSON so it round-trips through the env var.
    api_keys_data = [
        {
            "key": key.api_key,
            "description": key.description or "",
            "id": key.id
        }
        for key in api_keys_list
    ]
    api_keys_json = json.dumps(api_keys_data)

    replaced = False
    with open(env_path, 'w', encoding='utf-8') as file:
        for line in lines:
            if line.startswith('ALLOW_API_KEY='):
                file.write(f'ALLOW_API_KEY={api_keys_json}\n')
                replaced = True
            else:
                file.write(line)
        if not replaced:
            # Bug fix: previously the new value was silently dropped when
            # the .env file had no existing ALLOW_API_KEY line.
            file.write(f'ALLOW_API_KEY={api_keys_json}\n')

    # Reload .env and update os.environ directly so the change is visible
    # immediately in this process.
    load_dotenv(override=True)
    os.environ['ALLOW_API_KEY'] = api_keys_json
    logger.info(f"已更新 API 密钥环境变量: {api_keys_json}")
47 |
# In-memory store of API keys for this process.
api_keys: List[ApiKeyInDB] = []
current_id = 1

# On import, seed the store from the ALLOW_API_KEY environment variable
# and advance current_id past the largest id seen.
try:
    default_keys_json = os.getenv('ALLOW_API_KEY', '[]')
    default_keys_data = json.loads(default_keys_json)
    for key_data in default_keys_data:
        api_keys.append(ApiKeyInDB(
            id=key_data["id"],
            api_key=key_data["key"],
            description=key_data["description"]
        ))
        current_id = max(current_id, key_data["id"] + 1)
except json.JSONDecodeError:
    logger.warning("无法解析 API 密钥 JSON,使用空列表初始化")
except Exception as e:
    logger.error(f"加载 API 密钥时出错: {str(e)}")
67 |
68 | @router.get("/api_keys")
69 | async def get_api_keys():
70 | return api_keys
71 |
72 | @router.post("/api_keys")
73 | async def create_api_key(api_key: ApiKey):
74 | global current_id
75 | current_id += 1
76 | new_key = ApiKeyInDB(
77 | id=current_id,
78 | api_key=api_key.api_key,
79 | description=api_key.description
80 | )
81 | api_keys.append(new_key)
82 | # 更新 .env 文件
83 | update_env_api_keys(api_keys)
84 | return new_key
85 |
86 | @router.delete("/api_keys/{key_id}")
87 | async def delete_api_key(key_id: int):
88 | key_index = next((index for (index, key) in enumerate(api_keys) if key.id == key_id), None)
89 | if key_index is None:
90 | raise HTTPException(status_code=404, detail="API key not found")
91 | api_keys.pop(key_index)
92 | # 更新 .env 文件
93 | update_env_api_keys(api_keys)
94 | return {"message": "API key deleted"}
--------------------------------------------------------------------------------
/app/clients/base_client.py:
--------------------------------------------------------------------------------
1 | """基础客户端类,定义通用接口"""
2 | from typing import AsyncGenerator, Any
3 | import aiohttp
4 | from app.utils.logger import logger
5 | from abc import ABC, abstractmethod
6 |
7 |
class BaseClient(ABC):
    """Abstract base for chat-completion API clients.

    Holds the shared credentials/endpoint and provides helpers for
    building request payloads and streaming raw HTTP responses.
    """

    def __init__(self, api_key: str, api_url: str):
        """Store the API credentials.

        Args:
            api_key: API key used for authentication.
            api_url: Endpoint URL requests are sent to.
        """
        self.api_key = api_key
        self.api_url = api_url

    def _prepare_request_data(self, messages: list, model: str, **kwargs) -> dict:
        """Build the JSON payload for a chat-completion request.

        Args:
            messages: Conversation messages.
            model: Model identifier.
            **kwargs: Optional overrides (stream, temperature, max_tokens,
                top_p, presence_penalty, frequency_penalty) plus an optional
                ``custom_parameters`` dict merged into the payload last.

        Returns:
            dict: The request body.
        """
        option_defaults = (
            ("stream", True),
            ("temperature", 0.7),
            ("max_tokens", 2000),
            ("top_p", 1.0),
            ("presence_penalty", 0.0),
            ("frequency_penalty", 0.0),
        )
        payload = {"model": model, "messages": messages}
        for option, fallback in option_defaults:
            payload[option] = kwargs.get(option, fallback)

        # Custom parameters are merged last, so they win over the defaults.
        extra = kwargs.get("custom_parameters", {})
        if extra:
            payload.update(extra)

        return payload

    async def _make_request(self, headers: dict, data: dict, url: str = None) -> AsyncGenerator[bytes, None]:
        """POST the payload and yield raw response bytes.

        Args:
            headers: HTTP request headers.
            data: JSON request body.
            url: Optional override; defaults to ``self.api_url``.

        Yields:
            bytes: Raw response chunks. On a non-200 status (or any network
            error) the problem is logged and the generator simply ends.
        """
        target = url or self.api_url
        try:
            async with aiohttp.ClientSession() as session:
                async with session.post(target, headers=headers, json=data) as response:
                    if response.status != 200:
                        error_text = await response.text()
                        logger.error(f"API 请求失败: {error_text}")
                        return
                    async for piece in response.content.iter_any():
                        yield piece
        except Exception as e:
            logger.error(f"请求 API 时发生错误: {e}")

    @abstractmethod
    async def stream_chat(self, messages: list, model: str) -> AsyncGenerator[tuple[str, str], None]:
        """Stream a chat completion; implemented by subclasses.

        Args:
            messages: Conversation messages.
            model: Model identifier.

        Yields:
            tuple[str, str]: (content type, content text).
        """
        pass
90 |
--------------------------------------------------------------------------------
/app/meeting/meeting_modes/role_playing.py:
--------------------------------------------------------------------------------
1 | from typing import List, Dict, Any
2 | from .base_mode import BaseMeetingMode
3 | from app.meeting.utils.summary_generator import SummaryGenerator
4 |
5 |
class RolePlayingMode(BaseMeetingMode):
    """Role-playing meeting mode.

    Each agent adopts a specific role or stakeholder and argues the topic
    from that role's perspective.
    """

    def __init__(self):
        super().__init__(
            name="角色扮演",
            description="每个智能体扮演特定的角色或利益相关者,从不同视角探讨问题。"
        )

    def get_agent_prompt(self, agent_name: str, agent_role: str,
                        meeting_topic: str, current_round: int) -> str:
        """Return the prompt for the given agent based on the current round."""
        if current_round == 1:
            return f"""你正在参与一个关于"{meeting_topic}"的角色扮演会议。
作为{agent_role},你需要从你扮演的角色视角出发,表达对该议题的看法和关注点。
请完全沉浸在你的角色中,从该角色的立场、利益和价值观出发思考问题。
在本轮中,请简要介绍你的角色立场,并表达你对议题的初步看法。"""

        elif current_round != self.max_rounds:
            # Middle rounds. (The redundant `current_round != 1` conjunct was
            # removed: this branch is only reachable when the first-round test
            # above already failed.)
            return f"""这是角色扮演会议的中间阶段。
继续保持你作为{agent_role}的角色,回应其他参与者的观点。
你可以表达同意、不同意或提出问题,但一定要保持在角色的立场和视角内。
请对其他角色的发言做出回应,并进一步深化你的观点。"""

        else:
            return f"""这是角色扮演会议的最后阶段。
作为{agent_role},请总结你的立场和关键观点。
在坚持你的角色视角的同时,可以适当提出一些妥协或合作的可能性。
请提出1-2个从你的角色视角出发,认为可行的解决方案或建议。"""

    def determine_speaking_order(self, agents: List[Dict[str, Any]],
                                current_round: int) -> List[str]:
        """Speaking order: configured custom order first, then any new agents."""
        agent_names = [agent["name"] for agent in agents]

        if self.custom_speaking_order:
            # Keep only configured names that still exist in the agent list...
            valid_names = [name for name in self.custom_speaking_order if name in agent_names]
            # ...then append agents missing from the configured order
            # (they may have been added after the order was set).
            for name in agent_names:
                if name not in valid_names:
                    valid_names.append(name)
            return valid_names

        # Default: the order the agents were provided in.
        return agent_names

    def should_end_meeting(self, rounds_completed: int,
                          meeting_history: List[Dict[str, Any]]) -> bool:
        """End once the configured number of rounds has been completed."""
        return rounds_completed >= self.max_rounds

    def get_summary_prompt_template(self) -> str:
        """Return the summary prompt template for role-playing meetings."""
        return """
你是一个会议总结专家。请对以下关于"{meeting_topic}"的角色扮演会议进行总结。
会议记录如下:

{history_text}

请提供以下内容:
1. 每个角色的主要立场和观点概述
2. 各角色之间的主要分歧点和共识点
3. 提出的主要解决方案或建议
4. 不同角色视角带来的洞见
5. 可能的后续行动或讨论方向

请以清晰、结构化的方式呈现总结。
"""

    def summarize_meeting(self, meeting_topic: str,
                         meeting_history: List[Dict[str, Any]]) -> str:
        """Summarize the meeting via the shared SummaryGenerator.

        NOTE(review): sibling modes in this package pass `prompt_template=`,
        while at least one other call site passes `summary_prompt_template=` —
        confirm SummaryGenerator.generate_summary's actual keyword name.
        """
        return SummaryGenerator.generate_summary(
            meeting_topic=meeting_topic,
            meeting_history=meeting_history,
            prompt_template=self.get_summary_prompt_template()
        )
--------------------------------------------------------------------------------
/app/routers/roles.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, Depends, HTTPException
2 | from sqlalchemy.orm import Session
3 | from typing import List, Dict, Any
4 | import logging
5 |
6 | from app.models.database import get_db
7 | from app.processors.role_processor import RoleProcessor
8 |
9 | router = APIRouter(
10 | prefix="/v1/roles",
11 | tags=["roles"],
12 | )
13 |
14 | logger = logging.getLogger(__name__)
15 |
16 | @router.get("", response_model=List[Dict[str, Any]])
17 | def get_roles(db: Session = Depends(get_db)):
18 | """获取所有角色"""
19 | processor = RoleProcessor(db)
20 | try:
21 | return processor.get_roles()
22 | except Exception as e:
23 | logger.error(f"获取角色失败: {str(e)}")
24 | raise HTTPException(status_code=500, detail=f"获取角色失败: {str(e)}")
25 |
26 | @router.get("/{role_id}", response_model=Dict[str, Any])
27 | def get_role(role_id: int, db: Session = Depends(get_db)):
28 | """获取特定角色"""
29 | processor = RoleProcessor(db)
30 | try:
31 | role = processor.get_role(role_id)
32 | if not role:
33 | raise HTTPException(status_code=404, detail=f"角色ID {role_id} 不存在")
34 | return role
35 | except HTTPException:
36 | raise
37 | except Exception as e:
38 | logger.error(f"获取角色 {role_id} 失败: {str(e)}")
39 | raise HTTPException(status_code=500, detail=f"获取角色失败: {str(e)}")
40 |
41 | @router.post("", response_model=Dict[str, Any])
42 | def create_role(role_data: Dict[str, Any], db: Session = Depends(get_db)):
43 | """创建新角色"""
44 | processor = RoleProcessor(db)
45 | try:
46 | return processor.create_role(role_data)
47 | except ValueError as e:
48 | raise HTTPException(status_code=400, detail=str(e))
49 | except Exception as e:
50 | logger.error(f"创建角色失败: {str(e)}")
51 | raise HTTPException(status_code=500, detail=f"创建角色失败: {str(e)}")
52 |
53 | @router.put("/{role_id}", response_model=Dict[str, Any])
54 | def update_role(role_id: int, role_data: Dict[str, Any], db: Session = Depends(get_db)):
55 | """更新角色"""
56 | processor = RoleProcessor(db)
57 | try:
58 | role = processor.update_role(role_id, role_data)
59 | if not role:
60 | raise HTTPException(status_code=404, detail=f"角色ID {role_id} 不存在")
61 | return role
62 | except HTTPException:
63 | raise
64 | except Exception as e:
65 | logger.error(f"更新角色 {role_id} 失败: {str(e)}")
66 | raise HTTPException(status_code=500, detail=f"更新角色失败: {str(e)}")
67 |
68 | @router.delete("/{role_id}", response_model=Dict[str, str])
69 | def delete_role(role_id: int, db: Session = Depends(get_db)):
70 | """删除角色"""
71 | processor = RoleProcessor(db)
72 | try:
73 | success = processor.delete_role(role_id)
74 | if not success:
75 | raise HTTPException(status_code=404, detail=f"角色ID {role_id} 不存在")
76 | return {"message": "角色已删除"}
77 | except HTTPException:
78 | raise
79 | except Exception as e:
80 | logger.error(f"删除角色 {role_id} 失败: {str(e)}")
81 | raise HTTPException(status_code=500, detail=f"删除角色失败: {str(e)}")
--------------------------------------------------------------------------------
/app/routes/model.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, HTTPException, Depends
2 | from sqlalchemy.orm import Session
3 | from typing import List
4 | import logging
5 |
6 | from app.models.database import get_db, Model as DBModel
7 | from app.models.schemas import Model, ModelCreate
8 |
9 | router = APIRouter()
10 |
11 | logger = logging.getLogger(__name__)
12 |
13 | @router.get("/models", response_model=List[Model])
14 | async def get_models(db: Session = Depends(get_db)):
15 | return db.query(DBModel).all()
16 |
17 | @router.post("/models", response_model=Model)
18 | async def create_model(model: ModelCreate, db: Session = Depends(get_db)):
19 | try:
20 | logger.debug(f"Creating model with data: {model.dict()}")
21 | model_data = {
22 | 'name': model.name,
23 | 'type': model.type,
24 | 'provider': model.provider,
25 | 'api_key': model.api_key,
26 | 'api_url': model.api_url,
27 | 'model_name': model.model_name,
28 | 'temperature': model.temperature,
29 | 'top_p': model.top_p,
30 | 'max_tokens': model.max_tokens,
31 | 'presence_penalty': model.presence_penalty,
32 | 'frequency_penalty': model.frequency_penalty,
33 | 'enable_tools': model.enable_tools,
34 | 'tools': model.tools,
35 | 'tool_choice': model.tool_choice,
36 | 'enable_thinking': model.enable_thinking,
37 | 'thinking_budget_tokens': model.thinking_budget_tokens,
38 | 'custom_parameters': model.custom_parameters if model.custom_parameters else {}
39 | }
40 |
41 | logger.debug(f"Processed model data: {model_data}")
42 |
43 | db_model = DBModel(**model_data)
44 | db.add(db_model)
45 | db.commit()
46 | db.refresh(db_model)
47 | logger.debug(f"Created model: {db_model.__dict__}")
48 | return db_model
49 | except Exception as e:
50 | db.rollback()
51 | logger.error(f"Error creating model: {str(e)}")
52 | raise HTTPException(status_code=500, detail=str(e))
53 |
54 | @router.put("/models/{model_id}", response_model=Model)
55 | async def update_model(model_id: int, model: ModelCreate, db: Session = Depends(get_db)):
56 | db_model = db.query(DBModel).filter(DBModel.id == model_id).first()
57 | if not db_model:
58 | raise HTTPException(status_code=404, detail="Model not found")
59 |
60 | for key, value in model.dict().items():
61 | setattr(db_model, key, value)
62 |
63 | try:
64 | db.commit()
65 | db.refresh(db_model)
66 | return db_model
67 | except Exception as e:
68 | db.rollback()
69 | raise HTTPException(status_code=500, detail=str(e))
70 |
71 | @router.delete("/models/{model_id}")
72 | async def delete_model(model_id: int, db: Session = Depends(get_db)):
73 | db_model = db.query(DBModel).filter(DBModel.id == model_id).first()
74 | if not db_model:
75 | raise HTTPException(status_code=404, detail="Model not found")
76 |
77 | db.delete(db_model)
78 | db.commit()
79 | return {"status": "success"}
--------------------------------------------------------------------------------
/app/meeting/meeting_modes/brainstorming.py:
--------------------------------------------------------------------------------
1 | from typing import List, Dict, Any
2 | import random
3 |
4 | from app.meeting.meeting_modes.base_mode import BaseMeetingMode
5 | from app.meeting.utils.summary_generator import SummaryGenerator
6 |
class BrainstormingMode(BaseMeetingMode):
    """Brainstorming mode: encourages free idea generation without criticism."""

    def __init__(self):
        super().__init__(
            name="brainstorming",
            description="头脑风暴模式,鼓励参与者提出创新想法,不进行批评"
        )

    def get_agent_prompt(self, agent_name: str, agent_role: str,
                        meeting_topic: str, current_round: int) -> str:
        """Return the per-round brainstorming prompt for an agent."""
        if current_round == 1:
            return f"""你是{agent_name},{agent_role}。
你正在参加一个关于"{meeting_topic}"的头脑风暴会议。
这是第一轮讨论,请尽可能提出创新的、有创意的想法。
记住头脑风暴的规则:
1. 数量胜于质量,尽可能提出多的想法
2. 不要批评或评判其他人的想法
3. 欢迎奇怪或不寻常的想法
4. 可以结合和改进已有的想法

请根据你的专业背景和角色,提供至少3个相关的创意或解决方案。
"""
        elif current_round != self.max_rounds:
            # Middle rounds. (The redundant `current_round != 1` conjunct was
            # removed: this branch is only reachable when the first-round test
            # above already failed.)
            return f"""你是{agent_name},{agent_role}。
你正在参加一个关于"{meeting_topic}"的头脑风暴会议。
这是讨论的中间阶段,请根据之前的讨论内容,进一步发展想法,或提出全新的创意。
"""
        else:
            return f"""你是{agent_name},{agent_role}。
你正在参加一个关于"{meeting_topic}"的头脑风暴会议。
这是最后一轮讨论。
请根据之前的讨论内容,总结出你认为最有潜力的想法。
"""

    def determine_speaking_order(self, agents: List[Dict[str, Any]],
                                current_round: int) -> List[str]:
        """Speaking order: configured custom order first, otherwise shuffled."""
        agent_names = [agent["name"] for agent in agents]

        if self.custom_speaking_order:
            # Keep only configured names that still exist in the agent list...
            valid_names = [name for name in self.custom_speaking_order if name in agent_names]
            # ...then append agents missing from the configured order
            # (they may have been added after the order was set).
            for name in agent_names:
                if name not in valid_names:
                    valid_names.append(name)
            return valid_names

        # Default: randomized order to vary who speaks first each round.
        random.shuffle(agent_names)
        return agent_names

    def should_end_meeting(self, rounds_completed: int,
                          meeting_history: List[Dict[str, Any]]) -> bool:
        """End once the configured number of rounds has been completed."""
        return rounds_completed >= self.max_rounds

    def summarize_meeting(self, meeting_topic: str,
                         meeting_history: List[Dict[str, Any]]) -> str:
        """Summarize the meeting via the shared SummaryGenerator."""
        return SummaryGenerator.generate_summary(
            meeting_topic=meeting_topic,
            meeting_history=meeting_history,
            prompt_template=self.get_summary_prompt_template()
        )

    def get_summary_prompt_template(self) -> str:
        """Return the summary prompt template for brainstorming meetings."""
        return """
你是一个头脑风暴会议的总结专家。请对以下关于"{meeting_topic}"的头脑风暴会议进行总结。
会议记录如下:

{history_text}

请提供以下内容:
1. 产生的主要创意和想法概述
2. 最有潜力的想法
3. 特别独特或创新的视角
4. 可能的下一步行动
5. 需要进一步探索的领域

请以清晰、结构化的方式呈现总结,重点突出最有价值的创意。
"""
--------------------------------------------------------------------------------
/app/models/schemas.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseModel, Field, validator
2 | from typing import Optional, Dict, Union, List
3 | import json
4 |
class ModelBase(BaseModel):
    """Shared schema for LLM model configuration records."""
    name: str
    type: str  # usage type: 'reasoning', 'execution' or 'both'
    provider: str
    api_key: str
    api_url: str
    model_name: str
    max_tokens: int = 2000
    temperature: float = 0.7
    top_p: float = 1.0
    presence_penalty: float = 0.0
    frequency_penalty: float = 0.0
    enable_tools: bool = False
    tools: Optional[List[Dict]] = None
    tool_choice: Optional[Dict] = None
    enable_thinking: bool = False
    thinking_budget_tokens: int = 16000
    custom_parameters: Optional[Dict[str, Union[str, int, float, bool]]] = Field(default_factory=dict)

    @validator('temperature', 'top_p', pre=True)
    def convert_to_float(cls, v):
        """Coerce numeric fields that arrive as strings."""
        if isinstance(v, str):
            return float(v)
        return v

    @validator('type')
    def validate_type(cls, v):
        """Restrict `type` to the known usage kinds (case-insensitive)."""
        valid_types = {'reasoning', 'execution', 'both'}
        if v.lower() not in valid_types:
            raise ValueError(f'Type must be one of {valid_types}')
        return v.lower()

    @validator('provider')
    def validate_provider(cls, v):
        """Restrict `provider` to the supported backends (case-insensitive)."""
        valid_providers = {
            'deepseek', 'google', 'anthropic', 'oneapi',
            'openrouter', '腾讯云', 'grok3', 'openai-completion', 'other'
        }
        if v.lower() not in valid_providers:
            raise ValueError(f'Provider must be one of {valid_providers}')
        return v.lower()

    @validator('tools', 'tool_choice', pre=True)
    def validate_json_fields(cls, v):
        """Accept JSON-encoded strings for tools/tool_choice; None if unparsable."""
        if isinstance(v, str):
            try:
                return json.loads(v)
            except json.JSONDecodeError:
                # Narrowed from a bare `except`, which also swallowed
                # KeyboardInterrupt/SystemExit.
                return None
        return v

    @validator('custom_parameters', pre=True)
    def validate_custom_parameters(cls, v):
        """Accept a JSON-encoded string; fall back to {} for non-dict input."""
        if isinstance(v, str):
            try:
                return json.loads(v)
            except json.JSONDecodeError:
                # Narrowed from a bare `except` (see validate_json_fields).
                return {}
        return v if isinstance(v, dict) else {}

    class Config:
        # Allow construction directly from ORM objects.
        from_attributes = True
67 |
class ModelCreate(ModelBase):
    """Payload schema for creating a model (identical to ModelBase)."""
    pass
70 |
class Model(ModelBase):
    """Model as returned by the API, including its database id."""
    id: int
73 |
class ConfigurationStepBase(BaseModel):
    """One step of a multi-step configuration pipeline."""
    model_id: int
    step_type: str  # "reasoning" or "execution"
    step_order: int
    system_prompt: str = ""
79 |
class ConfigurationStepCreate(ConfigurationStepBase):
    """Payload schema for creating a configuration step."""
    pass
82 |
class ConfigurationStep(ConfigurationStepBase):
    """Stored configuration step, including database ids."""
    id: int
    configuration_id: int

    class Config:
        from_attributes = True
89 |
class ConfigurationBase(BaseModel):
    """Base schema for a multi-step configuration."""
    name: str
    is_active: bool = True
    # NOTE: pydantic copies field defaults per instance, so the usual
    # shared-mutable-default pitfall does not apply to this {}.
    transfer_content: Dict = {}
94 |
class ConfigurationCreate(ConfigurationBase):
    """Creation payload: base fields plus the ordered steps."""
    steps: List[ConfigurationStepCreate]
97 |
class Configuration(ConfigurationBase):
    """Stored configuration with its id and resolved steps."""
    id: int
    steps: List[ConfigurationStep]

    class Config:
        from_attributes = True
--------------------------------------------------------------------------------
/app/meeting/logger/meeting_logger.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | from datetime import datetime
4 | from typing import List, Dict, Any, Optional
5 |
class MeetingLogger:
    """Stores and retrieves meeting records as JSON files on disk."""

    def __init__(self, log_dir: str = "logs"):
        """Create the logger, ensuring the log directory exists.

        Args:
            log_dir: Directory where meeting JSON files are kept.
        """
        self.log_dir = log_dir
        # exist_ok avoids the check-then-create race of the previous
        # os.path.exists guard.
        os.makedirs(log_dir, exist_ok=True)

    def save_meeting_log(self, meeting_data: Dict[str, Any]) -> str:
        """Persist a meeting record; return its (possibly generated) id."""
        meeting_id = meeting_data.get("meeting_id")
        if not meeting_id:
            # Fall back to a timestamp-based id when none was supplied.
            meeting_id = datetime.now().strftime("%Y%m%d%H%M%S")
            meeting_data["meeting_id"] = meeting_id

        filepath = os.path.join(self.log_dir, f"{meeting_id}.json")
        with open(filepath, "w", encoding="utf-8") as f:
            json.dump(meeting_data, f, ensure_ascii=False, indent=2)

        return meeting_id

    def get_meeting_log(self, meeting_id: str) -> Optional[Dict[str, Any]]:
        """Load a meeting record by id; None if no such file exists."""
        filepath = os.path.join(self.log_dir, f"{meeting_id}.json")
        if not os.path.exists(filepath):
            return None
        with open(filepath, "r", encoding="utf-8") as f:
            return json.load(f)

    def get_all_meetings(self) -> List[Dict[str, Any]]:
        """Return summaries for all stored meetings, newest first."""
        meetings = []
        for filename in os.listdir(self.log_dir):
            if not filename.endswith(".json"):
                continue
            filepath = os.path.join(self.log_dir, filename)
            try:
                with open(filepath, "r", encoding="utf-8") as f:
                    meeting_data = json.load(f)
                # Extract only summary fields; full records stay on disk.
                meetings.append({
                    "meeting_id": meeting_data.get("meeting_id"),
                    "topic": meeting_data.get("topic"),
                    "mode": meeting_data.get("mode"),
                    "start_time": meeting_data.get("start_time"),
                    "end_time": meeting_data.get("end_time"),
                    "status": meeting_data.get("status"),
                    "agent_count": len(meeting_data.get("agents", []))
                })
            except Exception as e:
                # Bug fix: the message previously contained a corrupted
                # "(unknown)" placeholder instead of the actual file name.
                print(f"读取文件 {filename} 时出错: {e}")

        # Newest meetings first.
        meetings.sort(key=lambda x: x.get("start_time", ""), reverse=True)
        return meetings

    def search_meetings(self, keyword: str) -> List[Dict[str, Any]]:
        """Return meeting summaries whose topic or mode contains keyword (case-insensitive)."""
        keyword_lower = keyword.lower()
        return [
            meeting for meeting in self.get_all_meetings()
            if keyword_lower in meeting.get("topic", "").lower()
            or keyword_lower in meeting.get("mode", "").lower()
        ]
--------------------------------------------------------------------------------
/app/meeting/meeting_modes/swot_analysis.py:
--------------------------------------------------------------------------------
1 | from typing import List, Dict, Any
2 | from .base_mode import BaseMeetingMode
3 | from app.meeting.utils.summary_generator import SummaryGenerator
4 |
class SWOTAnalysisMode(BaseMeetingMode):
    """SWOT-analysis meeting mode.

    Walks the discussion through Strengths, Weaknesses, Opportunities and
    Threats, one aspect per round.
    """

    def __init__(self, max_rounds: int = 4):
        super().__init__(
            name="SWOT分析",
            description="对主题进行优势(Strengths)、劣势(Weaknesses)、机会(Opportunities)和威胁(Threats)的系统性分析。",
            max_rounds=max_rounds  # SWOT defaults to four rounds, one per aspect
        )
        self.swot_aspects = ["优势(Strengths)", "劣势(Weaknesses)",
                            "机会(Opportunities)", "威胁(Threats)"]

    def get_agent_prompt(self, agent_name: str, agent_role: str,
                        meeting_topic: str, current_round: int) -> str:
        """Build the per-round prompt focused on that round's SWOT aspect."""
        # Out-of-range rounds fall back to the first aspect.
        aspect_index = current_round - 1
        if not 0 <= aspect_index < len(self.swot_aspects):
            aspect_index = 0
        current_aspect = self.swot_aspects[aspect_index]

        return f"""你正在参与一个关于"{meeting_topic}"的SWOT分析会议。
当前我们正在讨论SWOT分析的"{current_aspect}"方面。

作为{agent_role},请从你的专业视角出发,针对主题"{meeting_topic}"的{current_aspect},提出你的分析和见解。
请尽量具体、深入,并考虑到你独特角色和技能可能带来的特殊洞察。

如果是"优势",请关注内部积极因素;
如果是"劣势",请关注内部消极因素;
如果是"机会",请关注外部积极因素;
如果是"威胁",请关注外部消极因素。

请提出至少3-5点相关的分析。"""

    def determine_speaking_order(self, agents: List[Dict[str, Any]],
                                current_round: int) -> List[str]:
        """Speaking order: configured custom order first, then any new agents."""
        names = [entry["name"] for entry in agents]

        if not self.custom_speaking_order:
            # Default: keep the order the agents were provided in.
            return names

        # Keep only configured names that still exist, then append agents
        # missing from the configured order (they may have been added later).
        ordered = [name for name in self.custom_speaking_order if name in names]
        for name in names:
            if name not in ordered:
                ordered.append(name)
        return ordered

    def should_end_meeting(self, rounds_completed: int,
                          meeting_history: List[Dict[str, Any]]) -> bool:
        """End once the configured number of rounds has been completed."""
        return rounds_completed >= self.max_rounds

    def summarize_meeting(self, meeting_topic: str,
                         meeting_history: List[Dict[str, Any]]) -> str:
        """Produce the meeting summary via the shared SummaryGenerator."""
        template = self.get_summary_prompt_template()
        return SummaryGenerator.generate_summary(
            meeting_topic=meeting_topic,
            meeting_history=meeting_history,
            prompt_template=template
        )

    def get_summary_prompt_template(self) -> str:
        """Return the summary prompt template for SWOT meetings."""
        return """
你是一个战略分析专家。请对以下关于"{meeting_topic}"的SWOT分析会议进行总结。
会议按照优势(Strengths)、劣势(Weaknesses)、机会(Opportunities)和威胁(Threats)四个方面进行了讨论。

会议记录如下:

{history_text}

请提供以下内容:
1. 每个SWOT方面的关键点概括(每个方面3-5点)
2. 这些因素之间可能的相互作用和影响
3. 基于SWOT分析的战略建议(至少3-5条)
4. 可能需要进一步探讨的领域或问题

请以表格或列表形式呈现SWOT矩阵,然后提供详细的战略分析和建议。
"""
--------------------------------------------------------------------------------
/app/meeting/meeting_modes/six_thinking_hats.py:
--------------------------------------------------------------------------------
1 | from typing import List, Dict, Any
2 | from .base_mode import BaseMeetingMode
3 | from app.meeting.utils.summary_generator import SummaryGenerator
4 |
class SixThinkingHatsMode(BaseMeetingMode):
    """Six Thinking Hats meeting mode (Edward de Bono).

    Each round examines the topic through one hat's thinking style; the six
    hats map onto six rounds by default.
    """

    def __init__(self, max_rounds: int = 6):
        super().__init__(
            name="六顶思考帽",
            description="使用爱德华·德博诺的六顶思考帽方法,从六个不同角度思考问题。",
            # Bug fix: the caller-supplied max_rounds was previously ignored
            # here (a hard-coded 6 was passed to super) and then patched up by
            # reassigning self.max_rounds afterwards. Pass it through, as the
            # sibling SWOT mode does.
            max_rounds=max_rounds
        )
        # One hat per round. NOTE(review): with max_rounds != 6 the
        # round-to-hat mapping clamps out-of-range rounds to the white hat
        # (see get_agent_prompt) — confirm that is the intended behavior.
        self.hats = [
            {"color": "白色", "focus": "事实和信息", "description": "关注客观事实和数据,不做判断和推测"},
            {"color": "红色", "focus": "感受和直觉", "description": "表达直觉、情感和感受,不需要解释理由"},
            {"color": "黑色", "focus": "谨慎和风险", "description": "指出潜在问题、风险和挑战,进行批判性思考"},
            {"color": "黄色", "focus": "积极和机会", "description": "关注积极面、价值和收益,寻找可能性"},
            {"color": "绿色", "focus": "创意和可能性", "description": "提出新想法、创新方案和替代选择"},
            {"color": "蓝色", "focus": "思考的整合", "description": "管理和总结思考过程,进行元认知"}
        ]

    def get_agent_prompt(self, agent_name: str, agent_role: str,
                        meeting_topic: str, current_round: int) -> str:
        """Return the prompt for the hat assigned to the current round."""
        # Out-of-range rounds fall back to the first (white) hat.
        if current_round < 1 or current_round > len(self.hats):
            current_round = 1

        current_hat = self.hats[current_round - 1]

        return f"""你正在参与一个关于"{meeting_topic}"的六顶思考帽会议。
当前我们正在使用{current_hat["color"]}思考帽,专注于{current_hat["focus"]}。
{current_hat["color"]}思考帽的特点是:{current_hat["description"]}。

作为{agent_role},请在{current_hat["color"]}思考帽的框架下,对"{meeting_topic}"发表你的见解。
请完全遵循当前思考帽的思维方式,不要混入其他思考帽的视角。

具体而言:
- 白帽:请提供客观事实、数据和信息,不加入个人观点和判断
- 红帽:表达你的情感反应、直觉和感受,不需要逻辑支持
- 黑帽:指出方案中的风险、问题和挑战,进行严谨的批判性思考
- 黄帽:探索积极方面、价值和好处,保持乐观和建设性
- 绿帽:提出新的创意、可能性和替代方案,打破常规思维
- 蓝帽:梳理讨论脉络,总结前面的思考,提出下一步行动

请提出至少3-5点符合当前思考帽特点的观点。"""

    def determine_speaking_order(self, agents: List[Dict[str, Any]],
                                current_round: int) -> List[str]:
        """Speaking order: agents speak in the order they were provided."""
        return [agent["name"] for agent in agents]

    def should_end_meeting(self, rounds_completed: int,
                          meeting_history: List[Dict[str, Any]]) -> bool:
        """End once the configured number of rounds has been completed."""
        return rounds_completed >= self.max_rounds

    def summarize_meeting(self, meeting_topic: str,
                         meeting_history: List[Dict[str, Any]]) -> str:
        """Summarize the meeting via the shared SummaryGenerator."""
        return SummaryGenerator.generate_summary(
            meeting_topic=meeting_topic,
            meeting_history=meeting_history,
            prompt_template=self.get_summary_prompt_template()
        )

    def get_summary_prompt_template(self) -> str:
        """Return the summary prompt template for six-hats meetings."""
        return """
你是一个思维方法专家。请对以下关于"{meeting_topic}"的六顶思考帽会议进行总结。
会议按照六顶思考帽的方法进行了讨论:白帽(事实)、红帽(情感)、黑帽(风险)、黄帽(积极)、绿帽(创意)和蓝帽(整合)。

会议记录如下:

{history_text}

请提供以下内容:
1. 每个思考帽下的关键见解概括(每个帽子3-5点)
2. 综合所有思考角度后对议题的全面理解
3. 根据六帽思考得出的主要行动建议(至少3-5条)
4. 思考过程中发现的关键矛盾或需要进一步探讨的问题

请以结构化的方式呈现总结,确保每个思考帽的视角都得到充分体现,并提供整合性的结论。
"""
--------------------------------------------------------------------------------
/app/clients/openai_client.py:
--------------------------------------------------------------------------------
1 | """OpenAI API 客户端"""
2 | import json
3 | from typing import AsyncGenerator
4 | from app.utils.logger import logger
5 | from .base_client import BaseClient
6 |
7 |
class OpenAIClient(BaseClient):
    """Client for the OpenAI chat-completions API (streaming and non-streaming)."""

    def __init__(self, api_key: str, api_url: str = "https://api.openai.com/v1/chat/completions"):
        """Initialize the OpenAI client.

        Args:
            api_key: OpenAI API key.
            api_url: OpenAI API endpoint.
        """
        super().__init__(api_key, api_url)
        self.provider = "openai"

    async def stream_chat(
        self,
        messages: list,
        model: str = "gpt-3.5-turbo",
        stream: bool = True,
        **kwargs
    ) -> AsyncGenerator[tuple[str, str], None]:
        """Streaming or non-streaming chat completion.

        Args:
            messages: Conversation messages.
            model: Model name.
            stream: Whether to use streaming output.
            **kwargs: Extra options, including custom parameters.

        Yields:
            tuple[str, str]: (content type, content)
                content type: "answer" or "reasoning"
                content: the actual text.

        NOTE(review): each network chunk is decoded and split on newlines
        independently, so an SSE line (or, in the non-streaming branch, a
        JSON body) that straddles a chunk boundary fails to parse and is
        silently skipped by the JSONDecodeError handler — confirm whether a
        cross-chunk buffer is needed here.
        """
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
            # "Accept": "text/event-stream" if stream else "application/json",
        }

        # Build the request payload via the base-class helper.
        data = self._prepare_request_data(messages, model, stream=stream, **kwargs)
        logger.debug(f"OpenAI 请求数据: {data}")
        if stream:
            first_chunk = True
            async for chunk in self._make_request(headers, data):
                try:
                    chunk_str = chunk.decode('utf-8')
                    if not chunk_str.strip():
                        continue

                    for line in chunk_str.split('\n'):
                        if line.startswith('data: '):
                            json_str = line[6:]
                            if json_str.strip() == '[DONE]':
                                return

                            data = json.loads(json_str)
                            delta = data.get('choices', [{}])[0].get('delta', {})

                            # The first delta may carry only the role; emit its
                            # content (if any) and stop special-casing after a
                            # role has been seen.
                            if first_chunk:
                                content = delta.get('content', '')
                                role = delta.get('role', '')
                                if role:
                                    first_chunk = False
                                if content:
                                    yield "answer", content
                                continue

                            content = delta.get('content', '')
                            if content:
                                yield "answer", content

                except json.JSONDecodeError:
                    continue
                except Exception as e:
                    logger.error(f"处理 OpenAI 流式响应时发生错误: {e}")
                    continue
        else:
            async for chunk in self._make_request(headers, data):
                try:
                    response = json.loads(chunk.decode('utf-8'))
                    content = response.get('choices', [{}])[0].get('message', {}).get('content', '')
                    if content:
                        yield "answer", content
                    return
                except json.JSONDecodeError:
                    continue
                except Exception as e:
                    logger.error(f"处理 OpenAI 非流式响应时发生错误: {e}")
                    continue
--------------------------------------------------------------------------------
/app/routes/configuration.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, HTTPException, Depends
2 | from sqlalchemy.orm import Session
3 | from typing import List
4 |
5 | from app.models.database import get_db, Configuration as DBConfiguration, ConfigurationStep, Model as DBModel
6 | from app.models.schemas import Configuration, ConfigurationCreate
7 |
8 | router = APIRouter()
9 |
@router.get("/configurations", response_model=List[Configuration])
async def get_configurations(db: Session = Depends(get_db)):
    """Return every stored configuration."""
    configurations = db.query(DBConfiguration).all()
    return configurations
13 |
@router.post("/configurations", response_model=Configuration)
async def create_configuration(config: ConfigurationCreate, db: Session = Depends(get_db)):
    """Create a configuration together with its ordered steps.

    Every referenced model must exist and be usable for its step type.

    Raises:
        HTTPException: 404 for an unknown model, 400 for a model/step-type
            mismatch, 500 for any other failure (after rollback).
    """
    try:
        # Validate that every model exists and supports the requested step type.
        for step in config.steps:
            model = db.query(DBModel).filter(DBModel.id == step.model_id).first()
            if not model:
                raise HTTPException(status_code=404, detail=f"Model {step.model_id} not found")
            if model.type not in ["both", step.step_type]:
                raise HTTPException(
                    status_code=400,
                    detail=f"Model {model.name} cannot be used for {step.step_type}"
                )

        # Create the configuration record.
        db_config = DBConfiguration(
            name=config.name,
            is_active=config.is_active,
            transfer_content=config.transfer_content
        )
        db.add(db_config)
        db.commit()
        db.refresh(db_config)

        # Create the configuration steps.
        for step in config.steps:
            db_step = ConfigurationStep(
                configuration_id=db_config.id,
                model_id=step.model_id,
                step_type=step.step_type,
                step_order=step.step_order,
                system_prompt=step.system_prompt
            )
            db.add(db_step)

        db.commit()
        db.refresh(db_config)
        return db_config

    except HTTPException:
        # Bug fix: HTTPException subclasses Exception, so without this clause
        # the 404/400 validation errors above were converted into 500s.
        db.rollback()
        raise
    except Exception as e:
        db.rollback()
        raise HTTPException(status_code=500, detail=str(e))
56 |
@router.put("/configurations/{config_id}", response_model=Configuration)
async def update_configuration(config_id: int, config: ConfigurationCreate, db: Session = Depends(get_db)):
    """Replace a configuration's fields and its entire step list.

    Existing steps are deleted and recreated from the payload.

    Raises:
        HTTPException: 404 when the configuration does not exist,
            500 for any other failure (after rollback).
    """
    try:
        db_config = db.query(DBConfiguration).filter(DBConfiguration.id == config_id).first()
        if not db_config:
            raise HTTPException(status_code=404, detail="Configuration not found")

        db_config.name = config.name
        db_config.is_active = config.is_active
        db_config.transfer_content = config.transfer_content

        # Replace all existing steps with the ones from the payload.
        db.query(ConfigurationStep).filter(
            ConfigurationStep.configuration_id == config_id
        ).delete()

        for step in config.steps:
            db_step = ConfigurationStep(
                configuration_id=config_id,
                model_id=step.model_id,
                step_type=step.step_type,
                step_order=step.step_order,
                system_prompt=step.system_prompt
            )
            db.add(db_step)

        db.commit()
        db.refresh(db_config)
        return db_config

    except HTTPException:
        # Bug fix: without this clause the 404 above was caught by the
        # generic handler below and surfaced as a 500.
        db.rollback()
        raise
    except Exception as e:
        db.rollback()
        raise HTTPException(status_code=500, detail=str(e))
89 |
@router.delete("/configurations/{config_id}")
async def delete_configuration(config_id: int, db: Session = Depends(get_db)):
    """Delete one configuration; 404 when the id is unknown."""
    target = db.query(DBConfiguration).filter(DBConfiguration.id == config_id).first()
    if target is None:
        raise HTTPException(status_code=404, detail="Configuration not found")

    db.delete(target)
    db.commit()
    return {"status": "success"}
--------------------------------------------------------------------------------
/app/meeting/agents/agent_factory.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional
2 | from .agent import Agent
3 |
class AgentFactory:
    """Factory for creating agents, including a predefined roster."""

    def get_predefined_agents(self, model_name: Optional[str] = None, base_url: Optional[str] = None, api_key: Optional[str] = None) -> List[Agent]:
        """Return the list of predefined agents.

        Args:
            model_name: Default model name applied to every agent (individual
                agents may still override it below).
            base_url: Optional API base URL shared by the agents.
            api_key: Optional API key shared by the agents.

        Returns:
            List[Agent]: The instantiated predefined agents.
        """
        # Shared model parameters applied to every agent.
        model_params = {}
        if model_name:
            model_params["model_name"] = model_name

        # Per-agent personalised model parameters.
        predefined_agents_with_params = [
            {
                "name": "创新者",
                "role_description": "负责提出创新性想法,打破传统思维",
                "personality": "积极主动,富有想象力",
                "skills": ["创意思维", "跨领域联想", "创新方案设计"],
                "model_params": {
                    **model_params,
                    "temperature": 0.9,  # high temperature for more creativity
                    "max_tokens": 1000,
                    "model_name": model_name or "gemini-1.5-pro",  # each agent may use a different model
                },
                "base_url": base_url,  # may be configured per agent
                "api_key": api_key  # may be configured per agent
            },
            {
                "name": "批判者",
                "role_description": "负责质疑现有方案,从不同角度分析问题",
                "personality": "理性冷静,善于分析",
                "skills": ["逻辑推理", "风险评估", "批判性思维"],
                "model_params": {
                    **model_params,
                    "temperature": 0.5,  # low temperature for tighter logic
                    "max_tokens": 1200,
                    "model_name": model_name or "gpt-4-turbo",  # the critic gets a stronger model
                },
                "base_url": base_url,
                "api_key": api_key
            },
            {
                "name": "协调者",
                "role_description": "整合各方观点,寻求共识和平衡",
                "personality": "平和包容,善于沟通",
                "skills": ["沟通协调", "冲突解决", "团队建设"],
                "model_params": {
                    **model_params,
                    "temperature": 0.7,  # medium temperature: balance creativity and logic
                    "max_tokens": 1100,
                }
            },
            {
                "name": "执行者",
                "role_description": "关注实际操作和落地方案",
                "personality": "务实高效,注重细节",
                "skills": ["项目管理", "资源分配", "行动计划制定"],
                "model_params": {
                    **model_params,
                    "temperature": 0.4,  # low temperature: focus on practicality and detail
                    "max_tokens": 900,
                }
            }
        ]

        # Instantiate the agents; entries without their own base_url/api_key
        # fall back to the shared values.
        predefined_agents = []
        for agent_data in predefined_agents_with_params:
            agent = Agent(
                name=agent_data["name"],
                role_description=agent_data["role_description"],
                personality=agent_data["personality"],
                skills=agent_data["skills"],
                model_params=agent_data["model_params"],
                base_url=agent_data.get("base_url", base_url),
                api_key=agent_data.get("api_key", api_key)
            )
            predefined_agents.append(agent)
        return predefined_agents

    def create(self, name, role, personality, skills, model_params=None, base_url=None, api_key=None):
        """Create a single agent instance."""
        agent = Agent(
            name=name,
            role_description=role,
            personality=personality,
            skills=skills,
            model_params=model_params,
            base_url=base_url,
            api_key=api_key
        )
        return agent

    def create_agent(self, name, role, personality, skills, model_params=None, base_url=None, api_key=None):
        """Alias for create(), kept for backward compatibility.

        Bug fix: this alias used to silently drop the api_key that create()
        accepts; it now accepts and forwards it (the None default keeps the
        old call signature working).
        """
        return self.create(name, role, personality, skills, model_params, base_url, api_key)
--------------------------------------------------------------------------------
/app/meeting/meeting_modes/discussion.py:
--------------------------------------------------------------------------------
1 | from typing import List, Dict, Any
2 | import random
3 |
4 | from app.meeting.meeting_modes.base_mode import BaseMeetingMode
5 | from app.meeting.utils.summary_generator import SummaryGenerator
6 |
class DiscussionMode(BaseMeetingMode):
    """Plain discussion mode: participants speak in turn on a shared topic."""

    def __init__(self):
        super().__init__(
            name="discussion",
            description="普通讨论模式,参与者轮流发言,共同讨论主题"
        )
        # Kept for backward compatibility with any external readers; the
        # summary itself is produced via SummaryGenerator.generate_summary.
        self.summary_generator = SummaryGenerator()

    def get_agent_prompt(self, agent_name: str, agent_role: str,
                        meeting_topic: str, current_round: int) -> str:
        """Build the per-round prompt for one agent."""
        if current_round == 1:
            return f"""你是{agent_name},{agent_role}。
你正在参加一个关于"{meeting_topic}"的讨论。
这是第一轮讨论,请分享你对这个主题的初步看法和观点。
请考虑你的专业背景和角色,提供有价值的见解。
"""
        elif current_round != self.max_rounds:
            # Middle rounds (the `current_round != 1` test the original also
            # made here was redundant inside this elif chain).
            return f"""你是{agent_name},{agent_role}。
你正在参加一个关于"{meeting_topic}"的讨论。
这是第{current_round}轮讨论,请根据之前的讨论内容,进一步发展你的观点,或回应其他参与者的意见。
你可以提出新的见解,也可以对之前的观点进行补充或质疑。
"""
        else:
            return f"""你是{agent_name},{agent_role}。
你正在参加一个关于"{meeting_topic}"的讨论。
这是讨论的最后一轮。
请根据之前的讨论内容,进一步总结你的观点,或回应其他参与者的意见。
"""

    def determine_speaking_order(self, agents: List[Dict[str, Any]],
                                current_round: int) -> List[str]:
        """Determine the speaking order for a round."""
        agent_names = [agent["name"] for agent in agents]

        # When a custom order is configured, honor it.
        if self.custom_speaking_order:
            # Keep only names that actually belong to the current agents.
            valid_names = [name for name in self.custom_speaking_order if name in agent_names]

            # Append agents missing from the custom order (e.g. added later).
            for name in agent_names:
                if name not in valid_names:
                    valid_names.append(name)

            return valid_names

        # Otherwise keep the order the agents were provided in.
        return agent_names

    def should_end_meeting(self, rounds_completed: int,
                          meeting_history: List[Dict[str, Any]]) -> bool:
        """End the meeting once the configured number of rounds is done."""
        return rounds_completed >= self.max_rounds

    def summarize_meeting(self, topic: str, meeting_history: List[Dict[str, str]]) -> str:
        """Generate the meeting summary.

        Args:
            topic: Meeting topic.
            meeting_history: Chronological meeting transcript.

        Returns:
            str: The generated summary text.
        """
        # Bug fix: the previous template used {topic}/{history} placeholders,
        # while SummaryGenerator templates elsewhere in this package (see
        # DebateMode.get_summary_prompt_template) use {meeting_topic} and
        # {history_text}, so these placeholders were never substituted.
        # The unused per-agent round estimate computed here was dead code
        # and has been removed.
        prompt_template = """
请参考以下关于"{meeting_topic}"的讨论内容:
{history_text}

作为一名会议总结专家,请提供以下内容:
1. 讨论的主要主题和观点概述(不超过3点)
2. 讨论中达成的主要共识(如果有)
3. 存在的主要分歧或不同视角(如果有)
4. 提出的解决方案或行动建议
5. 可能需要进一步讨论或研究的问题

请以清晰、结构化的方式呈现总结,重点突出最重要的内容。
"""

        # Delegate to the shared summary generator.
        return SummaryGenerator.generate_summary(
            meeting_topic=topic,
            meeting_history=meeting_history,
            prompt_template=prompt_template
        )

    def _format_history_for_summary(self, meeting_history: List[Dict[str, str]]) -> str:
        """Format the transcript (minus system messages) for summarization.

        Args:
            meeting_history: Meeting history entries.

        Returns:
            str: One "[agent]: content" paragraph per non-system message.
        """
        formatted_text = ""
        for entry in meeting_history:
            if entry["agent"] != "system":  # drop system messages
                formatted_text += f"[{entry['agent']}]: {entry['content']}\n\n"

        return formatted_text
--------------------------------------------------------------------------------
/app/meeting/meeting_modes/debate.py:
--------------------------------------------------------------------------------
1 | from typing import List, Dict, Any
2 | from app.meeting.meeting_modes.base_mode import BaseMeetingMode
3 | from app.meeting.utils.summary_generator import SummaryGenerator
4 |
class DebateMode(BaseMeetingMode):
    """Adversarial debate mode: agents are split into pro/con sides."""

    def __init__(self, max_rounds: int = 3):
        super().__init__(
            name="对抗辩论",
            description="设定正反方,就议题进行辩论,旨在深入分析问题的不同方面。",
            max_rounds=max_rounds  # honor the caller-supplied round limit
        )

    @staticmethod
    def _is_pro_side(agent_name: str) -> bool:
        """Deterministically assign a debate side from the agent's name.

        Bug fix: get_agent_prompt used `len(name) % 2` while
        determine_speaking_order used `hash(name + role) % 2`, so an agent
        could be prompted as one side but scheduled as the other — and str
        hash() is salted per process, making the schedule unstable across
        runs. Both methods now share this single deterministic rule.
        """
        return len(agent_name) % 2 == 0

    def get_agent_prompt(self, agent_name: str, agent_role: str,
                        meeting_topic: str, current_round: int) -> str:
        """Build the per-round debate prompt for one agent."""
        side = "正方" if self._is_pro_side(agent_name) else "反方"

        if current_round == 1:
            return f"""你正在参与一个关于"{meeting_topic}"的对抗辩论会议。
你被分配到{side}。作为{agent_role},请从{side}立场出发,就"{meeting_topic}"提出你的论点和理由。
请提供3-5个有力的论据支持你的立场。"""

        elif current_round != self.max_rounds:
            return f"""这是对抗辩论的中间轮。
继续作为{side},请针对对方的论点进行反驳,并进一步强化自己的立场。
找出对方论点中的漏洞或不足,同时补充新的支持论据。"""

        else:
            # Final round (was `elif current_round == self.max_rounds`, which
            # silently returned None for any round past the limit).
            return f"""这是对抗辩论的最后阶段。
作为{side},请总结你的立场和核心论点,驳斥对方主要观点,并给出最终的论述。
尝试说服听众接受你的立场,展示为什么你的立场更合理、更有说服力。"""

    def determine_speaking_order(self, agents: List[Dict[str, Any]],
                                current_round: int) -> List[str]:
        """Alternate pro/con speakers, pro side first in each pair."""
        pro_agents = []
        con_agents = []

        # Assign sides with the same deterministic rule as get_agent_prompt.
        for agent in agents:
            agent_name = agent["name"]
            if self._is_pro_side(agent_name):
                pro_agents.append(agent_name)
            else:
                con_agents.append(agent_name)

        # Rebalance so both sides have at least one speaker.
        # NOTE(review): rebalanced agents keep their original side in
        # get_agent_prompt; a fully consistent fix would persist the final
        # assignment — confirm whether that matters for small groups.
        if not pro_agents and con_agents:
            mid = len(con_agents) // 2
            pro_agents = con_agents[:mid]
            con_agents = con_agents[mid:]
        elif not con_agents and pro_agents:
            mid = len(pro_agents) // 2
            con_agents = pro_agents[:mid]
            pro_agents = pro_agents[mid:]

        # Interleave the two sides.
        speaking_order = []
        max_len = max(len(pro_agents), len(con_agents))
        for i in range(max_len):
            if i < len(pro_agents):
                speaking_order.append(pro_agents[i])
            if i < len(con_agents):
                speaking_order.append(con_agents[i])

        return speaking_order

    def should_end_meeting(self, rounds_completed: int,
                          meeting_history: List[Dict[str, Any]]) -> bool:
        """Never end the meeting from here.

        The Meeting instance may run with a different max_rounds than this
        mode's own, so termination is left to the Meeting class's
        `current_round > max_rounds` check.
        """
        return False

    def summarize_meeting(self, meeting_topic: str,
                         meeting_history: List[Dict[str, Any]]) -> str:
        """Summarize the debate via the shared summary generator."""
        return SummaryGenerator.generate_summary(
            meeting_topic=meeting_topic,
            meeting_history=meeting_history,
            prompt_template=self.get_summary_prompt_template()
        )

    def get_summary_prompt_template(self) -> str:
        """Return the prompt template used to summarize a debate."""
        return """
你是一个辩论分析专家。请对以下关于"{meeting_topic}"的对抗辩论会议进行总结。
会议记录如下:

{history_text}

请提供以下内容:
1. 正方的主要论点和论据概述
2. 反方的主要论点和论据概述
3. 双方论点的对比分析
4. 各论点的强弱点评估
5. 辩论的关键洞见和启示

请保持中立,不要偏向任何一方,并以结构化的方式呈现总结。
"""
--------------------------------------------------------------------------------
/app/routers/discussion_groups.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, Depends, HTTPException, Request
2 | from sqlalchemy.orm import Session
3 | from typing import List, Dict, Any
4 | import logging
5 | import json
6 | import traceback
7 | from fastapi.responses import StreamingResponse
8 |
9 | from app.models.database import get_db
10 | from app.processors.discussion_processor import DiscussionProcessor
11 |
12 | router = APIRouter(
13 | prefix="/v1/discussion_groups",
14 | tags=["discussion_groups"],
15 | )
16 |
17 | logger = logging.getLogger(__name__)
18 |
@router.get("", response_model=List[Dict[str, Any]])
def get_discussion_groups(db: Session = Depends(get_db)):
    """List every discussion group."""
    proc = DiscussionProcessor(db)
    try:
        groups = proc.get_groups()
    except Exception as e:
        logger.error(f"获取讨论组失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"获取讨论组失败: {str(e)}")
    return groups
28 |
@router.get("/{group_id}", response_model=Dict[str, Any])
def get_discussion_group(group_id: int, db: Session = Depends(get_db)):
    """Fetch one discussion group by id; 404 when it does not exist."""
    proc = DiscussionProcessor(db)
    try:
        result = proc.get_group(group_id)
        if not result:
            raise HTTPException(status_code=404, detail=f"讨论组ID {group_id} 不存在")
        return result
    except HTTPException:
        # Re-raise the 404 untouched instead of wrapping it as a 500.
        raise
    except Exception as e:
        logger.error(f"获取讨论组 {group_id} 失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"获取讨论组失败: {str(e)}")
43 |
@router.post("", response_model=Dict[str, Any])
def create_discussion_group(group_data: Dict[str, Any], db: Session = Depends(get_db)):
    """Create a new discussion group from the posted payload."""
    # Reject payloads missing any mandatory key up front.
    for field in ("name", "mode", "role_ids"):
        if field not in group_data:
            raise HTTPException(status_code=400, detail=f"缺少必要字段: {field}")

    proc = DiscussionProcessor(db)
    try:
        return proc.create_group(group_data)
    except ValueError as e:
        # Domain validation errors from the processor map to 400.
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error(f"创建讨论组失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"创建讨论组失败: {str(e)}")
61 |
@router.put("/{group_id}", response_model=Dict[str, Any])
def update_discussion_group(group_id: int, group_data: Dict[str, Any], db: Session = Depends(get_db)):
    """Update an existing discussion group; 404 when the id is unknown."""
    proc = DiscussionProcessor(db)
    try:
        updated = proc.update_group(group_id, group_data)
        if not updated:
            raise HTTPException(status_code=404, detail=f"讨论组ID {group_id} 不存在")
        return updated
    except HTTPException:
        # Re-raise the 404 untouched instead of wrapping it as a 500.
        raise
    except Exception as e:
        logger.error(f"更新讨论组 {group_id} 失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"更新讨论组失败: {str(e)}")
76 |
@router.delete("/{group_id}", response_model=Dict[str, str])
def delete_discussion_group(group_id: int, db: Session = Depends(get_db)):
    """Delete a discussion group; 404 when the id is unknown."""
    proc = DiscussionProcessor(db)
    try:
        deleted = proc.delete_group(group_id)
        if not deleted:
            raise HTTPException(status_code=404, detail=f"讨论组ID {group_id} 不存在")
    except HTTPException:
        # Re-raise the 404 untouched instead of wrapping it as a 500.
        raise
    except Exception as e:
        logger.error(f"删除讨论组 {group_id} 失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"删除讨论组失败: {str(e)}")
    return {"message": "讨论组已删除"}
91 |
@router.post("/stream/{group_id}")
async def stream_discussion_process(group_id: int, request: Request, db: Session = Depends(get_db)):
    """Start a meeting for the group and stream its progress as SSE."""
    processor = DiscussionProcessor(db)

    try:
        # Start the meeting.
        meeting_id = processor.start_meeting(group_id)
    except Exception as e:
        logger.error(f"启动流式讨论过程时出错: {str(e)}", exc_info=True)

        # Surface the failure as a single SSE event followed by a terminator.
        async def error_stream():
            error_data = {
                "error": str(e),
                "detail": traceback.format_exc()
            }
            yield f"data: {json.dumps(error_data, ensure_ascii=False)}\n\n"
            yield "data: [DONE]\n\n"

        return StreamingResponse(error_stream(), media_type="text/event-stream")

    # Stream the running discussion back to the client.
    return StreamingResponse(
        processor._stream_discussion_process(meeting_id),
        media_type="text/event-stream"
    )
--------------------------------------------------------------------------------
/app/static/js/thinking.js:
--------------------------------------------------------------------------------
// Streaming "thinking" content handler: renders the model's reasoning text
// in a collapsible panel prepended to a message container.
class ThinkingHandler {
    constructor() {
        this.thinkingContent = null;
        this.isThinkingVisible = false;
        this.initialize();
    }

    initialize() {
        // Template node for the thinking-content panel (cloned per message).
        this.thinkingContent = document.createElement('div');
        this.thinkingContent.className = 'thinking-content collapsed';
        this.thinkingContent.style.whiteSpace = 'pre-wrap';

        // Template node for the show/hide toggle button.
        this.toggleButton = document.createElement('button');
        this.toggleButton.className = 'btn btn-sm btn-outline-secondary mb-2 mt-1';
        this.toggleButton.textContent = '显示思考过程';
        this.toggleButton.style.display = 'none';
        this.toggleButton.addEventListener('click', () => this.toggleThinking());

        // Marked parser options (kept for compatibility; highlighting disabled).
        this.markedOptions = {
            gfm: true,
            breaks: true,
            sanitize: false,
            highlight: null // disable code highlighting
        };
    }

    // Render (or re-render) the thinking text inside the message container.
    handleThinking(thinking, messageContainer) {
        if (!thinking || thinking.trim() === '') return;

        if (!messageContainer.querySelector('.thinking-content')) {
            messageContainer.prepend(this.thinkingContent.cloneNode(true));
            messageContainer.prepend(this.toggleButton.cloneNode(true));

            // Grab the freshly inserted clones.
            this.currentThinkingContent = messageContainer.querySelector('.thinking-content');
            this.currentToggleButton = messageContainer.querySelector('button');

            // Wire the per-message toggle (clones do not inherit listeners).
            this.currentToggleButton.addEventListener('click', () => {
                this.currentThinkingContent.classList.toggle('collapsed');
                // Reset the scrollbar to the top after expanding.
                if (!this.currentThinkingContent.classList.contains('collapsed')) {
                    setTimeout(() => {
                        this.currentThinkingContent.scrollTop = 0;
                    }, 50);
                }
                this.currentToggleButton.textContent =
                    this.currentThinkingContent.classList.contains('collapsed')
                        ? '显示思考过程'
                        : '隐藏思考过程';
            });
        }

        // Render the thinking content after escaping/formatting it.
        const sanitizedThinking = this.sanitizeAndFormatThinking(thinking);
        this.currentThinkingContent.innerHTML = sanitizedThinking;

        // Reveal the toggle button.
        this.currentToggleButton.style.display = 'inline-block';
    }

    // Escape and lightly format the thinking text (not full Markdown).
    // Bug fix: the escaping below had been corrupted — the HTML entities in
    // the replacement strings were decoded at some point, leaving invalid
    // regexes such as `.replace(//g, '>')`. Reconstructed as standard
    // HTML-entity escaping plus <pre><code> wrapping for fenced blocks.
    sanitizeAndFormatThinking(thinking) {
        // Simple entity escaping instead of a full Markdown parse.
        let formatted = thinking
            .replace(/&/g, '&amp;')
            .replace(/</g, '&lt;')
            .replace(/>/g, '&gt;')
            .replace(/"/g, '&quot;')
            .replace(/'/g, '&#39;');

        // Extract fenced code blocks with a non-greedy match.
        formatted = formatted.replace(/```(.*?)\n([\s\S]*?)```/g, (match, language, code) => {
            // Extra processing of the code body, preserving indentation.
            const processedCode = code
                .replace(/</g, '&lt;')
                .replace(/>/g, '&gt;')
                .trim();

            // Simple wrapper that does not depend on highlight.js.
            return `<pre class="code-block"><code>${processedCode}</code></pre>`;
        });

        // Basic inline-code handling.
        formatted = formatted.replace(/`([^`]+)`/g, '<code>$1</code>');

        // Newlines: collapse runs of blank lines, keep single breaks.
        formatted = formatted.replace(/\n\n+/g, '<br><br>').replace(/\n/g, '<br>');

        return formatted;
    }

    // Toggle the visibility of the thinking panel.
    toggleThinking() {
        this.isThinkingVisible = !this.isThinkingVisible;
        if (this.currentThinkingContent) {
            this.currentThinkingContent.classList.toggle('collapsed', !this.isThinkingVisible);

            // Reset scrolling when the panel becomes visible.
            if (this.isThinkingVisible) {
                setTimeout(() => {
                    this.currentThinkingContent.scrollTop = 0;
                }, 50);
            }

            if (this.currentToggleButton) {
                this.currentToggleButton.textContent = this.isThinkingVisible
                    ? '隐藏思考过程'
                    : '显示思考过程';
            }
        }
    }

    // Reset per-message state.
    reset() {
        this.isThinkingVisible = false;
        this.currentThinkingContent = null;
        this.currentToggleButton = null;
    }
}

// Global thinking-handler instance.
const thinkingHandler = new ThinkingHandler();
--------------------------------------------------------------------------------
/README_zh.md:
--------------------------------------------------------------------------------
1 |
2 |
DeepGemini 🌟
3 |
一个灵活的多模型编排 API,兼容 OpenAI 接口
4 |
5 | [](https://fastapi.tiangolo.com)
6 | [](https://www.python.org)
7 | [](https://platform.openai.com)
8 | [](LICENSE)
9 | [](https://deepwiki.com/sligter/DeepGemini)
10 |
11 |
12 | [English](README.md)
13 |
14 | ## ✨ 特性
15 |
16 | - **多模型编排**:无缝组合多个 AI 模型,实现自定义接力链
17 | - **角色管理**:创建具有不同性格和技能的 AI 角色
18 | - **讨论组**:组合多个角色形成讨论组
19 | - **多种讨论模式**:
20 | - 一般讨论
21 | - 头脑风暴
22 | - 辩论
23 | - 角色扮演
24 | - SWOT 分析
25 | - 六顶思考帽
26 | - **灵活的提供商支持**:支持多个 AI 提供商:
27 | - DeepSeek
28 | - Claude
29 | - Gemini
30 | - Grok3
31 | - OpenAI
32 | - OneAPI
33 | - OpenRouter
34 | - Siliconflow
35 | - **OpenAI 兼容**:可作为 OpenAI API 的直接替代品
36 | - **流式响应**:支持实时流式响应,提供更好的用户体验
37 | - **高级配置**:精细控制模型参数和系统提示词
38 | - **Web 管理界面**:内置模型和配置管理界面
39 | - **多语言支持**:支持中文和英文界面
40 | - **人类参与**:支持人类加入AI讨论组进行发言
41 | - **对话界面**:支持模型、角色、接力链、讨论组在线对话
42 | - **灵活部署**:支持Docker或本地安装的简易部署方式
43 |
44 | ## 预览
45 |
46 | 
47 |
48 | 
49 |
50 | 
51 |
52 | 
53 |
54 | 
55 |
56 | 
57 |
58 | ## 🚀 快速开始
59 |
60 | ### 1. 安装
61 |
62 | ```bash
63 | git clone https://github.com/sligter/DeepGemini.git
64 | cd DeepGemini
65 | uv sync
66 | ```
67 |
68 | ### 2. 配置
69 |
70 | ```bash
71 | cp .env.example .env
72 | ```
73 |
74 | 必需的环境变量:
75 | - `ALLOW_API_KEY`:你的 API 访问密钥
76 | - `ALLOW_ORIGINS`:允许的 CORS 来源(逗号分隔或 "*")
77 |
78 | ### 3. 运行应用
79 |
80 | ```bash
81 | uv run uvicorn app.main:app --host 0.0.0.0 --port 8000
82 | ```
83 |
84 | 访问 `http://localhost:8000/dashboard` 进入 Web 管理界面。
85 |
86 | ## 🐳 Docker 部署
87 |
88 | ### 使用 Docker Compose(推荐)
89 |
90 | 1. 创建并配置 `.env` 文件:
91 |
92 | ```bash
93 | cp .env.example .env
94 | touch deepgemini.db
95 | echo "" > deepgemini.db
96 | ```
97 |
98 | 2. 构建并启动容器:
99 |
100 | ```bash
101 | docker-compose up -d
102 | ```
103 |
104 | 3. 访问 `http://localhost:8000/dashboard` 进入 Web 界面
105 |
106 | ### 直接使用 Docker
107 |
108 | 1. 拉取镜像:
109 | ```bash
110 | docker pull bradleylzh/deepgemini:latest
111 | ```
112 |
113 | 2. 创建必要文件:
114 |
115 | Linux/Mac 用户:
116 | ```bash
117 | cp .env.example .env
118 | touch deepgemini.db
119 | ```
120 |
121 | 运行容器
122 | ```bash
123 | docker run -d \
124 | -p 8000:8000 \
125 | -v $(pwd)/.env:/app/.env \
126 | -v $(pwd)/deepgemini.db:/app/deepgemini.db \
127 | --name deepgemini \
128 | bradleylzh/deepgemini:latest
129 | ```
130 |
131 | Windows PowerShell 用户:
132 |
133 | ```powershell
134 | cp .env.example .env
135 | python -c "import sqlite3; sqlite3.connect('deepgemini.db').close()"
136 | ```
137 |
138 | 运行容器
139 | ```powershell
140 | docker run -d -p 8000:8000 `
141 | -v ${PWD}\.env:/app/.env `
142 | -v ${PWD}\deepgemini.db:/app/deepgemini.db `
143 | --name deepgemini `
144 | bradleylzh/deepgemini:latest
145 | ```
146 |
147 | ## 🔧 模型配置
148 |
149 | DeepGemini 支持多种 AI 提供商:
150 |
151 | - **DeepSeek**:先进的推理能力
152 | - **Claude**:精细的文本生成和思考
153 | - **Gemini**:Google 的 AI 模型
154 | - **Grok3**:Grok 的 AI 模型
155 | - **自定义**:添加你自己的提供商集成
156 |
157 | 每个模型可配置:
158 | - API 凭证
159 | - 模型参数(temperature、top_p 等)
160 | - 系统提示词
161 | - 使用类型(推理/执行/两者)
162 |
## 🔄 接力链配置

通过组合模型创建自定义接力链:
166 |
167 | 1. **推理步骤**:初始分析和规划
168 | 2. **执行步骤**:最终响应生成
169 | 3. **自定义步骤**:根据需要添加多个步骤
170 |
171 | ## 👥 多角色讨论
172 | - **角色管理**:创建具有不同性格和技能的 AI 角色
173 | - **讨论组**:组合多个角色形成讨论组
174 | - **多种讨论模式**:
175 | - 一般讨论
176 | - 头脑风暴
177 | - 辩论
178 | - 角色扮演
179 | - SWOT 分析
180 | - 六顶思考帽
181 | - **人类参与**:允许人类加入AI讨论并发言
182 |
183 | ## 🔍 API 兼容性
184 | DeepGemini 提供兼容的API接口,可作为OpenAI API的直接替代品:
185 |
186 | - **/v1/chat/completions**:与OpenAI聊天完成端点兼容
187 | - **/v1/models**:以OpenAI兼容格式列出所有可用模型
188 | - 支持流式响应、工具和其他OpenAI API功能
189 |
190 | ## 🛠 技术栈
191 |
192 | - [FastAPI](https://fastapi.tiangolo.com/):现代 Web 框架
193 | - [SQLAlchemy](https://www.sqlalchemy.org/):数据库 ORM
194 | - [Alembic](https://alembic.sqlalchemy.org/):数据库迁移
195 | - [UV](https://github.com/astral-sh/uv):快速 Python 包安装器
196 | - [aiohttp](https://docs.aiohttp.org/):异步 HTTP 客户端
197 | - [deepclaude](https://github.com/getasterisk/deepclaude)
198 |
199 | ## ✨ 鸣谢
200 |
201 | [](https://dartnode.com "Powered by DartNode - Free VPS for Open Source")
202 |
203 | ## 📝 许可证
204 |
205 | 本项目采用 MIT 许可证 - 详见 [LICENSE](LICENSE) 文件。
206 |
207 | ## 🤝 贡献
208 |
209 | 欢迎贡献!请随时提交 Pull Request。
210 |
211 | ## 📬 联系
212 |
213 | 如有问题和支持需求,请在 GitHub 上开启 Issue。
214 |
--------------------------------------------------------------------------------
/app/routers/discussions.py:
--------------------------------------------------------------------------------
1 | import traceback
2 | from fastapi import APIRouter, Depends, HTTPException, Body, Request
3 | from fastapi.responses import JSONResponse, StreamingResponse
4 | from sqlalchemy.orm import Session
5 | from typing import Dict, Any
6 | import logging
7 | import json
8 |
9 | from app.models.database import get_db
10 | from app.adapters.meeting_adapter import MeetingAdapter
11 | from app.processors.discussion_processor import DiscussionProcessor
12 |
13 | router = APIRouter(
14 | prefix="/v1/discussions",
15 | tags=["discussions"],
16 | )
17 |
18 | logger = logging.getLogger(__name__)
19 |
@router.post("/{group_id}/start", response_model=Dict[str, Any])
def start_discussion(
    group_id: int,
    data: Dict[str, Any] = Body(...),
    db: Session = Depends(get_db)
):
    """Start a new discussion for the given group.

    Raises:
        HTTPException: 400 for a missing topic or adapter validation error,
            500 for any other failure.
    """
    adapter = MeetingAdapter(db)

    try:
        topic = data.get("topic", "")
        if not topic:
            raise HTTPException(status_code=400, detail="讨论主题不能为空")

        meeting_id = adapter.start_meeting(group_id, topic)
        return {"meeting_id": meeting_id, "message": "讨论已启动"}
    except HTTPException:
        # Bug fix: without this clause the 400 raised above was caught by
        # the generic handler below and surfaced as a 500.
        raise
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error(f"启动讨论失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"启动讨论失败: {str(e)}")
41 |
@router.post("/stream/{group_id}")
async def stream_discussion_process(group_id: int, request: Request, db: Session = Depends(get_db)):
    """Start a meeting for the group and stream its progress as SSE."""
    processor = DiscussionProcessor(db)

    try:
        # Start the meeting.
        meeting_id = processor.start_meeting(group_id)
    except Exception as e:
        logger.error(f"启动流式讨论过程时出错: {str(e)}", exc_info=True)

        # Surface the failure as a single SSE event followed by a terminator.
        async def error_stream():
            error_data = {
                "error": str(e),
                "detail": traceback.format_exc()
            }
            yield f"data: {json.dumps(error_data, ensure_ascii=False)}\n\n"
            yield "data: [DONE]\n\n"

        return StreamingResponse(error_stream(), media_type="text/event-stream")

    # Stream the running discussion back to the client.
    return StreamingResponse(
        processor._stream_discussion_process(meeting_id),
        media_type="text/event-stream"
    )
70 |
@router.get("/stream/{meeting_id}")
async def continue_stream_discussion(meeting_id: str, db: Session = Depends(get_db)):
    """Resume streaming an in-progress discussion from its current state."""
    processor = DiscussionProcessor(db)
    processor.adapter = MeetingAdapter(db)
    # Point the processor at the meeting being resumed.
    processor.current_meeting_id = meeting_id

    try:
        logger.info(f"继续会议流程: meeting_id={meeting_id}")

        # Stream directly from the processor's discussion generator.
        response = StreamingResponse(
            processor._stream_discussion_process(meeting_id),
            media_type="text/event-stream"
        )
    except Exception as e:
        logger.error(f"继续流式讨论过程时出错: {str(e)}", exc_info=True)

        # Surface the failure as a single SSE event followed by a terminator.
        async def error_stream():
            error_data = {
                "error": str(e),
                "detail": traceback.format_exc()
            }
            yield f"data: {json.dumps(error_data, ensure_ascii=False)}\n\n"
            yield "data: [DONE]\n\n"

        return StreamingResponse(error_stream(), media_type="text/event-stream")

    return response
101 |
@router.post("/{meeting_id}/human_input", response_model=Dict[str, Any])
def submit_human_input(
    meeting_id: str,
    agent_name: str = Body(...),
    message: str = Body(...),
    db: Session = Depends(get_db)
):
    """Submit a human participant's message, handled via DiscussionProcessor."""
    processor = DiscussionProcessor(db)

    try:
        # Point the processor at the target meeting.
        processor.current_meeting_id = meeting_id

        # Initialize the adapter for compatibility when it is not yet set.
        if not processor.adapter:
            processor.adapter = MeetingAdapter(db)

        # Delegate the actual handling to the processor.
        return processor.process_human_input(meeting_id, agent_name, message)
    except ValueError as e:
        # Expected failures, e.g. unknown meeting or missing human agent.
        logger.error(f"处理人类输入失败: {str(e)}")
        raise HTTPException(status_code=404, detail=str(e))
    except Exception as e:
        # Anything else is an internal error.
        logger.error(f"处理人类输入时出错: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"处理人类输入失败: {str(e)}")
132 |
@router.get("/{group_id}/info", response_model=Dict[str, Any])
def get_discussion_group_info(
    group_id: int,
    db: Session = Depends(get_db)
):
    """Return information about a discussion group.

    Raises:
        HTTPException: 404 when the group does not exist, 500 otherwise.
    """
    adapter = MeetingAdapter(db)

    try:
        group = adapter._load_discussion_group(group_id)
        if not group:
            raise HTTPException(status_code=404, detail=f"讨论组ID {group_id} 不存在")

        return adapter._group_to_dict(group)
    except HTTPException:
        # Bug fix: without this clause the 404 above was caught by the
        # generic handler below and surfaced as a 500.
        raise
    except ValueError as e:
        raise HTTPException(status_code=404, detail=str(e))
    except Exception as e:
        logger.error(f"获取讨论组信息失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"获取讨论组信息失败: {str(e)}")
--------------------------------------------------------------------------------
/app/meeting/meeting/meeting.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 | from typing import List, Dict, Any, Optional
3 | import uuid
4 | import time
5 |
6 | from agents.agent import Agent
7 | from meeting_modes.base_mode import BaseMeetingMode
8 |
class Meeting:
    """Core meeting object: owns the participants, the mode, and the running history."""

    def __init__(
        self,
        topic: str,
        mode: BaseMeetingMode,
        agents: List[Agent],
        meeting_id: Optional[str] = None
    ):
        self.topic = topic
        self.mode = mode
        self.agents = agents
        self.meeting_id = meeting_id or str(uuid.uuid4())
        self.start_time = None
        self.end_time = None
        self.meeting_history = []  # chronological list of event dicts
        self.current_round = 0
        self.status = "未开始"  # lifecycle: 未开始 -> 进行中 -> 已结束

    def start(self):
        """Transition the meeting to the running state and log the kickoff."""
        if self.status != "未开始":
            raise ValueError(f"会议已经{self.status},无法再次开始")

        self.start_time = datetime.now()
        self.status = "进行中"
        self.current_round = 1

        # Log the opening event.
        self._log_event("system", f"会议'{self.topic}'开始,模式:{self.mode.name}")

        # Log the roster of participating agents.
        names = [member.name for member in self.agents]
        self._log_event("system", f"参会智能体: {', '.join(names)}")

    def conduct_round(self):
        """Run one full round: every agent speaks in mode-determined order."""
        if self.status != "进行中":
            raise ValueError(f"会议状态为{self.status},无法进行")

        # Ask the mode for this round's speaking order.
        roster = [
            {"name": member.name, "role": member.role_description}
            for member in self.agents
        ]
        speaking_order = self.mode.determine_speaking_order(roster, self.current_round)

        for speaker_name in speaking_order:
            # Resolve the agent for this slot; skip unknown names.
            speaker = next((m for m in self.agents if m.name == speaker_name), None)
            if speaker is None:
                continue

            # Gather the conversational context and the mode-specific prompt.
            context = self._get_current_context()
            prompt = self.mode.get_agent_prompt(
                speaker.name,
                speaker.role_description,
                self.topic,
                self.current_round
            )

            # Mark the moment the agent starts thinking.
            self._log_event("system", f"{speaker.name} 开始思考...", self.current_round)

            reply = speaker.speak(
                meeting_topic=self.topic,
                meeting_mode=self.mode.name,
                current_context=context,
                mode_specific_prompt=prompt
            )

            # Record the utterance in the meeting history.
            self._log_event(speaker.name, reply)

            # Share the newest entry with every other participant.
            latest = self.meeting_history[-1:]
            for listener in self.agents:
                if listener.name != speaker.name:
                    listener.update_history(latest)

            # Pause between speakers so each reply can be processed downstream.
            time.sleep(10)

        # Advance to the next round.
        self.current_round += 1

        # Let the mode decide whether the meeting should wrap up.
        if self.mode.should_end_meeting(self.current_round - 1, self.meeting_history):
            self.end()

    def end(self):
        """Close the meeting, appending the summary and the duration to the log."""
        if self.status != "进行中":
            return

        self.end_time = datetime.now()
        self.status = "已结束"

        # Generate and record the meeting summary.
        summary = self.mode.summarize_meeting(self.topic, self.meeting_history)
        self._log_event("system", f"会议总结:\n{summary}")

        # Record the closing event with the elapsed duration.
        elapsed = self.end_time - self.start_time
        self._log_event("system", f"会议结束,持续时间: {elapsed}")

    def _log_event(self, agent: str, content: str, round_num=None):
        """Append one event to the history (defaults to the current round)."""
        entry = {
            "timestamp": datetime.now().isoformat(),
            "agent": agent,
            "content": content,
            "round": self.current_round if round_num is None else round_num
        }
        self.meeting_history.append(entry)

    def _get_current_context(self) -> str:
        """Build the textual context handed to a speaking agent (last 10 events)."""
        recent = self.meeting_history[-10:]

        parts = [f"会议主题: {self.topic}\n当前轮次: {self.current_round}\n\n最近的讨论内容:\n"]
        parts.extend(f"[{item['agent']}]: {item['content']}\n\n" for item in recent)
        return "".join(parts)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the meeting state for logging/persistence."""
        started = self.start_time.isoformat() if self.start_time else None
        ended = self.end_time.isoformat() if self.end_time else None
        return {
            "meeting_id": self.meeting_id,
            "topic": self.topic,
            "mode": self.mode.name,
            "start_time": started,
            "end_time": ended,
            "agents": [
                {"name": member.name, "role": member.role_description}
                for member in self.agents
            ],
            "history": self.meeting_history,
            "status": self.status
        }

    def get_current_context(self):
        """Public wrapper around :meth:`_get_current_context`."""
        return self._get_current_context()
--------------------------------------------------------------------------------
/app/models/database.py:
--------------------------------------------------------------------------------
1 | from sqlalchemy import create_engine, Column, Integer, String, Boolean, Float, JSON, ForeignKey, Text, DateTime, Table
2 | from sqlalchemy.ext.declarative import declarative_base
3 | from sqlalchemy.orm import relationship, sessionmaker
4 | import os
5 | from datetime import datetime
6 |
7 | # Create the database engine
8 | DATABASE_URL = "sqlite:///./deepgemini.db"
9 | engine = create_engine(DATABASE_URL)
10 | SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
11 |
12 | Base = declarative_base()
13 |
14 | # 角色与讨论组的多对多关系表
15 | role_discussion_group = Table(
16 | 'role_discussion_group',
17 | Base.metadata,
18 | Column('role_id', Integer, ForeignKey('roles.id')),
19 | Column('discussion_group_id', Integer, ForeignKey('discussion_groups.id'))
20 | )
21 |
class Model(Base):
    """An upstream LLM endpoint plus its default generation parameters."""
    __tablename__ = "models"

    id = Column(Integer, primary_key=True, index=True)
    name = Column(String, unique=True, index=True)
    type = Column(String)  # 'reasoning', 'execution', or 'both'
    provider = Column(String)  # 'anthropic', 'google', etc.
    api_key = Column(String)
    api_url = Column(String)
    model_name = Column(String)  # provider-side model identifier

    # Default sampling parameters used when a request does not override them.
    temperature = Column(Float, default=0.7)
    top_p = Column(Float, default=1.0)
    max_tokens = Column(Integer, default=2000)
    presence_penalty = Column(Float, default=0.0)
    frequency_penalty = Column(Float, default=0.0)

    # Tool (function-calling) configuration.
    enable_tools = Column(Boolean, server_default='0', nullable=False)
    tools = Column(JSON, nullable=True)  # JSON blob describing the available tools
    tool_choice = Column(JSON, nullable=True)  # JSON blob with the tool-choice setting

    # Extended "thinking" configuration.
    enable_thinking = Column(Boolean, server_default='0', nullable=False)
    thinking_budget_tokens = Column(Integer, server_default='16000', nullable=False)

    # Free-form per-model parameters; server_default guarantees a value exists.
    custom_parameters = Column(JSON, nullable=False, server_default='{}')

    # Relay-chain steps that reference this model.
    configuration_steps = relationship("ConfigurationStep", back_populates="model")

    # Roles backed by this model.
    roles = relationship("Role", back_populates="model")
57 |
class Configuration(Base):
    """A named relay chain: an ordered list of model steps."""
    __tablename__ = "configurations"

    id = Column(Integer, primary_key=True, index=True)
    name = Column(String, unique=True, index=True)
    is_active = Column(Boolean, default=True)
    # Presumably controls what content is handed between steps — verify schema against callers.
    transfer_content = Column(JSON, default=dict)

    # Ordered steps; deleting the configuration also deletes its steps.
    steps = relationship(
        "ConfigurationStep",
        back_populates="configuration",
        cascade="all, delete-orphan",
        order_by="ConfigurationStep.step_order"
    )
73 |
class ConfigurationStep(Base):
    """One step of a relay chain: a model plus its position and role in the pipeline."""
    __tablename__ = "configuration_steps"

    id = Column(Integer, primary_key=True, index=True)
    configuration_id = Column(Integer, ForeignKey("configurations.id", ondelete="CASCADE"))
    model_id = Column(Integer, ForeignKey("models.id"))
    step_type = Column(String)  # "reasoning" or "execution"
    step_order = Column(Integer)  # position within the chain
    system_prompt = Column(String, default="")

    # Relationships
    configuration = relationship("Configuration", back_populates="steps")
    model = relationship("Model", back_populates="configuration_steps")
87 |
class Role(Base):
    """An AI (or human) participant persona that can join discussion groups."""
    __tablename__ = 'roles'

    id = Column(Integer, primary_key=True, index=True)
    name = Column(String(100), nullable=False)
    description = Column(Text, nullable=True)
    model_id = Column(Integer, ForeignKey('models.id'), nullable=False)
    personality = Column(Text, nullable=True)
    skills = Column(JSON, nullable=True)  # list of skill names
    parameters = Column(JSON, nullable=True)  # per-role model parameter overrides
    system_prompt = Column(Text, nullable=True)  # system prompt for this role
    created_at = Column(DateTime, default=datetime.now)
    updated_at = Column(DateTime, nullable=True)
    is_human = Column(Boolean, default=False)  # True when a human plays this role
    host_role_id = Column(Integer, ForeignKey('roles.id'), nullable=True)  # agent role a human role piggybacks on

    # Relationships
    model = relationship("Model", back_populates="roles")
    discussion_groups = relationship("DiscussionGroup",
                                   secondary=role_discussion_group,
                                   back_populates="roles")
    host_role = relationship("Role", remote_side=[id], foreign_keys=[host_role_id])
111 |
class DiscussionGroup(Base):
    """A set of roles that meet under a given mode to discuss a topic."""
    __tablename__ = 'discussion_groups'

    id = Column(Integer, primary_key=True, index=True)
    name = Column(String(100), nullable=False)
    topic = Column(String, nullable=True)
    description = Column(Text, nullable=True)
    mode = Column(String(50), nullable=False, default="discussion")  # meeting mode
    max_rounds = Column(Integer, default=3)  # maximum number of rounds
    summary_model_id = Column(Integer, ForeignKey('models.id'), nullable=True)  # model used to write the summary
    summary_prompt = Column(Text, nullable=True)  # custom summary prompt template
    custom_speaking_order = Column(JSON, nullable=True)  # explicit speaking order
    created_at = Column(DateTime, default=datetime.now)
    updated_at = Column(DateTime, nullable=True)

    # Relationships
    roles = relationship("Role",
                        secondary=role_discussion_group,
                        back_populates="discussion_groups")
    summary_model = relationship("Model", foreign_keys=[summary_model_id])
133 |
# Create all tables
def init_db():
    """Create every table registered on Base.metadata (existing tables are left untouched)."""
    Base.metadata.create_all(bind=engine)
137 |
# Dependency to get database session
def get_db():
    """FastAPI dependency that yields a session and always closes it afterwards."""
    session = SessionLocal()
    try:
        yield session
    finally:
        session.close()
--------------------------------------------------------------------------------
/app/utils/auth.py:
--------------------------------------------------------------------------------
import json
import os
import random
import secrets
import string
from datetime import datetime, timedelta, timezone
from typing import Optional

from dotenv import load_dotenv
from fastapi import HTTPException, Header, Depends, status
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials, APIKeyHeader
from jwt import InvalidTokenError, encode, decode  # from PyJWT
from passlib.context import CryptContext

from app.utils.logger import logger
14 |
# Bootstrap configuration: make sure a .env file exists, then load it.
logger.info(f"当前工作目录: {os.getcwd()}")
logger.info("尝试加载.env文件...")

# If .env is missing, create one with a random JWT secret and API key.
env_path = '.env'
if not os.path.exists(env_path):
    # Random JWT signing secret.
    jwt_secret = secrets.token_hex(32)

    # Random default API key.
    api_key = 'sk-api-' + ''.join(secrets.choice(string.ascii_lowercase + string.digits) for _ in range(32))
    api_key_id = 1

    logger.info("未找到.env文件,正在创建默认配置...")
    with open(env_path, 'w', encoding='utf-8') as f:
        f.write(f"JWT_SECRET={jwt_secret}\n")
        f.write("ADMIN_USERNAME=admin\n")
        f.write("ADMIN_PASSWORD=admin123\n")
        f.write(f"ALLOW_API_KEY=[{{\"id\": {api_key_id},\"key\":\"{api_key}\",\"description\":\"默认API密钥\"}}]\n")
    logger.info("已创建默认.env文件")
    logger.info(f"已生成随机JWT密钥: {jwt_secret[:8]}...")
    logger.info(f"已生成随机API密钥: {api_key[:8]}...")

load_dotenv(override=True)  # override=True forces reloading over existing env vars

# Log the admin username for debugging.
admin_username = os.getenv("ADMIN_USERNAME", "未设置")
admin_password = os.getenv("ADMIN_PASSWORD", "未设置")
logger.info(f"当前管理员用户名: {admin_username}")
# SECURITY FIX: the previous version logged the raw password in plaintext.
logger.info(f"当前管理员密码: {'***' if admin_password != '未设置' else '未设置'}")

# Parse the configured API keys from the environment.
try:
    api_keys_json = os.getenv('ALLOW_API_KEY', '[]')
    # Tolerate stray whitespace/newlines around the JSON.
    api_keys_json = api_keys_json.strip()
    api_keys_data = json.loads(api_keys_json)
    # The value must be a JSON array of {"key": ...} objects.
    if isinstance(api_keys_data, list):
        # Keep only well-formed entries that actually carry a key.
        ALLOW_API_KEYS = [key_data["key"] for key_data in api_keys_data
                         if isinstance(key_data, dict) and "key" in key_data]
    else:
        logger.error("API 密钥数据格式错误,应为 JSON 数组")
        ALLOW_API_KEYS = []
except json.JSONDecodeError:
    logger.warning("无法解析 API 密钥 JSON,使用空列表初始化")
    ALLOW_API_KEYS = []
except Exception as e:
    logger.error(f"加载 API 密钥时出错: {str(e)}")
    ALLOW_API_KEYS = []

logger.info(f"已加载 {len(ALLOW_API_KEYS)} 个 API 密钥")

if not ALLOW_API_KEYS:
    logger.warning("没有设置有效的 API 密钥,系统可能无法正常工作")

# Log only a short prefix of each key for debugging.
for key in ALLOW_API_KEYS:
    logger.info(f"已加载 API 密钥,前缀为: {key[:8] if len(key) >= 8 else key}")

# Security configuration.
SECRET_KEY = os.getenv("JWT_SECRET", "your-secret-key")
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 60 * 24  # 24 hours

pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
security = HTTPBearer()

# API key header definition (read from the Authorization header).
api_key_header = APIKeyHeader(name="Authorization", auto_error=False)
87 |
def verify_password(plain_password, hashed_password):
    """Return True if *plain_password* matches the bcrypt *hashed_password*."""
    return pwd_context.verify(plain_password, hashed_password)
90 |
def get_password_hash(password):
    """Hash *password* with the configured bcrypt context."""
    return pwd_context.hash(password)
93 |
def create_access_token(data: dict):
    """Issue a signed JWT containing *data* plus an ``exp`` expiry claim.

    Uses timezone-aware UTC: ``datetime.utcnow()`` is deprecated and returns a
    naive datetime; PyJWT converts aware datetimes to the correct timestamp.
    """
    to_encode = data.copy()
    expire = datetime.now(timezone.utc) + timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
    to_encode.update({"exp": expire})
    return encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
100 |
def verify_token(credentials: HTTPAuthorizationCredentials = Depends(security)):
    """Decode the bearer token and return the username stored in its ``sub`` claim."""
    try:
        claims = decode(credentials.credentials, SECRET_KEY, algorithms=[ALGORITHM])
    except InvalidTokenError:
        raise HTTPException(status_code=401, detail="Invalid authentication credentials")

    username = claims.get("sub")
    if username is None:
        raise HTTPException(status_code=401, detail="Invalid authentication credentials")
    return username
110 |
def update_admin_credentials(username: str, password: str):
    """Rewrite ADMIN_USERNAME/ADMIN_PASSWORD in .env and reload the environment."""
    env_path = '.env'
    with open(env_path, 'r', encoding='utf-8') as file:
        lines = file.readlines()

    # Lines matching these prefixes are rewritten; everything else is kept verbatim.
    replacements = {
        'ADMIN_USERNAME=': f'ADMIN_USERNAME={username}\n',
        'ADMIN_PASSWORD=': f'ADMIN_PASSWORD={password}\n',
    }

    with open(env_path, 'w', encoding='utf-8') as file:
        for line in lines:
            for prefix, new_line in replacements.items():
                if line.startswith(prefix):
                    file.write(new_line)
                    break
            else:
                file.write(line)

    # Reload so the new credentials take effect immediately.
    load_dotenv(override=True)
    return {"message": "Credentials updated successfully"}
129 |
def get_api_key_header(api_key_header: str = Depends(api_key_header)):
    """Extract the raw API key from the Authorization header.

    Raises 401 when the header is absent; strips an optional ``Bearer `` prefix.
    """
    if not api_key_header:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Missing API key",
            headers={"WWW-Authenticate": "Bearer"},
        )

    # Drop the "Bearer " prefix (if present) and return the key itself.
    return api_key_header.removeprefix("Bearer ")
144 |
def verify_api_key(api_key: str = Depends(get_api_key_header)):
    """Validate *api_key* against the keys configured in the environment.

    Re-reads ALLOW_API_KEY on every call so freshly added keys work without a
    restart. Raises 401 for an unknown key, 500 for configuration errors.
    """
    try:
        api_keys_json = os.getenv('ALLOW_API_KEY', '[]')
        api_keys_data = json.loads(api_keys_json)
        # Keep only well-formed entries that actually carry a key.
        available_keys = [key_data["key"] for key_data in api_keys_data
                         if isinstance(key_data, dict) and "key" in key_data]

        logger.debug(f"正在验证 API 密钥: {api_key[:8] if len(api_key) >= 8 else api_key}...")
        if available_keys:
            logger.debug(f"可用的 API 密钥: {[k[:8] if len(k) >= 8 else k for k in available_keys]}")
        else:
            logger.warning("没有可用的API密钥配置")

        if api_key not in available_keys:
            logger.warning(f"无效的API密钥: {api_key[:8] if len(api_key) >= 8 else api_key}...")
            raise HTTPException(
                status_code=401,
                detail="Invalid API key",
                headers={"WWW-Authenticate": "Bearer"},
            )
        return api_key
    except HTTPException:
        # BUG FIX: previously the 401 raised above fell into the broad
        # Exception handler below and was converted into a 500.
        raise
    except json.JSONDecodeError as e:
        logger.error(f"解析 API 密钥 JSON 时出错: {e}")
        raise HTTPException(
            status_code=500,
            detail="Internal server error: Invalid API key format",
        )
    except Exception as e:
        logger.error(f"验证 API 密钥时发生未知错误: {e}")
        raise HTTPException(
            status_code=500,
            detail="Internal server error",
        )
181 |
--------------------------------------------------------------------------------
/app/clients/deepseek_client.py:
--------------------------------------------------------------------------------
1 | """DeepSeek API 客户端"""
2 | import json
3 | from typing import AsyncGenerator
4 | from app.utils.logger import logger
5 | from .base_client import BaseClient
6 |
7 |
class DeepSeekClient(BaseClient):
    """Client for the DeepSeek API; separates reasoning output from answers."""

    def __init__(self, api_key: str, api_url: str, provider: str = "deepseek", is_origin_reasoning: bool = True):
        """Initialize the DeepSeek client.

        Args:
            api_key: API key
            api_url: API endpoint URL
            provider: provider name
            is_origin_reasoning: whether this client wraps a native reasoning model
        """
        super().__init__(api_key, api_url)
        self.provider = provider
        self.is_origin_reasoning = is_origin_reasoning

    def _process_think_tag_content(self, content: str) -> tuple[bool, str]:
        """Inspect *content* for <think> tag pairs.

        BUG FIX: the tag literals had been lost, leaving empty strings —
        ``"" in content`` is always True and ``content.split("")`` raises
        ValueError. Restored from the surrounding "think 标签" log messages.

        Returns:
            tuple[bool, str]:
                bool: True when a complete <think>...</think> pair (or a lone
                      closing tag) is present
                str: the content, unchanged
        """
        has_start = "<think>" in content
        has_end = "</think>" in content

        if has_start and has_end:
            return True, content
        elif has_start:
            return False, content
        elif not has_start and not has_end:
            return False, content
        else:
            return True, content

    async def stream_chat(
        self,
        messages: list,
        model: str = "deepseek-ai/DeepSeek-R1",
        **kwargs
    ) -> AsyncGenerator[tuple[str, str], None]:
        """Stream a chat completion, yielding ("reasoning"|"answer", text) tuples."""
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }

        # Build the request payload.
        data = self._prepare_request_data(messages, model, **kwargs)

        logger.debug(f"DeepSeek 请求数据:{data}")

        first_chunk = True
        reasoning_completed = False  # whether the reasoning phase has finished
        think_content_buffer = ""    # accumulates text seen inside <think> tags
        in_think_tag = False         # are we currently inside a <think> block?

        async for chunk in self._make_request(headers, data):
            chunk_str = chunk.decode('utf-8')

            try:
                lines = chunk_str.splitlines()
                for line in lines:
                    if line.startswith("data: "):
                        json_str = line[len("data: "):]
                        if json_str == "[DONE]":
                            return

                        # Renamed from `data` to avoid shadowing the request payload above.
                        payload = json.loads(json_str)
                        if payload and payload.get("choices") and payload["choices"][0].get("delta"):
                            delta = payload["choices"][0]["delta"]
                            if self.is_origin_reasoning:
                                # Native reasoning model: a dedicated field carries the reasoning.
                                if delta.get("reasoning_content"):
                                    content = delta["reasoning_content"]
                                    logger.debug(f"提取推理内容:{content}")
                                    yield "reasoning", content
                                # Otherwise reasoning may arrive inline, wrapped in <think> tags.
                                elif delta.get("content"):
                                    content = delta["content"]
                                    think_content_buffer += content

                                    if "<think>" in content and not in_think_tag:
                                        # Reasoning section starts here.
                                        logger.debug(f"检测到推理开始标记:{content}")
                                        in_think_tag = True
                                        # Emit whatever follows the opening tag.
                                        after_start_think = content.split("<think>")[1]
                                        if after_start_think.strip():
                                            logger.debug(f"提取 think 标签后的推理内容")
                                            yield "reasoning", after_start_think

                                    elif in_think_tag and "</think>" not in content:
                                        # Plain content inside the <think> block.
                                        logger.debug(f"提取 think 标签内的推理内容")
                                        yield "reasoning", content

                                    elif in_think_tag and "</think>" in content:
                                        # Reasoning section ends here.
                                        logger.debug(f"检测到推理结束标记:{content}")
                                        # Emit whatever precedes the closing tag.
                                        before_end_think = content.split("</think>")[0]
                                        if before_end_think.strip():
                                            logger.debug(f"提取 think 结束标签前的推理内容")
                                            yield "reasoning", before_end_think
                                        in_think_tag = False
                                        think_content_buffer = ""

                                elif not kwargs.get("is_last_step"):
                                    # Delta carries neither reasoning nor content and this is
                                    # not the final pipeline step: stop the stream.
                                    return
                            else:
                                # Execution model: forward plain content chunks.
                                if delta.get("content"):
                                    content = delta["content"]
                                    if content.strip():  # skip whitespace-only chunks
                                        if first_chunk and delta.get("role"):
                                            # The first chunk may also carry the role field.
                                            first_chunk = False
                                            if content.strip():
                                                logger.debug(f"执行模型首个响应:{content}")
                                                yield "answer", content
                                        else:
                                            logger.debug(f"执行模型响应:{content}")
                                            yield "answer", content
                                elif delta.get("role") and first_chunk:
                                    # First chunk that only carries the role field.
                                    first_chunk = False
                                    logger.debug("处理执行模型角色信息")

            except json.JSONDecodeError as e:
                logger.error(f"JSON解析错误: {e}")
                continue
            except Exception as e:
                logger.error(f"处理块数据时发生错误: {e}")
                continue
145 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
DeepGemini 🌟
3 |
A Flexible Multi-Model Orchestration API with OpenAI Compatibility
4 |
5 | [](https://fastapi.tiangolo.com)
6 | [](https://www.python.org)
7 | [](https://platform.openai.com)
8 | [](LICENSE)
9 | [](https://deepwiki.com/sligter/DeepGemini)
10 |
11 |
12 | [中文](README_zh.md) | [English](#features)
13 |
14 | ## ✨ Features
15 |
16 | - **Multi-Model Orchestration**: Seamlessly combine multiple AI models in customizable workflows
17 | - **Role Management**: Create AI roles with different personalities and skills
18 | - **Discussion Groups**: Combine multiple roles to form discussion groups
19 | - **Multiple Discussion Modes**:
20 | - General Discussion
21 | - Brainstorming
22 | - Debate
23 | - Role-playing
24 | - SWOT Analysis
25 | - Six Thinking Hats
26 | - **Provider Flexibility**: Support for multiple AI providers:
27 | - DeepSeek
28 | - Claude
29 | - Gemini
30 | - Grok3
31 | - OpenAI
32 | - OneAPI
33 | - OpenRouter
34 | - Siliconflow
35 | - **OpenAI Compatible**: Drop-in replacement for OpenAI's API in existing applications
36 | - **Stream Support**: Real-time streaming responses for better user experience
37 | - **Advanced Configuration**: Fine-grained control over model parameters and system prompts
38 | - **Database Integration**: SQLite-based configuration storage with Alembic migrations
39 | - **Web Management UI**: Built-in interface for managing models and configurations
40 | - **Multi-language Support**: English and Chinese interface
41 | - **Human Interaction**: Supports human participation in AI discussions
42 | - **Chat Interface**: Supports online conversations with models, roles, relay chains, and discussion groups
43 | - **Flexible Deployment**: Easy deployment with Docker or local installation
44 |
45 | ## Preview
46 |
47 | 
48 |
49 | 
50 |
51 | 
52 |
53 | 
54 |
55 | 
56 |
57 | 
58 |
59 |
60 |
61 | ## 🚀 Quick Start
62 |
63 | ### 1. Installation
64 |
65 | ```bash
66 | git clone https://github.com/sligter/DeepGemini.git
67 | cd DeepGemini
68 | uv sync
69 | ```
70 | ### 2. Configuration
71 |
72 | ```bash
73 | cp .env.example .env
74 | ```
75 |
76 |
77 | Required environment variables:
78 | - `ALLOW_API_KEY`: Your API access key
79 | - `ALLOW_ORIGINS`: Allowed CORS origins (comma-separated or "*")
80 |
81 | ### 3. Run the Application
82 |
83 | ```bash
84 | uv run uvicorn app.main:app --host 0.0.0.0 --port 8000
85 | ```
86 |
87 | Visit `http://localhost:8000/dashboard` to access the web management interface.
88 |
89 |
90 |
91 | ## 🐳 Docker Deployment
92 |
93 | ### Using Docker Compose (Recommended)
94 |
95 | 1. Create and configure your `.env` file:
96 |
97 | ```bash
98 | cp .env.example .env
touch deepgemini.db
101 | ```
102 |
103 | 2. Build and start the container:
104 |
105 | ```bash
106 | docker-compose up -d
107 | ```
108 |
109 | 3. Access the web interface at `http://localhost:8000/dashboard`
110 |
111 | ### Using Docker Directly
112 |
113 | 1. Pull the image:
114 | ```bash
115 | docker pull bradleylzh/deepgemini:latest
116 | ```
117 |
118 | 2. Create necessary files:
119 |
120 | For Linux/Mac:
121 | ```bash
122 | # Create .env file
123 | cp .env.example .env
124 | touch deepgemini.db
125 | ```
126 |
127 | For Windows PowerShell:
128 | ```powershell
129 | # Create .env file
130 | cp .env.example .env
131 | python -c "import sqlite3; sqlite3.connect('deepgemini.db').close()"
132 | ```
133 |
134 | 3. Run the container:
135 |
136 | For Linux/Mac:
137 | ```bash
138 | docker run -d \
139 | -p 8000:8000 \
140 | -v $(pwd)/.env:/app/.env \
141 | -v $(pwd)/deepgemini.db:/app/deepgemini.db \
142 | --name deepgemini \
143 | bradleylzh/deepgemini:latest
144 | ```
145 |
146 | For Windows PowerShell:
147 | ```powershell
148 | docker run -d -p 8000:8000 `
149 | -v ${PWD}\.env:/app/.env `
150 | -v ${PWD}\deepgemini.db:/app/deepgemini.db `
151 | --name deepgemini `
152 | bradleylzh/deepgemini:latest
153 | ```
154 |
155 |
156 | ## 🔧 Model Configuration
157 |
158 | DeepGemini supports various AI providers:
159 |
160 | - **DeepSeek**: Advanced reasoning capabilities
161 | - **Claude**: Refined text generation and thinking
162 | - **Gemini**: Google's AI model
163 | - **Grok3**: Grok's AI model
164 | - **Custom**: Add your own provider integration
165 |
166 | Each model can be configured with:
167 | - API credentials
168 | - Model parameters (temperature, top_p, tool, etc.)
169 | - System prompts
170 | - Usage type (reasoning/execution/both)
171 |
172 | ## 🔄 Relay Chain Configuration
173 |
174 | Create custom Relay Chain by combining models:
175 |
176 | 1. **Reasoning Step**: Initial analysis and planning
177 | 2. **Execution Step**: Final response generation
178 | 3. **Custom Steps**: Add multiple steps as needed
179 |
180 | ## 👥 Multi-Role Discussion
181 | **Role Management**: Create AI roles with different personalities and skills
182 | - **Discussion Groups**: Combine multiple roles to form discussion groups
183 | - **Multiple Discussion Modes**:
184 | - General Discussion
185 | - Brainstorming
186 | - Debate
187 | - Role-playing
188 | - SWOT Analysis
189 | - Six Thinking Hats
190 | - **Human Participation**: Allow humans to join AI discussions and contribute
191 |
192 | ## 🔍 API Compatibility
193 | DeepGemini provides a compatible API interface that allows it to serve as a drop-in replacement for OpenAI's API:
194 |
195 | - **/v1/chat/completions**: Compatible with OpenAI chat completion endpoint
196 | - **/v1/models**: Lists all available models in OpenAI-compatible format
197 | - Support for streaming responses, tools, and other OpenAI API features
198 |
199 | ## 🛠 Tech Stack
200 |
201 | - [FastAPI](https://fastapi.tiangolo.com/): Modern web framework
202 | - [SQLAlchemy](https://www.sqlalchemy.org/): Database ORM
203 | - [Alembic](https://alembic.sqlalchemy.org/): Database migrations
204 | - [UV](https://github.com/astral-sh/uv): Fast Python package installer
205 | - [aiohttp](https://docs.aiohttp.org/): Async HTTP client
206 | - [deepclaude](https://github.com/getasterisk/deepclaude)
207 |
208 | ## ✨ Acknowledgements
209 |
210 | [](https://dartnode.com "Powered by DartNode - Free VPS for Open Source")
211 |
212 | ## 📝 License
213 |
214 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
215 |
216 | ## 🤝 Contributing
217 |
218 | Contributions are welcome! Please feel free to submit a Pull Request.
219 |
220 | ## 📬 Contact
221 |
222 | For questions and support, please open an issue on GitHub.
223 |
--------------------------------------------------------------------------------
/app/clients/grok3_client.py:
--------------------------------------------------------------------------------
1 | """Grok3 API 客户端"""
2 | import json
3 | from typing import AsyncGenerator
4 | from app.utils.logger import logger
5 | from .base_client import BaseClient
6 |
7 |
class Grok3Client(BaseClient):
    """Client for the Grok3 API; reasoning lines arrive prefixed with '>'."""
    def __init__(self, api_key: str, api_url: str, is_origin_reasoning: bool = True):
        """Initialize the Grok3 client.

        Args:
            api_key: API key
            api_url: API endpoint URL
            is_origin_reasoning: whether this client wraps a reasoning model
        """
        super().__init__(api_key, api_url)
        self.provider = "grok3"
        self._current_line = ""  # buffer holding a partially received line
        self.is_origin_reasoning = is_origin_reasoning

    async def stream_chat(
        self,
        messages: list,
        model: str = "grok3-reasoner",
        stream: bool = True,
        **kwargs
    ) -> AsyncGenerator[tuple[str, str], None]:
        """Streaming or non-streaming chat.

        Grok3 marks reasoning with a leading '>' (blockquote) on each line;
        this method splits the response into reasoning and content parts.

        Args:
            messages: message list
            model: model name
            stream: whether to use streaming output
            **kwargs: extra options
                is_last_step: whether this is the final pipeline step
                is_first_step: whether this is the first pipeline step

        Yields:
            tuple[str, str]: (content type, content)
                content type: "reasoning" or "content"
                content: the actual text
        """
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
            "Accept": "text/event-stream" if stream else "application/json",
        }

        # Build the request payload.
        data = self._prepare_request_data(messages, model, stream=stream, **kwargs)

        if stream:
            # Reset the line buffer.
            self._current_line = ""
            reasoning_completed = False  # tracks whether the reasoning part has ended

            async for chunk in self._make_request(headers, data):
                try:
                    chunk_str = chunk.decode('utf-8')
                    if not chunk_str.strip():
                        continue

                    for line in chunk_str.split('\n'):
                        if line.startswith('data: '):
                            json_str = line[6:]
                            if json_str.strip() == '[DONE]':
                                # Flush whatever remains in the line buffer.
                                if self._current_line.strip():
                                    last_line = self._current_line.strip()
                                    if last_line.startswith('>'):
                                        if self.is_origin_reasoning:
                                            yield "reasoning", last_line[1:].strip()
                                    else:
                                        if not self.is_origin_reasoning or kwargs.get("is_last_step"):
                                            yield "content", last_line
                                return

                            data = json.loads(json_str)
                            content = data.get('choices', [{}])[0].get('delta', {}).get('content', '')

                            if content:
                                # Append to the current (possibly partial) line.
                                self._current_line += content

                                # Emit every complete line now in the buffer.
                                while "\n" in self._current_line:
                                    line, self._current_line = self._current_line.split("\n", 1)
                                    line = line.strip()
                                    # Skip separator lines.
                                    if line == "---":
                                        continue
                                    if line:  # ignore blank lines
                                        if line.startswith(">"):
                                            if self.is_origin_reasoning:
                                                yield "reasoning", "\n"+line[1:].strip()
                                        else:
                                            # First non-'>' line marks the end of the reasoning part.
                                            if not reasoning_completed:
                                                reasoning_completed = True
                                                # Not the last pipeline step: stop the stream here.
                                                if not kwargs.get("is_last_step"):
                                                    return
                                            # Last step, or a non-reasoning model: forward content.
                                            if not self.is_origin_reasoning or kwargs.get("is_last_step"):
                                                yield "content", "\n"+line

                            # On the stop marker, flush the final buffered line.
                            # NOTE(review): finish_reason normally lives under choices[0],
                            # so this top-level lookup may never trigger — confirm.
                            if data.get("finish_reason") == "stop":
                                last_line = self._current_line.strip()
                                if last_line:
                                    if last_line.startswith(">"):
                                        if self.is_origin_reasoning:
                                            yield "reasoning", "\n"+last_line[1:].strip()
                                    else:
                                        if not self.is_origin_reasoning or kwargs.get("is_last_step"):
                                            yield "content", "\n"+last_line
                                self._current_line = ""

                except json.JSONDecodeError:
                    continue
                except Exception as e:
                    logger.error(f"处理 Grok3 流式响应时发生错误: {e}")
                    continue
        else:
            # Non-streaming output.
            async for chunk in self._make_request(headers, data):
                try:
                    response = json.loads(chunk.decode('utf-8'))
                    content = response.get('choices', [{}])[0].get('message', {}).get('content', '')

                    if content:
                        reasoning_completed = False
                        # Process the body line by line.
                        for line in content.split('\n'):
                            line = line.strip()
                            if line:
                                if line.startswith('>'):
                                    if self.is_origin_reasoning:
                                        yield "reasoning", "\n"+line[1:].strip()
                                    # NOTE(review): unlike the streaming branch, this 'else'
                                    # binds to the inner `if self.is_origin_reasoning`, so
                                    # plain (non-'>') lines are never yielded in non-streaming
                                    # mode — confirm this asymmetry is intended.
                                    else:
                                        # The reasoning part is considered finished here.
                                        if not reasoning_completed:
                                            reasoning_completed = True
                                            # Not the last pipeline step: stop the stream.
                                            if not kwargs.get("is_last_step"):
                                                return
                                        # Last step or non-reasoning model: forward content.
                                        if not self.is_origin_reasoning or kwargs.get("is_last_step"):
                                            yield "content", "\n"+line

                except json.JSONDecodeError:
                    continue
                except Exception as e:
                    logger.error(f"处理 Grok3 非流式响应时发生错误: {e}")
                    continue
--------------------------------------------------------------------------------
/app/meeting/agents/human_agent.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, List, Optional, Any, AsyncGenerator
2 | import logging
3 | import asyncio
4 | import time
5 | from app.meeting.agents.agent import Agent
6 | from datetime import datetime
7 |
8 | # 设置日志
9 | logger = logging.getLogger(__name__)
10 |
class HumanAgent(Agent):
    """Human agent representing a human participant in a meeting.

    This agent never generates text itself.  When asked to respond it either
    returns a message the human already supplied, or switches into a
    "waiting for input" state and returns the special marker
    ``[WAITING_FOR_HUMAN_INPUT:<name>]`` so the meeting loop can pause until
    the human answers (or the wait times out).
    """

    def __init__(self, name: str, role_description: str, personality: Optional[str] = None, skills: Optional[List[str]] = None,
                 model_params: Optional[Dict[str, Any]] = None, base_url: Optional[str] = None, api_key: Optional[str] = None):
        """
        Initialize the human agent.

        Args:
            name: display name of the human role
            role_description: description of the human role
            personality: personality traits description
            skills: list of skills
            model_params: model parameters (unused for a human; kept for
                interface compatibility with Agent)
            base_url: base URL (interface compatibility)
            api_key: API key (interface compatibility)
        """
        super().__init__(name, role_description, personality, skills, model_params, base_url, api_key)
        self.is_human = True
        self.human_responses: Dict[int, str] = {}  # human replies keyed by round id
        self.current_round = 0
        self.conversation_history: List[Dict[str, Any]] = []  # local conversation log
        self.pending_response: Optional[str] = None  # full response waiting to be consumed
        self.is_waiting_response = False  # True while a (streamed) response is awaited
        self.is_interrupted = False  # True when the human interrupted the meeting
        self.host_role_name: Optional[str] = None  # name of the role this human "possesses"
        self.response_queue: asyncio.Queue = asyncio.Queue()
        self.stream_lock = asyncio.Lock()
        self.is_waiting_input = False  # True while waiting for human input
        self.pending_message: Optional[str] = None  # message queued for sending
        self.input_timeout = 600  # max seconds to wait for human input
        self.input_start_time: Optional[float] = None  # when waiting started
        # Fix: initialize explicitly.  Previously this attribute only existed
        # after add_message()/set_human_response() had been called, so reading
        # it earlier raised AttributeError.
        self.last_response: Optional[str] = None

    def wait_for_input(self) -> bool:
        """Enter the waiting-for-input state and record the start time."""
        self.is_waiting_input = True
        self.input_start_time = time.time()
        logger.info(f"人类智能体 {self.name} 等待输入")
        # Signals the system that the meeting is paused, awaiting human input.
        return True

    def is_waiting_for_input(self) -> bool:
        """Return whether the agent is currently waiting for human input."""
        return self.is_waiting_input

    def add_message(self, content: str) -> bool:
        """Record a human message for the current round and stop waiting."""
        self.pending_message = content
        # Store the message keyed by the current round.
        self.human_responses[self.current_round] = content
        # Reset the waiting state.
        self.is_waiting_input = False
        self.input_start_time = None
        # Remember as the most recent response.
        self.last_response = content
        logger.info(f"人类智能体 {self.name} 收到消息: {content[:50]}..., 当前轮次: {self.current_round}")
        return True

    def get_input_wait_duration(self) -> float:
        """Return how long (seconds) we have been waiting for human input."""
        if not self.is_waiting_input or not self.input_start_time:
            return 0
        return time.time() - self.input_start_time

    def has_input_timeout(self) -> bool:
        """Return whether the wait for human input has exceeded the timeout."""
        wait_duration = self.get_input_wait_duration()
        # Guard on wait_duration > 0 so a non-waiting agent never times out.
        return wait_duration > self.input_timeout if wait_duration > 0 else False

    def generate_response(self, prompt: str, context: List[Dict[str, Any]] = None) -> str:
        """
        Generate a response.  For a human agent this returns a previously
        entered message, or pauses the meeting to wait for one.

        Args:
            prompt: prompt text (ignored for humans)
            context: message history (ignored for humans)

        Returns:
            The pending human message, or the waiting marker.
        """
        # If a message is already queued, consume and return it.
        if self.pending_message:
            message = self.pending_message
            self.pending_message = None
            return message

        # Otherwise pause the meeting and wait for input.
        self.wait_for_input()
        # Special marker telling the meeting loop to wait for human input.
        return f"[WAITING_FOR_HUMAN_INPUT:{self.name}]"

    async def generate_response_stream(self, prompt: str, context: List[Dict[str, Any]] = None):
        """Streamed variant of generate_response (same semantics, one chunk)."""
        # Consume a queued message if there is one.
        if self.pending_message:
            message = self.pending_message
            self.pending_message = None
            yield message
            return

        # Otherwise enter the waiting state.
        self.wait_for_input()
        # Special marker telling the meeting loop to wait for human input.
        yield f"[WAITING_FOR_HUMAN_INPUT:{self.name}]"

    async def add_response_chunk(self, chunk: str) -> None:
        """Append a chunk of the human response to the stream queue."""
        await self.response_queue.put(chunk)

    async def finish_response(self) -> None:
        """Mark the streamed response as complete."""
        await self.response_queue.put("[END]")
        self.is_waiting_response = False

    def set_human_response(self, response: str) -> None:
        """Set the complete response entered by the human user."""
        logger.info(f"收到人类角色 {self.name} 的响应: {response[:50]}...")
        self.pending_response = response
        self.last_response = response
        self.is_waiting_response = False
        self.is_waiting_input = False
        self.input_start_time = None

    async def interrupt(self, message: str) -> None:
        """Interrupt the current meeting and inject a human message."""
        async with self.stream_lock:
            logger.info(f"人类角色 {self.name} 打断会议: {message[:50]}...")
            self.pending_response = message
            self.is_interrupted = True
            self.is_waiting_response = False
            self.is_waiting_input = False
            self.input_start_time = None
            # Drain anything already queued so the interrupt wins.
            while not self.response_queue.empty():
                try:
                    self.response_queue.get_nowait()
                except asyncio.QueueEmpty:
                    break
            # Enqueue the interrupt message and close the stream.
            await self.add_response_chunk(message)
            await self.finish_response()

    def clear_interrupt(self) -> None:
        """Clear the interrupted flag."""
        self.is_interrupted = False

    def is_interrupting(self) -> bool:
        """Return whether an interrupt is in progress."""
        return self.is_interrupted

    def get_current_round(self) -> int:
        """Return the current round id."""
        return self.current_round

    def set_current_round(self, round_id: int) -> None:
        """Set the current round id."""
        self.current_round = round_id

    def response(self, meeting_id: str, round_id: int, context: str) -> str:
        """Return the human participant's response for *round_id*, or pause
        the meeting (via the waiting marker) when no input exists yet."""
        try:
            # Track the round we are being asked about.
            self.current_round = round_id

            # If the human already answered this round, return that answer.
            if round_id in self.human_responses and self.human_responses[round_id]:
                response = self.human_responses[round_id]

                # Record the exchange in the conversation log.
                if hasattr(self, 'conversation_history'):
                    self.conversation_history.append({"role": "user", "content": context})
                    self.conversation_history.append({"role": "assistant", "content": response})

                return response

            # No answer yet: enter the waiting state.
            self.wait_for_input()

            # Special marker telling the meeting loop to wait for human input.
            return f"[WAITING_FOR_HUMAN_INPUT:{self.name}]"
        except Exception as e:
            logger.error(f"人类智能体响应出错: {str(e)}", exc_info=True)
            return f"[错误] 无法获取 {self.name} 的响应。错误: {str(e)}"

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the agent state for API responses."""
        return {
            "name": self.name,
            "role_description": self.role_description,
            "personality": self.personality,
            "skills": self.skills,
            "is_human": True,
            "is_waiting_input": self.is_waiting_input,
            "input_wait_duration": self.get_input_wait_duration() if self.is_waiting_input else 0
        }
--------------------------------------------------------------------------------
/app/clients/claude_client.py:
--------------------------------------------------------------------------------
1 | """Claude API 客户端"""
2 | import json
3 | from typing import AsyncGenerator, Optional, List, Dict
4 | from app.utils.logger import logger
5 | from .base_client import BaseClient
6 |
7 |
class ClaudeClient(BaseClient):
    """Client for Claude served through an OpenAI-compatible SSE endpoint."""

    def __init__(self, api_key: str, api_url: str = "https://api.anthropic.com/v1/messages", provider: str = "anthropic", is_origin_reasoning: bool = False):
        """Initialize the Claude client.

        Args:
            api_key: Claude API key
            api_url: Claude API endpoint
            provider: API provider name
            is_origin_reasoning: whether the model is used as a native reasoning model
        """
        super().__init__(api_key, api_url)
        self.provider = provider
        self.is_origin_reasoning = is_origin_reasoning
        self.reasoning_content = []  # reasoning fragments collected for the current stream
        logger.debug(f"ClaudeClient url: {self.api_url}")

    def _process_delta_text(self, content: str, in_thinking: bool):
        """Classify a streamed text fragment relative to <think>...</think> markers.

        Bug fix: the previous code tested ``'' in content`` — true for every
        string — so each fragment was treated as both entering and leaving the
        thinking block, and the stream terminated after the first chunk.  The
        marker literals were evidently lost; they are restored here as
        ``<think>``/``</think>``.

        Args:
            content: text fragment from the stream
            in_thinking: whether the stream is currently inside a think block

        Returns:
            tuple (events, in_thinking, stop): *events* is a list of
            (type, text) pairs to yield; *stop* is True when the stream should
            end (reasoning block closed with trailing content on a reasoning
            model — preserves the original early-return behavior).
        """
        events = []
        stop = False

        if '<think>' in content:
            in_thinking = True
            content = content.replace('<think>', '').strip()

        if '</think>' in content:
            in_thinking = False
            content = content.replace('</think>', '').strip()
            if content and self.is_origin_reasoning:
                events.append(("reasoning_content", content))
                stop = True
                return events, in_thinking, stop

        if in_thinking and self.is_origin_reasoning:
            # Inside the think block on a reasoning model: emit as reasoning.
            if content.strip():
                self.reasoning_content.append(content)
                events.append(("reasoning_content", content))
        elif not self.is_origin_reasoning and content.strip():
            # Non-reasoning model: emit everything as the answer.
            events.append(("answer", content))

        return events, in_thinking, stop

    async def stream_chat(
        self,
        messages: list,
        model_arg: tuple[float, float, float, float] = (0.7, 0.7, 0, 0),
        model: str = "claude-3-5-sonnet-20240620",
        stream: bool = True,
        tools: Optional[List[Dict]] = None,
        tool_choice: Optional[Dict] = None,
        enable_thinking: bool = False,
        thinking_budget_tokens: int = 16000,
        **kwargs
    ) -> AsyncGenerator[tuple[str, str], None]:
        """Streaming or non-streaming chat.

        Args:
            messages: message list
            model_arg: (temperature, top_p, presence_penalty, frequency_penalty)
            model: model name
            stream: whether to stream the response
            tools: tool configuration list
            tool_choice: tool selection configuration
            enable_thinking: enable extended thinking
            thinking_budget_tokens: token budget for thinking

        Yields:
            tuple[str, str]: (content type, content)
        """
        temperature, top_p, presence_penalty, frequency_penalty = model_arg

        # Base request payload shared with the other clients.
        data = self._prepare_request_data(
            messages=messages,
            model=model,
            stream=stream,
            temperature=temperature,
            top_p=top_p,
            presence_penalty=presence_penalty,
            frequency_penalty=frequency_penalty,
            **kwargs
        )

        # Claude-specific parameters.
        if tools:
            data["tools"] = tools
        if tool_choice:
            data["tool_choice"] = tool_choice
        if enable_thinking:
            # NOTE(review): the official Anthropic API expects
            # {"thinking": {"type": "enabled", "budget_tokens": N}}; confirm
            # the endpoint behind api_url accepts this flat field.
            data["thinking_budget_tokens"] = thinking_budget_tokens

        if self.provider != "anthropic":
            raise ValueError(f"不支持的Claude Provider: {self.provider}")

        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
            "Accept": "text/event-stream" if stream else "application/json",
        }

        # Reset the collected reasoning and the think-block state.
        self.reasoning_content = []
        in_thinking = False

        async for chunk in self._make_request(headers, data):
            chunk_str = chunk.decode('utf-8')
            if not chunk_str.strip():
                continue

            for line in chunk_str.split('\n'):
                if not line.startswith('data: '):
                    continue
                json_str = line[6:]
                if json_str.strip() == '[DONE]':
                    return

                try:
                    chunk_data = json.loads(json_str)
                    logger.debug(f"chunk_data: {chunk_data}")

                    # OpenAI-compatible response format.
                    if 'choices' in chunk_data:
                        delta = chunk_data['choices'][0].get('delta', {})
                        content = delta.get('content', '')
                        if content:
                            events, in_thinking, stop = self._process_delta_text(content, in_thinking)
                            for event in events:
                                yield event
                            if stop:
                                return

                    # Legacy Anthropic event format.
                    elif chunk_data.get('type') == 'content_block_delta':
                        delta = chunk_data.get('delta', {})

                        if delta.get('type') == 'thinking_delta':
                            thinking = delta.get('thinking', '')
                            if thinking:
                                yield "thinking", thinking

                        elif delta.get('type') == 'tool_use':
                            tool_content = json.dumps(delta.get('input', {}))
                            if tool_content:
                                yield "tool_use", tool_content

                        elif delta.get('type') == 'text_delta':
                            content = delta.get('text', '')
                            if content:
                                events, in_thinking, stop = self._process_delta_text(content, in_thinking)
                                for event in events:
                                    yield event
                                if stop:
                                    return

                except json.JSONDecodeError:
                    continue
--------------------------------------------------------------------------------
/app/clients/gemini_client.py:
--------------------------------------------------------------------------------
1 | """Gemini API 客户端"""
2 | import json
3 | from typing import AsyncGenerator
4 | import re
5 | from urllib.parse import urlparse, parse_qs
6 | from app.utils.logger import logger
7 | from .base_client import BaseClient
8 |
class GeminiClient(BaseClient):
    """Client for Google's Gemini generateContent / streamGenerateContent API."""

    def __init__(self, api_key: str, api_url: str = "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent"):
        """Initialize the Gemini client.

        Args:
            api_key: Gemini API key
            api_url: Gemini API endpoint (bare domain or full model URL)
        """
        super().__init__(api_key, api_url)
        self.provider = "google"

    async def stream_chat(
        self,
        messages: list,
        model: str = "gemini-pro",
        stream: bool = True,
        **kwargs
    ) -> AsyncGenerator[tuple[str, str], None]:
        """Streaming or non-streaming chat.

        Args:
            messages: message list (OpenAI-style role/content dicts)
            model: model name
            stream: whether to stream the response
            **kwargs: extra parameters, including "custom_parameters"

        Yields:
            tuple[str, str]: (content type, content) — type is always "answer"
        """
        headers = {
            "Content-Type": "application/json"
        }

        # Build the generic request payload first (shared helper).
        data = self._prepare_request_data(messages, model, stream=stream, **kwargs)

        # Convert to the Gemini request format.  Gemini only knows "user" and
        # "model" roles, so any non-"user" role (system/assistant) maps to "model".
        gemini_data = {
            "contents": [
                {
                    "role": "user" if msg["role"] == "user" else "model",
                    "parts": [{"text": msg["content"]}]
                }
                for msg in messages
            ],
            "generationConfig": {
                "temperature": data.get("temperature", 0.7),
                "topP": data.get("top_p", 1.0),
                "maxOutputTokens": data.get("max_tokens", 2000)
            }
        }

        # Merge caller-supplied Gemini-specific parameters.
        custom_parameters = kwargs.get("custom_parameters", {})
        if custom_parameters:
            if "generationConfig" in custom_parameters:
                gemini_data["generationConfig"].update(custom_parameters["generationConfig"])
            if "safetySettings" in custom_parameters:
                gemini_data["safetySettings"] = custom_parameters["safetySettings"]

        # logger.debug(f"Gemini 请求数据: {gemini_data}")

        # Build the final request URL.
        base_url = self.api_url

        # Bare-domain URL (no /models/ path): assemble the full API path here.
        if base_url == "https://generativelanguage.googleapis.com" or not "/models/" in base_url:
            api_version = "v1beta"
            operation = "streamGenerateContent" if stream else "generateContent"

            # Drop a trailing slash, if any.
            base_url = base_url.rstrip('/')

            # Full URL with the API key as a query parameter.
            final_url = f"{base_url}/{api_version}/models/{model}:{operation}?key={self.api_key}"

            # Request SSE framing for streaming.
            if stream:
                final_url += "&alt=sse"
        else:
            # URL already contains a model (and possibly an operation).
            # Swap any model name embedded in the URL for the requested model.
            model_in_url = re.search(r'models/([^/:]+)', base_url)
            if model_in_url:
                base_url = base_url.replace(model_in_url.group(1), model)

            # Make sure the streaming endpoint is used for stream requests.
            # NOTE(review): the non-streaming case is not rewritten here — a
            # URL pinned to ':streamGenerateContent' stays streaming even when
            # stream=False; confirm configured URLs match the intended mode.
            if stream:
                if ':generateContent' in base_url:
                    base_url = base_url.replace(':generateContent', ':streamGenerateContent')
                elif ':streamGenerateContent' not in base_url:
                    # No operation in the URL: append streamGenerateContent.
                    if base_url.endswith('/'):
                        base_url += 'streamGenerateContent'
                    else:
                        base_url += ':streamGenerateContent'

            # Inspect existing query parameters before appending more.
            parsed_url = urlparse(base_url)
            query_params = parse_qs(parsed_url.query)

            # Append the required query parameters.
            final_url = base_url
            if '?' not in final_url:
                final_url += '?'
            elif not final_url.endswith('?'):
                final_url += '&'

            # SSE framing for streaming requests (unless already present).
            if stream and 'alt' not in query_params:
                final_url += 'alt=sse&'

            if 'key' not in query_params:
                final_url += f'key={self.api_key}'

        logger.debug(f"Gemini 最终请求URL: {final_url}")

        if stream:
            async for chunk in self._make_request(headers, gemini_data, final_url):
                try:
                    chunk_str = chunk.decode('utf-8')
                    if not chunk_str.strip():
                        continue

                    # Handle every SSE line in the current chunk.
                    for line in chunk_str.split('\n'):
                        line = line.strip()
                        if not line:
                            continue

                        if line.startswith('data: '):
                            json_str = line[6:]
                            if json_str.strip() == '[DONE]':
                                logger.debug("收到流式传输结束标记 [DONE]")
                                return

                            try:
                                data = json.loads(json_str)
                                if data.get("candidates"):
                                    # Extract the text parts of the first candidate.
                                    candidate = data["candidates"][0]
                                    content = candidate.get("content", {})
                                    parts = content.get("parts", [])

                                    for part in parts:
                                        text = part.get("text", "")
                                        if text:
                                            logger.debug(f"流式响应片段: {text[:30]}...")
                                            yield "answer", text
                            except json.JSONDecodeError as je:
                                logger.warning(f"JSON解析错误: {je}, 原始数据: {json_str[:100]}")
                            except Exception as e:
                                logger.error(f"处理SSE数据时出错: {e}")
                except Exception as e:
                    logger.error(f"处理 Gemini 流式响应时发生错误: {str(e)}")
                    continue
        else:
            # Non-streaming: accumulate the whole body, then parse once.
            full_response = ""
            async for chunk in self._make_request(headers, gemini_data, final_url):
                try:
                    chunk_str = chunk.decode('utf-8')
                    full_response += chunk_str
                except Exception as e:
                    logger.error(f"处理 Gemini 非流式响应时发生错误: {e}")

            try:
                # Parse the accumulated response body.
                response = json.loads(full_response)
                if response.get("candidates"):
                    content = response["candidates"][0].get("content", {})
                    parts = content.get("parts", [])

                    result_text = ""
                    for part in parts:
                        text = part.get("text", "")
                        result_text += text

                    if result_text:
                        yield "answer", result_text
            except json.JSONDecodeError:
                logger.error(f"非流式响应JSON解析失败: {full_response[:200]}")
            except Exception as e:
                logger.error(f"处理非流式响应时发生错误: {e}")
/app/static/login.js:
--------------------------------------------------------------------------------
/**
 * Handle the login form submission: validate the fields, POST to /v1/login,
 * store the access token and redirect to the app on success.
 * @param {SubmitEvent} event - the form submit event
 */
async function handleLogin(event) {
    event.preventDefault();

    const form = document.getElementById('loginForm');
    // Renamed from `alert` to avoid shadowing window.alert.
    const alertBox = document.getElementById('loginAlert');
    const loginButton = document.getElementById('loginButton');

    // Hide any previous error message.
    alertBox.style.display = 'none';

    const formData = new FormData(form);
    const data = Object.fromEntries(formData.entries());

    // Basic client-side validation; guard against missing fields so
    // .trim() never throws on undefined.
    const username = (data.username || '').trim();
    const password = data.password || '';

    if (username === '' || password === '') {
        showAlert('Please enter both username and password');
        return;
    }

    // Disable the button and show a loading state.
    loginButton.disabled = true;
    loginButton.innerHTML = ' Signing in...';

    try {
        const response = await fetch('/v1/login', {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json'
            },
            body: JSON.stringify(data)
        });

        if (!response.ok) {
            throw new Error('Invalid credentials');
        }

        const result = await response.json();
        localStorage.setItem('access_token', result.access_token);

        // Fade out before redirecting.
        document.body.style.opacity = '0';
        setTimeout(() => {
            window.location.href = '/static/index.html';
        }, 300);
    } catch (error) {
        // Restore the button state.
        loginButton.disabled = false;
        loginButton.innerHTML = ' Sign In';

        // Show the error message.
        showAlert(error.message);

        // Shake the form to signal failure.
        form.classList.add('shake');
        setTimeout(() => {
            form.classList.remove('shake');
        }, 500);
    }
}
64 |
// Show the error banner with the given message, auto-hiding after 5 seconds.
function showAlert(message) {
    const text = document.getElementById('alertMessage');
    const box = document.getElementById('loginAlert');

    text.textContent = message;
    box.style.display = 'block';

    setTimeout(() => {
        box.style.display = 'none';
    }, 5000);
}
77 |
// On load: skip the login page entirely if a token is already stored,
// otherwise wire up all the login-page behaviour.
document.addEventListener('DOMContentLoaded', () => {
    if (localStorage.getItem('access_token')) {
        window.location.href = '/static/index.html';
    }

    // Fade the page in.
    document.body.style.opacity = '0';
    setTimeout(() => {
        document.body.style.opacity = '1';
    }, 100);

    // Look up the interactive elements once.
    const loginForm = document.getElementById('loginForm');
    const togglePassword = document.getElementById('togglePassword');
    const passwordInput = document.getElementById('password');
    const loginButton = document.getElementById('loginButton');
    const passwordStrength = document.getElementById('passwordStrength');
    const passwordStrengthMeter = document.getElementById('passwordStrengthMeter');

    // Password visibility toggle with eye-icon swap.
    if (togglePassword) {
        togglePassword.addEventListener('click', function () {
            const hidden = passwordInput.getAttribute('type') === 'password';
            passwordInput.setAttribute('type', hidden ? 'text' : 'password');

            const icon = this.querySelector('i');
            icon.classList.toggle('fa-eye');
            icon.classList.toggle('fa-eye-slash');
        });
    }

    // Ripple effect on button clicks (only when the button itself is hit,
    // not one of its children).
    if (loginButton) {
        loginButton.addEventListener('click', function (e) {
            if (e.target !== this) return;

            const rect = e.target.getBoundingClientRect();
            const ripple = document.createElement('span');
            ripple.classList.add('btn-ripple');
            ripple.style.left = (e.clientX - rect.left) + 'px';
            ripple.style.top = (e.clientY - rect.top) + 'px';

            this.appendChild(ripple);

            setTimeout(() => {
                ripple.remove();
            }, 600);
        });
    }

    // Live password-strength meter.
    if (passwordInput && passwordStrength && passwordStrengthMeter) {
        passwordInput.addEventListener('input', function () {
            const pwd = this.value;

            if (pwd.length === 0) {
                passwordStrength.style.display = 'none';
                return;
            }

            passwordStrength.style.display = 'block';

            // Count the satisfied criteria: length, digit, special char,
            // mixed case. Score range is 0-4.
            const score = [
                pwd.length >= 8,
                /\d/.test(pwd),
                /[!@#$%^&*]/.test(pwd),
                /[A-Z]/.test(pwd) && /[a-z]/.test(pwd)
            ].filter(Boolean).length;

            // Map the score to the same classes as before:
            // 0 -> weak, 1-2 -> medium, 3 -> good, 4 -> strong.
            const levels = [
                'strength-weak',
                'strength-medium',
                'strength-medium',
                'strength-good',
                'strength-strong'
            ];
            passwordStrengthMeter.className = '';
            passwordStrengthMeter.classList.add(levels[score]);
        });
    }

    // Form submission handler.
    if (loginForm) {
        loginForm.addEventListener('submit', handleLogin);
    }

    // Auto-focus the username field when it is empty.
    const usernameInput = document.getElementById('username');
    if (usernameInput && usernameInput.value.trim() === '') {
        usernameInput.focus();
    }
});
185 |
// Inject the page's animation and strength-meter styles at load time.
// Kept in JS so login.html needs no extra stylesheet.
const style = document.createElement('style');
style.textContent = `
    body {
        transition: opacity 0.3s ease;
    }
    
    @keyframes shake {
        0%, 100% { transform: translateX(0); }
        25% { transform: translateX(-10px); }
        75% { transform: translateX(10px); }
    }
    
    .shake {
        animation: shake 0.5s ease-in-out;
    }
    
    .btn-ripple {
        position: absolute;
        border-radius: 50%;
        background: rgba(255, 255, 255, 0.3);
        transform: scale(0);
        animation: ripple 0.6s linear;
    }
    
    @keyframes ripple {
        to {
            transform: scale(2.5);
            opacity: 0;
        }
    }
    
    /* 密码强度指示器样式 */
    .password-strength {
        height: 4px;
        border-radius: 2px;
        margin-top: 0.5rem;
        display: none;
        overflow: hidden;
    }
    
    .password-strength-meter {
        height: 100%;
        width: 0%;
        transition: width 0.3s ease, background-color 0.3s ease;
    }
    
    .strength-weak { background-color: #e74c3c; width: 25%; }
    .strength-medium { background-color: #f39c12; width: 50%; }
    .strength-good { background-color: #3498db; width: 75%; }
    .strength-strong { background-color: #2ecc71; width: 100%; }
`;

document.head.appendChild(style);
--------------------------------------------------------------------------------
/app/models/multi_step_collaboration.py:
--------------------------------------------------------------------------------
1 | from typing import List, Dict, AsyncGenerator
2 | import asyncio
3 | import json
4 | import time
5 | from app.utils.logger import logger
6 | from app.clients import DeepSeekClient, ClaudeClient, GeminiClient
7 | from app.clients.uni_client import UniClient
8 | from app.clients.openai_client import OpenAIClient
9 |
10 | class MultiStepModelCollaboration:
11 | """处理多步骤模型协作的类"""
12 |
13 | def __init__(self, steps: List[Dict]):
14 | """初始化多步骤协作处理器
15 |
16 | Args:
17 | steps: 步骤列表,每个步骤包含:
18 | - model: 数据库模型对象
19 | - step_type: 步骤类型 (reasoning/execution)
20 | - system_prompt: 系统提示词
21 | """
22 | self.steps = steps
23 | self.clients = []
24 |
25 | # 检查是否为单模型情况
26 | self.is_single_model = len(steps) == 1
27 | if self.is_single_model:
28 | self.uni_client = UniClient.create_client(steps[0]['model'])
29 |
30 | # 初始化每个步骤的客户端
31 | for step in steps:
32 | model = step['model']
33 | client = self._init_client(
34 | model.provider,
35 | model.api_key,
36 | model.api_url,
37 | step['step_type'] == 'reasoning'
38 | )
39 | self.clients.append({
40 | 'client': client,
41 | 'model_name': model.model_name,
42 | 'temperature': model.temperature,
43 | 'max_tokens': model.max_tokens,
44 | 'top_p': model.top_p,
45 | 'frequency_penalty': model.frequency_penalty,
46 | 'presence_penalty': model.presence_penalty,
47 | 'step_type': step['step_type'],
48 | 'system_prompt': step['system_prompt'],
49 | 'tools': model.tools,
50 | 'tool_choice': model.tool_choice,
51 | 'enable_thinking': model.enable_thinking,
52 | 'thinking_budget_tokens': model.thinking_budget_tokens
53 | })
54 |
55 | def _init_client(self, provider: str, api_key: str, api_url: str, is_reasoning: bool):
56 | """初始化对应的客户端"""
57 | try:
58 | # 根据提供商类型初始化对应的客户端
59 | if provider == "deepseek":
60 | return DeepSeekClient(api_key, api_url, is_origin_reasoning=is_reasoning)
61 | elif provider == "google":
62 | # 对于多步骤处理使用专用客户端
63 | if not self.is_single_model:
64 | return GeminiClient(api_key, api_url)
65 | # 对于单模型处理,UniClient会处理
66 | return None
67 | elif provider == "anthropic":
68 | return ClaudeClient(api_key, api_url, is_origin_reasoning=is_reasoning)
69 | elif provider == "grok3":
70 | from app.clients import Grok3Client
71 | return Grok3Client(api_key, api_url, is_origin_reasoning=is_reasoning)
72 | elif provider in ["oneapi", "openrouter", "openai-completion"]:
73 | from app.clients import OpenAIClient
74 | return OpenAIClient(api_key, api_url)
75 | elif provider == "腾讯云":
76 | # 腾讯云使用与 DeepSeek 相同的客户端
77 | return DeepSeekClient(api_key, api_url, provider="腾讯云", is_origin_reasoning=is_reasoning)
78 | else:
79 | raise ValueError(f"Unsupported provider: {provider}")
80 | except Exception as e:
81 | logger.error(f"初始化客户端时发生错误: {e}")
82 | raise
83 |
    async def process_with_stream(
        self,
        messages: list
    ) -> AsyncGenerator[bytes, None]:
        """Run all collaboration steps, streaming SSE chunks as they arrive.

        Args:
            messages: initial message list

        Yields:
            bytes: "data: {...}\\n\\n" payloads in OpenAI chat.completion.chunk format
        """
        chat_id = f"chatcmpl-{hex(int(time.time() * 1000))[2:]}"
        created_time = int(time.time())

        current_messages = messages.copy()
        previous_result = ""

        # Single model: delegate directly to the universal client.
        if self.is_single_model:
            step = self.steps[0]
            async for chunk in self.uni_client.generate_stream(
                messages=messages,
                system_prompt=step.get('system_prompt')
            ):
                yield chunk
            return

        for idx, client_info in enumerate(self.clients):
            client = client_info['client']
            step_type = client_info['step_type']
            system_prompt = client_info['system_prompt']
            is_last_step = idx == len(self.clients) - 1
            is_first_step = idx == 0

            # Install this step's system prompt (replaces any existing one).
            if system_prompt:
                current_messages = self._add_system_prompt(current_messages, system_prompt)
            # logger.debug(f"current_messages: {current_messages}")
            # From the second step onward, feed the previous step's output in.
            if idx > 0:
                current_messages = self._add_previous_step_result(
                    current_messages,
                    previous_result,
                    step_type
                )

            # Collect this step's output while re-emitting each chunk.
            current_output = []
            async for content_type, content in client.stream_chat(
                messages=current_messages,
                model=client_info['model_name'],
                temperature=client_info['temperature'],
                max_tokens=client_info['max_tokens'],
                top_p=client_info['top_p'],
                frequency_penalty=client_info['frequency_penalty'],
                presence_penalty=client_info['presence_penalty'],
                is_last_step=is_last_step,
                is_first_step=is_first_step,
                tools=client_info.get('tools'),
                tool_choice=client_info.get('tool_choice'),
                enable_thinking=client_info.get('enable_thinking', False),
                thinking_budget_tokens=client_info.get('thinking_budget_tokens', 16000)
            ):
                current_output.append(content)

                # Build the per-chunk delta; step-specific fields are filled
                # according to the chunk's content type.
                delta = {
                    "role": "assistant",
                    "thinking_content": content if content_type == "thinking" else "",
                    "tool_use_content": content if content_type == "tool_use" else "",
                    f"{step_type}_content": content if step_type == "reasoning" else ""
                }

                # Only execution steps (or the final reasoning step) emit
                # user-visible content.
                if step_type == "execution" or is_last_step:
                    delta["content"] = content
                # logger.debug(f"delta: {delta}")
                # Emit an OpenAI-style streaming chunk.
                response = {
                    "id": chat_id,
                    "object": "chat.completion.chunk",
                    "created": created_time,
                    "model": client_info['model_name'],
                    "choices": [{
                        "index": 0,
                        "delta": delta
                    }]
                }

                yield f"data: {json.dumps(response)}\n\n".encode('utf-8')

            # Keep the complete output of this step for the next one.
            previous_result = "".join(current_output)
178 |
179 | async def process_without_stream(self, messages: list) -> dict:
180 | """处理非流式输出
181 |
182 | Args:
183 | messages: 初始消息列表
184 |
185 | Returns:
186 | dict: 完整的响应数据
187 | """
188 | chat_id = f"chatcmpl-{hex(int(time.time() * 1000))[2:]}"
189 | created_time = int(time.time())
190 |
191 | current_messages = messages.copy()
192 | previous_result = ""
193 | final_response = {
194 | "role": "assistant",
195 | "reasoning_content": "",
196 | "execution_content": "",
197 | "content": ""
198 | }
199 |
200 | # 如果是单模型,直接使用通用客户端
201 | if self.is_single_model:
202 | step = self.steps[0]
203 | return await self.uni_client.generate(
204 | messages=messages,
205 | system_prompt=step.get('system_prompt')
206 | )
207 |
208 | for idx, client_info in enumerate(self.clients):
209 | client = client_info['client']
210 | step_type = client_info['step_type']
211 | system_prompt = client_info['system_prompt']
212 | is_last_step = idx == len(self.clients) - 1
213 | is_first_step = idx == 0
214 | if system_prompt:
215 | current_messages = self._add_system_prompt(current_messages, system_prompt)
216 |
217 | if idx > 0:
218 | current_messages = self._add_previous_step_result(
219 | current_messages,
220 | previous_result,
221 | step_type
222 | )
223 |
224 | current_output = []
225 | async for content_type, content in client.stream_chat(
226 | messages=current_messages,
227 | model=client_info['model_name'],
228 | is_last_step=is_last_step,
229 | is_first_step=is_first_step
230 | ):
231 | current_output.append(content)
232 |
233 | output_text = "".join(current_output)
234 | previous_result = output_text
235 |
236 | # 更新响应内容
237 | final_response[f"{step_type}_content"] = output_text
238 | if step_type == "execution" or is_last_step:
239 | final_response["content"] = output_text
240 |
241 | return {
242 | "id": chat_id,
243 | "object": "chat.completion",
244 | "created": created_time,
245 | "model": self.clients[-1]['model_name'],
246 | "choices": [{
247 | "index": 0,
248 | "message": final_response
249 | }]
250 | }
251 |
252 | def _add_system_prompt(self, messages: list, system_prompt: str) -> list:
253 | """添加系统提示词到消息列表"""
254 | new_messages = messages.copy()
255 | if new_messages and new_messages[0].get("role") == "system":
256 | new_messages[0]["content"] = f"{system_prompt}"
257 | else:
258 | new_messages.insert(0, {
259 | "role": "system",
260 | "content": system_prompt
261 | })
262 | return new_messages
263 |
264 | def _add_previous_step_result(self, messages: list, previous_result: str, step_type: str) -> list:
265 | """添加前一步结果到消息中"""
266 | new_messages = messages.copy()
267 | last_message = new_messages[-1]
268 |
269 | prefix = "reasoning" if step_type == "execution" else "previous step"
270 | # 模型接力提示词
271 | prompt = f"""
272 | Here's the {prefix} result:\n{previous_result}\n\n
273 | Based on this, please provide your response in a new line:
274 | """
275 |
276 | if last_message.get("role") == "user":
277 | last_message["content"] = f"{last_message['content']}\n\n{prompt}"
278 |
279 | return new_messages
--------------------------------------------------------------------------------
/app/static/js/custom-speaking-order.js:
--------------------------------------------------------------------------------
1 | /**
2 | * 自定义发言顺序相关功能
3 | */
4 |
// Read the custom speaking order from the UI.
// Returns an array of role names, or null when the list is missing or empty.
function getCustomSpeakingOrder() {
    const listEl = document.getElementById('customSpeakingOrderList');
    if (!listEl) return null;

    const items = listEl.querySelectorAll('.role-item');
    if (items.length === 0) return null;

    return Array.from(items, el => el.getAttribute('data-role-name'));
}
20 |
// Rebuild the custom speaking-order list from the currently checked roles.
// Fix: the role-item markup was duplicated here although the shared helper
// addRoleItemToOrderList (defined below in this file) builds the same item;
// route creation through the helper so the markup lives in one place.
function updateCustomSpeakingOrderUI() {
    const orderList = document.getElementById('customSpeakingOrderList');
    if (!orderList) return;

    // Reset the list before repopulating it.
    orderList.innerHTML = '';

    // Collect the currently selected roles and append them in checkbox order.
    const checkboxes = document.querySelectorAll('#roleCheckboxes input[type="checkbox"]:checked');
    checkboxes.forEach(checkbox => {
        addRoleItemToOrderList(orderList, checkbox.value, checkbox.getAttribute('data-role-name'));
    });

    // Re-enable drag-and-drop sorting on the fresh items.
    initSortable();
}
55 |
// Initialize drag-and-drop sorting for the speaking-order list.
function initSortable() {
    const orderList = document.getElementById('customSpeakingOrderList');
    if (!orderList) return;

    // Swap element positions directly using the HTML5 drag-and-drop API.
    const items = orderList.querySelectorAll('.role-item');
    let draggedItem = null;
    let targetItem = null;

    items.forEach(item => {
        item.setAttribute('draggable', 'true');

        // Drag start: remember the source element.
        item.addEventListener('dragstart', function(e) {
            draggedItem = this;
            // Deferred so the browser captures the drag image before the
            // 'dragging' style is applied.
            setTimeout(() => this.classList.add('dragging'), 0);
            e.dataTransfer.setData('text/plain', item.getAttribute('data-role-id'));
        });

        // Drag end: clear state and styling.
        item.addEventListener('dragend', function() {
            this.classList.remove('dragging');
            draggedItem = null;
            targetItem = null;

            // Remove the drop-target styling from every item.
            items.forEach(item => {
                item.classList.remove('drag-over');
            });
        });

        // Drag enters a potential drop target.
        item.addEventListener('dragenter', function(e) {
            e.preventDefault();
            if (this !== draggedItem) {
                this.classList.add('drag-over');
                targetItem = this;
            }
        });

        // Drag leaves a potential drop target.
        item.addEventListener('dragleave', function() {
            this.classList.remove('drag-over');
            if (this === targetItem) {
                targetItem = null;
            }
        });

        // Drag moves over a target; preventDefault is required to allow drop.
        item.addEventListener('dragover', function(e) {
            e.preventDefault();
        });

        // Drop: swap the dragged item with the drop target.
        item.addEventListener('drop', function(e) {
            e.preventDefault();
            if (this !== draggedItem) {
                // Exchange positions directly.
                swapElements(draggedItem, this);
                this.classList.remove('drag-over');
            }
        });
    });
}
121 |
// Helper: swap the positions of two sibling elements in place, flashing a
// brief 'swapped' animation class on both.
function swapElements(el1, el2) {
    if (!el1 || !el2) return;

    const container = el1.parentNode;
    if (!container) return;

    // Snapshot each element's following sibling before moving anything.
    const afterFirst = el1.nextElementSibling;
    const afterSecond = el2.nextElementSibling;

    if (afterFirst === el2) {
        // el2 immediately follows el1: one move swaps them.
        container.insertBefore(el2, el1);
    } else if (afterSecond === el1) {
        // el1 immediately follows el2: one move swaps them.
        container.insertBefore(el1, el2);
    } else {
        // Non-adjacent: move el1 into el2's slot, then el2 into el1's
        // original slot (append when the slot was at the end).
        if (afterSecond) {
            container.insertBefore(el1, afterSecond);
        } else {
            container.appendChild(el1);
        }
        if (afterFirst) {
            container.insertBefore(el2, afterFirst);
        } else {
            container.appendChild(el2);
        }
    }

    // Trigger the swap animation.
    [el1, el2].forEach(el => el.classList.add('swapped'));

    // Clear the animation class once the transition has played.
    setTimeout(() => {
        [el1, el2].forEach(el => el.classList.remove('swapped'));
    }, 300);
}
169 |
// Refresh the speaking-order list whenever a role checkbox changes.
function setupRoleCheckboxListeners() {
    const boxes = document.querySelectorAll('#roleCheckboxes input[type="checkbox"]');
    for (const box of boxes) {
        box.addEventListener('change', updateCustomSpeakingOrderUI);
    }
}
177 |
// Show or hide the custom speaking-order section based on the selected mode.
function updateCustomSpeakingOrderVisibility() {
    const modeSelect = document.querySelector('select[name="mode"]');
    const section = document.getElementById('customSpeakingOrderSection');
    if (!modeSelect || !section) return;

    // Debate and six-thinking-hats modes impose their own speaking order.
    const mode = modeSelect.value;
    const orderIsFixed = mode === 'debate' || mode === 'six_thinking_hats';
    if (orderIsFixed) {
        section.style.display = 'none';
    } else {
        section.style.display = 'block';
        updateCustomSpeakingOrderUI();
    }
}
193 |
// Set up the custom speaking-order feature.
function initCustomSpeakingOrder() {
    // React to meeting-mode changes.
    const modeSelect = document.querySelector('select[name="mode"]');
    if (modeSelect) {
        modeSelect.addEventListener('change', updateCustomSpeakingOrderVisibility);
    }

    // Apply the correct visibility for the current mode right away.
    updateCustomSpeakingOrderVisibility();

    // React to role-selection changes.
    setupRoleCheckboxListeners();
}
208 |
// Initialize whenever the discussion-group modal is shown.
document.addEventListener('DOMContentLoaded', () => {
    const addGroupModal = document.getElementById('addGroupModal');
    if (!addGroupModal) return;
    addGroupModal.addEventListener('shown.bs.modal', () => {
        initCustomSpeakingOrder();
    });
});
218 |
// Populate the custom speaking-order list when editing an existing group.
// Fix: the role-item markup was copy-pasted in three places here although the
// shared helper addRoleItemToOrderList builds exactly this item; all three
// paths now go through the helper.
function setCustomSpeakingOrderForEdit(groupData) {
    if (!groupData) return;

    // Outer delay: wait until the modal DOM is fully rendered.
    setTimeout(() => {
        // First make sure the group's role checkboxes are checked.
        if (groupData.role_ids && Array.isArray(groupData.role_ids)) {
            groupData.role_ids.forEach(roleId => {
                const checkbox = document.querySelector(`#roleCheckboxes input[value="${roleId}"]`);
                if (checkbox) {
                    checkbox.checked = true;
                }
            });
        }

        // Inner delay: wait for the checkbox state to settle before building
        // the order list.
        setTimeout(() => {
            const orderList = document.getElementById('customSpeakingOrderList');
            if (!orderList) {
                console.error('找不到自定义发言顺序列表元素');
                return;
            }

            // Start from an empty list.
            orderList.innerHTML = '';

            if (groupData.custom_speaking_order && Array.isArray(groupData.custom_speaking_order) && groupData.custom_speaking_order.length > 0) {
                // Map role name -> role object using the API's roles data.
                const roleMap = {};
                if (groupData.roles && Array.isArray(groupData.roles)) {
                    groupData.roles.forEach(role => {
                        roleMap[role.name] = role;
                    });
                }

                // Add roles following the stored custom order.
                for (let i = 0; i < groupData.custom_speaking_order.length; i++) {
                    const roleName = groupData.custom_speaking_order[i];
                    if (roleMap[roleName]) {
                        addRoleItemToOrderList(orderList, roleMap[roleName].id, roleName);
                    } else {
                        console.warn(`找不到角色: ${roleName}`);
                    }
                }

                // Append any selected roles missing from the stored order.
                if (groupData.roles) {
                    const addedRoleNames = new Set(groupData.custom_speaking_order);
                    groupData.roles.forEach(role => {
                        if (!addedRoleNames.has(role.name)) {
                            addRoleItemToOrderList(orderList, role.id, role.name);
                        }
                    });
                }
            } else {
                // No stored custom order: fall back to the API's role order.
                if (groupData.roles && Array.isArray(groupData.roles)) {
                    groupData.roles.forEach(role => {
                        addRoleItemToOrderList(orderList, role.id, role.name);
                    });
                }
            }

            // Enable drag-and-drop sorting on the new items.
            initSortable();
        }, 200); // inner delay: checkbox state must be updated first
    }, 300); // outer delay: DOM must be fully loaded first
}
325 |
// Helper: append a single role entry (div.role-item carrying data-role-id /
// data-role-name attributes) to the speaking-order list.
// NOTE(review): the template literal below looks like it lost inline markup
// during extraction — confirm against the original file before editing it.
function addRoleItemToOrderList(orderList, roleId, roleName) {
    const roleItem = document.createElement('div');
    roleItem.className = 'role-item';
    roleItem.setAttribute('data-role-id', roleId);
    roleItem.setAttribute('data-role-name', roleName);
    roleItem.innerHTML = `
            
            ${roleName}
        `;
    orderList.appendChild(roleItem);
}
338 |
--------------------------------------------------------------------------------
/app/meeting/utils/summary_generator.py:
--------------------------------------------------------------------------------
1 | from langchain_openai import ChatOpenAI
2 | from langchain.schema import HumanMessage
3 | import os
4 | import traceback
5 | import logging
6 | import time
7 | import random
8 | from typing import List, Dict, Any
9 | import json
10 | import asyncio
11 |
# Logging setup.
# NOTE(review): logging.basicConfig at import time configures the process-wide
# root logger; confirm this is intended for a library module.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger("SummaryGenerator")
15 |
class SummaryGenerator:
    """Meeting summary generator: produces summaries for the various meeting modes.

    Both public entry points are static methods; the instance attributes set
    in ``__init__`` are not read by them (kept for backward compatibility).
    """
    
    def __init__(self, model_name: str = "gpt-3.5-turbo", api_key: str = None, api_url: str = None):
        # Stored for potential instance-based use; the static methods below
        # take their own model/API parameters instead of reading these.
        self.model_name = model_name
        self.api_key = api_key
        self.api_url = api_url
    
    @staticmethod
    def generate_summary(meeting_topic: str, meeting_history: List[Dict[str, Any]], 
                        prompt_template: str, model_name: str = None, api_key: str = None, api_base_url: str = None) -> str:
        """
        Generate a meeting summary (blocking).

        Args:
            meeting_topic: meeting topic
            meeting_history: meeting history entries (dicts with "agent" and "content" keys)
            prompt_template: prompt template; may reference topic/meeting_topic
                and history/history_text placeholders
            model_name: model name; falls back to OPENAI_MODEL_NAME env var or
                "gpt-3.5-turbo" when None
            api_key: API key
            api_base_url: API base URL

        Returns:
            str: the generated summary, a canned fallback summary when the API
            call fails, or an error string when formatting/setup fails
        """
        try:
            logger.info(f"开始生成'{meeting_topic}'的会议总结: 历史消息数={len(meeting_history)}")
            
            # Build the transcript text from the history entries.
            history_text = ""
            for entry in meeting_history:
                if entry["agent"] != "system":  # skip system messages
                    history_text += f"[{entry['agent']}]: {entry['content']}\n\n"
            
            # Fill in the prompt template; both alias pairs are supplied so a
            # template may use either {topic}/{meeting_topic} and
            # {history}/{history_text}.
            summary_prompt = prompt_template.format(
                topic=meeting_topic,
                meeting_topic=meeting_topic,
                history=history_text,
                history_text=history_text
            )
            
            # Log which model will be used.
            logger.info(f"使用模型生成总结: model_name={model_name or '默认模型'}")
            
            # Warn (but continue) when the API URL or key is missing.
            if not api_base_url:
                logger.warning("API基础URL为空,可能影响总结生成")
            if not api_key:
                logger.warning("API密钥为空,可能影响总结生成")
            
            try:
                # Pass the API key and base URL through to the model client.
                model_kwargs = {"temperature": 0.3}
                if api_key:
                    model_kwargs["api_key"] = api_key
                    logger.info("使用提供的API密钥")
                if api_base_url:
                    model_kwargs["base_url"] = api_base_url
                    logger.info(f"使用提供的API基础URL: {api_base_url}")
                
                # Fall back to the default model name.
                if not model_name:
                    model_name = os.environ.get("OPENAI_MODEL_NAME", "gpt-3.5-turbo")
                    logger.info(f"使用默认模型名称: {model_name}")
                
                logger.info(f"初始化LLM模型: {model_name}")
                llm = ChatOpenAI(model_name=model_name, **model_kwargs)
                
                # Retry with exponential backoff on transient failures.
                max_retries = 3
                base_delay = 2
                
                for attempt in range(max_retries + 1):
                    try:
                        if attempt > 0:
                            logger.info(f"总结生成尝试 {attempt}/{max_retries}...")
                        
                        logger.info("调用LLM生成总结")
                        response = llm.invoke([HumanMessage(content=summary_prompt)])
                        summary = response.content
                        
                        logger.info(f"成功生成总结: 长度={len(summary)}")
                        return summary
                        
                    except Exception as e:
                        logger.warning(f"总结生成错误: {str(e)}")
                        
                        if attempt == max_retries:
                            raise  # last attempt: propagate to the fallback path
                        
                        # Exponential backoff with jitter.
                        delay = base_delay * (2 ** attempt) + random.uniform(0, 1)
                        logger.info(f"等待 {delay:.2f} 秒后重试...")
                        time.sleep(delay)
                        
            except Exception as e:
                logger.error(f"调用API生成总结失败: {str(e)}", exc_info=True)
                # Fall back to the canned template summary.
                logger.info("使用模板总结作为备用")
                return SummaryGenerator._generate_template_summary(meeting_topic, len(meeting_history))
            
        except Exception as e:
            logger.error(f"生成会议总结失败: {str(e)}", exc_info=True)
            return f"[生成会议总结失败: {str(e)}]"
    
    @staticmethod
    def _generate_template_summary(meeting_topic: str, message_count: int) -> str:
        """Return a canned fallback summary (used when the API call fails).

        The body is user-facing output (Chinese), not a comment, so it is
        kept verbatim.
        """
        return f"""
# 关于"{meeting_topic}"的讨论总结

## 主要主题和观点
- 参与者讨论了{meeting_topic}的各个方面
- 提出了多种观点和见解

## 达成的共识
- 参与者在某些关键点上达成了一致
- 认同了一些基本原则

## 存在的分歧
- 在某些具体实施方法上存在不同意见
- 对某些问题的优先级有不同看法

## 解决方案和建议
- 提出了几种可能的解决方案
- 建议进一步研究和讨论

## 需要进一步讨论的问题
- 一些技术细节需要更深入的探讨
- 某些方案的可行性需要进一步评估

这个总结是基于{message_count}条消息生成的。
"""
    
    @staticmethod
    async def generate_summary_stream(meeting_topic: str, meeting_history: List[Dict[str, Any]], 
                                prompt_template: str, model_name: str = None, api_key: str = None, api_base_url: str = None,
                                model_params: Dict[str, Any] = None):
        """
        Stream a meeting summary, yielding content incrementally.

        Args:
            meeting_topic: meeting topic
            meeting_history: meeting history entries
            prompt_template: prompt template
            model_name: model name; falls back to the default model when None
            api_key: API key
            api_base_url: API base URL
            model_params: dict of extra model configuration parameters

        Yields:
            str: summary fragments as they arrive (falls back to streaming a
            canned summary, character by character, on API failure)
        """
        try:
            logger.info(f"开始流式生成'{meeting_topic}'的会议总结: 历史消息数={len(meeting_history)}")
            
            # Build the transcript text from the history entries.
            history_text = ""
            for entry in meeting_history:
                if entry["agent"] != "system":  # skip system messages
                    history_text += f"[{entry['agent']}]: {entry['content']}\n\n"
            
            # Fill in the prompt template (same placeholder aliases as
            # generate_summary).
            summary_prompt = prompt_template.format(
                topic=meeting_topic,
                meeting_topic=meeting_topic,
                history=history_text,
                history_text=history_text
            )
            
            # Normalize model params.
            if not model_params:
                model_params = {}
            
            # Baseline parameters that are always present.
            default_params = {
                "temperature": 0.3,
                "max_tokens": 8096,
                "top_p": 1.0,
                "frequency_penalty": 0.0,
                "presence_penalty": 0.0
            }
            
            # Build the request body, starting from the defaults.
            payload = default_params.copy()
            
            # Add the required message/stream fields.
            payload.update({
                "messages": [{"role": "user", "content": summary_prompt}],
                "stream": True,  # streaming output is mandatory here
            })
            
            # Overlay caller-supplied model params (custom per-model settings),
            # except the fields fixed above.
            for param, value in model_params.items():
                # Skip the keys that must not be overridden.
                if param not in ["messages", "stream"]:
                    payload[param] = value
            
            # The explicit model_name argument has the highest priority.
            if model_name:
                payload["model"] = model_name
            elif "model" not in payload:
                # Neither model_name nor model_params provided a model: use the default.
                payload["model"] = os.environ.get("OPENAI_MODEL_NAME", "gpt-3.5-turbo")
            
            # logger.info(f"使用模型流式生成总结: model_name={payload.get('model', '默认模型')}, 参数={payload}")
            
            # Warn (but continue) when the API URL or key is missing.
            if not api_base_url:
                logger.warning("API基础URL为空,可能影响总结生成")
            if not api_key:
                logger.warning("API密钥为空,可能影响总结生成")
            
            try:
                # Prepare the HTTP request.
                headers = {
                    "Content-Type": "application/json"
                }
                
                # Attach the API key as a bearer token.
                if api_key:
                    headers["Authorization"] = f"Bearer {api_key}"
                    logger.info("使用提供的API密钥")
                
                # Full request payload for debugging (may be disabled in production).
                logger.debug(f"API请求参数: {payload}")
                
                import aiohttp
                async with aiohttp.ClientSession() as session:
                    # NOTE(review): assumes api_base_url has no trailing slash
                    # and no /v1 suffix — confirm against caller configuration.
                    async with session.post(
                        f"{api_base_url}/v1/chat/completions" if api_base_url else "https://api.openai.com/v1/chat/completions",
                        headers=headers,
                        json=payload
                    ) as response:
                        # Non-200: log and stream the canned fallback summary.
                        if response.status != 200:
                            error_text = await response.text()
                            logger.error(f"API调用失败: {response.status} - {error_text}")
                            # Stream the backup summary character by character.
                            backup_summary = SummaryGenerator._generate_template_summary(meeting_topic, len(meeting_history))
                            for char in backup_summary:
                                yield char
                                await asyncio.sleep(0.01)
                            return
                        
                        # Parse the SSE stream line by line.
                        logger.info("开始接收流式总结内容")
                        accumulated_text = ""
                        async for line in response.content:
                            line = line.decode('utf-8').strip()
                            
                            # Skip keep-alive blank lines.
                            if not line:
                                continue
                            
                            # SSE data lines carry the JSON chunk.
                            if line.startswith("data: "):
                                data = line[6:].strip()
                                
                                # The [DONE] sentinel terminates the stream.
                                if data == "[DONE]":
                                    break
                                
                                try:
                                    json_data = json.loads(data)
                                    choices = json_data.get("choices", [])
                                    
                                    if choices and len(choices) > 0:
                                        delta = choices[0].get("delta", {})
                                        content = delta.get("content", "")
                                        
                                        if content:
                                            # Accumulate for logging while yielding each delta.
                                            accumulated_text += content
                                            yield content
                                except json.JSONDecodeError:
                                    continue
                        
                        logger.info(f"流式总结生成完成: 总长度={len(accumulated_text)}")
            
            except Exception as e:
                logger.error(f"流式总结生成错误: {str(e)}", exc_info=True)
                # On any streaming error, fall back to the canned summary.
                backup_summary = SummaryGenerator._generate_template_summary(meeting_topic, len(meeting_history))
                logger.info(f"使用备用总结: 长度={len(backup_summary)}")
                
                # Simulate streaming of the backup summary.
                for char in backup_summary:
                    yield char
                    await asyncio.sleep(0.01)
                
        except Exception as e:
            logger.error(f"流式总结生成过程中出现严重错误: {str(e)}", exc_info=True)
            error_msg = f"[生成会议总结失败: {str(e)}]"
            for char in error_msg:
                yield char
                await asyncio.sleep(0.01)
--------------------------------------------------------------------------------
/app/static/login.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | DeepGemini - Login
8 |
9 |
10 |
11 |
307 |
308 |
309 |
310 |
311 |
312 |
313 |
314 |
315 |
DeepGemini
316 |
Enter your credentials to access your account
317 |
318 |
319 |
320 |
321 | Invalid credentials
322 |
323 |
324 |
356 |
357 |
358 |
361 |
362 |
363 |
364 |
365 |
366 |
--------------------------------------------------------------------------------