├── SimpleLLMFunc ├── py.typed ├── llm_decorator │ ├── steps │ │ ├── __init__.py │ │ ├── common │ │ │ ├── __init__.py │ │ │ ├── types.py │ │ │ ├── prompt.py │ │ │ ├── log_context.py │ │ │ └── signature.py │ │ ├── chat │ │ │ ├── __init__.py │ │ │ ├── response.py │ │ │ ├── react.py │ │ │ └── message.py │ │ └── function │ │ │ ├── __init__.py │ │ │ ├── response.py │ │ │ └── react.py │ ├── utils │ │ ├── __init__.py │ │ └── tools.py │ ├── __init__.py │ └── multimodal_types.py ├── tool │ └── __init__.py ├── type │ ├── decorator.py │ ├── __init__.py │ ├── message.py │ └── multimodal.py ├── base │ ├── type_resolve │ │ ├── example.py │ │ ├── __init__.py │ │ ├── multimodal.py │ │ └── description.py │ ├── __init__.py │ ├── messages │ │ ├── __init__.py │ │ ├── extraction.py │ │ └── assistant.py │ └── tool_call │ │ ├── __init__.py │ │ └── validation.py ├── interface │ ├── __init__.py │ ├── llm_interface.py │ └── key_pool.py ├── logger │ ├── logger_config.py │ ├── types.py │ ├── logger.py │ ├── __init__.py │ ├── utils.py │ ├── context_manager.py │ └── formatters.py ├── __init__.py ├── utils.py ├── observability │ ├── __init__.py │ ├── langfuse_config.py │ └── langfuse_client.py └── config.py ├── tests ├── __init__.py ├── test_base │ ├── __init__.py │ ├── test_messages │ │ ├── __init__.py │ │ ├── test_assistant.py │ │ └── test_extraction.py │ ├── test_tool_call │ │ ├── __init__.py │ │ └── test_validation.py │ └── test_type_resolve │ │ ├── __init__.py │ │ ├── test_example.py │ │ └── test_multimodal.py └── test_llm_decorator_steps │ ├── __init__.py │ ├── test_chat │ ├── __init__.py │ ├── test_react.py │ ├── test_response.py │ └── test_message.py │ ├── test_common │ ├── __init__.py │ ├── test_types.py │ ├── test_log_context.py │ └── test_prompt.py │ └── test_function │ ├── __init__.py │ ├── test_response.py │ ├── test_react.py │ └── test_prompt.py ├── docs ├── requirements.txt ├── source │ ├── index.md │ ├── locale │ │ ├── zh_CN │ │ │ └── LC_MESSAGES │ │ │ │ ├── index.po │ │ │ │ ├── 
contributing.po │ │ │ │ └── detailed_guide │ │ │ │ ├── llm_chat.po │ │ │ │ └── config.po │ │ └── en │ │ │ └── LC_MESSAGES │ │ │ └── index.po │ ├── contributing.md │ ├── guide.md │ ├── detailed_guide │ │ └── config.md │ ├── examples.md │ └── langfuse_integration.md ├── make.bat ├── conf.py └── Makefile ├── img ├── repocover.png └── repocover_new.png ├── examples ├── repocover_new.png ├── batch_translate.sh ├── provider_template.json ├── multi_modality_toolcall.py ├── llm_function_pydantic_example.py ├── dynamic_template_demo.py └── llm_chat_raw_tooluse_example.py ├── env_template ├── .readthedocs.yaml ├── LICENSE ├── pyproject.toml ├── .gitignore └── CHANGELOG.md /SimpleLLMFunc/py.typed: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | """Tests for SimpleLLMFunc package.""" 2 | 3 | -------------------------------------------------------------------------------- /tests/test_base/__init__.py: -------------------------------------------------------------------------------- 1 | """Tests for base module.""" 2 | 3 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx 2 | myst-parser 3 | sphinx-rtd-theme 4 | sphinx-intl 5 | -------------------------------------------------------------------------------- /tests/test_base/test_messages/__init__.py: -------------------------------------------------------------------------------- 1 | """Tests for base.messages module.""" 2 | 3 | -------------------------------------------------------------------------------- /SimpleLLMFunc/llm_decorator/steps/__init__.py: -------------------------------------------------------------------------------- 1 | """Steps module for LLM 
decorators.""" 2 | 3 | -------------------------------------------------------------------------------- /img/repocover.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NiJingzhe/SimpleLLMFunc/HEAD/img/repocover.png -------------------------------------------------------------------------------- /tests/test_base/test_tool_call/__init__.py: -------------------------------------------------------------------------------- 1 | """Tests for base.tool_call module.""" 2 | 3 | -------------------------------------------------------------------------------- /tests/test_base/test_type_resolve/__init__.py: -------------------------------------------------------------------------------- 1 | """Tests for base.type_resolve module.""" 2 | 3 | -------------------------------------------------------------------------------- /tests/test_llm_decorator_steps/__init__.py: -------------------------------------------------------------------------------- 1 | """Tests for llm_decorator.steps module.""" 2 | 3 | -------------------------------------------------------------------------------- /img/repocover_new.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NiJingzhe/SimpleLLMFunc/HEAD/img/repocover_new.png -------------------------------------------------------------------------------- /examples/repocover_new.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NiJingzhe/SimpleLLMFunc/HEAD/examples/repocover_new.png -------------------------------------------------------------------------------- /tests/test_llm_decorator_steps/test_chat/__init__.py: -------------------------------------------------------------------------------- 1 | """Tests for llm_decorator.steps.chat module.""" 2 | 3 | -------------------------------------------------------------------------------- 
/SimpleLLMFunc/tool/__init__.py: -------------------------------------------------------------------------------- 1 | from .tool import Tool, tool 2 | 3 | __all__ = [ 4 | "Tool", 5 | "tool" 6 | ] -------------------------------------------------------------------------------- /tests/test_llm_decorator_steps/test_common/__init__.py: -------------------------------------------------------------------------------- 1 | """Tests for llm_decorator.steps.common module.""" 2 | 3 | -------------------------------------------------------------------------------- /tests/test_llm_decorator_steps/test_function/__init__.py: -------------------------------------------------------------------------------- 1 | """Tests for llm_decorator.steps.function module.""" 2 | 3 | -------------------------------------------------------------------------------- /SimpleLLMFunc/llm_decorator/utils/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | LLM 装饰器工具模块 3 | """ 4 | 5 | from .tools import process_tools 6 | 7 | __all__ = ["process_tools"] 8 | -------------------------------------------------------------------------------- /env_template: -------------------------------------------------------------------------------- 1 | LOG_LEVEL=WARNING 2 | LOG_DIR=./logs 3 | LANGFUSE_BASE_URL=http://your.langfuse.url 4 | LANGFUSE_SECRET_KEY=your_secret_key 5 | LANGFUSE_PUBLIC_KEY=your_public_key 6 | -------------------------------------------------------------------------------- /SimpleLLMFunc/type/decorator.py: -------------------------------------------------------------------------------- 1 | """Decorator-related type definitions.""" 2 | 3 | from typing import Any, Dict, List 4 | 5 | # Type alias for chat history 6 | HistoryList = List[Dict[str, Any]] 7 | 8 | -------------------------------------------------------------------------------- /SimpleLLMFunc/base/type_resolve/example.py: 
-------------------------------------------------------------------------------- 1 | """Example object generation helpers.""" 2 | 3 | from __future__ import annotations 4 | 5 | # Placeholder for future implementation 6 | # These functions will be implemented when needed 7 | 8 | -------------------------------------------------------------------------------- /SimpleLLMFunc/base/__init__.py: -------------------------------------------------------------------------------- 1 | """Baseline modules for SimpleLLMFunc internals.""" 2 | 3 | from . import ReAct, messages, post_process, tool_call, type_resolve 4 | 5 | __all__ = [ 6 | "ReAct", 7 | "messages", 8 | "post_process", 9 | "tool_call", 10 | "type_resolve", 11 | ] 12 | -------------------------------------------------------------------------------- /SimpleLLMFunc/llm_decorator/__init__.py: -------------------------------------------------------------------------------- 1 | from SimpleLLMFunc.llm_decorator.llm_function_decorator import llm_function, async_llm_function 2 | from SimpleLLMFunc.llm_decorator.llm_chat_decorator import llm_chat, async_llm_chat 3 | 4 | __all__ = [ 5 | "llm_function", 6 | "async_llm_function", 7 | "llm_chat", 8 | "async_llm_chat", 9 | ] -------------------------------------------------------------------------------- /docs/source/index.md: -------------------------------------------------------------------------------- 1 | # SimpleLLMFunc documentation 2 | 3 | SimpleLLMFunc 是一个轻量级、可配置的 LLM 应用开发框架。 4 | 5 | ```{toctree} 6 | :maxdepth: 1 7 | :caption: 目录: 8 | 9 | 项目介绍 10 | 快速开始 11 | 使用指南 12 | 示例代码 13 | Langfuse集成 14 | 贡献指南 15 | ``` 16 | 17 | 18 | -------------------------------------------------------------------------------- /SimpleLLMFunc/llm_decorator/steps/common/__init__.py: -------------------------------------------------------------------------------- 1 | """Common steps shared by llm_function and llm_chat decorators.""" 2 | 3 | from SimpleLLMFunc.llm_decorator.steps.common.log_context 
import setup_log_context 4 | from SimpleLLMFunc.llm_decorator.steps.common.signature import parse_function_signature 5 | 6 | __all__ = [ 7 | "parse_function_signature", 8 | "setup_log_context", 9 | ] 10 | 11 | -------------------------------------------------------------------------------- /SimpleLLMFunc/interface/__init__.py: -------------------------------------------------------------------------------- 1 | from SimpleLLMFunc.interface.key_pool import APIKeyPool 2 | from SimpleLLMFunc.interface.openai_compatible import OpenAICompatible 3 | from SimpleLLMFunc.interface.token_bucket import TokenBucket, RateLimitManager, rate_limit_manager 4 | 5 | __all__ = [ 6 | "APIKeyPool", 7 | "OpenAICompatible", 8 | "TokenBucket", 9 | "RateLimitManager", 10 | "rate_limit_manager", 11 | ] 12 | -------------------------------------------------------------------------------- /SimpleLLMFunc/llm_decorator/steps/common/types.py: -------------------------------------------------------------------------------- 1 | """Internal type definitions for decorator steps.""" 2 | 3 | from typing import Any, Dict, NamedTuple 4 | import inspect 5 | 6 | 7 | class FunctionSignature(NamedTuple): 8 | """函数签名信息(内部使用)""" 9 | 10 | func_name: str 11 | trace_id: str 12 | bound_args: inspect.BoundArguments 13 | signature: inspect.Signature 14 | type_hints: Dict[str, Any] 15 | return_type: Any 16 | docstring: str 17 | 18 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | build: 4 | os: ubuntu-22.04 5 | tools: 6 | python: "3.11" 7 | commands: 8 | - pip install -r docs/requirements.txt 9 | - cd docs && sphinx-build -b html -c . 
source $READTHEDOCS_OUTPUT/html 10 | 11 | sphinx: 12 | configuration: docs/conf.py 13 | builder: html 14 | fail_on_warning: false 15 | 16 | # 多语言支持 17 | formats: 18 | - htmlzip 19 | 20 | # 语言配置 21 | python: 22 | install: 23 | - requirements: docs/requirements.txt -------------------------------------------------------------------------------- /SimpleLLMFunc/llm_decorator/steps/chat/__init__.py: -------------------------------------------------------------------------------- 1 | """Steps specific to llm_chat decorator.""" 2 | 3 | from SimpleLLMFunc.llm_decorator.steps.chat.message import build_chat_messages 4 | from SimpleLLMFunc.llm_decorator.steps.chat.react import execute_react_loop_streaming 5 | from SimpleLLMFunc.llm_decorator.steps.chat.response import process_chat_response_stream 6 | 7 | __all__ = [ 8 | "build_chat_messages", 9 | "execute_react_loop_streaming", 10 | "process_chat_response_stream", 11 | ] 12 | 13 | -------------------------------------------------------------------------------- /SimpleLLMFunc/llm_decorator/steps/function/__init__.py: -------------------------------------------------------------------------------- 1 | """Steps specific to llm_function decorator.""" 2 | 3 | from SimpleLLMFunc.llm_decorator.steps.function.prompt import build_initial_prompts 4 | from SimpleLLMFunc.llm_decorator.steps.function.react import execute_react_loop 5 | from SimpleLLMFunc.llm_decorator.steps.function.response import ( 6 | parse_and_validate_response, 7 | ) 8 | 9 | __all__ = [ 10 | "build_initial_prompts", 11 | "execute_react_loop", 12 | "parse_and_validate_response", 13 | ] 14 | 15 | -------------------------------------------------------------------------------- /SimpleLLMFunc/type/__init__.py: -------------------------------------------------------------------------------- 1 | # 多模态类型 2 | from SimpleLLMFunc.type.multimodal import ImgPath, ImgUrl, Text 3 | 4 | # 接口类型 5 | from SimpleLLMFunc.interface.llm_interface import LLM_Interface 6 | 7 | # 装饰器相关类型 8 | 
from SimpleLLMFunc.type.decorator import HistoryList 9 | 10 | # 消息类型 11 | from SimpleLLMFunc.type.message import MessageList, MessageParam 12 | 13 | __all__ = [ 14 | "Text", 15 | "ImgUrl", 16 | "ImgPath", 17 | "LLM_Interface", 18 | "HistoryList", 19 | "MessageParam", 20 | "MessageList", 21 | ] -------------------------------------------------------------------------------- /SimpleLLMFunc/llm_decorator/multimodal_types.py: -------------------------------------------------------------------------------- 1 | """ 2 | 多模态内容类型定义(向后兼容性重新导出) 3 | 4 | 本模块已移动到 SimpleLLMFunc.type.multimodal,此处仅为向后兼容性重新导出。 5 | 建议使用新的导入路径: 6 | from SimpleLLMFunc.type import Text, ImgUrl, ImgPath 7 | """ 8 | 9 | # 向后兼容性重新导出 10 | from SimpleLLMFunc.type.multimodal import ( 11 | ImgPath, 12 | ImgUrl, 13 | MultimodalContent, 14 | MultimodalList, 15 | Text, 16 | ) 17 | 18 | __all__ = [ 19 | "Text", 20 | "ImgUrl", 21 | "ImgPath", 22 | "MultimodalContent", 23 | "MultimodalList", 24 | ] 25 | -------------------------------------------------------------------------------- /SimpleLLMFunc/logger/logger_config.py: -------------------------------------------------------------------------------- 1 | from pydantic_settings import BaseSettings, SettingsConfigDict 2 | from functools import lru_cache 3 | 4 | 5 | class LoggerConfig(BaseSettings): 6 | model_config = SettingsConfigDict( 7 | env_file=".env", 8 | env_file_encoding="utf-8", 9 | extra="ignore", 10 | ) 11 | 12 | LOG_LEVEL: str = "DEBUG" 13 | LOG_DIR: str = "logs" 14 | 15 | 16 | @lru_cache 17 | def get_logger_config() -> LoggerConfig: 18 | return LoggerConfig() 19 | 20 | 21 | # 全局配置实例 22 | logger_config = get_logger_config() 23 | -------------------------------------------------------------------------------- /SimpleLLMFunc/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | @File : __init__.py 3 | @Time : 2025/08/03 02:19:19 4 | @Author : Jingzhe Ni 5 | @Contact : nijingzhe@zju.edu.cn 6 | @License : 
(C)Copyright 2025, Jingzhe Ni 7 | @Desc : Init for SimpleLLMFunc 8 | """ 9 | 10 | from rich import traceback 11 | traceback.install(show_locals=True) 12 | 13 | from SimpleLLMFunc.config import * 14 | from SimpleLLMFunc.llm_decorator import * 15 | from SimpleLLMFunc.logger import * 16 | from SimpleLLMFunc.tool import * 17 | from SimpleLLMFunc.interface import * 18 | from SimpleLLMFunc.observability import * -------------------------------------------------------------------------------- /SimpleLLMFunc/logger/types.py: -------------------------------------------------------------------------------- 1 | """ 2 | 日志系统类型定义 3 | 4 | 本模块定义了日志系统使用到的所有类型和枚举。 5 | """ 6 | 7 | from enum import Enum, auto 8 | 9 | 10 | class LogLevel(Enum): 11 | """ 12 | 日志级别枚举 13 | 14 | 定义了标准的日志级别,用于控制日志的详细程度。 15 | 16 | Attributes: 17 | DEBUG: 调试级别,用于开发和调试时的详细信息 18 | INFO: 信息级别,用于记录正常运行时的关键信息 19 | WARNING: 警告级别,用于记录可能出现问题的情况 20 | ERROR: 错误级别,用于记录运行时错误 21 | CRITICAL: 严重错误级别,用于记录严重影响系统运行的错误 22 | """ 23 | 24 | DEBUG = auto() 25 | INFO = auto() 26 | WARNING = auto() 27 | ERROR = auto() 28 | CRITICAL = auto() 29 | -------------------------------------------------------------------------------- /tests/test_base/test_type_resolve/test_example.py: -------------------------------------------------------------------------------- 1 | """Tests for base.type_resolve.example module.""" 2 | 3 | from __future__ import annotations 4 | 5 | # Note: The example.py module is currently empty (placeholder for future implementation) 6 | # This test file is created for consistency and future tests 7 | 8 | 9 | class TestExampleModule: 10 | """Placeholder tests for example module.""" 11 | 12 | def test_module_exists(self) -> None: 13 | """Test that the module can be imported.""" 14 | from SimpleLLMFunc.base.type_resolve import example # noqa: F401 15 | 16 | assert True # Module imported successfully 17 | 18 | -------------------------------------------------------------------------------- /SimpleLLMFunc/utils.py: 
-------------------------------------------------------------------------------- 1 | """这个文件中包含各种在整个项目中被广泛使用的工具函数 2 | """ 3 | from typing import Generator, TypeVar, AsyncGenerator 4 | 5 | T = TypeVar("T") 6 | 7 | def get_last_item_of_generator(generator: Generator[T, None, None]) -> T | None: 8 | """ 9 | 获取生成器的最后一个元素 10 | """ 11 | last_item = None 12 | for item in generator: 13 | last_item = item 14 | return last_item 15 | 16 | async def get_last_item_of_async_generator(generator: AsyncGenerator[T, None]) -> T | None: 17 | """ 18 | 获取异步生成器的最后一个元素 19 | """ 20 | last_item = None 21 | async for item in generator: 22 | last_item = item 23 | return last_item -------------------------------------------------------------------------------- /SimpleLLMFunc/base/type_resolve/__init__.py: -------------------------------------------------------------------------------- 1 | """Type resolution helpers for LLM decorators.""" 2 | 3 | from SimpleLLMFunc.base.type_resolve.description import ( 4 | build_type_description_xml, 5 | describe_pydantic_model, 6 | generate_example_xml, 7 | get_detailed_type_description, 8 | ) 9 | from SimpleLLMFunc.base.type_resolve.multimodal import ( 10 | has_multimodal_content, 11 | is_multimodal_type, 12 | ) 13 | 14 | __all__ = [ 15 | "get_detailed_type_description", 16 | "has_multimodal_content", 17 | "is_multimodal_type", 18 | "describe_pydantic_model", 19 | "build_type_description_xml", 20 | "generate_example_xml", 21 | ] 22 | 23 | -------------------------------------------------------------------------------- /SimpleLLMFunc/observability/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Observability module for SimpleLLMFunc framework. 3 | 4 | This module provides integration with observability platforms like Langfuse 5 | to track LLM generations, tool calls, and overall function execution. 
6 | """ 7 | 8 | from .langfuse_client import ( 9 | get_langfuse_client, 10 | langfuse_client, 11 | flush_all_observations, 12 | ) 13 | 14 | from .langfuse_config import ( 15 | get_langfuse_config, 16 | langfuse_config, 17 | ) 18 | 19 | 20 | __all__ = [ 21 | "get_langfuse_client", 22 | "langfuse_client", 23 | "get_langfuse_config", 24 | "langfuse_config", 25 | "flush_all_observations", 26 | ] -------------------------------------------------------------------------------- /SimpleLLMFunc/observability/langfuse_config.py: -------------------------------------------------------------------------------- 1 | from pydantic_settings import BaseSettings, SettingsConfigDict 2 | from functools import lru_cache 3 | 4 | class LangfuseConfig(BaseSettings): 5 | model_config = SettingsConfigDict( 6 | env_file=".env", 7 | env_file_encoding="utf-8", 8 | extra="ignore", 9 | ) 10 | 11 | LANGFUSE_PUBLIC_KEY: str = "" 12 | LANGFUSE_SECRET_KEY: str = "" 13 | LANGFUSE_BASE_URL: str = "https://cloud.langfuse.com" 14 | LANGFUSE_ENABLED: bool = True 15 | 16 | 17 | @lru_cache 18 | def get_langfuse_config() -> LangfuseConfig: 19 | return LangfuseConfig() 20 | 21 | 22 | # 全局配置实例 23 | langfuse_config = get_langfuse_config() 24 | -------------------------------------------------------------------------------- /SimpleLLMFunc/observability/langfuse_client.py: -------------------------------------------------------------------------------- 1 | from langfuse import Langfuse 2 | from SimpleLLMFunc.observability.langfuse_config import langfuse_config 3 | from functools import lru_cache 4 | 5 | 6 | @lru_cache 7 | def get_langfuse_client() -> Langfuse: 8 | 9 | return Langfuse( 10 | public_key=langfuse_config.LANGFUSE_PUBLIC_KEY, 11 | secret_key=langfuse_config.LANGFUSE_SECRET_KEY, 12 | host=langfuse_config.LANGFUSE_BASE_URL, 13 | ) 14 | 15 | # 全局配置实例 16 | langfuse_client = get_langfuse_client() 17 | 18 | def flush_all_observations() -> None: 19 | langfuse_client.flush() 20 | 21 | 22 | __all__ = [ 23 | 
"langfuse_client", 24 | "flush_all_observations", 25 | ] -------------------------------------------------------------------------------- /examples/batch_translate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source /Users/lildino/Library/Caches/pypoetry/virtualenvs/simplellmfunc-v9Mcp8Tm-py3.13/bin/activate 3 | 4 | # 根目录文件 5 | for file in ../docs/source/locale/zh/LC_MESSAGES/*.po; do 6 | basename=$(basename "$file") 7 | echo "Translating $basename..." 8 | python translate_po.py "$file" -o "../docs/source/locale/en/LC_MESSAGES/$basename" -b 20 -c 100 9 | done 10 | 11 | # detailed_guide子目录文件 12 | for file in ../docs/source/locale/zh/LC_MESSAGES/detailed_guide/*.po; do 13 | basename=$(basename "$file") 14 | echo "Translating detailed_guide/$basename..." 15 | python translate_po.py "$file" -o "../docs/source/locale/en/LC_MESSAGES/detailed_guide/$basename" -b 20 -c 100 16 | done 17 | 18 | echo "All translations completed!" 19 | -------------------------------------------------------------------------------- /SimpleLLMFunc/config.py: -------------------------------------------------------------------------------- 1 | """ 2 | @File : config.py 3 | @Time : 2025/08/03 02:19:19 4 | @Author : Jingzhe Ni 5 | @Contact : nijingzhe@zju.edu.cn 6 | @License : (C)Copyright 2025, Jingzhe Ni 7 | @Desc : Config for SimpleLLMFunc 8 | """ 9 | from functools import lru_cache 10 | from pydantic_settings import BaseSettings, SettingsConfigDict 11 | 12 | 13 | class Settings(BaseSettings): 14 | """ 15 | Config class for SimpleLLMFunc 16 | """ 17 | 18 | model_config = SettingsConfigDict( 19 | env_file=".env", 20 | env_file_encoding="utf-8", 21 | extra="ignore", 22 | ) 23 | 24 | 25 | @lru_cache 26 | def get_settings() -> Settings: 27 | """ 28 | Get settings from .env file 29 | """ 30 | return Settings() 31 | 32 | 33 | global_settings = get_settings() 34 | 35 | __all__ = [ 36 | "global_settings", 37 | ] 38 | 
-------------------------------------------------------------------------------- /SimpleLLMFunc/base/messages/__init__.py: -------------------------------------------------------------------------------- 1 | """Helpers for constructing structured assistant messages.""" 2 | 3 | from SimpleLLMFunc.base.messages.assistant import ( 4 | build_assistant_response_message, 5 | build_assistant_tool_message, 6 | ) 7 | from SimpleLLMFunc.base.messages.extraction import extract_usage_from_response 8 | from SimpleLLMFunc.base.messages.multimodal import ( 9 | build_multimodal_content, 10 | create_image_path_content, 11 | create_image_url_content, 12 | create_text_content, 13 | parse_multimodal_parameter, 14 | ) 15 | 16 | __all__ = [ 17 | "build_assistant_tool_message", 18 | "build_assistant_response_message", 19 | "extract_usage_from_response", 20 | "build_multimodal_content", 21 | "parse_multimodal_parameter", 22 | "create_text_content", 23 | "create_image_url_content", 24 | "create_image_path_content", 25 | ] 26 | 27 | -------------------------------------------------------------------------------- /SimpleLLMFunc/llm_decorator/steps/function/response.py: -------------------------------------------------------------------------------- 1 | """Step 5: Parse and validate response for llm_function.""" 2 | 3 | from __future__ import annotations 4 | 5 | from typing import Any 6 | 7 | from SimpleLLMFunc.base.post_process import process_response 8 | 9 | 10 | def extract_response_content(response: Any, func_name: str) -> str: 11 | """从响应对象中提取文本内容""" 12 | from SimpleLLMFunc.base.post_process import extract_content_from_response 13 | 14 | return extract_content_from_response(response, func_name) 15 | 16 | 17 | def parse_response_to_type(response: Any, return_type: Any) -> Any: 18 | """将响应解析为目标返回类型""" 19 | return process_response(response, return_type) 20 | 21 | 22 | def parse_and_validate_response( 23 | response: Any, 24 | return_type: Any, 25 | func_name: str, 26 | ) -> Any: 27 | 
"""解析和验证响应的完整流程""" 28 | return parse_response_to_type(response, return_type) 29 | 30 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | 13 | %SPHINXBUILD% >NUL 2>NUL 14 | if errorlevel 9009 ( 15 | echo. 16 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 17 | echo.installed, then set the SPHINXBUILD environment variable to point 18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 19 | echo.may add the Sphinx directory to PATH. 20 | echo. 21 | echo.If you don't have Sphinx installed, grab it from 22 | echo.https://www.sphinx-doc.org/ 23 | exit /b 1 24 | ) 25 | 26 | if "%1" == "" goto help 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Ni Jingzhe 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions 
of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /SimpleLLMFunc/base/tool_call/__init__.py: -------------------------------------------------------------------------------- 1 | """Tool call extraction and execution helpers.""" 2 | 3 | from SimpleLLMFunc.base.tool_call.execution import ( 4 | _execute_single_tool_call, 5 | process_tool_calls, 6 | ) 7 | from SimpleLLMFunc.base.tool_call.extraction import ( 8 | AccumulatedToolCall, 9 | ReasoningDetail, 10 | ToolCallFunctionInfo, 11 | accumulate_tool_calls_from_chunks, 12 | extract_reasoning_details, 13 | extract_reasoning_details_from_stream, 14 | extract_tool_calls, 15 | extract_tool_calls_from_stream_response, 16 | ) 17 | from SimpleLLMFunc.base.tool_call.validation import ( 18 | is_valid_tool_result, 19 | serialize_tool_output_for_langfuse, 20 | ) 21 | 22 | __all__ = [ 23 | "serialize_tool_output_for_langfuse", 24 | "is_valid_tool_result", 25 | "process_tool_calls", 26 | "extract_tool_calls", 27 | "accumulate_tool_calls_from_chunks", 28 | "extract_tool_calls_from_stream_response", 29 | "extract_reasoning_details", 30 | "extract_reasoning_details_from_stream", 31 | "ToolCallFunctionInfo", 32 | "AccumulatedToolCall", 33 | "ReasoningDetail", 34 | ] 35 | 36 | -------------------------------------------------------------------------------- /SimpleLLMFunc/base/messages/extraction.py: -------------------------------------------------------------------------------- 1 | 
"""Response information extraction helpers.""" 2 | 3 | from __future__ import annotations 4 | 5 | from typing import Dict, Optional, Union 6 | 7 | from openai.types.chat.chat_completion import ChatCompletion 8 | from openai.types.chat.chat_completion_chunk import ChatCompletionChunk 9 | 10 | 11 | def extract_usage_from_response( 12 | response: Union[ChatCompletion, ChatCompletionChunk, None], 13 | ) -> Dict[str, int] | None: 14 | """从LLM响应中提取用量信息。 15 | 16 | Args: 17 | response: OpenAI API的ChatCompletion或ChatCompletionChunk响应对象 18 | 19 | Returns: 20 | 包含用量信息的字典 {"input": int, "output": int, "total": int}, 21 | 如果无法提取则返回None 22 | """ 23 | if response is None: 24 | return None 25 | 26 | try: 27 | if hasattr(response, "usage") and response.usage: 28 | return { 29 | "input": getattr(response.usage, "prompt_tokens", 0), 30 | "output": getattr(response.usage, "completion_tokens", 0), 31 | "total": getattr(response.usage, "total_tokens", 0), 32 | } 33 | except (AttributeError, TypeError): 34 | pass 35 | return None 36 | 37 | -------------------------------------------------------------------------------- /SimpleLLMFunc/logger/logger.py: -------------------------------------------------------------------------------- 1 | """Console-only logger facade for SimpleLLMFunc. 2 | 3 | This module re-exports the public logging API while keeping compatibility 4 | with historical import paths. 
5 | """ 6 | 7 | from __future__ import annotations 8 | 9 | from .core import ( 10 | setup_logger, 11 | get_logger, 12 | push_debug, 13 | push_info, 14 | push_warning, 15 | push_error, 16 | push_critical, 17 | app_log, 18 | ) 19 | from .context_manager import ( 20 | log_context, 21 | async_log_context, 22 | get_current_trace_id, 23 | get_current_context_attribute, 24 | set_current_context_attribute, 25 | ) 26 | from .types import LogLevel 27 | from .utils import get_location 28 | from .formatters import ConsoleFormatter 29 | 30 | __all__ = [ 31 | "setup_logger", 32 | "get_logger", 33 | "push_debug", 34 | "push_info", 35 | "push_warning", 36 | "push_error", 37 | "push_critical", 38 | "app_log", 39 | "log_context", 40 | "async_log_context", 41 | "get_current_trace_id", 42 | "get_current_context_attribute", 43 | "set_current_context_attribute", 44 | "LogLevel", 45 | "get_location", 46 | "ConsoleFormatter", 47 | ] 48 | -------------------------------------------------------------------------------- /SimpleLLMFunc/base/messages/assistant.py: -------------------------------------------------------------------------------- 1 | """Assistant message construction helpers.""" 2 | 3 | from __future__ import annotations 4 | 5 | from typing import Any, Dict, List, Optional 6 | 7 | 8 | def build_assistant_tool_message( 9 | tool_calls: List[Dict[str, Any]], 10 | reasoning_details: Optional[List[Dict[str, Any]]] = None, 11 | ) -> Dict[str, Any]: 12 | """Construct the assistant message containing tool call descriptors. 
def build_assistant_tool_message(
    tool_calls: List[Dict[str, Any]],
    reasoning_details: Optional[List[Dict[str, Any]]] = None,
) -> Dict[str, Any]:
    """Build the assistant message that carries tool-call descriptors.

    Args:
        tool_calls: Tool-call descriptors emitted by the model.
        reasoning_details: Optional reasoning payload (e.g. Google Gemini's
            reasoning_details).

    Returns:
        The assistant message dict, or an empty dict when there are no
        tool calls.
    """
    if not tool_calls:
        return {}

    message: Dict[str, Any] = {
        "role": "assistant",
        "content": None,
        "tool_calls": tool_calls,
    }
    if reasoning_details:
        message["reasoning_details"] = reasoning_details
    return message


def build_assistant_response_message(content: str) -> Dict[str, Any]:
    """Build a plain assistant text-response message."""
    return {"role": "assistant", "content": content}
def process_docstring_template(
    docstring: str, template_params: Optional[Dict[str, Any]]
) -> str:
    """Substitute template parameters into a docstring.

    Falls back to the original docstring when substitution fails,
    emitting a warning instead of raising.
    """
    if not template_params:
        return docstring

    try:
        return docstring.format(**template_params)
    except KeyError as exc:
        push_warning(
            f"DocString template parameter substitution failed: missing parameter {exc}. "
            "Using original DocString.",
            location=get_location(),
        )
    except Exception as exc:
        push_warning(
            f"Error during DocString template parameter substitution: {str(exc)}. "
            "Using original DocString.",
            location=get_location(),
        )
    return docstring


def extract_parameter_type_hints(type_hints: Dict[str, Any]) -> Dict[str, Any]:
    """Return the parameter type hints with the return annotation removed."""
    hints = dict(type_hints)
    hints.pop("return", None)
    return hints
def log_function_call(func_name: str, arguments: Dict[str, Any]) -> None:
    """Log the invocation of an async LLM function together with its arguments."""
    serialized = json.dumps(arguments, default=str, ensure_ascii=False, indent=4)
    app_log(
        f"Async LLM function '{func_name}' called with arguments: {serialized}",
        location=get_location(),
    )


def create_log_context_manager(
    func_name: str, trace_id: str
) -> AsyncContextManager[None]:
    """Create the async log-context manager for a single function call."""
    return async_log_context(
        trace_id=trace_id,
        function_name=func_name,
        input_tokens=0,
        output_tokens=0,
    )


def setup_log_context(
    func_name: str,
    trace_id: str,
    arguments: Dict[str, Any],
) -> AsyncContextManager[None]:
    """Full log-context setup: record the call, then hand back its context manager."""
    # 1. Record the function-call log entry.
    log_function_call(func_name, arguments)
    # 2. Build and return the async log-context manager.
    return create_log_context_manager(func_name, trace_id)
class LLM_Interface(ABC):
    """Abstract base class for chat-capable LLM providers.

    Concrete implementations wrap a specific chat-completion API. They are
    expected to maintain the cumulative token counters initialised in
    ``__init__`` and to honour the ``trace_id`` supplied per call.
    """

    @abstractmethod
    def __init__(
        self, api_key_pool: APIKeyPool, model_name: str, base_url: Optional[str] = None
    ):
        """Initialise provider state.

        Args:
            api_key_pool: Pool of API keys the implementation draws from.
            model_name: Name of the model served by this interface.
            base_url: Optional custom endpoint for the provider.
        """
        # Cumulative token counters; implementations update these after each call.
        self.input_token_count = 0
        self.output_token_count = 0
        self.model_name = model_name

    @abstractmethod
    async def chat(
        self,
        trace_id: Optional[str] = None,
        stream: Literal[False] = False,
        messages: Iterable[Dict[str, str]] = ({"role": "user", "content": ""},),
        timeout: Optional[int] = None,
        *args,
        **kwargs,
    ) -> ChatCompletion:
        """Perform a single, non-streaming chat completion.

        Args:
            trace_id: Trace id for log correlation. Defaults to None;
                implementations should fall back to get_current_trace_id()
                at call time. (A call expression in the signature would be
                evaluated once at class-definition time and freeze a stale
                id for every subsequent call.)
            stream: Always False for this method.
            messages: Conversation messages in OpenAI chat format. The
                default is an immutable tuple so it cannot be shared and
                mutated across calls.
            timeout: Optional request timeout in seconds.

        Returns:
            The complete ChatCompletion response.
        """
        pass

    @abstractmethod
    async def chat_stream(
        self,
        trace_id: Optional[str] = None,
        stream: Literal[True] = True,
        messages: Iterable[Dict[str, str]] = ({"role": "user", "content": ""},),
        timeout: Optional[int] = None,
        *args,
        **kwargs,
    ) -> AsyncGenerator[ChatCompletionChunk, None]:
        """Perform a streaming chat completion.

        Yields:
            ChatCompletionChunk objects as they arrive from the provider.
        """
        # The unreachable `yield` marks this abstract method as an async
        # generator so overriding implementations share its semantics.
        if False:
            yield ChatCompletionChunk(id="", created=0, model="", object="chat.completion.chunk", choices=[])
from __future__ import annotations

from typing import Any, Dict, List, Literal, NotRequired, TypeAlias, TypedDict

# Import the message type defined by the OpenAI SDK for type safety.
from openai.types.chat.chat_completion_message import ChatCompletionMessage

# Type definition for a single reasoning-detail entry.
class ReasoningDetail(TypedDict):
    """Typed structure of one reasoning detail (used by models such as Google Gemini)."""
    id: str
    format: str
    index: int
    type: Literal["reasoning.encrypted"]
    data: str

# Extended message parameter type covering every possible field.
class ExtendedMessageParam(TypedDict, total=False):
    """Extended message parameter type supporting extra fields such as reasoning_details.

    Contains all fields of ChatCompletionMessage and adds reasoning_details support.
    """
    # Base fields
    role: str
    content: str | List[Dict[str, Any]] | None

    # Standard OpenAI fields
    refusal: NotRequired[str | None]
    annotations: NotRequired[List[Dict[str, Any]] | None]
    audio: NotRequired[Dict[str, Any] | None]
    function_call: NotRequired[Dict[str, Any] | None]
    tool_calls: NotRequired[List[Dict[str, Any]] | None]

    # Extension fields (e.g. Google Gemini's reasoning_details)
    reasoning_details: NotRequired[List[ReasoningDetail]]

# Primary message type: either the SDK message object or the extended dict.
MessageParam: TypeAlias = ChatCompletionMessage | ExtendedMessageParam

# A list of messages.
MessageList: TypeAlias = List[MessageParam]
# Configured console log level name, normalised to upper case.
_log_level = logger_config.LOG_LEVEL.upper()


# Map configured level names to LogLevel enum members.
_log_level_map = {
    "DEBUG": LogLevel.DEBUG,
    "INFO": LogLevel.INFO,
    "WARNING": LogLevel.WARNING,
    "ERROR": LogLevel.ERROR,
    "CRITICAL": LogLevel.CRITICAL,
}

# Convert the string level to its enum value (falls back to INFO on unknown names).
console_level = _log_level_map.get(_log_level, LogLevel.INFO)

# Initialise the global singleton logger.
GLOBAL_LOGGER = setup_logger(
    console_level=console_level,
    use_color=True,
    logger_name="SimpleLLMFunc",
)

# Record that the logging system finished initialising.
push_info(f"全局日志系统初始化完成, 控制台日志级别: {_log_level}")

# Emit one startup test log per level to confirm the pipeline works.
push_debug("测试DEBUG级别日志")
app_log("测试INFO级别日志(app_log)")
push_info("测试INFO级别日志(push_info)")

__all__ = [
    "app_log",
    "push_warning",
    "push_error",
    "push_critical",
    "push_info",
    "push_debug",
    "get_location",
    "log_context",
    "async_log_context",
    "LogLevel",
    "get_logger",
    "setup_logger",
    "ConsoleFormatter",
    "get_current_trace_id",
    "get_current_context_attribute",
    "set_current_context_attribute",
]
6 | # 7 | #, fuzzy 8 | msgid "" 9 | msgstr "" 10 | "Project-Id-Version: SimpleLLMFunc\n" 11 | "Report-Msgid-Bugs-To: \n" 12 | "POT-Creation-Date: 2025-11-10 02:53+0800\n" 13 | "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" 14 | "Last-Translator: FULL NAME \n" 15 | "Language: en\n" 16 | "Language-Team: en \n" 17 | "Plural-Forms: nplurals=1; plural=0;\n" 18 | "MIME-Version: 1.0\n" 19 | "Content-Type: text/plain; charset=utf-8\n" 20 | "Content-Transfer-Encoding: 8bit\n" 21 | "Generated-By: Babel 2.17.0\n" 22 | 23 | #: ../../source/index.md:5 24 | msgid " 项目介绍" 25 | msgstr "Project Introduction" 26 | 27 | #: ../../source/index.md:5 28 | msgid " 快速开始" 29 | msgstr "Quick start" 30 | 31 | #: ../../source/index.md:5 32 | msgid " 使用指南" 33 | msgstr "User Guide" 34 | 35 | #: ../../source/index.md:5 36 | msgid " 示例代码" 37 | msgstr "Example code" 38 | 39 | #: ../../source/index.md:5 40 | msgid " Langfuse集成" 41 | msgstr "Langfuse Integration" 42 | 43 | #: ../../source/index.md:5 44 | msgid " 贡献指南" 45 | msgstr "Contribution Guide" 46 | 47 | #: ../../source/index.md:5 48 | msgid "目录:" 49 | msgstr "Directory:" 50 | 51 | #: ../../source/index.md:1 52 | msgid "SimpleLLMFunc documentation" 53 | msgstr "SimpleLLMFunc Documentation" 54 | 55 | #: ../../source/index.md:3 56 | msgid "SimpleLLMFunc 是一个轻量级、可配置的 LLM 应用开发框架。" 57 | msgstr "" 58 | "SimpleLLMFunc is a lightweight, configurable LLM application development " 59 | "framework." 
import asyncio
import os

from SimpleLLMFunc import OpenAICompatible, llm_function, tool
from SimpleLLMFunc.type import ImgPath

# Resolve provider.json relative to this script so it works from any CWD.
current_dir = os.path.dirname(os.path.abspath(__file__))
provider_json_path = os.path.join(current_dir, "provider.json")
gpt_4o = OpenAICompatible.load_from_json_file(provider_json_path)["dreamcatcher"]["gpt-4o"]


@tool(
    name="get_image",
    description="Get an image from local path",
)
async def get_image(image_path: str) -> tuple[str, ImgPath]:
    """Get an image from the local file system.

    Args:
        image_path: The path to the image file.

    Returns:
        tuple[str, ImgPath]: An analysis instruction for the model paired
        with the image, loaded at low detail.
    """
    # NOTE: this docstring feeds the tool schema shown to the model, so it
    # must only describe parameters that actually exist in the signature.
    return "仔细分析这张图的几何结构", ImgPath(image_path, detail="low")


@llm_function(  # type: ignore
    llm_interface=gpt_4o,
    toolkit=[get_image],
    timeout=600,
)
async def analyze_image(
    focus: str,
    image_path: str,
) -> str:  # type: ignore
    """Analyze an image and provide a description.

    Args:
        focus: The focus of the image analysis.
        image_path: The path to the local image file.

    Returns:
        str: A description of the image analysis result.
    """
    return ""


async def main() -> None:
    """Prompt for an image path and print the analysis result."""
    path = input("Enter the path to the image: ")
    result: str = await analyze_image(
        "Analyze the image for objects, provide the simplest description possible",
        path,
    )
    print(result)


if __name__ == "__main__":
    asyncio.run(main())
def has_multimodal_content(
    arguments: Dict[str, Any],
    type_hints: Dict[str, Any],
    exclude_params: Optional[List[str]] = None,
) -> bool:
    """Check whether arguments contain multimodal payloads.

    Args:
        arguments: Mapping of parameter name to bound value.
        type_hints: Mapping of parameter name to its annotation.
        exclude_params: Parameter names to skip entirely.

    Returns:
        True if any non-excluded, annotated argument is multimodal.
    """
    exclude_params = exclude_params or []

    for param_name, param_value in arguments.items():
        if param_name in exclude_params:
            continue

        if param_name in type_hints:
            annotation = type_hints[param_name]
            if is_multimodal_type(param_value, annotation):
                return True
    return False


def is_multimodal_type(value: Any, annotation: Any) -> bool:
    """Determine whether a value/annotation pair represents multimodal content."""

    from types import UnionType
    from typing import List as TypingList, Union, get_args, get_origin

    if isinstance(value, (Text, ImgUrl, ImgPath)):
        return True

    origin = get_origin(annotation)
    args = get_args(annotation)

    # Handle both typing.Union[...] and PEP 604 unions (`X | Y`): on the
    # project's required Python >= 3.11 the latter has origin
    # types.UnionType, not typing.Union, and was previously missed.
    if origin is Union or origin is UnionType:
        non_none_args = [arg for arg in args if arg is not type(None)]
        for arg_type in non_none_args:
            if is_multimodal_type(value, arg_type):
                return True
        return False

    if origin in (list, TypingList):
        if not args:
            return False
        element_type = args[0]
        if element_type in (Text, ImgUrl, ImgPath):
            return True
        if isinstance(value, (list, tuple)):
            return any(isinstance(item, (Text, ImgUrl, ImgPath)) for item in value)
        return False

    if annotation in (Text, ImgUrl, ImgPath):
        return True

    return False
遵循项目的代码风格和约定 31 | 32 | ## 开发环境设置 33 | 34 | ### 依赖项 35 | 36 | - Python 3.11 或更高版本 37 | - Poetry (推荐的依赖管理工具) 38 | 39 | ### 安装开发依赖 40 | 41 | ```bash 42 | git clone https://github.com/NiJingzhe/SimpleLLMFunc.git 43 | cd SimpleLLMFunc 44 | poetry install 45 | ``` 46 | 47 | 54 | 55 | ## 代码规范 56 | 57 | ### 代码风格 58 | 59 | 我们使用 [PEP 8](https://www.python.org/dev/peps/pep-0008/) 作为 Python 代码风格指南,使用 [Black](https://github.com/psf/black) 格式化器自动化格式化过程: 60 | 61 | ```bash 62 | black SimpleLLMFunc tests 63 | ``` 64 | 65 | ### 类型注解 66 | 67 | 我们鼓励使用类型注解以提高代码可读性和安全性。可以使用 Pylint 检查类型。 68 | 69 | ### 文档 70 | 71 | - 所有公共 API 都应该有清晰的文档字符串 72 | - 文档注释应遵循 [Google Python 文档风格](https://github.com/google/styleguide/blob/gh-pages/pyguide.md#38-comments-and-docstrings) 73 | - 更新功能时,请同时更新相关文档 74 | 75 | 76 | 86 | 87 | ## 行为准则 88 | 89 | 请尊重所有项目参与者,保持友好的交流环境。任何形式的骚扰或冒犯行为都是不可接受的。 90 | 91 | ## 获取帮助 92 | 93 | 如果你在贡献过程中需要帮助,可以: 94 | 95 | - 在 GitHub Issues 中提问 96 | - 联系项目维护者 97 | 98 | 再次感谢你对 SimpleLLMFunc 的贡献! 99 | -------------------------------------------------------------------------------- /SimpleLLMFunc/base/tool_call/validation.py: -------------------------------------------------------------------------------- 1 | """Tool result validation and serialization helpers.""" 2 | 3 | from __future__ import annotations 4 | 5 | import json 6 | from typing import Any 7 | 8 | from SimpleLLMFunc.type.multimodal import ImgPath, ImgUrl, Text 9 | 10 | 11 | def serialize_tool_output_for_langfuse(result: Any) -> Any: 12 | """序列化工具输出以便langfuse记录。 13 | 14 | Args: 15 | result: 工具返回的原始结果 16 | 17 | Returns: 18 | 序列化后的结果,适合langfuse记录 19 | """ 20 | if isinstance(result, ImgPath): 21 | return { 22 | "type": "image_path", 23 | "path": str(result.path), 24 | "detail": result.detail, 25 | } 26 | 27 | if isinstance(result, ImgUrl): 28 | return { 29 | "type": "image_url", 30 | "url": result.url, 31 | "detail": result.detail, 32 | } 33 | 34 | if isinstance(result, tuple) and len(result) == 2: 35 | text_part, img_part = result
def is_valid_tool_result(result: Any) -> bool:
    """Validate whether a tool return value is supported.

    Supported values are images (ImgPath/ImgUrl), plain strings,
    (str, image) pairs, and anything JSON-serialisable. A 2-tuple that is
    not a (str, image) pair is rejected outright, without falling back to
    the JSON check.
    """
    if isinstance(result, (ImgPath, ImgUrl, str)):
        return True

    if isinstance(result, tuple) and len(result) == 2:
        first, second = result
        return isinstance(first, str) and isinstance(second, (ImgPath, ImgUrl))

    try:
        json.dumps(result)
    except (TypeError, ValueError):
        return False
    return True
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information

import os
import sys  # NOTE(review): unused in this visible config — confirm before removing

project = 'SimpleLLMFunc'
copyright = '2025, Nijingzhe'
author = 'Nijingzhe'

# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration

extensions = [
    'myst_parser',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
]


# Default documentation language; overridable via the SPHINX_LANGUAGE env var.
language = os.environ.get('SPHINX_LANGUAGE', 'en')

# Internationalisation (gettext) configuration
locale_dirs = ['locale/']
gettext_compact = False

# Display names for each supported language
languages = {
    'zh_CN': '中文(简体)',
    'en': 'English',
}


source_suffix = [ '.md', '.rst' ]
master_doc = 'index'

# ReadTheDocs environment detection
is_readthedocs = os.environ.get('READTHEDOCS') == 'True'

# On ReadTheDocs, use the language the build environment selected.
if is_readthedocs:
    language = os.environ.get('READTHEDOCS_LANGUAGE', 'en')

# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output

html_theme = 'sphinx_rtd_theme'

# HTML theme options
html_theme_options = {
    'analytics_id': '',  # Optional: Google Analytics ID
    'logo_only': False,
    'prev_next_buttons_location': 'bottom',
    'style_external_links': False,
    'vcs_pageview_mode': '',
    'style_nav_header_background': '#2980B9',
    # Toc options
    'collapse_navigation': True,
    'sticky_navigation': True,
    'navigation_depth': 4,
    'includehidden': True,
    'titles_only': False
}

# HTML context configuration
html_context = {
    'current_version': 'latest',
    'versions': {
        'latest': 'latest',
    },
    'display_version': True,
}
@patch("SimpleLLMFunc.llm_decorator.steps.function.response.parse_response_to_type") 45 | def test_parse_and_validate( 46 | self, mock_parse: Any, mock_chat_completion: Any 47 | ) -> None: 48 | """Test parsing and validating response.""" 49 | mock_parse.return_value = "result" 50 | result = parse_and_validate_response( 51 | mock_chat_completion, str, "test_func" 52 | ) 53 | assert result == "result" 54 | mock_parse.assert_called_once_with(mock_chat_completion, str) 55 | 56 | -------------------------------------------------------------------------------- /SimpleLLMFunc/llm_decorator/steps/chat/response.py: -------------------------------------------------------------------------------- 1 | """Step 5: Process chat response stream.""" 2 | 3 | from __future__ import annotations 4 | 5 | import json 6 | from typing import Any, AsyncGenerator, Dict, List, Literal, Tuple 7 | 8 | from SimpleLLMFunc.base.post_process import ( 9 | extract_content_from_response, 10 | extract_content_from_stream_response, 11 | ) 12 | from SimpleLLMFunc.logger import app_log 13 | from SimpleLLMFunc.logger.logger import get_location 14 | from SimpleLLMFunc.type.decorator import HistoryList 15 | 16 | 17 | def extract_stream_response_content(chunk: Any, func_name: str) -> str: 18 | """从流式响应 chunk 中提取内容""" 19 | return extract_content_from_stream_response(chunk, func_name) 20 | 21 | 22 | def process_single_chat_response( 23 | response: Any, 24 | return_mode: Literal["text", "raw"], 25 | stream: bool, 26 | func_name: str, 27 | ) -> Any: 28 | """处理单个响应""" 29 | if return_mode == "raw": 30 | return response 31 | 32 | # text 模式:提取内容 33 | if stream: 34 | return extract_stream_response_content(response, func_name) 35 | else: 36 | return extract_content_from_response(response, func_name) or "" 37 | 38 | 39 | async def process_chat_response_stream( 40 | response_stream: AsyncGenerator[Tuple[Any, List[Dict[str, Any]]], None], 41 | return_mode: Literal["text", "raw"], 42 | messages: List[Dict[str, Any]], # 
初始消息,用于兼容性 43 | func_name: str, 44 | stream: bool, 45 | ) -> AsyncGenerator[Tuple[Any, HistoryList], None]: 46 | """处理流式响应的完整流程""" 47 | current_messages = messages.copy() # 初始消息 48 | 49 | async for response, updated_messages in response_stream: 50 | # 更新当前消息为最新版本(包含工具调用结果) 51 | current_messages = updated_messages 52 | 53 | # 记录响应日志 54 | app_log( 55 | f"LLM Chat '{func_name}' received response:" 56 | f"\n{json.dumps(response, default=str, ensure_ascii=False, indent=2)}", 57 | location=get_location(), 58 | ) 59 | 60 | # 处理单个响应 61 | content = process_single_chat_response( 62 | response, 63 | return_mode, 64 | stream, 65 | func_name, 66 | ) 67 | 68 | # Yield 响应和更新后的历史(包含工具调用结果) 69 | yield content, current_messages.copy() 70 | 71 | # 流结束标记(text 模式) 72 | if return_mode == "text": 73 | yield "", current_messages.copy() 74 | 75 | -------------------------------------------------------------------------------- /SimpleLLMFunc/llm_decorator/steps/chat/react.py: -------------------------------------------------------------------------------- 1 | """Step 4: Execute ReAct loop for llm_chat (streaming).""" 2 | 3 | from __future__ import annotations 4 | 5 | from typing import Any, AsyncGenerator, Awaitable, Callable, Dict, List, Optional, Tuple, Union 6 | 7 | from SimpleLLMFunc.base.ReAct import execute_llm 8 | from SimpleLLMFunc.interface.llm_interface import LLM_Interface 9 | from SimpleLLMFunc.tool import Tool 10 | from SimpleLLMFunc.llm_decorator.utils import process_tools 11 | 12 | 13 | def prepare_tools_for_execution( 14 | toolkit: Optional[List[Union[Tool, Callable[..., Awaitable[Any]]]]], 15 | func_name: str, 16 | ) -> tuple[Optional[List[Dict[str, Any]]], Dict[str, Callable[..., Awaitable[Any]]]]: 17 | """准备工具供执行使用""" 18 | return process_tools(toolkit, func_name) 19 | 20 | 21 | async def execute_llm_call( 22 | llm_interface: LLM_Interface, 23 | messages: List[Dict[str, Any]], 24 | tools: Optional[List[Dict[str, Any]]], 25 | tool_map: Dict[str, Callable[..., 
Awaitable[Any]]], 26 | max_tool_calls: int, 27 | stream: bool = False, 28 | **llm_kwargs: Any, 29 | ) -> AsyncGenerator[Tuple[Any, List[Dict[str, Any]]], None]: 30 | """执行 LLM 调用,返回响应和更新后的消息""" 31 | async for response, updated_messages in execute_llm( 32 | llm_interface=llm_interface, 33 | messages=messages, 34 | tools=tools, 35 | tool_map=tool_map, 36 | max_tool_calls=max_tool_calls, 37 | stream=stream, 38 | **llm_kwargs, 39 | ): 40 | yield response, updated_messages 41 | 42 | 43 | async def execute_react_loop_streaming( 44 | llm_interface: LLM_Interface, 45 | messages: List[Dict[str, Any]], 46 | toolkit: Optional[List[Union[Tool, Callable[..., Awaitable[Any]]]]], 47 | max_tool_calls: int, 48 | stream: bool, 49 | llm_kwargs: Dict[str, Any], 50 | func_name: str, 51 | ) -> AsyncGenerator[Tuple[Any, List[Dict[str, Any]]], None]: 52 | """执行 ReAct 循环的流式版本(无重试),返回响应和更新后的消息""" 53 | # 1. 准备工具 54 | tool_param, tool_map = prepare_tools_for_execution(toolkit, func_name) 55 | 56 | # 2. 执行 LLM 调用(流式) 57 | response_stream = execute_llm_call( 58 | llm_interface=llm_interface, 59 | messages=messages, 60 | tools=tool_param, 61 | tool_map=tool_map, 62 | max_tool_calls=max_tool_calls, 63 | stream=stream, 64 | **llm_kwargs, 65 | ) 66 | 67 | # 3. 
返回响应流和更新后的消息 68 | async for response, updated_messages in response_stream: 69 | yield response, updated_messages 70 | 71 | -------------------------------------------------------------------------------- /tests/test_base/test_messages/test_assistant.py: -------------------------------------------------------------------------------- 1 | """Tests for base.messages.assistant module.""" 2 | 3 | from __future__ import annotations 4 | 5 | from SimpleLLMFunc.base.messages.assistant import ( 6 | build_assistant_response_message, 7 | build_assistant_tool_message, 8 | ) 9 | 10 | 11 | class TestBuildAssistantResponseMessage: 12 | """Tests for build_assistant_response_message function.""" 13 | 14 | def test_build_with_content(self) -> None: 15 | """Test building assistant message with content.""" 16 | result = build_assistant_response_message("Hello, world!") 17 | assert result == { 18 | "role": "assistant", 19 | "content": "Hello, world!", 20 | } 21 | 22 | def test_build_with_empty_content(self) -> None: 23 | """Test building assistant message with empty content.""" 24 | result = build_assistant_response_message("") 25 | assert result == { 26 | "role": "assistant", 27 | "content": "", 28 | } 29 | 30 | 31 | class TestBuildAssistantToolMessage: 32 | """Tests for build_assistant_tool_message function.""" 33 | 34 | def test_build_with_tool_calls(self) -> None: 35 | """Test building assistant message with tool calls.""" 36 | tool_calls = [ 37 | { 38 | "id": "call_123", 39 | "type": "function", 40 | "function": {"name": "test_tool", "arguments": '{"arg": "value"}'}, 41 | } 42 | ] 43 | result = build_assistant_tool_message(tool_calls) 44 | assert result == { 45 | "role": "assistant", 46 | "content": None, 47 | "tool_calls": tool_calls, 48 | } 49 | 50 | def test_build_with_empty_tool_calls(self) -> None: 51 | """Test building assistant message with empty tool calls.""" 52 | result = build_assistant_tool_message([]) 53 | assert result == {} 54 | 55 | def 
test_build_with_multiple_tool_calls(self) -> None: 56 | """Test building assistant message with multiple tool calls.""" 57 | tool_calls = [ 58 | { 59 | "id": "call_1", 60 | "type": "function", 61 | "function": {"name": "tool1", "arguments": "{}"}, 62 | }, 63 | { 64 | "id": "call_2", 65 | "type": "function", 66 | "function": {"name": "tool2", "arguments": "{}"}, 67 | }, 68 | ] 69 | result = build_assistant_tool_message(tool_calls) 70 | assert result["tool_calls"] == tool_calls 71 | assert len(result["tool_calls"]) == 2 72 | 73 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= -c . 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | # 翻译文件目录(与 conf.py 中的 locale_dirs 保持一致) 11 | LOCALEDIR = source/locale 12 | 13 | # 支持的语言列表 14 | LANGUAGES = zh_CN en 15 | 16 | # Put it first so that "make" without argument is like "make help". 
17 | help: 18 | @echo "Available targets:" 19 | @echo " help - Show this help message" 20 | @echo " clean - Remove build directory" 21 | @echo " html - Build HTML documentation" 22 | @echo " html-all - Build HTML for all languages" 23 | @echo " gettext - Extract translatable strings" 24 | @echo " update-po - Update .po files from .pot files" 25 | @echo " compile-po - Compile .po files to .mo files" 26 | @echo " serve - Serve documentation locally" 27 | @echo "" 28 | @echo "Language-specific targets:" 29 | @for lang in $(LANGUAGES); do \ 30 | echo " html-$$lang - Build HTML for $$lang"; \ 31 | done 32 | 33 | .PHONY: help Makefile clean html html-all gettext update-po compile-po serve 34 | 35 | # 清理构建目录 36 | clean: 37 | rm -rf $(BUILDDIR)/* 38 | 39 | # 提取可翻译字符串 40 | gettext: 41 | $(SPHINXBUILD) -b gettext $(SPHINXOPTS) $(SOURCEDIR) $(BUILDDIR)/gettext 42 | 43 | # 更新.po文件 44 | update-po: gettext 45 | @for lang in $(LANGUAGES); do \ 46 | echo "Updating $$lang.po..."; \ 47 | sphinx-intl update -p $(BUILDDIR)/gettext -l $$lang -d $(LOCALEDIR); \ 48 | done 49 | 50 | # 编译.po文件为.mo文件 51 | compile-po: 52 | @for lang in $(LANGUAGES); do \ 53 | echo "Compiling $$lang..."; \ 54 | sphinx-intl build -d $(LOCALEDIR) -l $$lang; \ 55 | done 56 | 57 | # 构建单个语言的HTML文档 58 | html-%: compile-po 59 | @lang=$$(echo $* | sed 's/html-//'); \ 60 | echo "Building HTML for $$lang..."; \ 61 | $(SPHINXBUILD) -b html -D language=$$lang $(SPHINXOPTS) $(SOURCEDIR) $(BUILDDIR)/html/$$lang $(O) 62 | 63 | # 构建所有语言的HTML文档 64 | html-all: compile-po 65 | @for lang in $(LANGUAGES); do \ 66 | echo "Building HTML for $$lang..."; \ 67 | $(SPHINXBUILD) -b html -D language=$$lang $(SPHINXOPTS) $(SOURCEDIR) $(BUILDDIR)/html/$$lang $(O); \ 68 | done 69 | 70 | # 默认构建中文文档 71 | html: html-zh_CN 72 | 73 | # 本地服务(用于测试) 74 | serve: html-all 75 | @echo "Serving documentation at http://localhost:8000" 76 | @echo "Available languages:" 77 | @for lang in $(LANGUAGES); do \ 78 | echo " http://localhost:8000/html/$$lang/"; \ 
79 | done 80 | @cd $(BUILDDIR)/html && python3 -m http.server 8000 81 | 82 | # Catch-all target: route all unknown targets to Sphinx using the new 83 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 84 | %: Makefile 85 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 86 | -------------------------------------------------------------------------------- /docs/source/guide.md: -------------------------------------------------------------------------------- 1 | # 使用指南 2 | 3 | 本指南提供了 SimpleLLMFunc 框架各个功能模块的详细文档。请根据你的需求选择相应的文档进行阅读。 4 | 5 | ## 📚 详细文档导航 6 | 7 | ```{toctree} 8 | :maxdepth: 1 9 | :caption: 核心功能 10 | 11 | 配置与环境 12 | LLM 接口层 13 | llm_function 装饰器 14 | llm_chat 装饰器 15 | 工具系统 16 | ``` 17 | 18 | ## 🎯 按使用场景查找文档 19 | 20 | ### 我想要快速上手 21 | 👉 [快速开始](quickstart.md) - 5分钟内运行你的第一个示例 22 | 23 | ### 我想要配置 API 和环境 24 | 👉 [配置与环境](detailed_guide/config.md) - 学习如何设置 provider.json 和环境变量 25 | 26 | ### 我想要创建 LLM 函数 27 | 👉 [llm_function 装饰器](detailed_guide/llm_function.md) - 创建无状态的 LLM 驱动函数 28 | 29 | ### 我想要构建聊天应用 30 | 👉 [llm_chat 装饰器](detailed_guide/llm_chat.md) - 构建多轮对话和 Agent 应用 31 | 32 | ### 我想要整合工具/API 33 | 👉 [工具系统](detailed_guide/tool.md) - 让 LLM 调用外部函数和 API 34 | 35 | ### 我想要理解接口设计 36 | 👉 [LLM 接口层](detailed_guide/llm_interface.md) - 了解密钥管理和流量控制 37 | 38 | ### 我想要查看代码示例 39 | 👉 [示例代码](examples.md) - 浏览各种使用场景的完整示例 40 | 41 | ## 🚀 推荐学习路径 42 | 43 | ### 初级用户(刚开始使用) 44 | 1. [快速开始](quickstart.md) - 环境配置和第一个示例 45 | 2. [llm_function 装饰器](detailed_guide/llm_function.md) - 基础功能 46 | 3. [示例代码](examples.md) - 学习实际用法 47 | 48 | ### 中级用户(已掌握基础) 49 | 1. [llm_chat 装饰器](detailed_guide/llm_chat.md) - 构建交互应用 50 | 2. [工具系统](detailed_guide/tool.md) - 整合外部能力 51 | 3. [配置与环境](detailed_guide/config.md) - 优化配置 52 | 53 | ### 高级用户(深入理解框架) 54 | 1. [LLM 接口层](detailed_guide/llm_interface.md) - 密钥管理和流量控制 55 | 2. 自定义 LLM 接口和工具 56 | 3. 
[示例代码](examples.md) - 生产级别的实现参考 57 | 58 | ## 📖 按功能模块查找 59 | 60 | | 功能 | 文档 | 说明 | 61 | |-----|------|------| 62 | | 基础配置 | [配置与环境](detailed_guide/config.md) | API 密钥、环境变量、provider.json | 63 | | 简单任务 | [llm_function 装饰器](detailed_guide/llm_function.md) | 无状态函数、文本处理、数据转换 | 64 | | 对话应用 | [llm_chat 装饰器](detailed_guide/llm_chat.md) | 多轮对话、历史管理、流式响应 | 65 | | 工具集成 | [工具系统](detailed_guide/tool.md) | 工具定义、调用、多模态返回 | 66 | | 系统设计 | [LLM 接口层](detailed_guide/llm_interface.md) | 接口抽象、密钥池、流量控制 | 67 | | 实战示例 | [示例代码](examples.md) | 各种场景的完整代码 | 68 | 69 | ## ❓ 常见问题速查 70 | 71 | - **如何配置 API 密钥?** → [配置与环境](detailed_guide/config.md) 72 | - **装饰器支持同步函数吗?** → [llm_function 装饰器 - 重要说明](detailed_guide/llm_function.md) 73 | - **如何做多轮对话?** → [llm_chat 装饰器](detailed_guide/llm_chat.md) 74 | - **如何让 LLM 调用函数?** → [工具系统](detailed_guide/tool.md) 75 | - **支持哪些 LLM 提供商?** → [LLM 接口层 - OpenAICompatible 实现](detailed_guide/llm_interface.md) 76 | - **如何处理错误和重试?** → [LLM 接口层 - 故障排除](detailed_guide/llm_interface.md) 77 | 78 | ## 🔗 其他资源 79 | 80 | - [项目介绍](introduction.md) - 了解 SimpleLLMFunc 的设计理念 81 | - [示例代码](examples.md) - 各种场景的完整代码示例 82 | - [贡献指南](contributing.md) - 如何为项目做出贡献 83 | - [GitHub 仓库](https://github.com/NiJingzhe/SimpleLLMFunc) - 源代码和问题追踪 84 | 85 | ## 💡 提示 86 | 87 | - 每个文档都包含完整的代码示例,可以直接复制使用 88 | - 使用浏览器的搜索功能(Ctrl+F)快速定位内容 89 | - 遇到问题时,先查看对应文档的"故障排除"或"常见问题"部分 90 | - 所有示例代码都位于 [examples/](https://github.com/NiJingzhe/SimpleLLMFunc/tree/master/examples) 目录 91 | -------------------------------------------------------------------------------- /tests/test_llm_decorator_steps/test_common/test_log_context.py: -------------------------------------------------------------------------------- 1 | """Tests for llm_decorator.steps.common.log_context module.""" 2 | 3 | from __future__ import annotations 4 | 5 | from unittest.mock import AsyncMock, MagicMock, patch 6 | 7 | import pytest 8 | 9 | from SimpleLLMFunc.llm_decorator.steps.common.log_context import ( 10 | create_log_context_manager, 11 | 
log_function_call, 12 | setup_log_context, 13 | ) 14 | 15 | 16 | class TestLogFunctionCall: 17 | """Tests for log_function_call function.""" 18 | 19 | @patch("SimpleLLMFunc.llm_decorator.steps.common.log_context.app_log") 20 | @patch("SimpleLLMFunc.llm_decorator.steps.common.log_context.get_location") 21 | def test_log_function_call( 22 | self, mock_get_location: MagicMock, mock_app_log: MagicMock 23 | ) -> None: 24 | """Test logging function call.""" 25 | mock_get_location.return_value = "test_location" 26 | log_function_call("test_func", {"param1": "value1", "param2": 123}) 27 | mock_app_log.assert_called_once() 28 | call_args = mock_app_log.call_args[0][0] 29 | assert "test_func" in call_args 30 | assert "param1" in call_args or "value1" in call_args 31 | 32 | 33 | class TestCreateLogContextManager: 34 | """Tests for create_log_context_manager function.""" 35 | 36 | @patch("SimpleLLMFunc.llm_decorator.steps.common.log_context.async_log_context") 37 | def test_create_log_context_manager(self, mock_async_log_context: MagicMock) -> None: 38 | """Test creating log context manager.""" 39 | mock_context = AsyncMock() 40 | mock_async_log_context.return_value = mock_context 41 | 42 | result = create_log_context_manager("test_func", "trace_123") 43 | 44 | assert result == mock_context 45 | mock_async_log_context.assert_called_once_with( 46 | trace_id="trace_123", 47 | function_name="test_func", 48 | input_tokens=0, 49 | output_tokens=0, 50 | ) 51 | 52 | 53 | class TestSetupLogContext: 54 | """Tests for setup_log_context function.""" 55 | 56 | @patch("SimpleLLMFunc.llm_decorator.steps.common.log_context.log_function_call") 57 | @patch("SimpleLLMFunc.llm_decorator.steps.common.log_context.create_log_context_manager") 58 | def test_setup_log_context( 59 | self, 60 | mock_create_manager: MagicMock, 61 | mock_log_call: MagicMock, 62 | ) -> None: 63 | """Test setting up log context.""" 64 | mock_context = AsyncMock() 65 | mock_create_manager.return_value = mock_context 66 | 67 
| result = setup_log_context( 68 | func_name="test_func", 69 | trace_id="trace_123", 70 | arguments={"param1": "value1"}, 71 | ) 72 | 73 | mock_log_call.assert_called_once_with("test_func", {"param1": "value1"}) 74 | mock_create_manager.assert_called_once_with("test_func", "trace_123") 75 | assert result == mock_context 76 | 77 | -------------------------------------------------------------------------------- /examples/llm_function_pydantic_example.py: -------------------------------------------------------------------------------- 1 | """ 2 | llm_function 返回复杂 Pydantic 模型示例 3 | """ 4 | 5 | import asyncio 6 | import os 7 | from typing import List, Optional 8 | from pydantic import BaseModel, Field 9 | 10 | from SimpleLLMFunc.llm_decorator import llm_function 11 | from SimpleLLMFunc.interface.openai_compatible import OpenAICompatible 12 | 13 | 14 | # 配置 LLM 15 | current_dir = os.path.dirname(os.path.abspath(__file__)) 16 | provider_json_path = os.path.join(current_dir, "provider.json") 17 | 18 | try: 19 | llm_interface = OpenAICompatible.load_from_json_file( 20 | provider_json_path 21 | )["openrouter"]["google/gemini-3-pro-preview"] 22 | except Exception: 23 | llm_interface = None 24 | 25 | 26 | # 定义复杂的 Pydantic 模型 27 | class Address(BaseModel): 28 | """地址信息""" 29 | street: str = Field(..., description="街道地址") 30 | city: str = Field(..., description="城市名称") 31 | state: Optional[str] = Field(None, description="州/省名称") 32 | zip_code: str = Field(..., description="邮政编码") 33 | country: str = Field(default="中国", description="国家名称") 34 | 35 | 36 | class Contact(BaseModel): 37 | """联系方式""" 38 | email: str = Field(..., description="电子邮箱") 39 | phone: Optional[str] = Field(None, description="电话号码") 40 | website: Optional[str] = Field(None, description="网站URL") 41 | 42 | 43 | class Product(BaseModel): 44 | """商品信息""" 45 | name: str = Field(..., description="商品名称") 46 | price: float = Field(..., description="商品价格") 47 | category: str = Field(..., description="商品类别") 48 | tags: 
List[str] = Field(default_factory=list, description="标签列表") 49 | 50 | 51 | class Company(BaseModel): 52 | """公司信息""" 53 | name: str = Field(..., description="公司名称") 54 | founded_year: int = Field(..., description="成立年份") 55 | employee_count: int = Field(..., description="员工数量") 56 | address: Address = Field(..., description="公司地址") 57 | contact: Contact = Field(..., description="联系方式") 58 | products: List[Product] = Field(default_factory=list, description="产品列表") 59 | is_public: bool = Field(default=False, description="是否上市公司") 60 | 61 | 62 | class SearchResult(BaseModel): 63 | """搜索结果""" 64 | query: str = Field(..., description="搜索查询词") 65 | total_results: int = Field(..., description="总结果数") 66 | companies: List[Company] = Field(default_factory=list, description="公司列表") 67 | search_time_ms: float = Field(..., description="搜索耗时(毫秒)") 68 | 69 | 70 | # 定义返回复杂 Pydantic 模型的 llm_function 71 | @llm_function(llm_interface=llm_interface) 72 | async def search_companies(query: str, max_results: int = 3) -> SearchResult: 73 | """搜索符合条件的公司信息""" 74 | pass 75 | 76 | 77 | async def main(): 78 | if not llm_interface: 79 | print("请配置 provider.json") 80 | return 81 | 82 | result = await search_companies("AI 科技公司", max_results=2) 83 | 84 | import json 85 | print(json.dumps(result.model_dump(), indent=2, ensure_ascii=False)) 86 | 87 | 88 | if __name__ == "__main__": 89 | asyncio.run(main()) 90 | 91 | -------------------------------------------------------------------------------- /tests/test_base/test_messages/test_extraction.py: -------------------------------------------------------------------------------- 1 | """Tests for base.messages.extraction module.""" 2 | 3 | from __future__ import annotations 4 | 5 | from openai.types.chat import ChatCompletion, ChatCompletionChunk 6 | from openai.types.chat.chat_completion import Choice 7 | from openai.types.chat.chat_completion_message import ChatCompletionMessage 8 | from openai.types.completion_usage import CompletionUsage 9 | 10 | 
from SimpleLLMFunc.base.messages.extraction import extract_usage_from_response 11 | 12 | 13 | class TestExtractUsageFromResponse: 14 | """Tests for extract_usage_from_response function.""" 15 | 16 | def test_extract_usage_from_completion(self) -> None: 17 | """Test extracting usage from ChatCompletion.""" 18 | usage = CompletionUsage( 19 | prompt_tokens=10, 20 | completion_tokens=20, 21 | total_tokens=30, 22 | ) 23 | message = ChatCompletionMessage(role="assistant", content="test") 24 | choice = Choice(finish_reason="stop", index=0, message=message) 25 | response = ChatCompletion( 26 | id="test", 27 | choices=[choice], 28 | created=0, 29 | model="test", 30 | object="chat.completion", 31 | usage=usage, 32 | ) 33 | result = extract_usage_from_response(response) 34 | assert result is not None 35 | assert result["input"] == 10 36 | assert result["output"] == 20 37 | assert result["total"] == 30 38 | 39 | def test_extract_usage_none(self) -> None: 40 | """Test extracting usage from None.""" 41 | result = extract_usage_from_response(None) 42 | assert result is None 43 | 44 | def test_extract_usage_no_usage_field(self) -> None: 45 | """Test extracting usage when usage field is missing.""" 46 | message = ChatCompletionMessage(role="assistant", content="test") 47 | choice = Choice(finish_reason="stop", index=0, message=message) 48 | response = ChatCompletion( 49 | id="test", 50 | choices=[choice], 51 | created=0, 52 | model="test", 53 | object="chat.completion", 54 | ) 55 | result = extract_usage_from_response(response) 56 | assert result is None 57 | 58 | def test_extract_usage_from_chunk(self) -> None: 59 | """Test extracting usage from ChatCompletionChunk.""" 60 | from openai.types.chat.chat_completion_chunk import Choice as ChunkChoice 61 | from openai.types.chat.chat_completion_chunk import ChoiceDelta 62 | 63 | delta = ChoiceDelta(content="chunk", role="assistant") 64 | choice = ChunkChoice(delta=delta, finish_reason=None, index=0) 65 | chunk = ChatCompletionChunk( 66 
| id="test", 67 | choices=[choice], 68 | created=0, 69 | model="test", 70 | object="chat.completion.chunk", 71 | ) 72 | # Chunks typically don't have usage, but function should handle gracefully 73 | result = extract_usage_from_response(chunk) 74 | # Should return None if no usage field 75 | assert result is None or isinstance(result, dict) 76 | 77 | -------------------------------------------------------------------------------- /tests/test_llm_decorator_steps/test_chat/test_react.py: -------------------------------------------------------------------------------- 1 | """Tests for llm_decorator.steps.chat.react module.""" 2 | 3 | from __future__ import annotations 4 | 5 | from unittest.mock import AsyncMock, patch 6 | 7 | import pytest 8 | 9 | from SimpleLLMFunc.llm_decorator.steps.chat.react import ( 10 | execute_llm_call, 11 | execute_react_loop_streaming, 12 | prepare_tools_for_execution, 13 | ) 14 | 15 | 16 | class TestPrepareToolsForExecution: 17 | """Tests for prepare_tools_for_execution function.""" 18 | 19 | @patch("SimpleLLMFunc.llm_decorator.steps.chat.react.process_tools") 20 | def test_prepare_tools(self, mock_process_tools: Any) -> None: 21 | """Test preparing tools for execution.""" 22 | mock_process_tools.return_value = ([{"name": "tool1"}], {"tool1": AsyncMock()}) 23 | result = prepare_tools_for_execution([], "test_func") 24 | mock_process_tools.assert_called_once_with([], "test_func") 25 | 26 | 27 | class TestExecuteLLMCall: 28 | """Tests for execute_llm_call function.""" 29 | 30 | @pytest.mark.asyncio 31 | @patch("SimpleLLMFunc.llm_decorator.steps.chat.react.execute_llm") 32 | async def test_execute_call( 33 | self, mock_execute_llm: AsyncMock, mock_llm_interface: Any, sample_messages: list 34 | ) -> None: 35 | """Test executing LLM call.""" 36 | async def mock_generator(): 37 | yield "response1", sample_messages.copy() 38 | yield "response2", sample_messages.copy() 39 | 40 | mock_execute_llm.return_value = mock_generator() 41 | 42 | result = 
execute_llm_call( 43 | mock_llm_interface, 44 | sample_messages, 45 | None, 46 | {}, 47 | 5, 48 | stream=True, 49 | ) 50 | 51 | responses = [] 52 | async for r, _ in result: 53 | responses.append(r) 54 | 55 | assert len(responses) >= 1 56 | 57 | 58 | class TestExecuteReactLoopStreaming: 59 | """Tests for execute_react_loop_streaming function.""" 60 | 61 | @pytest.mark.asyncio 62 | @patch("SimpleLLMFunc.llm_decorator.steps.chat.react.prepare_tools_for_execution") 63 | @patch("SimpleLLMFunc.llm_decorator.steps.chat.react.execute_llm_call") 64 | async def test_execute_streaming( 65 | self, 66 | mock_execute: AsyncMock, 67 | mock_prepare: Any, 68 | mock_llm_interface: Any, 69 | sample_messages: list, 70 | ) -> None: 71 | """Test executing streaming ReAct loop.""" 72 | mock_prepare.return_value = (None, {}) 73 | 74 | async def mock_generator(): 75 | yield "response1", sample_messages.copy() 76 | yield "response2", sample_messages.copy() 77 | 78 | mock_execute.return_value = mock_generator() 79 | 80 | responses = [] 81 | async for response, _ in execute_react_loop_streaming( 82 | mock_llm_interface, 83 | sample_messages, 84 | None, 85 | 5, 86 | True, 87 | {}, 88 | "test_func", 89 | ): 90 | responses.append(response) 91 | 92 | assert len(responses) >= 1 93 | mock_prepare.assert_called_once() 94 | mock_execute.assert_called_once() 95 | 96 | -------------------------------------------------------------------------------- /SimpleLLMFunc/llm_decorator/steps/common/signature.py: -------------------------------------------------------------------------------- 1 | """Step 1: Parse function signature.""" 2 | 3 | from __future__ import annotations 4 | 5 | import inspect 6 | import uuid 7 | from typing import Any, Callable, Dict, Optional, Tuple, get_type_hints 8 | 9 | from SimpleLLMFunc.logger.logger import get_current_trace_id 10 | from SimpleLLMFunc.llm_decorator.steps.common.types import FunctionSignature 11 | 12 | 13 | def extract_template_params(kwargs: Dict[str, Any]) -> 
Optional[Dict[str, Any]]: 14 | """从 kwargs 中提取模板参数""" 15 | return kwargs.pop("_template_params", None) 16 | 17 | 18 | def extract_function_metadata( 19 | func: Callable, 20 | ) -> Tuple[inspect.Signature, Dict[str, Any], Any, str, str]: 21 | """提取函数的元数据""" 22 | signature = inspect.signature(func) 23 | type_hints = get_type_hints(func) 24 | return_type = type_hints.get("return") 25 | docstring = func.__doc__ or "" 26 | func_name = func.__name__ 27 | 28 | return signature, type_hints, return_type, docstring, func_name 29 | 30 | 31 | def generate_trace_id(func_name: str) -> str: 32 | """生成唯一的追踪 ID""" 33 | context_trace_id = get_current_trace_id() 34 | current_trace_id = f"{func_name}_{uuid.uuid4()}" 35 | if context_trace_id: 36 | current_trace_id += f"_{context_trace_id}" 37 | return current_trace_id 38 | 39 | 40 | def bind_function_arguments( 41 | signature: inspect.Signature, 42 | args: Tuple[Any, ...], 43 | kwargs: Dict[str, Any], 44 | ) -> inspect.BoundArguments: 45 | """绑定函数参数并应用默认值""" 46 | bound_args = signature.bind(*args, **kwargs) 47 | bound_args.apply_defaults() 48 | return bound_args 49 | 50 | 51 | def build_function_signature( 52 | func_name: str, 53 | trace_id: str, 54 | bound_args: inspect.BoundArguments, 55 | signature: inspect.Signature, 56 | type_hints: Dict[str, Any], 57 | return_type: Any, 58 | docstring: str, 59 | ) -> FunctionSignature: 60 | """构建函数签名对象""" 61 | return FunctionSignature( 62 | func_name=func_name, 63 | trace_id=trace_id, 64 | bound_args=bound_args, 65 | signature=signature, 66 | type_hints=type_hints, 67 | return_type=return_type, 68 | docstring=docstring, 69 | ) 70 | 71 | 72 | def parse_function_signature( 73 | func: Callable, 74 | args: Tuple[Any, ...], 75 | kwargs: Dict[str, Any], 76 | ) -> Tuple[FunctionSignature, Optional[Dict[str, Any]]]: 77 | """解析函数签名的完整流程""" 78 | # 1. 提取模板参数 79 | template_params = extract_template_params(kwargs) 80 | 81 | # 2. 
提取函数元数据 82 | signature, type_hints, return_type, docstring, func_name = extract_function_metadata( 83 | func 84 | ) 85 | 86 | # 3. 生成追踪 ID 87 | trace_id = generate_trace_id(func_name) 88 | 89 | # 4. 绑定函数参数 90 | bound_args = bind_function_arguments(signature, args, kwargs) 91 | 92 | # 5. 构建函数签名对象 93 | function_signature = build_function_signature( 94 | func_name=func_name, 95 | trace_id=trace_id, 96 | bound_args=bound_args, 97 | signature=signature, 98 | type_hints=type_hints, 99 | return_type=return_type, 100 | docstring=docstring, 101 | ) 102 | 103 | return function_signature, template_params 104 | 105 | -------------------------------------------------------------------------------- /SimpleLLMFunc/interface/key_pool.py: -------------------------------------------------------------------------------- 1 | import heapq 2 | from typing import List, Tuple, Dict 3 | from SimpleLLMFunc.logger import push_critical, get_location 4 | import threading # 导入 threading 模块 5 | 6 | class APIKeyPool: 7 | # 类变量用于存储单例实例 8 | _instances: Dict[str, 'APIKeyPool'] = {} 9 | 10 | def __new__(cls, api_keys: List[str], provider_id: str) -> 'APIKeyPool': 11 | # 如果已经为这个 app_id 创建了实例,返回现有实例 12 | if provider_id in cls._instances: 13 | return cls._instances[provider_id] 14 | 15 | # 创建新实例 16 | instance = super(APIKeyPool, cls).__new__(cls) 17 | cls._instances[provider_id] = instance 18 | return instance 19 | 20 | def __init__(self, api_keys: List[str], provider_id: str) -> None: 21 | # 如果已经初始化,跳过初始化过程 22 | if hasattr(self, 'initialized') and self.initialized: # type: ignore 23 | return 24 | 25 | if len(api_keys) == 0 or api_keys is None: 26 | push_critical( 27 | f"API 密钥池 {provider_id} 为空。请检查您的配置。", # 更新日志为中文 28 | location=get_location() 29 | ) 30 | 31 | raise ValueError(f"API 密钥池 {provider_id} 为空。请检查您的配置。") # 更新错误信息为中文 32 | 33 | 34 | self.api_keys = api_keys 35 | self.app_id = provider_id 36 | 37 | # 内存中的存储,替代 Redis 38 | self.heap: List[Tuple[float, str]] = [(0, key) for key in self.api_keys] 39 
| heapq.heapify(self.heap) 40 | self.key_to_task_count: Dict[str, int] = {key: 0 for key in self.api_keys} 41 | 42 | self.lock = threading.Lock() # 为每个实例创建一个锁 43 | self.initialized = True 44 | 45 | def get_least_loaded_key(self) -> str: 46 | with self.lock: # 获取锁保护读操作 47 | # 获取任务数量最小的 API key 48 | if not self.heap: 49 | raise ValueError(f"{self.app_id} 没有可用的 API 密钥") # 更新错误信息为中文 50 | return self.heap[0][1] 51 | 52 | def increment_task_count(self, api_key: str) -> None: 53 | with self.lock: # 获取锁 54 | if api_key not in self.key_to_task_count: 55 | raise ValueError(f"API 密钥 {api_key} 不在池中") # 更新错误信息为中文 56 | 57 | # 增加任务计数 58 | self.key_to_task_count[api_key] += 1 59 | 60 | # 更新堆 61 | self._update_heap(api_key, self.key_to_task_count[api_key]) 62 | 63 | def decrement_task_count(self, api_key: str) -> None: 64 | with self.lock: # 获取锁 65 | if api_key not in self.key_to_task_count: 66 | raise ValueError(f"API 密钥 {api_key} 不在池中") # 更新错误信息为中文 67 | 68 | # 减少任务计数 69 | self.key_to_task_count[api_key] -= 1 70 | 71 | # 更新堆 72 | self._update_heap(api_key, self.key_to_task_count[api_key]) 73 | 74 | def _update_heap(self, api_key: str, new_task_count: int) -> None: 75 | # 找到并移除当前条目 76 | for i, (count, key) in enumerate(self.heap): 77 | if key == api_key: 78 | self.heap[i] = (float('inf'), key) # 标记为移除 79 | break 80 | 81 | # 重新堆化以将标记的项移到末尾 82 | heapq.heapify(self.heap) 83 | 84 | # 移除标记的项并添加更新后的项 85 | self.heap.pop() 86 | heapq.heappush(self.heap, (new_task_count, api_key)) -------------------------------------------------------------------------------- /SimpleLLMFunc/type/multimodal.py: -------------------------------------------------------------------------------- 1 | """ 2 | 多模态内容类型定义 3 | 4 | 本模块定义了用于多模态LLM函数的类型,支持文本、图片URL和图片路径。 5 | 通过类型提示,框架可以自动识别参数类型并构建适当的消息格式。 6 | 7 | 示例: 8 | ```python 9 | from SimpleLLMFunc.type import Text, ImgUrl, ImgPath 10 | 11 | @llm_function 12 | def analyze_image( 13 | description: Text, 14 | image_url: ImgUrl, 15 | reference_image: ImgPath 16 | ) -> 
from typing import Union, List
from pathlib import Path
import base64


class Text:
    """Marker wrapper for plain-text multimodal content."""

    def __init__(self, content: str):
        self.content = content

    def __str__(self) -> str:
        return self.content

    def __repr__(self) -> str:
        return f"Text({self.content!r})"


class ImgUrl:
    """Marker wrapper for a remote image URL (or inline data URI)."""

    def __init__(self, url: str, detail: str = "auto"):
        # Only http(s) URLs and data URIs are accepted.
        if not url.startswith(("http://", "https://", "data:")):
            raise ValueError("Image URL must start with http://, https://, or data:")
        # Detail level follows the OpenAI vision API vocabulary.
        if detail not in ("low", "high", "auto"):
            raise ValueError("detail must be 'low', 'high', or 'auto'")

        self.url = url
        self.detail = detail

    def __str__(self) -> str:
        return self.url

    def __repr__(self) -> str:
        return f"ImgUrl({self.url!r}, detail={self.detail!r})"


class ImgPath:
    """Marker wrapper for a local image file path."""

    def __init__(self, path: Union[str, Path], detail: str = "auto"):
        self.path = Path(path)

        # Validation order matters for which error the caller sees first:
        # existence, then file-ness, then detail level, then extension.
        if not self.path.exists():
            raise FileNotFoundError(f"Image file not found: {path}")
        if not self.path.is_file():
            raise ValueError(f"Path is not a file: {path}")
        if detail not in ("low", "high", "auto"):
            raise ValueError("detail must be 'low', 'high', or 'auto'")

        supported = {".jpg", ".jpeg", ".png", ".gif", ".bmp", ".webp"}
        if self.path.suffix.lower() not in supported:
            raise ValueError(f"Unsupported image format: {self.path.suffix}")

        self.detail = detail

    def __str__(self) -> str:
        return str(self.path)

    def __repr__(self) -> str:
        return f"ImgPath({self.path!r}, detail={self.detail!r})"

    def to_base64(self) -> str:
        """Return the file content encoded as a base64 string."""
        raw = self.path.read_bytes()
        return base64.b64encode(raw).decode("utf-8")

    def get_mime_type(self) -> str:
        """Return the MIME type inferred from the file extension.

        Unknown extensions fall back to ``image/jpeg`` (cannot normally
        happen, since ``__init__`` rejects unsupported extensions).
        """
        return {
            ".jpg": "image/jpeg",
            ".jpeg": "image/jpeg",
            ".png": "image/png",
            ".gif": "image/gif",
            ".bmp": "image/bmp",
            ".webp": "image/webp",
        }.get(self.path.suffix.lower(), "image/jpeg")


# Convenience aliases for annotating multimodal parameters.
MultimodalContent = Union[Text, ImgUrl, ImgPath]
MultimodalList = List[MultimodalContent]
73 | 74 | # Jupyter Notebook 75 | .ipynb_checkpoints 76 | 77 | # IPython 78 | profile_default/ 79 | ipython_config.py 80 | 81 | # pyenv 82 | .python-version 83 | 84 | # pipenv 85 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 86 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 87 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 88 | # install all needed dependencies. 89 | #Pipfile.lock 90 | 91 | # poetry 92 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 93 | # This is especially recommended for binary packages to ensure reproducibility, and is more 94 | # commonly ignored for libraries. 95 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 96 | #poetry.lock 97 | 98 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 99 | __pypackages__/ 100 | 101 | # Celery stuff 102 | celerybeat-schedule 103 | celerybeat.pid 104 | 105 | # SageMath parsed files 106 | *.sage.py 107 | 108 | # Environments 109 | .env 110 | .venv 111 | env/ 112 | venv/ 113 | ENV/ 114 | env.bak/ 115 | venv.bak/ 116 | 117 | # Spyder project settings 118 | .spyderproject 119 | .spyproject 120 | 121 | # Rope project settings 122 | .ropeproject 123 | 124 | # mkdocs documentation 125 | /site 126 | 127 | # mypy 128 | .mypy_cache/ 129 | .dmypy.json 130 | dmypy.json 131 | 132 | # Pyre type checker 133 | .pyre/ 134 | 135 | # pytype static type analyzer 136 | .pytype/ 137 | 138 | # Cython debug symbols 139 | cython_debug/ 140 | 141 | # Logs 142 | logs/ 143 | *.log 144 | 145 | # VS Code 146 | .vscode/ 147 | *.code-workspace 148 | 149 | # PyCharm 150 | .idea/ 151 | *.iml 152 | *.iws 153 | *.ipr 154 | 155 | # Custom application-specific additions 156 | # Add any project-specific ignores below 157 | **/log_indices/ 158 | **/*.log 159 | examples/testlog/ 160 | 161 | sandbox/ 162 
| examples/provider.json 163 | examples/**/provider.json 164 | .venv310/ 165 | 166 | 167 | .DS_Store 168 | 169 | testlogs/ 170 | coverage* 171 | htmlcov/ 172 | .claude/ 173 | draft/ -------------------------------------------------------------------------------- /tests/test_llm_decorator_steps/test_common/test_prompt.py: -------------------------------------------------------------------------------- 1 | """Tests for llm_decorator.steps.common.prompt module.""" 2 | 3 | from __future__ import annotations 4 | 5 | from unittest.mock import patch 6 | 7 | import pytest 8 | 9 | from SimpleLLMFunc.llm_decorator.steps.common.prompt import ( 10 | extract_parameter_type_hints, 11 | process_docstring_template, 12 | ) 13 | 14 | 15 | class TestProcessDocstringTemplate: 16 | """Tests for process_docstring_template function.""" 17 | 18 | def test_process_without_template_params(self) -> None: 19 | """Test processing docstring without template params.""" 20 | docstring = "Simple docstring" 21 | result = process_docstring_template(docstring, None) 22 | assert result == docstring 23 | 24 | def test_process_with_template_params(self) -> None: 25 | """Test processing docstring with template params.""" 26 | docstring = "Function with {param1} and {param2}" 27 | template_params = {"param1": "value1", "param2": "value2"} 28 | result = process_docstring_template(docstring, template_params) 29 | assert result == "Function with value1 and value2" 30 | 31 | @patch("SimpleLLMFunc.llm_decorator.steps.common.prompt.push_warning") 32 | @patch("SimpleLLMFunc.llm_decorator.steps.common.prompt.get_location") 33 | def test_process_missing_template_param( 34 | self, mock_get_location: Any, mock_push_warning: Any 35 | ) -> None: 36 | """Test processing docstring with missing template param.""" 37 | mock_get_location.return_value = "test_location" 38 | docstring = "Function with {param1} and {param2}" 39 | template_params = {"param1": "value1"} # Missing param2 40 | result = 
process_docstring_template(docstring, template_params) 41 | assert result == docstring # Should return original 42 | mock_push_warning.assert_called() 43 | 44 | @patch("SimpleLLMFunc.llm_decorator.steps.common.prompt.push_warning") 45 | @patch("SimpleLLMFunc.llm_decorator.steps.common.prompt.get_location") 46 | def test_process_invalid_template( 47 | self, mock_get_location: Any, mock_push_warning: Any 48 | ) -> None: 49 | """Test processing docstring with invalid template.""" 50 | mock_get_location.return_value = "test_location" 51 | docstring = "Function with {invalid" 52 | template_params = {"param1": "value1"} 53 | result = process_docstring_template(docstring, template_params) 54 | assert result == docstring # Should return original 55 | mock_push_warning.assert_called() 56 | 57 | 58 | class TestExtractParameterTypeHints: 59 | """Tests for extract_parameter_type_hints function.""" 60 | 61 | def test_extract_excluding_return(self) -> None: 62 | """Test extracting type hints excluding return.""" 63 | type_hints = { 64 | "param1": str, 65 | "param2": int, 66 | "return": str, 67 | } 68 | result = extract_parameter_type_hints(type_hints) 69 | assert "param1" in result 70 | assert "param2" in result 71 | assert "return" not in result 72 | 73 | def test_extract_no_return(self) -> None: 74 | """Test extracting type hints when no return type.""" 75 | type_hints = { 76 | "param1": str, 77 | "param2": int, 78 | } 79 | result = extract_parameter_type_hints(type_hints) 80 | assert len(result) == 2 81 | assert "param1" in result 82 | assert "param2" in result 83 | 84 | def test_extract_empty(self) -> None: 85 | """Test extracting from empty type hints.""" 86 | result = extract_parameter_type_hints({}) 87 | assert result == {} 88 | 89 | -------------------------------------------------------------------------------- /docs/source/detailed_guide/config.md: -------------------------------------------------------------------------------- 1 | # 配置文件说明 2 | 3 | ## `.env` 文件 4 | 5 | 
`.env` 文件用于存储环境变量,在本框架中主要用于配置日志相关设置。你可以在你项目最终的 `WORKING DIR` 下创建一个 `.env` 文件,或者直接在环境变量中设置这些值。 6 | 7 | ### 环境变量配置 8 | 9 | ```bash 10 | # 日志相关配置 11 | 12 | # LOG_LEVEL:控制台日志级别,默认为 WARNING 13 | # 可选值:DEBUG, INFO, WARNING, ERROR, CRITICAL 14 | LOG_LEVEL=WARNING 15 | 16 | # 其他可选日志配置 17 | # LOG_FILE:日志文件路径(如果需要) 18 | # LOG_FILE=./logs/app.log 19 | ``` 20 | 21 | ### 支持的环境变量 22 | 23 | | 环境变量 | 说明 | 可选值 | 默认值 | 24 | |---------|------|--------|--------| 25 | | `LOG_LEVEL` | 日志级别 | `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL` | `WARNING` | 26 | 27 | ### 环境变量优先级 28 | 29 | 注意,直接 `export` 环境变量会覆盖 `.env` 文件中的设置,因此如果你在运行时设置了环境变量,这些设置将优先于 `.env` 文件中的配置。 30 | 31 | 优先级顺序(从高到低): 32 | 33 | 1. 运行时设置的环境变量 (如 `export LOG_LEVEL=DEBUG`) 34 | 2. `.env` 文件中的配置 35 | 3. 框架默认值 36 | 37 | ## `provider.json` 文件 38 | 39 | `provider.json` 文件用于配置 LLM 接口的相关信息,包括 API 密钥、提供商信息、模型名称等。你可以在项目根目录创建一个 `provider.json` 文件,内容示例如下: 40 | 41 | ### 配置文件结构 42 | 43 | provider.json 使用嵌套结构:`提供商 -> 模型名 -> 配置参数` 44 | 45 | ```json 46 | { 47 | "openai": [ 48 | { 49 | "model_name": "gpt-3.5-turbo", 50 | "api_keys": ["sk-test-key-1", "sk-test-key-2"], 51 | "base_url": "https://api.openai.com/v1", 52 | "max_retries": 5, 53 | "retry_delay": 1.0, 54 | "rate_limit_capacity": 20, 55 | "rate_limit_refill_rate": 3.0 56 | }, 57 | { 58 | "model_name": "gpt-4", 59 | "api_keys": ["sk-test-key-3", "sk-test-key-4"], 60 | "base_url": "https://api.openai.com/v1", 61 | "max_retries": 5, 62 | "retry_delay": 1.0, 63 | "rate_limit_capacity": 10, 64 | "rate_limit_refill_rate": 1.0 65 | } 66 | ], 67 | "zhipu": [ 68 | { 69 | "model_name": "glm-4", 70 | "api_keys": ["zhipu-test-key-1", "zhipu-test-key-2"], 71 | "base_url": "https://open.bigmodel.cn/api/paas/v4/", 72 | "max_retries": 3, 73 | "retry_delay": 0.5, 74 | "rate_limit_capacity": 15, 75 | "rate_limit_refill_rate": 2.0 76 | } 77 | ], 78 | "claude": [ 79 | { 80 | "model_name": "claude-3-sonnet", 81 | "api_keys": ["claude-test-key-1"], 82 | "base_url": "https://api.anthropic.com/v1", 83 | 
      "max_retries": 5,
      "retry_delay": 1.0,
      "rate_limit_capacity": 8,
      "rate_limit_refill_rate": 0.5
    }
  ]
}
```

### 配置参数说明

| 参数 | 类型 | 说明 | 示例 |
|------|------|------|------|
| `api_keys` | 数组 | API 密钥列表,支持多个密钥用于负载均衡 | `["key1", "key2"]` |
| `base_url` | 字符串 | API 服务器地址 | `https://api.openai.com/v1` |
| `model_name` | 字符串 | 模型名称,与提供商对应 | `gpt-3.5-turbo` |
| `max_retries` | 数字 | 最大重试次数,默认 3 | `5` |
| `retry_delay` | 浮点数 | 重试延迟(秒),默认 1.0 | `1.0` |
| `rate_limit_capacity` | 数字 | 令牌桶容量,默认 10 | `20` |
| `rate_limit_refill_rate` | 浮点数 | 令牌补充速率(tokens/秒),默认 1.0 | `3.0` |

### 加载和使用

然后你可以使用这个json文件来加载所有的接口,例如:

```python
from SimpleLLMFunc import OpenAICompatible

# 加载所有模型
models = OpenAICompatible.load_from_json_file("provider.json")

# 获取特定模型(外层键为提供商名,内层键为配置中的 model_name)
gpt35 = models["openai"]["gpt-3.5-turbo"]
gpt4 = models["openai"]["gpt-4"]
zhipu = models["zhipu"]["glm-4"]
claude = models["claude"]["claude-3-sonnet"]

# 在装饰器中使用
from SimpleLLMFunc import llm_function

@llm_function(llm_interface=gpt35)
async def my_task(text: str) -> str:
    """处理文本的任务"""
    pass
```

### 最佳实践

1. **多个 API 密钥**: 为了实现负载均衡和高可用性,建议为每个模型配置多个 API 密钥
2. **不同模型的限流策略**: 根据不同的 API 限制配置不同的 `rate_limit_capacity` 和 `rate_limit_refill_rate`
3. 
"""
Utility helpers for the logging system: timestamp conversion, caller-location
lookup, and log-record post-processing.
"""

import inspect
import json
import os
from datetime import datetime, timedelta, timezone, tzinfo
from typing import Any, Dict, Optional


def convert_float_to_datetime_with_tz(
    time_float: float, tz: tzinfo = timezone(timedelta(hours=8))
) -> datetime:
    """Convert a POSIX timestamp to a timezone-aware datetime.

    Args:
        time_float: Seconds since 1970-01-01 00:00:00 UTC.
        tz: Target timezone; defaults to UTC+8 (Beijing time).

    Returns:
        A timezone-aware ``datetime`` in ``tz``.

    Example:
        >>> convert_float_to_datetime_with_tz(1640995200.0).isoformat()
        '2022-01-01T08:00:00+08:00'
    """
    return datetime.fromtimestamp(time_float, tz=tz)


def get_location(depth: int = 2) -> str:
    """Return the caller's code location as ``"file.py:function:lineno"``.

    Walks ``depth`` frames up the call stack and formats that frame's
    position, for tagging log records with their origin.

    Args:
        depth: Stack depth to walk up. 1 = the direct caller of this
            function, 2 (default) = the caller's caller.

    Returns:
        ``"file.py:function:lineno"`` on success, ``"unknown"`` if the
        stack is shallower than ``depth``.

    Example:
        >>> get_location()  # doctest: +SKIP
        'main.py:main:10'
    """
    frame = inspect.currentframe()
    try:
        # Walk up the stack `depth` frames.
        for _ in range(depth):
            if frame is None:
                break
            frame = frame.f_back

        if frame:
            frame_info = inspect.getframeinfo(frame)
            filename = os.path.basename(frame_info.filename)
            # BUGFIX: the computed `filename` was previously discarded and the
            # literal "(unknown)" returned instead, making every logged
            # location useless and contradicting this function's docstring.
            return f"{filename}:{frame_info.function}:{frame_info.lineno}"
        return "unknown"
    finally:
        # Drop the frame reference to avoid a reference cycle
        # (recommended by the `inspect` module documentation).
        del frame


def format_extra_fields(record_dict: Dict[str, Any]) -> Dict[str, Any]:
    """Extract and sanitize the non-standard fields of a log record.

    Standard logging attributes and private (``_``-prefixed) keys are
    dropped; every remaining value is kept as-is when JSON-serializable,
    otherwise converted with ``str``.

    Args:
        record_dict: Dict representation of a log record.

    Returns:
        A dict containing only the extra fields, all JSON-serializable.
    """
    # Standard log-record attributes (plus trace_id/location, which the
    # logging pipeline handles separately) that must not leak into extras.
    excluded_fields = {
        "args",
        "asctime",
        "created",
        "exc_info",
        "exc_text",
        "filename",
        "funcName",
        "id",
        "levelname",
        "levelno",
        "lineno",
        "module",
        "msecs",
        "message",
        "msg",
        "name",
        "pathname",
        "process",
        "processName",
        "relativeCreated",
        "stack_info",
        "thread",
        "threadName",
        "trace_id",
        "location",
    }

    result = {}
    for key, value in record_dict.items():
        if not key.startswith("_") and key not in excluded_fields:
            try:
                # Probe serializability; keep the original value if it passes.
                json.dumps(value)
                result[key] = value
            except (TypeError, OverflowError):
                # Fall back to the string representation.
                result[key] = str(value)

    return result


def safe_dict_merge(*dicts: Dict[str, Any]) -> Dict[str, Any]:
    """Merge dicts left to right; later dicts win on duplicate keys.

    ``None``/empty entries are skipped; the inputs are never mutated.

    Args:
        *dicts: Dicts to merge, in priority order (last wins).

    Returns:
        A new merged dict.

    Example:
        >>> safe_dict_merge({"a": 1, "b": 2}, {"b": 3, "c": 4})
        {'a': 1, 'b': 3, 'c': 4}
    """
    result = {}
    for d in dicts:
        if d:
            result.update(d)
    return result
# Make the repository root importable when running the example directly.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

from SimpleLLMFunc import llm_function, app_log
from SimpleLLMFunc import OpenAICompatible
from SimpleLLMFunc.observability import flush_all_observations

# Load the LLM interface configuration from examples/provider.json.
current_dir = os.path.dirname(os.path.abspath(__file__))
provider_json_path = os.path.join(current_dir, "provider.json")

try:
    llm_interface = OpenAICompatible.load_from_json_file(provider_json_path)["dreamcatcher"]["gpt-4o"]
    print("✅ 成功加载LLM接口配置")
except (FileNotFoundError, KeyError) as e:
    print(f"⚠️ 警告: 无法加载LLM接口配置 ({e})")
    print("请确保provider.json文件存在且配置正确")
    llm_interface = None  # type: ignore

# General-purpose code analysis function.
# NOTE: the docstring below is the LLM prompt template — {style}, {language}
# and {focus} are filled in at call time via _template_params — so its exact
# (Chinese) wording is runtime behavior and must not be edited.
@llm_function(llm_interface=llm_interface)  # type: ignore
async def analyze_code(code: str) -> str:
    """以{style}的方式分析{language}代码,重点关注{focus}。"""
    return ""


# General-purpose text processing function (docstring is the prompt template;
# {role}, {action} and {style} are filled via _template_params).
@llm_function(llm_interface=llm_interface)  # type: ignore
async def process_text(text: str) -> str:
    """作为{role},请{action}以下文本,输出风格为{style}。"""
    return ""


async def main() -> None:
    """Run the dynamic-template-parameter demo end to end."""
    if llm_interface is None:
        # No usable interface: only the function definitions were shown.
        print("由于缺少LLM接口配置,仅展示函数定义。")
        print("请参考examples/provider_template.json创建provider.json配置文件。")
        return

    app_log("开始运行动态模板参数演示")

    print("=== 动态模板参数演示 ===\n")

    # Example 1: one analysis function, different template parameters per call.
    print("1. 代码分析功能演示:")

    python_code = """
    def fibonacci(n):
        if n <= 1:
            return n
        return fibonacci(n-1) + fibonacci(n-2)
    """

    try:
        print("   Python性能分析:")
        result1: str = await analyze_code(
            python_code,
            _template_params={
                'style': '详细',
                'language': 'Python',
                'focus': '性能优化'
            }
        )
        print(f"   分析结果: {result1}\n")
    except Exception as e:
        print(f"   执行失败: {e}\n")

    try:
        print("   JavaScript规范检查:")
        js_code = "function test() { console.log('hello'); }"
        result2: str = await analyze_code(
            js_code,
            _template_params={
                'style': '简洁',
                'language': 'JavaScript',
                'focus': '代码规范'
            }
        )
        print(f"   分析结果: {result2}\n")
    except Exception as e:
        print(f"   执行失败: {e}\n")

    # Example 2: one text-processing function, different roles/actions per call.
    print("2. 文本处理功能演示:")

    sample_text = "人工智能技术正在快速发展,对各行各业产生深远影响。"

    try:
        print("   编辑润色:")
        result3: str = await process_text(
            sample_text,
            _template_params={
                'role': '专业编辑',
                'action': '润色',
                'style': '学术'
            }
        )
        print(f"   处理结果: {result3}\n")
    except Exception as e:
        print(f"   执行失败: {e}\n")

    try:
        print("   翻译转换:")
        result4: str = await process_text(
            sample_text,
            _template_params={
                'role': '翻译专家',
                'action': '翻译成英文',
                'style': '商务'
            }
        )
        print(f"   处理结果: {result4}\n")
    except Exception as e:
        print(f"   执行失败: {e}\n")

    print("✨ 核心优势:")
    print("• 一个函数定义,多种使用场景")
    print("• 调用时动态指定角色和任务")
    print("• 代码复用性大大提高")
    print("• 更符合实际使用需求")
    print()
    print("💡 使用提示:")
    print("• 在DocString中使用{变量名}作为占位符")
    print("• 调用时通过_template_params传入变量值")
    print("• _template_params不会传递给LLM,仅用于模板处理")

    app_log("动态模板参数演示运行结束")


if __name__ == "__main__":
    asyncio.run(main())
    # Flush any pending observability (Langfuse) events before exit.
    flush_all_observations()
"""Tests for base.tool_call.validation module.

NOTE(review): the `img_url`, `img_path` and `text_content` fixtures used
below are presumably provided by a conftest.py — confirm when editing.
"""

from __future__ import annotations

import json

import pytest

from SimpleLLMFunc.base.tool_call.validation import (
    is_valid_tool_result,
    serialize_tool_output_for_langfuse,
)
from SimpleLLMFunc.type.multimodal import ImgPath, ImgUrl, Text


class TestIsValidToolResult:
    """Tests for is_valid_tool_result function."""

    def test_valid_string(self) -> None:
        """Test string result validation."""
        assert is_valid_tool_result("test") is True

    def test_valid_img_url(self, img_url: ImgUrl) -> None:
        """Test ImgUrl result validation."""
        assert is_valid_tool_result(img_url) is True

    def test_valid_img_path(self, img_path: ImgPath) -> None:
        """Test ImgPath result validation."""
        assert is_valid_tool_result(img_path) is True

    def test_valid_dict(self) -> None:
        """Test dict result validation."""
        assert is_valid_tool_result({"key": "value"}) is True

    def test_valid_list(self) -> None:
        """Test list result validation."""
        assert is_valid_tool_result([1, 2, 3]) is True

    def test_valid_tuple_with_image(self, img_url: ImgUrl) -> None:
        """Test tuple with image validation."""
        # A (text, image) pair is an accepted multimodal tool result.
        result = ("text", img_url)
        assert is_valid_tool_result(result) is True

    def test_invalid_tuple(self) -> None:
        """Test invalid tuple validation."""
        # Second element must be an image wrapper, not a plain string.
        result = ("text", "not_image")
        assert is_valid_tool_result(result) is False

    def test_invalid_type(self) -> None:
        """Test invalid type validation."""
        # Create a non-serializable object
        class NonSerializable:
            pass

        assert is_valid_tool_result(NonSerializable()) is False


class TestSerializeToolOutputForLangfuse:
    """Tests for serialize_tool_output_for_langfuse function."""

    def test_serialize_string(self) -> None:
        """Test serializing string."""
        result = serialize_tool_output_for_langfuse("test")
        assert result == "test"

    def test_serialize_img_url(self, img_url: ImgUrl) -> None:
        """Test serializing ImgUrl."""
        result = serialize_tool_output_for_langfuse(img_url)
        assert result["type"] == "image_url"
        assert result["url"] == img_url.url

    def test_serialize_img_path(self, img_path: ImgPath) -> None:
        """Test serializing ImgPath."""
        result = serialize_tool_output_for_langfuse(img_path)
        assert result["type"] == "image_path"
        assert "path" in result

    def test_serialize_text_object(self, text_content: Text) -> None:
        """Test serializing Text object."""
        # Text wrappers collapse to their plain string content.
        result = serialize_tool_output_for_langfuse(text_content)
        assert isinstance(result, str)

    def test_serialize_tuple_with_image(self, img_url: ImgUrl) -> None:
        """Test serializing tuple with image."""
        result = serialize_tool_output_for_langfuse(("text", img_url))
        assert result["type"] == "text_with_image"
        assert result["text"] == "text"
        assert "image" in result

    def test_serialize_dict(self) -> None:
        """Test serializing dict."""
        # JSON-serializable containers pass through unchanged.
        data = {"key": "value", "number": 123}
        result = serialize_tool_output_for_langfuse(data)
        assert result == data

    def test_serialize_list(self) -> None:
        """Test serializing list."""
        data = [1, 2, 3]
        result = serialize_tool_output_for_langfuse(data)
        assert result == data

    def test_serialize_non_serializable(self) -> None:
        """Test serializing non-serializable object."""
        # Unknown objects fall back to their str() representation.
        class NonSerializable:
            def __str__(self) -> str:
                return "non-serializable"

        obj = NonSerializable()
        result = serialize_tool_output_for_langfuse(obj)
        assert result == "non-serializable"
class TestProcessChatResponseStream:
    """Tests for process_chat_response_stream function."""

    @pytest.mark.asyncio
    @patch("SimpleLLMFunc.llm_decorator.steps.chat.response.process_single_chat_response")
    @patch("SimpleLLMFunc.llm_decorator.steps.chat.response.app_log")
    async def test_process_stream_text_mode(
        self, mock_app_log: Any, mock_process: Any, sample_messages: list
    ) -> None:
        """Test processing stream in text mode."""
        mock_process.return_value = "content"

        # Fake upstream stream yielding (response, history-snapshot) pairs.
        async def mock_stream():
            yield "response1", sample_messages.copy()
            yield "response2", sample_messages.copy()

        results = []
        async for content, history in process_chat_response_stream(
            mock_stream(), "text", sample_messages, "test_func", True
        ):
            results.append((content, history))

        assert len(results) >= 2  # Should have responses + end marker
        assert results[-1][0] == ""  # End marker should be empty string

    @pytest.mark.asyncio
    @patch("SimpleLLMFunc.llm_decorator.steps.chat.response.process_single_chat_response")
    async def test_process_stream_raw_mode(
        self, mock_process: Any, sample_messages: list, mock_chat_completion: Any
    ) -> None:
        """Test processing stream in raw mode."""
        # In raw mode the unprocessed completion object is passed through.
        mock_process.return_value = mock_chat_completion

        async def mock_stream():
            yield mock_chat_completion, sample_messages.copy()

        results = []
        async for content, history in process_chat_response_stream(
            mock_stream(), "raw", sample_messages, "test_func", False
        ):
            results.append((content, history))

        assert len(results) >= 1
29 | 30 | ### Examples 31 | 32 | - Updated example files (`llm_function_pydantic_example.py`, `parallel_toolcall_example.py`, `llm_chat_raw_tooluse_example.py`) to use `gemini-3-pro-preview` model. 33 | 34 | --- 35 | 36 | ## 0.4.0 Release Notes 37 | 38 | ### Major Refactoring 39 | 40 | 1. **Modular Architecture Restructuring**: Completely refactored the base module, splitting messages, tool_call, and type_resolve into dedicated sub-modules for better code organization and maintainability. 41 | 42 | 2. **Decorator Logic Step-based Implementation**: Refactored decorator logic into a steps-based architecture within the `llm_decorator` module, improving code clarity and extensibility. 43 | 44 | 3. **Type System Enhancement**: Introduced new type support modules including decorator types and multimodal type support, expanding framework capabilities. 45 | 46 | 4. **Type Resolution System Refactoring**: Comprehensive refactoring of the type resolution system to enhance functionality support and improve type inference accuracy. 47 | 48 | ### Features 49 | 50 | 1. **Enhanced Tool Call Execution**: Improved tool call execution mechanism with extended support for multimodal interactions, enabling richer LLM interactions. 51 | 52 | 2. **Multimodal Type Support**: Added comprehensive multimodal type support throughout the framework for better handling of diverse content types. 53 | 54 | ### Bug Fixes 55 | 56 | 1. Fixed system prompt nesting issues when building multi-model content. 57 | 58 | ### Testing 59 | 60 | Added extensive test coverage for refactored modules to ensure stability and reliability. 61 | 62 | --- 63 | 64 | ## 0.3.2.beta2 Release Notes 65 | 66 | 1. Remove dependence: `nest-asyncio` 67 | 68 | 2. Fix document error about `provider.json` 69 | 70 | ## 0.3.2.beta1 Release Notes 71 | 72 | 1. Better tool call tips in system prompt. 73 | 74 | 2. Better compound type annotations in prompt. 75 | 76 | ## 0.3.1 Release Notes 77 | 78 | 1. 
Added dynamic template parameter support: The `llm_function` decorator now supports passing `_template_params` to dynamically set DocString template parameters. This allows developers to create a single function that can adapt to various use cases, changing its behavior by passing different template parameters at call time. 79 | 80 | 2. Integrated Langfuse support: You can now configure `LANGFUSE_BASE_URL`, `LANGFUSE_SECRET_KEY`, and `LANGFUSE_PUBLIC_KEY` to send logs to Langfuse for tracing and analysis. 81 | 82 | 3. Added multilingual support: The English README has been updated, now supporting both Chinese and English. 83 | 84 | 4. Added parallel tool calling support. 85 | 86 | 5. Fully native async implementation: All decorators are now implemented with native async support, completely dropping any sync fallback. 87 | 88 | ## 0.2.13 Release Notes 89 | 90 | 1. Added the `return_mode` parameter (`Literal["text", "raw"]`) to the `llm_chat` decorator, allowing you to specify the return mode. You can now return either the raw response or text. This is designed to better display tool call information when developing Agents. 91 | 92 | 2. Improved code type annotations. 93 | 94 | ----- 95 | 96 | ## 0.2.12.2 Release Notes 97 | 98 | 1. Added a `py.typed` file to the framework package to support type checking. 
def get_detailed_type_description(type_hint: Any) -> str:
    """Render *type_hint* as a human-readable description.

    Pydantic models are expanded field-by-field via
    ``describe_pydantic_model``, ``List``/``Dict`` generics are described
    recursively, and anything else falls back to ``str()``.
    """
    if type_hint is None:
        return "未知类型"

    if isinstance(type_hint, type) and issubclass(type_hint, BaseModel):
        return describe_pydantic_model(type_hint)

    origin = getattr(type_hint, "__origin__", None)
    type_args = getattr(type_hint, "__args__", [])

    if origin in (list, List):
        if not type_args:
            return "List"
        return f"List[{get_detailed_type_description(type_args[0])}]"

    if origin in (dict, Dict):
        if len(type_args) < 2:
            return "Dict"
        key_desc = get_detailed_type_description(type_args[0])
        value_desc = get_detailed_type_description(type_args[1])
        return f"Dict[{key_desc}, {value_desc}]"

    return str(type_hint)
def build_type_description_xml(
    type_hint: Any,
    depth: int = 0,
    max_depth: int = 5,
    seen: Optional[set] = None,
) -> str:
    """Produce a recursive XML Schema description of *type_hint*.

    Nested ``BaseModel``, ``List``, ``Dict`` and ``Union`` members (with
    ``NoneType`` excluded) are fully expanded; *seen* and *max_depth* guard
    against reference cycles and runaway recursion.

    NOTE(review): the import is local, presumably to avoid a circular import
    with ``xml_utils`` — confirm against the module layout.
    """
    from SimpleLLMFunc.base.type_resolve.xml_utils import (
        pydantic_to_xml_schema as _render_schema,
    )

    return _render_schema(type_hint, depth, max_depth, seen)
95 | """ 96 | from typing import get_origin, get_args, Union as TypingUnion 97 | 98 | # Handle Optional[T] / Union[T, None] 99 | origin = get_origin(type_hint) 100 | if origin is TypingUnion: 101 | args = get_args(type_hint) 102 | # Extract first non-None type from Union 103 | for t in args: 104 | if t is not type(None): 105 | return _generate_primitive_example(t) # Recursively check inner type 106 | 107 | # Primitive types 108 | if type_hint is str: 109 | return "example" 110 | if type_hint is int: 111 | return 123 112 | if type_hint is float: 113 | return 1.23 114 | if type_hint is bool: 115 | return True 116 | if type_hint is type(None): 117 | return None 118 | 119 | return None # Not a primitive, need recursive handling 120 | 121 | 122 | def generate_example_xml( 123 | type_hint: Any, 124 | depth: int = 0, 125 | max_depth: int = 5, 126 | seen: Optional[set] = None, 127 | ) -> str: 128 | """Generate an example XML string for the given type hint (recursive).""" 129 | from SimpleLLMFunc.base.type_resolve.xml_utils import generate_xml_example 130 | 131 | return generate_xml_example(type_hint, depth, max_depth, seen) 132 | 133 | -------------------------------------------------------------------------------- /examples/llm_chat_raw_tooluse_example.py: -------------------------------------------------------------------------------- 1 | """ 2 | 展示在 return_mode="raw" 下,@llm_chat 透传底层原始响应, 3 | 从而实时解析到工具调用(tool_calls / delta.tool_calls)。 4 | 5 | 运行前准备:请在 examples/provider.json 中配置兼容的提供方与模型。 6 | 建议选择支持函数调用/工具调用的 OpenAI 兼容模型。 7 | """ 8 | 9 | import asyncio 10 | import json 11 | import os 12 | from typing import Any, Dict, List, Optional, Tuple 13 | 14 | from SimpleLLMFunc import tool 15 | from SimpleLLMFunc.llm_decorator import llm_chat 16 | from SimpleLLMFunc.interface.openai_compatible import OpenAICompatible 17 | 18 | 19 | # ============ Provider & Interface ============ 20 | current_dir: str = os.path.dirname(os.path.abspath(__file__)) 21 | provider_json_path: str = 
os.path.join(current_dir, "provider.json") 22 | 23 | # 选用与现有示例一致的 key(可按需修改到你可用的 provider/model) 24 | VolcEngine_deepseek_v3_Interface = OpenAICompatible.load_from_json_file( 25 | provider_json_path 26 | )["openrouter"]["google/gemini-3-pro-preview"] 27 | 28 | 29 | # ============ 定义一个简单工具 ============ 30 | @tool(name="get_weather", description="获取指定城市的天气信息") 31 | async def get_weather(city: str) -> Dict[str, str]: 32 | """ 33 | 获取城市天气。 34 | 35 | Args: 36 | city: 城市名 37 | 38 | Returns: 39 | 包含温度/湿度/天气状况的字典 40 | """ 41 | return {"temperature": "28°C", "humidity": "65%", "condition": "Sunny"} 42 | 43 | 44 | # ============ 原始响应透传:流式示例 ============ 45 | @llm_chat( 46 | llm_interface=VolcEngine_deepseek_v3_Interface, 47 | toolkit=[get_weather], 48 | stream=True, 49 | return_mode="raw", 50 | ) 51 | async def chat_stream_raw(history: Optional[List[Dict[str, Any]]] = None, query: str = ""): 52 | """ 53 | 流式原始响应透传示例。 54 | 要求:若用户询问天气,请调用 get_weather 工具。 55 | """ 56 | pass 57 | 58 | 59 | # ============ 原始响应透传:非流式示例 ============ 60 | @llm_chat( 61 | llm_interface=VolcEngine_deepseek_v3_Interface, 62 | toolkit=[get_weather], 63 | stream=False, 64 | return_mode="raw", 65 | ) 66 | async def chat_nonstream_raw(history: Optional[List[Dict[str, Any]]] = None, query: str = ""): 67 | """ 68 | 非流式原始响应透传示例。 69 | 要求:若用户询问天气,请调用 get_weather 工具。 70 | """ 71 | pass 72 | 73 | 74 | def _print_stream_chunk(chunk: Any) -> None: 75 | """打印流式 chunk 中的文本增量与工具调用增量(若有)。""" 76 | if hasattr(chunk, "choices") and chunk.choices: 77 | choice = chunk.choices[0] 78 | # 文本增量 79 | if hasattr(choice, "delta") and choice.delta: 80 | delta = choice.delta 81 | if getattr(delta, "content", None): 82 | print(delta.content, end="") 83 | # 工具调用增量 84 | if getattr(delta, "tool_calls", None): 85 | try: 86 | print("\n[tool_calls(delta)]:", json.dumps(delta.tool_calls, default=str, ensure_ascii=False)) 87 | except Exception: 88 | print("\n[tool_calls(delta) detected]") 89 | 90 | 91 | def _print_nonstream_message(raw: Any) 
-> None: 92 | """打印非流式响应中的工具调用(若有)与文本。""" 93 | if not hasattr(raw, "choices") or not raw.choices: 94 | return 95 | msg = raw.choices[0].message 96 | # 文本 97 | content = getattr(msg, "content", None) 98 | if content: 99 | print(content) 100 | # 工具调用 101 | tool_calls = getattr(msg, "tool_calls", None) 102 | if tool_calls: 103 | try: 104 | print("[tool_calls]:", json.dumps(tool_calls, default=str, ensure_ascii=False)) 105 | except Exception: 106 | print("[tool_calls detected]") 107 | 108 | 109 | async def demo_stream() -> None: 110 | print("\n=== 流式原始响应透传(可观测 delta.tool_calls)===") 111 | history: List[Dict[str, Any]] = [] 112 | async for raw, messages in chat_stream_raw(history=history, query="请查询北京今天的天气,并给出建议"): 113 | # 流式阶段:多次接收 chunk(含 delta 与可能的 delta.tool_calls) 114 | _print_stream_chunk(raw) 115 | # 工具调用结束与最终应答后,messages 将包含 tool 轨迹 116 | print("\n--- 最终 messages 中的 tool 轨迹(节选) ---") 117 | for m in messages: 118 | if m.get("role") in ("assistant", "tool"): 119 | print(json.dumps(m, ensure_ascii=False)) 120 | 121 | 122 | async def demo_nonstream() -> None: 123 | print("\n=== 非流式原始响应透传(可观测 message.tool_calls)===") 124 | history: List[Dict[str, Any]] = [] 125 | async for raw, messages in chat_nonstream_raw(history=history, query="请查询上海今天的天气,并给出建议"): 126 | # 非流式阶段:首帧为 initial_response,含 message.tool_calls(如果模型触发工具) 127 | _print_nonstream_message(raw) 128 | print("\n--- 最终 messages 中的 tool 轨迹(节选) ---") 129 | for m in messages: 130 | if m.get("role") in ("assistant", "tool"): 131 | print(json.dumps(m, ensure_ascii=False)) 132 | 133 | 134 | async def main() -> None: 135 | await demo_stream() 136 | await demo_nonstream() 137 | 138 | 139 | if __name__ == "__main__": 140 | asyncio.run(main()) 141 | 142 | 143 | -------------------------------------------------------------------------------- /docs/source/locale/zh_CN/LC_MESSAGES/contributing.po: -------------------------------------------------------------------------------- 1 | # SOME DESCRIPTIVE TITLE. 
2 | # Copyright (C) 2025, Nijingzhe 3 | # This file is distributed under the same license as the SimpleLLMFunc 4 | # package. 5 | # FIRST AUTHOR , 2025. 6 | # 7 | #, fuzzy 8 | msgid "" 9 | msgstr "" 10 | "Project-Id-Version: SimpleLLMFunc \n" 11 | "Report-Msgid-Bugs-To: \n" 12 | "POT-Creation-Date: 2025-11-10 02:53+0800\n" 13 | "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" 14 | "Last-Translator: FULL NAME \n" 15 | "Language: zh_CN\n" 16 | "Language-Team: zh_CN \n" 17 | "Plural-Forms: nplurals=1; plural=0;\n" 18 | "MIME-Version: 1.0\n" 19 | "Content-Type: text/plain; charset=utf-8\n" 20 | "Content-Transfer-Encoding: 8bit\n" 21 | "Generated-By: Babel 2.17.0\n" 22 | 23 | #: ../../source/contributing.md:1 24 | msgid "贡献指南" 25 | msgstr "" 26 | 27 | #: ../../source/contributing.md:3 28 | msgid "感谢你对 SimpleLLMFunc 项目的兴趣!我们欢迎并鼓励社区贡献,无论是修复错误、改进文档,还是添加新功能。" 29 | msgstr "" 30 | 31 | #: ../../source/contributing.md:5 32 | msgid "如何贡献" 33 | msgstr "" 34 | 35 | #: ../../source/contributing.md:7 36 | msgid "提交问题(Issue)" 37 | msgstr "" 38 | 39 | #: ../../source/contributing.md:9 40 | msgid "" 41 | "如果你发现了问题或有新功能建议,请先在 [GitHub " 42 | "Issues](https://github.com/NiJingzhe/SimpleLLMFunc/issues) " 43 | "页面搜索相关内容,以确保你的问题或建议尚未被提出。如果没有相关内容,你可以创建一个新的 Issue,请务必:" 44 | msgstr "" 45 | 46 | #: ../../source/contributing.md:11 47 | msgid "使用清晰的标题描述问题" 48 | msgstr "" 49 | 50 | #: ../../source/contributing.md:12 51 | msgid "提供详细的问题描述或功能请求" 52 | msgstr "" 53 | 54 | #: ../../source/contributing.md:13 55 | msgid "如果是 bug,请提供复现步骤和环境信息" 56 | msgstr "" 57 | 58 | #: ../../source/contributing.md:14 59 | msgid "如果可能,包含代码示例或截图" 60 | msgstr "" 61 | 62 | #: ../../source/contributing.md:16 63 | msgid "提交代码更改(Pull Request)" 64 | msgstr "" 65 | 66 | #: ../../source/contributing.md:18 67 | msgid "Fork 项目仓库" 68 | msgstr "" 69 | 70 | #: ../../source/contributing.md:19 71 | msgid "创建你的功能分支 (`git checkout -b feature/amazing-feature`)" 72 | msgstr "" 73 | 74 | #: ../../source/contributing.md:20 75 | msgid "提交你的更改 (`git 
commit -m 'Add some amazing feature'`)" 76 | msgstr "" 77 | 78 | #: ../../source/contributing.md:21 79 | msgid "推送到分支 (`git push origin feature/amazing-feature`)" 80 | msgstr "" 81 | 82 | #: ../../source/contributing.md:22 83 | msgid "提交 Pull Request" 84 | msgstr "" 85 | 86 | #: ../../source/contributing.md:24 87 | msgid "开发流程" 88 | msgstr "" 89 | 90 | #: ../../source/contributing.md:26 91 | msgid "确保你已经设置好开发环境(见下文)" 92 | msgstr "" 93 | 94 | #: ../../source/contributing.md:27 95 | msgid "在开始工作前,请先同步最新的代码" 96 | msgstr "" 97 | 98 | #: ../../source/contributing.md:28 99 | msgid "为你的功能或修复编写测试用例" 100 | msgstr "" 101 | 102 | #: ../../source/contributing.md:29 103 | msgid "确保所有测试都通过" 104 | msgstr "" 105 | 106 | #: ../../source/contributing.md:30 107 | msgid "遵循项目的代码风格和约定" 108 | msgstr "" 109 | 110 | #: ../../source/contributing.md:32 111 | msgid "开发环境设置" 112 | msgstr "" 113 | 114 | #: ../../source/contributing.md:34 115 | msgid "依赖项" 116 | msgstr "" 117 | 118 | #: ../../source/contributing.md:36 119 | msgid "Python 3.10 或更高版本" 120 | msgstr "" 121 | 122 | #: ../../source/contributing.md:37 123 | msgid "Poetry (推荐的依赖管理工具)" 124 | msgstr "" 125 | 126 | #: ../../source/contributing.md:39 127 | msgid "安装开发依赖" 128 | msgstr "" 129 | 130 | #: ../../source/contributing.md:55 131 | msgid "代码规范" 132 | msgstr "" 133 | 134 | #: ../../source/contributing.md:57 135 | msgid "代码风格" 136 | msgstr "" 137 | 138 | #: ../../source/contributing.md:59 139 | msgid "" 140 | "我们使用 [PEP 8](https://www.python.org/dev/peps/pep-0008/) 作为 Python " 141 | "代码风格指南,使用 [Black](https://github.com/psf/black) 格式化器自动化格式化过程:" 142 | msgstr "" 143 | 144 | #: ../../source/contributing.md:65 145 | msgid "类型注解" 146 | msgstr "" 147 | 148 | #: ../../source/contributing.md:67 149 | msgid "我们鼓励使用类型注解以提高代码可读性和安全性。可以使用 Pylint 检查类型。" 150 | msgstr "" 151 | 152 | #: ../../source/contributing.md:69 153 | msgid "文档" 154 | msgstr "" 155 | 156 | #: ../../source/contributing.md:71 157 | msgid "所有公共 API 都应该有清晰的文档字符串" 158 | msgstr "" 
def process_tools(
    toolkit: Optional[List[Union[Tool, Callable[..., Awaitable[Any]]]]] = None,
    func_name: str = "unknown_function",
) -> Tuple[Optional[List[Dict[str, Any]]], Dict[str, Callable[..., Awaitable[Any]]]]:
    """Resolve a toolkit into API tool parameters plus a name -> coroutine map.

    Shared implementation used by both ``llm_chat_decorator`` and
    ``llm_function_decorator``. Accepts ``Tool`` instances and functions
    decorated with ``@tool``; every ``@tool``-decorated function is uniformly
    mapped to its underlying ``tool_obj.run`` coroutine.

    Args:
        toolkit: List of ``Tool`` objects and/or ``@tool``-decorated async
            functions. ``None`` or an empty list yields ``(None, {})``.
        func_name: Name of the calling LLM function, used in log messages.

    Returns:
        A ``(tool_param_for_api, tool_map)`` tuple:
            - ``tool_param_for_api``: serialized tool parameter list for the
              LLM API, or ``None`` when no valid tool was collected.
            - ``tool_map``: mapping from tool name to the async callable that
              executes it.

    Raises:
        TypeError: If a tool's ``run`` method (or the decorated function
            itself) is not ``async``, or if a ``@tool``-decorated function
            carries no valid ``Tool`` object on its ``_tool`` attribute.
    """
    if not toolkit:
        return None, {}

    tool_objects: List[Union[Tool, Callable[..., Awaitable[Any]]]] = []
    tool_map: Dict[str, Callable[..., Awaitable[Any]]] = {}

    for tool in toolkit:
        if isinstance(tool, Tool):
            # Plain Tool object.
            _process_tool_object(tool, func_name, tool_objects, tool_map)
        elif callable(tool) and hasattr(tool, "_tool"):
            # Function wrapped by the @tool decorator.
            _process_decorated_function(tool, func_name, tool_objects, tool_map)
        else:
            push_warning(
                f"LLM 函数 '{func_name}': 不支持的工具类型 {type(tool)},"
                "工具必须是 Tool 对象或被 @tool 装饰的函数",
                location=get_location(),
            )

    # Serialize the collected tools for the LLM API.
    tool_param_for_api: Optional[List[Dict[str, Any]]] = (
        Tool.serialize_tools(tool_objects) if tool_objects else None
    )

    push_debug(
        f"LLM 函数 '{func_name}' 加载了 {len(tool_objects)} 个工具",
        location=get_location(),
    )

    return tool_param_for_api, tool_map


def _process_tool_object(
    tool: Tool,
    func_name: str,
    tool_objects: List[Union[Tool, Callable[..., Awaitable[Any]]]],
    tool_map: Dict[str, Callable[..., Awaitable[Any]]],
) -> None:
    """Validate a ``Tool`` instance and register it (mutates both containers).

    Args:
        tool: The ``Tool`` instance to register.
        func_name: Caller name, used in error messages.
        tool_objects: Accumulator for tools to serialize (modified in place).
        tool_map: Name-to-coroutine mapping (modified in place).

    Raises:
        TypeError: If ``tool.run`` is not an ``async`` method.
    """
    if not inspect.iscoroutinefunction(tool.run):
        raise TypeError(
            f"LLM 函数 '{func_name}': Tool '{tool.name}' 必须实现 async run 方法"
        )
    tool_objects.append(tool)
    tool_map[tool.name] = tool.run


def _process_decorated_function(
    tool: Callable[..., Awaitable[Any]],
    func_name: str,
    tool_objects: List[Union[Tool, Callable[..., Awaitable[Any]]]],
    tool_map: Dict[str, Callable[..., Awaitable[Any]]],
) -> None:
    """Validate an ``@tool``-decorated function and register its Tool object.

    The decorated function is uniformly mapped to ``tool_obj.run``.

    Args:
        tool: The ``@tool``-decorated async function.
        func_name: Caller name, used in error messages.
        tool_objects: Accumulator for tools to serialize (modified in place).
        tool_map: Name-to-coroutine mapping (modified in place).

    Raises:
        TypeError: If the function is not ``async``, or its ``_tool``
            attribute is not a valid ``Tool`` object.
    """
    if not inspect.iscoroutinefunction(tool):
        raise TypeError(
            f"LLM 函数 '{func_name}': 被 @tool 装饰的函数 '{tool.__name__}' 必须是 async 函数"
        )

    tool_obj = getattr(tool, "_tool", None)
    # Was `assert isinstance(tool_obj, Tool)` — asserts are stripped under
    # `python -O`, and the documented contract is TypeError, so validate
    # explicitly and raise a real error instead.
    if not isinstance(tool_obj, Tool):
        raise TypeError(
            f"LLM 函数 '{func_name}': 被 @tool 装饰的函数 '{tool.__name__}' "
            "的 _tool 属性不是有效的 Tool 对象"
        )

    # Register the Tool object (used for serialization) ...
    tool_objects.append(tool_obj)

    # ... and map the tool name to its run coroutine.
    tool_map[tool_obj.name] = tool_obj.run
-------------------------------------------------------------------------------- /tests/test_base/test_type_resolve/test_multimodal.py: -------------------------------------------------------------------------------- 1 | """Tests for base.type_resolve.multimodal module.""" 2 | 3 | from __future__ import annotations 4 | 5 | from typing import List, Optional, Union 6 | 7 | import pytest 8 | 9 | from SimpleLLMFunc.base.type_resolve.multimodal import ( 10 | has_multimodal_content, 11 | is_multimodal_type, 12 | ) 13 | from SimpleLLMFunc.type.multimodal import ImgPath, ImgUrl, Text 14 | 15 | 16 | class TestIsMultimodalType: 17 | """Tests for is_multimodal_type function.""" 18 | 19 | def test_text_instance(self, text_content: Text) -> None: 20 | """Test Text instance detection.""" 21 | assert is_multimodal_type(text_content, Text) is True 22 | 23 | def test_img_url_instance(self, img_url: ImgUrl) -> None: 24 | """Test ImgUrl instance detection.""" 25 | assert is_multimodal_type(img_url, ImgUrl) is True 26 | 27 | def test_img_path_instance(self, img_path: ImgPath) -> None: 28 | """Test ImgPath instance detection.""" 29 | assert is_multimodal_type(img_path, ImgPath) is True 30 | 31 | def test_text_annotation(self, text_content: Text) -> None: 32 | """Test Text annotation detection.""" 33 | assert is_multimodal_type(text_content, Text) is True 34 | 35 | def test_img_url_annotation(self, img_url: ImgUrl) -> None: 36 | """Test ImgUrl annotation detection.""" 37 | assert is_multimodal_type(img_url, ImgUrl) is True 38 | 39 | def test_img_path_annotation(self, img_path: ImgPath) -> None: 40 | """Test ImgPath annotation detection.""" 41 | assert is_multimodal_type(img_path, ImgPath) is True 42 | 43 | def test_union_type_with_multimodal(self, text_content: Text) -> None: 44 | """Test Union type containing multimodal.""" 45 | assert is_multimodal_type(text_content, Union[str, Text]) is True 46 | 47 | def test_union_type_without_multimodal(self) -> None: 48 | """Test Union type 
without multimodal.""" 49 | assert is_multimodal_type("test", Union[str, int]) is False 50 | 51 | def test_list_of_multimodal_types(self) -> None: 52 | """Test List of multimodal types.""" 53 | assert is_multimodal_type([], List[Text]) is True 54 | assert is_multimodal_type([], List[ImgUrl]) is True 55 | assert is_multimodal_type([], List[ImgPath]) is True 56 | 57 | def test_list_with_multimodal_items(self, text_content: Text) -> None: 58 | """Test list containing multimodal items.""" 59 | assert is_multimodal_type([text_content], List[str]) is True 60 | 61 | def test_non_multimodal_type(self) -> None: 62 | """Test non-multimodal type.""" 63 | assert is_multimodal_type("test", str) is False 64 | assert is_multimodal_type(123, int) is False 65 | 66 | def test_optional_multimodal(self, text_content: Text) -> None: 67 | """Test Optional multimodal type.""" 68 | assert is_multimodal_type(text_content, Optional[Text]) is True 69 | 70 | 71 | class TestHasMultimodalContent: 72 | """Tests for has_multimodal_content function.""" 73 | 74 | def test_has_text_content(self, text_content: Text) -> None: 75 | """Test detecting Text content.""" 76 | arguments = {"text": text_content} 77 | type_hints = {"text": Text} 78 | assert has_multimodal_content(arguments, type_hints) is True 79 | 80 | def test_has_img_url_content(self, img_url: ImgUrl) -> None: 81 | """Test detecting ImgUrl content.""" 82 | arguments = {"image": img_url} 83 | type_hints = {"image": ImgUrl} 84 | assert has_multimodal_content(arguments, type_hints) is True 85 | 86 | def test_has_img_path_content(self, img_path: ImgPath) -> None: 87 | """Test detecting ImgPath content.""" 88 | arguments = {"image": img_path} 89 | type_hints = {"image": ImgPath} 90 | assert has_multimodal_content(arguments, type_hints) is True 91 | 92 | def test_no_multimodal_content(self) -> None: 93 | """Test detecting no multimodal content.""" 94 | arguments = {"text": "plain text", "number": 123} 95 | type_hints = {"text": str, "number": 
int} 96 | assert has_multimodal_content(arguments, type_hints) is False 97 | 98 | def test_exclude_params(self, text_content: Text) -> None: 99 | """Test excluding parameters from check.""" 100 | arguments = {"text": text_content, "history": []} 101 | type_hints = {"text": Text, "history": List} 102 | assert ( 103 | has_multimodal_content(arguments, type_hints, exclude_params=["history"]) 104 | is True 105 | ) 106 | assert ( 107 | has_multimodal_content(arguments, type_hints, exclude_params=["text"]) 108 | is False 109 | ) 110 | 111 | def test_union_type_in_arguments(self, text_content: Text) -> None: 112 | """Test Union type in arguments.""" 113 | arguments = {"content": text_content} 114 | type_hints = {"content": Union[str, Text]} 115 | assert has_multimodal_content(arguments, type_hints) is True 116 | 117 | def test_list_multimodal_in_arguments(self, text_content: Text) -> None: 118 | """Test List of multimodal in arguments.""" 119 | arguments = {"contents": [text_content]} 120 | type_hints = {"contents": List[Text]} 121 | assert has_multimodal_content(arguments, type_hints) is True 122 | 123 | def test_empty_arguments(self) -> None: 124 | """Test empty arguments.""" 125 | assert has_multimodal_content({}, {}) is False 126 | 127 | def test_missing_type_hint(self, text_content: Text) -> None: 128 | """Test argument without type hint.""" 129 | arguments = {"text": text_content} 130 | type_hints = {} 131 | # Should not raise error, just return False 132 | assert has_multimodal_content(arguments, type_hints) is False 133 | 134 | -------------------------------------------------------------------------------- /SimpleLLMFunc/llm_decorator/steps/chat/message.py: -------------------------------------------------------------------------------- 1 | """Step 3: Build chat messages for llm_chat.""" 2 | 3 | from __future__ import annotations 4 | 5 | from typing import Any, Dict, List, Optional, Union 6 | 7 | from SimpleLLMFunc.base.messages import build_multimodal_content 8 
# Parameter names recognized as carrying conversation history.
HISTORY_PARAM_NAMES: List[str] = ["history", "chat_history"]


def extract_conversation_history(
    arguments: Dict[str, Any],
    func_name: str,
    history_param_names: Optional[List[str]] = None,
) -> Optional[HistoryList]:
    """Locate and validate the conversation-history argument.

    Returns the history list when a recognized parameter name is present and
    holds a list of dicts; otherwise logs a warning and returns ``None``.
    """
    names = HISTORY_PARAM_NAMES if history_param_names is None else history_param_names

    found_name = next((name for name in names if name in arguments), None)
    if found_name is None:
        push_warning(
            f"LLM Chat '{func_name}' missing history parameter "
            f"(parameter name should be one of {names}). "
            "History will not be passed.",
            location=get_location(),
        )
        return None

    history = arguments[found_name]

    # The history must be a list of dict-shaped messages.
    if not isinstance(history, list) or not all(
        isinstance(item, dict) for item in history
    ):
        push_warning(
            f"LLM Chat '{func_name}' history parameter should be List[Dict[str, str]] type. "
            "History will not be passed.",
            location=get_location(),
        )
        return None

    return history


def build_chat_user_message_content(
    arguments: Dict[str, Any],
    type_hints: Dict[str, Any],
    has_multimodal: bool,
    exclude_params: List[str],
) -> Union[str, List[Dict[str, Any]]]:
    """Assemble the user-message payload from the call arguments."""
    if has_multimodal:
        return build_multimodal_content(
            arguments,
            type_hints,
            exclude_params=exclude_params,
        )

    # Plain-text path: one "name: value" entry per argument, excluded
    # parameters (e.g. the history) skipped.
    return "\n\t".join(
        f"{name}: {value}"
        for name, value in arguments.items()
        if name not in exclude_params
    )


def build_chat_system_prompt(
    docstring: str,
    tool_objects: Optional[List[Dict[str, Any]]],
) -> Optional[str]:
    """Compose the system prompt from the docstring plus tool descriptions.

    Returns ``None`` for an empty docstring. With tools present, a tool
    overview is prepended and the docstring is stripped.
    """
    if not docstring:
        return None

    if not tool_objects:
        return docstring

    tool_lines = "\n\t".join(
        f"- {entry['function']['name']}: {entry['function']['description']}"
        for entry in tool_objects
    )
    return (
        "\n\nYou can use the following tools flexibly according to the real case and tool description:\n\t"
        + tool_lines
        + "\n\n"
        + docstring.strip()
    )


def filter_history_messages(
    history: HistoryList,
    func_name: str,
) -> HistoryList:
    """Drop system messages and warn about malformed entries in *history*."""
    kept: HistoryList = []
    for message in history:
        well_formed = (
            isinstance(message, dict) and "role" in message and "content" in message
        )
        if not well_formed:
            push_warning(
                f"Skipping malformed history item: {message}",
                location=get_location(),
            )
            continue
        # System messages are supplied separately; silently exclude them.
        if message["role"] != "system":
            kept.append(message)
    return kept


def build_chat_messages(
    signature: FunctionSignature,
    toolkit: Optional[List[Union[Tool, Any]]],
    exclude_params: List[str],
) -> HistoryList:
    """Full pipeline turning a call signature into the chat message list."""
    messages: HistoryList = []

    # Resolve the toolkit into API params (the name->coroutine map is not
    # needed at message-building time).
    tool_param, _tool_map = process_tools(toolkit, signature.func_name)

    # System prompt: docstring plus optional tool descriptions.
    system_content = build_chat_system_prompt(signature.docstring, tool_param)
    if system_content:
        messages.append({"role": "system", "content": system_content})

    # Conversation history, with system entries filtered out.
    history = extract_conversation_history(
        signature.bound_args.arguments,
        signature.func_name,
    )
    if history:
        messages.extend(filter_history_messages(history, signature.func_name))

    # User message, built multimodal-aware when any argument is multimodal.
    multimodal = has_multimodal_content(
        signature.bound_args.arguments,
        signature.type_hints,
        exclude_params=exclude_params,
    )
    user_content = build_chat_user_message_content(
        signature.bound_args.arguments,
        signature.type_hints,
        multimodal,
        exclude_params,
    )
    if user_content:
        messages.append({"role": "user", "content": user_content})

    return messages
[examples/multi_modality_toolcall.py](https://github.com/NiJingzhe/SimpleLLMFunc/blob/master/examples/multi_modality_toolcall.py) 61 | 62 | 展示多模态功能的使用: 63 | - 图片 URL (`ImgUrl`) 的处理 64 | - 本地图片路径 (`ImgPath`) 的处理 65 | - 文本和图片的混合输入输出 66 | 67 | ## 供应商配置示例 68 | 69 | ### Provider 配置文件 70 | 71 | **文件**: [examples/provider.json](https://github.com/NiJingzhe/SimpleLLMFunc/blob/master/examples/provider.json) 72 | 73 | 示范 provider.json 的完整配置结构: 74 | - OpenAI 模型配置 75 | - 其他供应商的配置方式 76 | - API 密钥和速率限制设置 77 | 78 | ### Provider 模板 79 | 80 | **文件**: [examples/provider_template.json](https://github.com/NiJingzhe/SimpleLLMFunc/blob/master/examples/provider_template.json) 81 | 82 | 提供了一个可复用的配置模板: 83 | - 预配置的常见 LLM 供应商 84 | - 最佳实践的参数设置 85 | - 多个 API 密钥的配置方式 86 | 87 | ## 按功能分类的示例 88 | 89 | ### 文本处理 90 | - **文本分类**: 见 [llm_function_example.py](https://github.com/NiJingzhe/SimpleLLMFunc/blob/master/examples/llm_function_example.py) 91 | - **文本摘要**: 见 [llm_function_example.py](https://github.com/NiJingzhe/SimpleLLMFunc/blob/master/examples/llm_function_example.py) 92 | - **情感分析**: 见 [llm_function_example.py](https://github.com/NiJingzhe/SimpleLLMFunc/blob/master/examples/llm_function_example.py) 93 | 94 | ### 工具调用 95 | - **单个工具调用**: 见 [llm_function_example.py](https://github.com/NiJingzhe/SimpleLLMFunc/blob/master/examples/llm_function_example.py) 96 | - **多工具并行调用**: 见 [parallel_toolcall_example.py](https://github.com/NiJingzhe/SimpleLLMFunc/blob/master/examples/parallel_toolcall_example.py) 97 | 98 | ### 对话与 Agent 99 | - **基础聊天**: 见 [llm_chat_example.py](https://github.com/NiJingzhe/SimpleLLMFunc/blob/master/examples/llm_chat_example.py) 100 | - **带工具的聊天**: 见 [llm_chat_example.py](https://github.com/NiJingzhe/SimpleLLMFunc/blob/master/examples/llm_chat_example.py) 101 | - **多会话并发**: 见 [llm_chat_example.py](https://github.com/NiJingzhe/SimpleLLMFunc/blob/master/examples/llm_chat_example.py) 102 | 103 | ### 多模态处理 104 | - **图片分析**: 见 
[multi_modality_toolcall.py](https://github.com/NiJingzhe/SimpleLLMFunc/blob/master/examples/multi_modality_toolcall.py) 105 | - **混合输入输出**: 见 [multi_modality_toolcall.py](https://github.com/NiJingzhe/SimpleLLMFunc/blob/master/examples/multi_modality_toolcall.py) 106 | 107 | ## 快速运行示例 108 | 109 | ### 前置要求 110 | 1. 安装 SimpleLLMFunc: `pip install SimpleLLMFunc` 111 | 2. 配置 API 密钥(见 [快速开始](quickstart.md)) 112 | 3. 创建或编辑 `provider.json` 文件 113 | 114 | ### 运行方式 115 | 116 | ```bash 117 | # 进入 examples 目录 118 | cd examples 119 | 120 | # 运行基础 LLM 函数示例 121 | python llm_function_example.py 122 | 123 | # 运行聊天示例 124 | python llm_chat_example.py 125 | 126 | # 运行并行工具调用示例 127 | python parallel_toolcall_example.py 128 | 129 | # 运行多模态示例 130 | python multi_modality_toolcall.py 131 | ``` 132 | 133 | ## 完整的 Examples 目录 134 | 135 | 所有示例代码都位于仓库的 `examples/` 目录中: 136 | 137 | **仓库链接**: https://github.com/NiJingzhe/SimpleLLMFunc/tree/master/examples 138 | 139 | 在该目录中你可以找到: 140 | - 各种装饰器的使用示例 141 | - 不同 LLM 供应商的配置示例 142 | - 最佳实践的参考实现 143 | - 环境变量配置的示例 144 | 145 | ## 学习路径建议 146 | 147 | ### 初级用户 148 | 1. 阅读 [快速开始](quickstart.md) 文档 149 | 2. 运行 `llm_function_example.py` 150 | 3. 修改示例代码,尝试自己的 Prompt 151 | 152 | ### 中级用户 153 | 1. 学习 [llm_chat 装饰器文档](detailed_guide/llm_chat.md) 154 | 2. 运行 `llm_chat_example.py` 155 | 3. 尝试 `parallel_toolcall_example.py` 156 | 157 | ### 高级用户 158 | 1. 阅读 [LLM 接口层文档](detailed_guide/llm_interface.md) 159 | 2. 学习多模态处理:`multi_modality_toolcall.py` 160 | 3. 自定义 LLM 接口和工具系统 161 | 162 | ## 常见问题 163 | 164 | ### Q: 示例代码在哪里? 165 | A: 所有示例代码都在 GitHub 仓库的 `examples/` 目录中。你可以直接查看或下载运行。 166 | 167 | ### Q: 如何修改示例代码? 168 | A: 169 | 1. 克隆仓库:`git clone https://github.com/NiJingzhe/SimpleLLMFunc.git` 170 | 2. 编辑 `examples/` 目录中的文件 171 | 3. 运行修改后的代码 172 | 173 | ### Q: 示例是否支持所有 LLM 供应商? 174 | A: 示例代码使用 `provider.json` 配置,支持任何兼容 OpenAI API 的供应商。参考 `provider_template.json` 配置你的供应商。 175 | 176 | ### Q: 我遇到了问题,该怎么办? 177 | A: 178 | 1. 检查 [快速开始](quickstart.md) 中的配置部分 179 | 2. 
查看详细的 [使用指南](guide.md) 180 | 3. 在 GitHub 提交 Issue:https://github.com/NiJingzhe/SimpleLLMFunc/issues 181 | 182 | ## 贡献新示例 183 | 184 | 如果你想为项目贡献新的示例代码: 185 | 186 | 1. Fork 仓库 187 | 2. 在 `examples/` 目录中创建新文件 188 | 3. 遵循现有示例的代码风格和注释 189 | 4. 提交 Pull Request 190 | 191 | 详细信息见 [贡献指南](contributing.md)。 192 | 193 | ## 相关资源 194 | 195 | - **官方仓库**: https://github.com/NiJingzhe/SimpleLLMFunc 196 | - **完整文档**: https://simplellmfunc.readthedocs.io/ 197 | - **发布日志**: https://github.com/NiJingzhe/SimpleLLMFunc/releases 198 | - **问题反馈**: https://github.com/NiJingzhe/SimpleLLMFunc/issues 199 | -------------------------------------------------------------------------------- /tests/test_llm_decorator_steps/test_function/test_react.py: -------------------------------------------------------------------------------- 1 | """Tests for llm_decorator.steps.function.react module.""" 2 | 3 | from __future__ import annotations 4 | 5 | from unittest.mock import AsyncMock, MagicMock, patch 6 | 7 | import pytest 8 | 9 | from SimpleLLMFunc.llm_decorator.steps.function.react import ( 10 | check_response_content_empty, 11 | execute_llm_call, 12 | execute_react_loop, 13 | get_final_response, 14 | prepare_tools_for_execution, 15 | retry_llm_call, 16 | ) 17 | 18 | 19 | class TestPrepareToolsForExecution: 20 | """Tests for prepare_tools_for_execution function.""" 21 | 22 | @patch("SimpleLLMFunc.llm_decorator.steps.function.react.process_tools") 23 | def test_prepare_tools(self, mock_process_tools: MagicMock) -> None: 24 | """Test preparing tools for execution.""" 25 | mock_process_tools.return_value = ([{"name": "tool1"}], {"tool1": AsyncMock()}) 26 | result = prepare_tools_for_execution([], "test_func") 27 | mock_process_tools.assert_called_once_with([], "test_func") 28 | 29 | 30 | class TestExecuteLLMCall: 31 | """Tests for execute_llm_call function.""" 32 | 33 | @pytest.mark.asyncio 34 | @patch("SimpleLLMFunc.llm_decorator.steps.function.react.execute_llm") 35 | async def test_execute_call( 36 | 
class TestGetFinalResponse:
    """Tests for get_final_response function."""

    @pytest.mark.asyncio
    async def test_get_final_response(self) -> None:
        """The helper must yield only the LAST item of the async stream."""

        async def stream():
            # Three chunks; only the final one should be returned.
            for chunk in ("response1", "response2", "final_response"):
                yield chunk

        assert await get_final_response(stream()) == "final_response"
@patch("SimpleLLMFunc.llm_decorator.steps.function.react.extract_content_from_response") 99 | async def test_retry_success( 100 | self, 101 | mock_extract: MagicMock, 102 | mock_get_final: AsyncMock, 103 | mock_execute: AsyncMock, 104 | mock_llm_interface: Any, 105 | sample_messages: list, 106 | ) -> None: 107 | """Test retrying LLM call successfully.""" 108 | # extract_content_from_response 被调用3次: 109 | # 1. 第一次尝试(返回空) 110 | # 2. 第二次尝试(返回成功,触发break) 111 | # 3. 最终检查(返回成功) 112 | mock_extract.side_effect = ["", "success", "success"] 113 | mock_get_final.return_value = MagicMock() 114 | mock_execute.return_value = AsyncMock() 115 | 116 | result = await retry_llm_call( 117 | mock_llm_interface, 118 | sample_messages, 119 | None, 120 | {}, 121 | 5, 122 | 2, 123 | "test_func", 124 | ) 125 | 126 | assert mock_execute.call_count <= 3 # Initial + retries 127 | 128 | 129 | class TestExecuteReactLoop: 130 | """Tests for execute_react_loop function.""" 131 | 132 | @pytest.mark.asyncio 133 | @patch("SimpleLLMFunc.llm_decorator.steps.function.react.prepare_tools_for_execution") 134 | @patch("SimpleLLMFunc.llm_decorator.steps.function.react.execute_llm_call") 135 | @patch("SimpleLLMFunc.llm_decorator.steps.function.react.get_final_response") 136 | @patch("SimpleLLMFunc.llm_decorator.steps.function.react.check_response_content_empty") 137 | async def test_execute_react_loop( 138 | self, 139 | mock_check_empty: MagicMock, 140 | mock_get_final: AsyncMock, 141 | mock_execute: AsyncMock, 142 | mock_prepare: MagicMock, 143 | mock_llm_interface: Any, 144 | sample_messages: list, 145 | ) -> None: 146 | """Test executing ReAct loop.""" 147 | mock_prepare.return_value = (None, {}) 148 | mock_check_empty.return_value = False 149 | mock_get_final.return_value = MagicMock() 150 | mock_execute.return_value = AsyncMock() 151 | 152 | result = await execute_react_loop( 153 | mock_llm_interface, 154 | sample_messages, 155 | None, 156 | 5, 157 | {}, 158 | "test_func", 159 | ) 160 | 161 | assert 
result is not None 162 | 163 | -------------------------------------------------------------------------------- /SimpleLLMFunc/logger/context_manager.py: -------------------------------------------------------------------------------- 1 | """ 2 | 日志上下文管理模块 3 | 4 | 本模块提供异步安全的日志上下文管理功能,使用contextvars实现。 5 | 支持嵌套上下文和跨协程的日志上下文传递。 6 | """ 7 | 8 | import contextvars 9 | from contextlib import asynccontextmanager, contextmanager 10 | from typing import Any, Dict, Generator, AsyncGenerator, Optional 11 | import threading 12 | 13 | # 使用 contextvars 来管理日志上下文,支持异步和多线程环境 14 | _log_context: contextvars.ContextVar[Dict[str, Any]] = contextvars.ContextVar( 15 | "log_context", default={} 16 | ) 17 | _context_lock = threading.RLock() # 保留锁用于向后兼容,但主要逻辑会使用 contextvars 18 | 19 | # 用于表示默认trace_id的常量 20 | DEFAULT_TRACE_ID = "" 21 | 22 | 23 | def _merge_context(extra: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: 24 | """ 25 | 合并当前上下文和额外参数 26 | 27 | Args: 28 | extra: 额外参数字典 29 | 30 | Returns: 31 | 合并后的字典,包含当前上下文和额外参数 32 | 33 | Example: 34 | >>> current_context = _merge_context({"user_id": "123"}) 35 | >>> # current_context 包含了当前上下文 + user_id 36 | """ 37 | result = {} 38 | 39 | # 获取当前上下文 40 | current_context = _log_context.get({}) 41 | result.update(current_context) 42 | 43 | # 添加额外参数(如果有) 44 | if extra: 45 | result.update(extra) 46 | 47 | return result 48 | 49 | 50 | def get_current_trace_id() -> str: 51 | """ 52 | 获取当前上下文中的trace_id 53 | 54 | Returns: 55 | 当前上下文中的trace_id,如果不存在则返回空字符串 56 | 57 | Example: 58 | >>> trace_id = get_current_trace_id() 59 | >>> if trace_id: 60 | ... 
print(f"Current trace: {trace_id}") 61 | """ 62 | current_context = _log_context.get({}) 63 | return current_context.get("trace_id", DEFAULT_TRACE_ID) 64 | 65 | 66 | def get_current_context_attribute(key: str) -> Any: 67 | """ 68 | 获取当前上下文中的指定属性值 69 | 70 | Args: 71 | key: 属性名称 72 | 73 | Returns: 74 | 属性值,如果不存在则返回None 75 | 76 | Example: 77 | >>> user_id = get_current_context_attribute("user_id") 78 | >>> if user_id: 79 | ... print(f"User: {user_id}") 80 | """ 81 | current_context = _log_context.get({}) 82 | return current_context.get(key, None) 83 | 84 | 85 | def set_current_context_attribute(key: str, value: Any) -> None: 86 | """ 87 | 设置当前log上下文中某个属性的值 88 | 89 | Args: 90 | key: 属性名称 91 | value: 属性值 92 | 93 | Note: 94 | 对于已知的系统属性不会产生警告,对于新的属性会产生警告提示 95 | 96 | Example: 97 | >>> set_current_context_attribute("user_id", "12345") 98 | >>> set_current_context_attribute("execution_time", 0.123) 99 | """ 100 | current_context = _log_context.get({}) 101 | 102 | # 系统已知的属性,不需要警告 103 | KNOWN_SYSTEM_ATTRIBUTES = { 104 | "input_tokens", 105 | "output_tokens", 106 | "trace_id", 107 | "location", 108 | "execution_time", 109 | "model_name", 110 | "function_name", 111 | } 112 | 113 | if key not in current_context and key not in KNOWN_SYSTEM_ATTRIBUTES: 114 | from .core import push_warning 115 | push_warning( 116 | f"You are changing a never seen attribute in current log context: {key}" 117 | ) 118 | 119 | # 创建新的上下文字典 120 | new_context = current_context.copy() 121 | new_context[key] = value 122 | _log_context.set(new_context) 123 | 124 | 125 | @asynccontextmanager 126 | async def async_log_context(**kwargs: Any) -> AsyncGenerator[None, None]: 127 | """ 128 | 创建异步日志上下文,在上下文中的所有日志都会包含指定的字段 129 | 130 | 可以通过提供一些参数来指定在一层上下文中统一的属性值,并会被自动添加到log中 131 | 当context发生嵌套时,外层的属性并不会继承到内层,嵌套的上下文会以栈的形式被管理 132 | 133 | Args: 134 | **kwargs: 要添加到上下文的键值对 135 | 136 | Example: 137 | >>> async with async_log_context(trace_id="my_function_123", user_id="456"): 138 | ... 
push_info("处理用户请求") # 日志会自动包含trace_id和user_id 139 | 140 | Note: 141 | - 支持异步环境 142 | - 上下文是栈式的,后进先出 143 | - 支持GeneratorExit异常处理 144 | """ 145 | # 获取当前上下文 146 | current_context = _log_context.get({}) 147 | 148 | # 创建新的上下文,合并新的属性 149 | new_context = current_context.copy() 150 | new_context.update(kwargs) 151 | 152 | # 设置新的上下文 153 | token = _log_context.set(new_context) 154 | 155 | try: 156 | yield 157 | except GeneratorExit: 158 | # 处理异步生成器被提前关闭的情况 159 | # 直接重置上下文并重新抛出异常 160 | try: 161 | _log_context.reset(token) 162 | except (ValueError, RuntimeError): 163 | # 忽略上下文重置错误 164 | pass 165 | raise 166 | except Exception: 167 | # 处理其他异常 168 | try: 169 | _log_context.reset(token) 170 | except (ValueError, RuntimeError): 171 | # 忽略上下文重置错误 172 | pass 173 | raise 174 | else: 175 | # 正常完成时重置上下文 176 | try: 177 | _log_context.reset(token) 178 | except (ValueError, RuntimeError): 179 | # 忽略上下文重置错误 180 | pass 181 | 182 | 183 | @contextmanager 184 | def log_context(**kwargs: Any) -> Generator[None, None, None]: 185 | """ 186 | 创建日志上下文,在上下文中的所有日志都会包含指定的字段 187 | 188 | 可以通过提供一些参数来指定在一层上下文中统一的属性值,并会被自动添加到log中 189 | 当context发生嵌套时,外层的属性并不会继承到内层,嵌套的上下文会以栈的形式被管理 190 | 191 | Args: 192 | **kwargs: 要添加到上下文的键值对 193 | 194 | Example: 195 | >>> with log_context(trace_id="my_function_123", user_id="456"): 196 | ... 
push_info("处理用户请求") # 日志会自动包含trace_id和user_id 197 | 198 | Note: 199 | - 支持同步环境 200 | - 上下文是栈式的,后进先出 201 | - 异常安全的上下文管理 202 | """ 203 | # 获取当前上下文 204 | current_context = _log_context.get({}) 205 | 206 | # 创建新的上下文,合并新的属性 207 | new_context = current_context.copy() 208 | new_context.update(kwargs) 209 | 210 | # 设置新的上下文 211 | token = _log_context.set(new_context) 212 | 213 | try: 214 | yield 215 | finally: 216 | # 恢复原始上下文 217 | try: 218 | _log_context.reset(token) 219 | except ValueError: 220 | # 在某些边缘情况下,Context 可能在不同的任务中被重置 221 | # 这种情况下忽略 ValueError 是安全的 222 | pass 223 | -------------------------------------------------------------------------------- /SimpleLLMFunc/logger/formatters.py: -------------------------------------------------------------------------------- 1 | """ 2 | 日志格式化器模块 3 | 4 | 本模块包含了用于格式化日志输出的各种格式化器类。 5 | 支持JSON格式化和控制台彩色输出格式化。 6 | """ 7 | 8 | import json 9 | import logging 10 | from logging import LogRecord 11 | import sys 12 | from typing import Optional 13 | 14 | 15 | class JsonFormatter(logging.Formatter): 16 | """ 17 | JSON格式化器,将日志记录转换为结构化JSON格式 18 | 19 | 此格式化器将日志记录转换为JSON字符串,便于机器解析和日志分析。 20 | 包含了完整的日志信息,包括时间戳、级别、消息、代码位置等。 21 | 22 | Example: 23 | >>> formatter = JsonFormatter() 24 | >>> record = logging.LogRecord(...) 
class ConsoleFormatter(logging.Formatter):
    """Console log formatter with optional ANSI color output.

    Renders records for human reading: the entry is colorized by level
    (when enabled and stdout is a TTY), selected context attributes are
    appended on their own lines, and the whole entry is framed with
    ``=`` separator lines.

    Attributes:
        COLORS: Mapping of level name to ANSI escape sequence.
        SUPPORTTED_EXTRA_INFO: Extra record attributes rendered when present.
            (Attribute name kept as-is, typo included, for compatibility.)

    Example:
        >>> formatter = ConsoleFormatter(use_color=True)
        >>> record = logging.LogRecord(...)
        >>> formatted_output = formatter.format(record)
    """

    # ANSI escape codes per log level.
    COLORS = {
        "DEBUG": "\033[36m",  # cyan
        "INFO": "\033[32m",  # green
        "WARNING": "\033[33m",  # yellow
        "ERROR": "\033[31m",  # red
        "CRITICAL": "\033[35m",  # magenta
        "RESET": "\033[0m",  # reset
    }

    SUPPORTTED_EXTRA_INFO = ["trace_id", "location", "input_tokens", "output_tokens"]

    def __init__(
        self, use_color: bool = True, format_string: Optional[str] = None
    ) -> None:
        """Initialize the console formatter.

        Args:
            use_color: Enable colored output (default True).  Color is only
                actually emitted when stdout is a terminal.
            format_string: Custom format string; a standard one is used when
                None is given.
        """
        if format_string is None:
            format_string = (
                "%(asctime)s [%(levelname)s] %(name)s:%(lineno)d - %(message)s"
            )
        super().__init__(format_string)
        # Color only when requested AND stdout is an interactive terminal.
        self.use_color = use_color and sys.stdout.isatty()

    def format(self, record: LogRecord) -> str:
        """Format *record* for console output.

        Args:
            record: The log record to render.

        Returns:
            The rendered string: colored level line, optional extra context
            attributes, framed by ``=`` borders.
        """
        # Base rendering via the standard formatter.
        text = super().format(record)

        # Wrap the whole line in the level's color code, if enabled.
        if self.use_color:
            reset = self.COLORS["RESET"]
            color = self.COLORS.get(record.levelname, reset)
            text = f"{color}{text}{reset}"

        # Append any supported extra attributes that are present and truthy.
        extras = [
            f"{name}={getattr(record, name)}"
            for name in self.SUPPORTTED_EXTRA_INFO
            if getattr(record, name, "")
        ]
        if extras:
            text += "\n" + "\n".join(extras)

        border = "=" * 30
        return f"{border}\n{text}\n{border}"
def check_response_content_empty(response: Any, func_name: str) -> bool:
    """Return True when the LLM response carries no usable text content.

    Args:
        response: A chat-completion style response object, expected to expose
            ``choices[0].message.content``.
        func_name: Name of the decorated function; kept for interface
            parity with the sibling step helpers (unused here).

    Returns:
        True if the message content is missing, ``None`` or the empty string.

    Note:
        BUGFIX: the previous implementation only compared ``content == ""``,
        so a ``None`` content (which chat-completion APIs return e.g. for
        tool-call-only messages) was wrongly reported as non-empty and the
        retry path in the ReAct loop was skipped.
    """
    content: Any = ""
    if hasattr(response, "choices") and len(response.choices) > 0:
        message = response.choices[0].message
        content = message.content if message and hasattr(message, "content") else ""

    # Treat None and "" alike as "no content".
    return not content
async def execute_react_loop(
    llm_interface: LLM_Interface,
    messages: MessageList,
    toolkit: Optional[List[Union[Tool, Callable[..., Awaitable[Any]]]]],
    max_tool_calls: int,
    llm_kwargs: Dict[str, Any],
    func_name: str,
) -> Any:
    """Run the full ReAct loop for an llm_function call, with empty-response retry.

    Args:
        llm_interface: LLM backend used for the calls.
        messages: Conversation messages to send.
        toolkit: Optional tools the model may invoke.
        max_tool_calls: Upper bound on tool invocations per call.
        llm_kwargs: Extra keyword arguments forwarded to the LLM API.  The
            special key ``"retry_times"`` (default 2) is consumed here and
            NOT forwarded to the API.
        func_name: Name of the decorated function (for logging).

    Returns:
        The final LLM response object.

    Raises:
        ValueError: Propagated from retry_llm_call when the response content
            is still empty after all retries.
    """
    # BUGFIX: ``retry_times`` is a decorator-level option, not an LLM API
    # parameter.  It previously stayed inside ``llm_kwargs``, which (a)
    # leaked an unknown kwarg to every API call and (b) raised
    # ``TypeError: got multiple values for 'retry_times'`` when
    # retry_llm_call(..., retry_times=..., **llm_kwargs) ran.  Pop it from
    # a copy so the caller's dict is left untouched.
    llm_kwargs = dict(llm_kwargs)
    retry_times = llm_kwargs.pop("retry_times", 2)

    # 1. Prepare tools.
    tool_param, tool_map = prepare_tools_for_execution(toolkit, func_name)

    # 2. Execute the LLM call.
    response_stream = execute_llm_call(
        llm_interface=llm_interface,
        messages=messages,
        tools=tool_param,
        tool_map=tool_map,
        max_tool_calls=max_tool_calls,
        stream=False,
        **llm_kwargs,
    )

    # 3. Take the last response from the stream.
    final_response = await get_final_response(response_stream)

    # 4. Retry when the response carries no content.
    if check_response_content_empty(final_response, func_name):
        push_warning(
            f"Async LLM function '{func_name}' returned empty response content, "
            "will retry automatically.",
            location=get_location(),
        )

        # 5. Retry the LLM call.
        final_response = await retry_llm_call(
            llm_interface=llm_interface,
            messages=messages,
            tools=tool_param,
            tool_map=tool_map,
            max_tool_calls=max_tool_calls,
            retry_times=retry_times,
            func_name=func_name,
            **llm_kwargs,
        )

    # 6. Log the final response.
    push_debug(
        f"Async LLM function '{func_name}' received response "
        f"{json.dumps(final_response, default=str, ensure_ascii=False, indent=2)}",
        location=get_location(),
    )

    return final_response
class TestFilterHistoryMessages:
    """Tests for filter_history_messages function."""

    def test_filter_valid_messages(self) -> None:
        """User/assistant messages pass through untouched."""
        conversation = [
            {"role": "user", "content": "Hello"},
            {"role": "assistant", "content": "Hi"},
        ]
        filtered = filter_history_messages(conversation, "test_func")
        assert len(filtered) == 2

    def test_filter_system_messages(self) -> None:
        """System messages are dropped from the history."""
        conversation = [
            {"role": "system", "content": "System message"},
            {"role": "user", "content": "Hello"},
        ]
        filtered = filter_history_messages(conversation, "test_func")
        assert len(filtered) == 1
        assert filtered[0]["role"] == "user"
None: 109 | """Test filtering valid history messages.""" 110 | history = [ 111 | {"role": "user", "content": "Hello"}, 112 | {"role": "assistant", "content": "Hi"}, 113 | ] 114 | result = filter_history_messages(history, "test_func") 115 | assert len(result) == 2 116 | 117 | def test_filter_system_messages(self) -> None: 118 | """Test filtering out system messages.""" 119 | history = [ 120 | {"role": "system", "content": "System message"}, 121 | {"role": "user", "content": "Hello"}, 122 | ] 123 | result = filter_history_messages(history, "test_func") 124 | assert len(result) == 1 125 | assert result[0]["role"] == "user" 126 | 127 | 128 | class TestBuildChatMessages: 129 | """Tests for build_chat_messages function.""" 130 | 131 | @patch("SimpleLLMFunc.llm_decorator.steps.chat.message.process_tools") 132 | @patch("SimpleLLMFunc.llm_decorator.steps.chat.message.has_multimodal_content") 133 | def test_build_messages( 134 | self, mock_has_multimodal: Any, mock_process_tools: Any 135 | ) -> None: 136 | """Test building chat messages.""" 137 | mock_process_tools.return_value = (None, {}) 138 | mock_has_multimodal.return_value = False 139 | 140 | from SimpleLLMFunc.llm_decorator.steps.common.types import FunctionSignature 141 | import inspect 142 | 143 | def test_func(message: str) -> str: 144 | """Test function.""" 145 | return "result" 146 | 147 | sig = inspect.signature(test_func) 148 | bound = sig.bind("Hello") 149 | bound.apply_defaults() 150 | 151 | signature = FunctionSignature( 152 | func_name="test_func", 153 | trace_id="trace_123", 154 | bound_args=bound, 155 | signature=sig, 156 | type_hints={"message": str, "return": str}, 157 | return_type=str, 158 | docstring="Test function.", 159 | ) 160 | 161 | result = build_chat_messages(signature, None, ["history"]) 162 | assert len(result) >= 1 163 | 164 | -------------------------------------------------------------------------------- /docs/source/locale/zh_CN/LC_MESSAGES/detailed_guide/llm_chat.po: 
-------------------------------------------------------------------------------- 1 | # SOME DESCRIPTIVE TITLE. 2 | # Copyright (C) 2025, Nijingzhe 3 | # This file is distributed under the same license as the SimpleLLMFunc 4 | # package. 5 | # FIRST AUTHOR , 2025. 6 | # 7 | #, fuzzy 8 | msgid "" 9 | msgstr "" 10 | "Project-Id-Version: SimpleLLMFunc \n" 11 | "Report-Msgid-Bugs-To: \n" 12 | "POT-Creation-Date: 2025-12-08 01:12+0800\n" 13 | "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" 14 | "Last-Translator: FULL NAME \n" 15 | "Language: zh_CN\n" 16 | "Language-Team: zh_CN \n" 17 | "Plural-Forms: nplurals=1; plural=0;\n" 18 | "MIME-Version: 1.0\n" 19 | "Content-Type: text/plain; charset=utf-8\n" 20 | "Content-Transfer-Encoding: 8bit\n" 21 | "Generated-By: Babel 2.17.0\n" 22 | 23 | #: ../../source/detailed_guide/llm_chat.md:1 24 | msgid "llm_chat 装饰器" 25 | msgstr "" 26 | 27 | #: ../../source/detailed_guide/llm_chat.md:3 28 | msgid "" 29 | "本文档介绍 SimpleLLMFunc 库中的聊天装饰器 " 30 | "`llm_chat`。该装饰器专门用于实现与大语言模型的对话功能,支持多轮对话、历史记录管理和工具调用。" 31 | msgstr "" 32 | 33 | #: ../../source/detailed_guide/llm_chat.md:5 34 | msgid "llm_chat 装饰器概述" 35 | msgstr "" 36 | 37 | #: ../../source/detailed_guide/llm_chat.md:7 38 | msgid "装饰器作用" 39 | msgstr "" 40 | 41 | #: ../../source/detailed_guide/llm_chat.md:9 42 | msgid "`llm_chat` 装饰器用于构建对话式应用,特别适合以下场景:" 43 | msgstr "" 44 | 45 | #: ../../source/detailed_guide/llm_chat.md:11 46 | msgid "**多轮对话**: 自动管理对话历史,支持上下文连续性" 47 | msgstr "" 48 | 49 | #: ../../source/detailed_guide/llm_chat.md:12 50 | msgid "**流式响应**: 支持实时流式返回响应内容" 51 | msgstr "" 52 | 53 | #: ../../source/detailed_guide/llm_chat.md:13 54 | msgid "**智能助手**: 集成工具调用能力,让 LLM 可以执行外部操作" 55 | msgstr "" 56 | 57 | #: ../../source/detailed_guide/llm_chat.md:14 58 | msgid "**聊天机器人**: 适合构建实时交互的聊天应用" 59 | msgstr "" 60 | 61 | #: ../../source/detailed_guide/llm_chat.md:16 62 | msgid "主要功能特性" 63 | msgstr "" 64 | 65 | #: ../../source/detailed_guide/llm_chat.md:18 66 | msgid "**多轮对话支持**: 自动管理对话历史记录,保持上下文" 67 | 
msgstr "" 68 | 69 | #: ../../source/detailed_guide/llm_chat.md:19 70 | msgid "**流式响应**: 返回异步生成器,支持实时流式输出" 71 | msgstr "" 72 | 73 | #: ../../source/detailed_guide/llm_chat.md:20 74 | msgid "**工具集成**: 支持在对话中调用工具,扩展 LLM 的能力范围" 75 | msgstr "" 76 | 77 | #: ../../source/detailed_guide/llm_chat.md:21 78 | msgid "**灵活参数处理**: 智能处理历史记录参数和用户消息" 79 | msgstr "" 80 | 81 | #: ../../source/detailed_guide/llm_chat.md:22 82 | msgid "**完整的日志记录**: 与框架日志系统集成,自动追踪对话" 83 | msgstr "" 84 | 85 | #: ../../source/detailed_guide/llm_chat.md:24 86 | msgid "装饰器用法" 87 | msgstr "" 88 | 89 | #: ../../source/detailed_guide/llm_chat.md:26 90 | msgid "" 91 | "⚠️ **重要说明**:`llm_chat` 只能装饰 `async def` 定义的异步函数,返回的也是可 `await` " 92 | "的协程;请在异步上下文中调用,或在脚本入口使用 `asyncio.run()`。" 93 | msgstr "" 94 | 95 | #: ../../source/detailed_guide/llm_chat.md:28 96 | msgid "基本语法" 97 | msgstr "" 98 | 99 | #: ../../source/detailed_guide/llm_chat.md:52 100 | msgid "参数说明" 101 | msgstr "" 102 | 103 | #: ../../source/detailed_guide/llm_chat.md:54 104 | msgid "**llm_interface** (必需): LLM 接口实例,用于与大语言模型通信" 105 | msgstr "" 106 | 107 | #: ../../source/detailed_guide/llm_chat.md:55 108 | msgid "**toolkit** (可选): 工具列表,可以是 Tool 对象或被 @tool 装饰的函数" 109 | msgstr "" 110 | 111 | #: ../../source/detailed_guide/llm_chat.md:56 112 | msgid "**max_tool_calls** (可选): 最大工具调用次数,防止无限循环,默认为 5" 113 | msgstr "" 114 | 115 | #: ../../source/detailed_guide/llm_chat.md:57 116 | msgid "**stream** (可选): 是否启用流式模式,默认为 True" 117 | msgstr "" 118 | 119 | #: ../../source/detailed_guide/llm_chat.md:58 120 | msgid "**return_mode** (可选): 返回模式,可选值为 \"text\"(默认)或 \"raw\"" 121 | msgstr "" 122 | 123 | #: ../../source/detailed_guide/llm_chat.md:59 124 | msgid "****llm_kwargs**: 额外的关键字参数,将直接传递给 LLM 接口(如 temperature、top_p 等)" 125 | msgstr "" 126 | 127 | #: ../../source/detailed_guide/llm_chat.md:61 128 | msgid "返回值" 129 | msgstr "" 130 | 131 | #: ../../source/detailed_guide/llm_chat.md:63 132 | msgid "`llm_chat` 装饰的函数返回一个异步生成器,每次迭代返回:" 133 | msgstr "" 134 | 135 | #: 
../../source/detailed_guide/llm_chat.md:65 136 | msgid "`chunk` (str): 响应内容的一部分(流式模式)或完整响应(非流式)" 137 | msgstr "" 138 | 139 | #: ../../source/detailed_guide/llm_chat.md:66 140 | msgid "`updated_history` (List[Dict[str, str]]): 更新后的对话历史" 141 | msgstr "" 142 | 143 | #: ../../source/detailed_guide/llm_chat.md:68 144 | msgid "使用示例" 145 | msgstr "" 146 | 147 | #: ../../source/detailed_guide/llm_chat.md:70 148 | msgid "示例 1: 基础聊天助手" 149 | msgstr "" 150 | 151 | #: ../../source/detailed_guide/llm_chat.md:72 152 | msgid "最简单的对话助手实现:" 153 | msgstr "" 154 | 155 | #: ../../source/detailed_guide/llm_chat.md:110 156 | msgid "示例 2: 带工具调用的聊天助手" 157 | msgstr "" 158 | 159 | #: ../../source/detailed_guide/llm_chat.md:112 160 | msgid "展示如何在对话中使用工具:" 161 | msgstr "" 162 | 163 | #: ../../source/detailed_guide/llm_chat.md:172 164 | msgid "示例 3: 交互式多轮对话" 165 | msgstr "" 166 | 167 | #: ../../source/detailed_guide/llm_chat.md:174 168 | msgid "展示如何维护完整的对话会话:" 169 | msgstr "" 170 | 171 | #: ../../source/detailed_guide/llm_chat.md:243 172 | msgid "高级特性" 173 | msgstr "" 174 | 175 | #: ../../source/detailed_guide/llm_chat.md:245 176 | msgid "返回模式" 177 | msgstr "" 178 | 179 | #: ../../source/detailed_guide/llm_chat.md:247 180 | msgid "`return_mode` 参数控制返回的数据类型:" 181 | msgstr "" 182 | 183 | #: ../../source/detailed_guide/llm_chat.md:263 184 | msgid "并发聊天会话" 185 | msgstr "" 186 | 187 | #: ../../source/detailed_guide/llm_chat.md:265 188 | msgid "使用 `asyncio.gather` 处理多个并发的聊天会话:" 189 | msgstr "" 190 | 191 | #: ../../source/detailed_guide/llm_chat.md:306 192 | msgid "最佳实践" 193 | msgstr "" 194 | 195 | #: ../../source/detailed_guide/llm_chat.md:308 196 | msgid "1. 错误处理" 197 | msgstr "" 198 | 199 | #: ../../source/detailed_guide/llm_chat.md:322 200 | msgid "2. 超时控制" 201 | msgstr "" 202 | 203 | #: ../../source/detailed_guide/llm_chat.md:337 204 | msgid "3. 
历史记录限制" 205 | msgstr "" 206 | 207 | #: ../../source/detailed_guide/llm_chat.md:339 208 | msgid "为避免上下文过长,限制历史记录长度:" 209 | msgstr "" 210 | 211 | #: ../../source/detailed_guide/llm_chat.md:368 212 | msgid "4. 日志与调试" 213 | msgstr "" 214 | 215 | #: ../../source/detailed_guide/llm_chat.md:381 216 | msgid "常见问题" 217 | msgstr "" 218 | 219 | #: ../../source/detailed_guide/llm_chat.md:383 220 | msgid "Q: 如何保存和恢复对话历史?" 221 | msgstr "" 222 | 223 | #: ../../source/detailed_guide/llm_chat.md:407 224 | msgid "Q: 如何处理 LLM 拒绝或无效响应?" 225 | msgstr "" 226 | 227 | #: ../../source/detailed_guide/llm_chat.md:435 228 | msgid "通过这些示例和最佳实践,你可以构建功能强大的对话应用。`llm_chat` 装饰器提供了简洁而强大的方式来实现复杂的对话逻辑。" 229 | msgstr "" 230 | 231 | -------------------------------------------------------------------------------- /docs/source/locale/zh_CN/LC_MESSAGES/detailed_guide/config.po: -------------------------------------------------------------------------------- 1 | # SOME DESCRIPTIVE TITLE. 2 | # Copyright (C) 2025, Nijingzhe 3 | # This file is distributed under the same license as the SimpleLLMFunc 4 | # package. 5 | # FIRST AUTHOR , 2025. 
6 | # 7 | #, fuzzy 8 | msgid "" 9 | msgstr "" 10 | "Project-Id-Version: SimpleLLMFunc \n" 11 | "Report-Msgid-Bugs-To: \n" 12 | "POT-Creation-Date: 2025-11-10 02:53+0800\n" 13 | "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" 14 | "Last-Translator: FULL NAME \n" 15 | "Language: zh_CN\n" 16 | "Language-Team: zh_CN \n" 17 | "Plural-Forms: nplurals=1; plural=0;\n" 18 | "MIME-Version: 1.0\n" 19 | "Content-Type: text/plain; charset=utf-8\n" 20 | "Content-Transfer-Encoding: 8bit\n" 21 | "Generated-By: Babel 2.17.0\n" 22 | 23 | #: ../../source/detailed_guide/config.md:1 24 | msgid "配置文件说明" 25 | msgstr "" 26 | 27 | #: ../../source/detailed_guide/config.md:3 28 | msgid "`.env` 文件" 29 | msgstr "" 30 | 31 | #: ../../source/detailed_guide/config.md:5 32 | msgid "" 33 | "`.env` 文件用于存储环境变量,在本框架中主要用于配置日志相关设置。你可以在你项目最终的 `WORKING DIR` 下创建一个 `.env`" 34 | " 文件,或者直接在环境变量中设置这些值。" 35 | msgstr "" 36 | 37 | #: ../../source/detailed_guide/config.md:7 38 | msgid "环境变量配置" 39 | msgstr "" 40 | 41 | #: ../../source/detailed_guide/config.md:21 42 | msgid "支持的环境变量" 43 | msgstr "" 44 | 45 | #: ../../source/detailed_guide/config.md 46 | msgid "环境变量" 47 | msgstr "" 48 | 49 | #: ../../source/detailed_guide/config.md 50 | msgid "说明" 51 | msgstr "" 52 | 53 | #: ../../source/detailed_guide/config.md 54 | msgid "可选值" 55 | msgstr "" 56 | 57 | #: ../../source/detailed_guide/config.md 58 | msgid "默认值" 59 | msgstr "" 60 | 61 | #: ../../source/detailed_guide/config.md 62 | msgid "`LOG_LEVEL`" 63 | msgstr "" 64 | 65 | #: ../../source/detailed_guide/config.md 66 | msgid "日志级别" 67 | msgstr "" 68 | 69 | #: ../../source/detailed_guide/config.md 70 | msgid "`DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`" 71 | msgstr "" 72 | 73 | #: ../../source/detailed_guide/config.md 74 | msgid "`WARNING`" 75 | msgstr "" 76 | 77 | #: ../../source/detailed_guide/config.md:27 78 | msgid "环境变量优先级" 79 | msgstr "" 80 | 81 | #: ../../source/detailed_guide/config.md:29 82 | msgid "" 83 | "注意,直接 `export` 环境变量会覆盖 `.env` 
文件中的设置,因此如果你在运行时设置了环境变量,这些设置将优先于 `.env` " 84 | "文件中的配置。" 85 | msgstr "" 86 | 87 | #: ../../source/detailed_guide/config.md:31 88 | msgid "优先级顺序(从高到低):" 89 | msgstr "" 90 | 91 | #: ../../source/detailed_guide/config.md:33 92 | msgid "运行时设置的环境变量 (如 `export LOG_LEVEL=DEBUG`)" 93 | msgstr "" 94 | 95 | #: ../../source/detailed_guide/config.md:34 96 | msgid "`.env` 文件中的配置" 97 | msgstr "" 98 | 99 | #: ../../source/detailed_guide/config.md:35 100 | msgid "框架默认值" 101 | msgstr "" 102 | 103 | #: ../../source/detailed_guide/config.md:37 104 | msgid "`provider.json` 文件" 105 | msgstr "" 106 | 107 | #: ../../source/detailed_guide/config.md:39 108 | msgid "" 109 | "`provider.json` 文件用于配置 LLM 接口的相关信息,包括 API 密钥、提供商信息、模型名称等。你可以在项目根目录创建一个 " 110 | "`provider.json` 文件,内容示例如下:" 111 | msgstr "" 112 | 113 | #: ../../source/detailed_guide/config.md:41 114 | msgid "配置文件结构" 115 | msgstr "" 116 | 117 | #: ../../source/detailed_guide/config.md:43 118 | msgid "provider.json 使用嵌套结构:`提供商 -> 模型名 -> 配置参数`" 119 | msgstr "" 120 | 121 | #: ../../source/detailed_guide/config.md:92 122 | msgid "配置参数说明" 123 | msgstr "" 124 | 125 | #: ../../source/detailed_guide/config.md 126 | msgid "参数" 127 | msgstr "" 128 | 129 | #: ../../source/detailed_guide/config.md 130 | msgid "类型" 131 | msgstr "" 132 | 133 | #: ../../source/detailed_guide/config.md 134 | msgid "示例" 135 | msgstr "" 136 | 137 | #: ../../source/detailed_guide/config.md 138 | msgid "`api_keys`" 139 | msgstr "" 140 | 141 | #: ../../source/detailed_guide/config.md 142 | msgid "数组" 143 | msgstr "" 144 | 145 | #: ../../source/detailed_guide/config.md 146 | msgid "API 密钥列表,支持多个密钥用于负载均衡" 147 | msgstr "" 148 | 149 | #: ../../source/detailed_guide/config.md 150 | msgid "`[\"key1\", \"key2\"]`" 151 | msgstr "" 152 | 153 | #: ../../source/detailed_guide/config.md 154 | msgid "`base_url`" 155 | msgstr "" 156 | 157 | #: ../../source/detailed_guide/config.md 158 | msgid "字符串" 159 | msgstr "" 160 | 161 | #: ../../source/detailed_guide/config.md 162 | msgid "API 
服务器地址" 163 | msgstr "" 164 | 165 | #: ../../source/detailed_guide/config.md 166 | msgid "`https://api.openai.com/v1`" 167 | msgstr "" 168 | 169 | #: ../../source/detailed_guide/config.md 170 | msgid "`model`" 171 | msgstr "" 172 | 173 | #: ../../source/detailed_guide/config.md 174 | msgid "模型名称,与提供商对应" 175 | msgstr "" 176 | 177 | #: ../../source/detailed_guide/config.md 178 | msgid "`gpt-3.5-turbo`" 179 | msgstr "" 180 | 181 | #: ../../source/detailed_guide/config.md 182 | msgid "`max_retries`" 183 | msgstr "" 184 | 185 | #: ../../source/detailed_guide/config.md 186 | msgid "数字" 187 | msgstr "" 188 | 189 | #: ../../source/detailed_guide/config.md 190 | msgid "最大重试次数,默认 3" 191 | msgstr "" 192 | 193 | #: ../../source/detailed_guide/config.md 194 | msgid "`5`" 195 | msgstr "" 196 | 197 | #: ../../source/detailed_guide/config.md 198 | msgid "`retry_delay`" 199 | msgstr "" 200 | 201 | #: ../../source/detailed_guide/config.md 202 | msgid "浮点数" 203 | msgstr "" 204 | 205 | #: ../../source/detailed_guide/config.md 206 | msgid "重试延迟(秒),默认 1.0" 207 | msgstr "" 208 | 209 | #: ../../source/detailed_guide/config.md 210 | msgid "`1.0`" 211 | msgstr "" 212 | 213 | #: ../../source/detailed_guide/config.md 214 | msgid "`rate_limit_capacity`" 215 | msgstr "" 216 | 217 | #: ../../source/detailed_guide/config.md 218 | msgid "令牌桶容量,默认 10" 219 | msgstr "" 220 | 221 | #: ../../source/detailed_guide/config.md 222 | msgid "`20`" 223 | msgstr "" 224 | 225 | #: ../../source/detailed_guide/config.md 226 | msgid "`rate_limit_refill_rate`" 227 | msgstr "" 228 | 229 | #: ../../source/detailed_guide/config.md 230 | msgid "令牌补充速率(tokens/秒),默认 1.0" 231 | msgstr "" 232 | 233 | #: ../../source/detailed_guide/config.md 234 | msgid "`3.0`" 235 | msgstr "" 236 | 237 | #: ../../source/detailed_guide/config.md:104 238 | msgid "加载和使用" 239 | msgstr "" 240 | 241 | #: ../../source/detailed_guide/config.md:106 242 | msgid "然后你可以使用这个json文件来加载所有的接口,例如:" 243 | msgstr "" 244 | 245 | #: 
../../source/detailed_guide/config.md:129 246 | msgid "最佳实践" 247 | msgstr "" 248 | 249 | #: ../../source/detailed_guide/config.md:131 250 | msgid "**多个 API 密钥**: 为了实现负载均衡和高可用性,建议为每个模型配置多个 API 密钥" 251 | msgstr "" 252 | 253 | #: ../../source/detailed_guide/config.md:132 254 | msgid "" 255 | "**不同模型的限流策略**: 根据不同的 API 限制配置不同的 `rate_limit_capacity` 和 " 256 | "`rate_limit_refill_rate`" 257 | msgstr "" 258 | 259 | #: ../../source/detailed_guide/config.md:133 260 | msgid "**环境区分**: 可以为开发环境和生产环境配置不同的 `max_retries` 和 `retry_delay`" 261 | msgstr "" 262 | 263 | -------------------------------------------------------------------------------- /docs/source/langfuse_integration.md: -------------------------------------------------------------------------------- 1 | # Langfuse 集成指南 2 | 3 | SimpleLLMFunc 框架已集成 Langfuse 可观测性平台,支持对 LLM 生成和工具调用进行全面追踪。 4 | 5 | ## 功能特性 6 | 7 | - **LLM 生成追踪**: 自动记录所有 LLM 调用的输入、输出、模型参数和使用统计 8 | - **工具调用观测**: 追踪工具调用的参数、执行结果和性能指标 9 | - **嵌套跨度支持**: 支持复杂的多层调用链追踪 10 | - **流式响应支持**: 兼容流式和非流式 LLM 响应 11 | - **优雅降级**: 在 Langfuse 不可用时自动禁用,不影响核心功能 12 | 13 | ## 安装和配置 14 | 15 | ### 1. 安装 Langfuse 16 | 17 | Langfuse 已包含在框架依赖中: 18 | 19 | ```bash 20 | # 如果使用 poetry 21 | poetry install 22 | 23 | # 如果使用 pip 24 | pip install langfuse 25 | ``` 26 | 27 | ### 2. 获取 Langfuse 凭据 28 | 29 | 1. 访问 [Langfuse](https://langfuse.com) 并注册账户 30 | 2. 创建新项目 31 | 3. 获取项目的 Public Key 和 Secret Key 32 | 33 | ### 3. 配置环境变量 34 | 35 | ```bash 36 | export LANGFUSE_PUBLIC_KEY="your_public_key" 37 | export LANGFUSE_SECRET_KEY="your_secret_key" 38 | export LANGFUSE_HOST="https://cloud.langfuse.com" # 可选 39 | export LANGFUSE_ENABLED="true" # 可选 40 | ``` 41 | 42 | ### 4. 初始化观测系统 43 | 44 | SimpleLLMFunc 会自动从环境变量读取 Langfuse 配置,无需额外初始化。框架内部会在需要时自动连接 Langfuse: 45 | 46 | ```python 47 | # 只需设置环境变量,框架会自动处理 Langfuse 连接 48 | # 不需要手动初始化观测器 49 | 50 | from SimpleLLMFunc import llm_function 51 | 52 | llm = ... 
# 你的 LLM 接口实例 53 | 54 | @llm_function(llm_interface=llm) 55 | async def my_function(text: str) -> str: 56 | """功能描述""" 57 | pass 58 | 59 | # 所有调用都会自动追踪到 Langfuse(如果已配置) 60 | result = await my_function("test") 61 | ``` 62 | 63 | 如需手动获取 Langfuse 客户端或配置信息,可使用以下方式: 64 | 65 | ```python 66 | from SimpleLLMFunc.observability import langfuse_config, get_langfuse_client 67 | 68 | # 获取配置对象 69 | config = langfuse_config 70 | print(f"Langfuse 已启用: {config.enabled}") 71 | 72 | # 获取 Langfuse 客户端(用于高级场景) 73 | client = get_langfuse_client() 74 | if client: 75 | # 手动追踪其他操作 76 | pass 77 | ``` 78 | 79 | ## 使用示例 80 | 81 | ### 基本 LLM 函数追踪 82 | 83 | ```python 84 | import asyncio 85 | from SimpleLLMFunc import llm_function, OpenAICompatible 86 | 87 | # 配置 LLM 接口 88 | llm = OpenAICompatible.load_from_json_file("provider.json")["openai"]["gpt-3.5-turbo"] 89 | 90 | @llm_function(llm_interface=llm) 91 | async def analyze_text(text: str) -> str: 92 | """分析文本内容并提供摘要""" 93 | pass 94 | 95 | # 使用函数 - 自动追踪到 Langfuse(如果已配置环境变量) 96 | async def main(): 97 | result = await analyze_text("这是一段需要分析的文本...") 98 | print(result) 99 | 100 | asyncio.run(main()) 101 | ``` 102 | 103 | ### 带工具调用的追踪 104 | 105 | ```python 106 | from SimpleLLMFunc import llm_function, tool 107 | 108 | @tool(name="calculate", description="执行数学计算") 109 | async def calculate(expression: str) -> dict: 110 | """执行数学表达式计算""" 111 | result = eval(expression) # 实际使用中应使用更安全的方法 112 | return {"expression": expression, "result": result} 113 | 114 | @llm_function( 115 | llm_interface=llm, 116 | toolkit=[calculate], 117 | max_tool_calls=3 118 | ) 119 | async def math_assistant(question: str) -> str: 120 | """数学助手,可以回答数学问题并进行计算""" 121 | pass 122 | 123 | # 使用 - 工具调用也会被自动追踪 124 | result = await math_assistant("计算 15 * 8 + 32 的结果") 125 | ``` 126 | 127 | ### 聊天对话追踪 128 | 129 | ```python 130 | from SimpleLLMFunc import llm_chat 131 | 132 | @llm_chat( 133 | llm_interface=llm, 134 | toolkit=[calculate], 135 | max_tool_calls=2 136 | ) 137 | async def 
chat_bot(message: str, history: list = None): 138 | """智能聊天机器人""" 139 | pass 140 | 141 | # 使用 - 每轮对话都会被追踪 142 | history = [] 143 | async for response, updated_history in chat_bot("你好,请帮我计算一些数学题", history): 144 | if response.strip(): 145 | print(response) 146 | history = updated_history 147 | ``` 148 | 149 | ## 追踪数据结构 150 | 151 | ### Generation 追踪 152 | 153 | 每个 LLM 调用会创建一个 Generation 观测,包含: 154 | 155 | - **输入**: 发送给 LLM 的消息列表 156 | - **输出**: LLM 的响应内容和工具调用 157 | - **模型信息**: 模型名称和参数 158 | - **使用统计**: Token 使用量和成本信息 159 | - **元数据**: 流式模式、可用工具数量等 160 | 161 | ### Tool 追踪 162 | 163 | 每个工具调用会创建一个 Tool 观测,包含: 164 | 165 | - **输入**: 工具调用的参数 166 | - **输出**: 工具执行的结果 167 | - **元数据**: 工具调用 ID、执行时间等 168 | 169 | ### 层级结构 170 | 171 | ``` 172 | Function Call (Span) 173 | ├── Initial Generation 174 | │ ├── Input: Messages 175 | │ ├── Output: Response + Tool Calls 176 | │ └── Usage: Token counts 177 | ├── Tool Call 1 (Tool) 178 | │ ├── Input: Parameters 179 | │ └── Output: Result 180 | ├── Tool Call 2 (Tool) 181 | │ ├── Input: Parameters 182 | │ └── Output: Result 183 | └── Follow-up Generation 184 | ├── Input: Updated Messages 185 | ├── Output: Final Response 186 | └── Usage: Token counts 187 | ``` 188 | 189 | ## 配置选项 190 | 191 | ### 环境变量 192 | 193 | | 变量名 | 描述 | 默认值 | 必需 | 194 | |--------|------|--------|------| 195 | | `LANGFUSE_PUBLIC_KEY` | Langfuse 公钥 | - | 是 | 196 | | `LANGFUSE_SECRET_KEY` | Langfuse 私钥 | - | 是 | 197 | | `LANGFUSE_HOST` | Langfuse 服务器地址 | `https://cloud.langfuse.com` | 否 | 198 | | `LANGFUSE_ENABLED` | 是否启用观测 | `true` | 否 | 199 | 200 | ### 高级配置 201 | 202 | 如需自定义 Langfuse 配置(例如在生产环境动态设置密钥),可在启动应用前修改环境变量: 203 | 204 | ```python 205 | import os 206 | 207 | # 在导入 SimpleLLMFunc 之前设置环境变量 208 | os.environ["LANGFUSE_PUBLIC_KEY"] = "your_public_key" 209 | os.environ["LANGFUSE_SECRET_KEY"] = "your_secret_key" 210 | os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com" 211 | os.environ["LANGFUSE_ENABLED"] = "true" 212 | 213 | # 然后导入并使用 SimpleLLMFunc 214 | from 
SimpleLLMFunc import llm_function 215 | 216 | # ... 后续代码 ... 217 | ``` 218 | 219 | ## 最佳实践 220 | 221 | ### 1. 环境分离 222 | 223 | ```bash 224 | # 开发环境 225 | export LANGFUSE_ENABLED="false" 226 | 227 | # 生产环境 228 | export LANGFUSE_ENABLED="true" 229 | export LANGFUSE_PUBLIC_KEY="prod_public_key" 230 | export LANGFUSE_SECRET_KEY="prod_secret_key" 231 | ``` 232 | 233 | ### 2. 错误处理 234 | 235 | 框架会自动处理 Langfuse 相关错误,但建议监控日志: 236 | 237 | ```python 238 | import logging 239 | 240 | # 启用 SimpleLLMFunc 日志 241 | logging.getLogger("SimpleLLMFunc").setLevel(logging.INFO) 242 | ``` 243 | 244 | ### 3. 性能考虑 245 | 246 | - Langfuse 调用是异步的,不会阻塞主要业务逻辑 247 | - 在高并发场景下,考虑设置合适的 Langfuse 客户端配置 248 | - 可以通过 `LANGFUSE_ENABLED=false` 临时禁用观测 249 | 250 | ### 4. 数据隐私 251 | 252 | - 确保敏感数据在发送到 Langfuse 前已脱敏 253 | - 考虑使用自托管的 Langfuse 实例处理敏感数据 254 | 255 | ## 故障排除 256 | 257 | ### 常见问题 258 | 259 | 1. **观测器未启用** 260 | ``` 261 | 警告: Langfuse 观测功能未启用 262 | ``` 263 | - 检查环境变量是否正确设置 264 | - 确认 Langfuse 包已安装 265 | 266 | 2. **连接失败** 267 | ``` 268 | Langfuse 初始化失败: Connection error 269 | ``` 270 | - 检查网络连接 271 | - 验证 API 密钥是否正确 272 | - 确认 LANGFUSE_HOST 设置正确 273 | 274 | 3. 
**数据未显示** 275 | - 检查 Langfuse 仪表板的项目设置 276 | - 确认使用的是正确的项目密钥 277 | - 等待数据同步(通常几秒钟) 278 | 279 | ### 调试模式 280 | 281 | ```python 282 | import logging 283 | 284 | # 启用详细日志 285 | logging.getLogger("SimpleLLMFunc").setLevel(logging.DEBUG) 286 | 287 | # 查看 Langfuse 相关日志 288 | logging.getLogger("langfuse").setLevel(logging.DEBUG) 289 | ``` 290 | 291 | ## 示例项目 292 | 293 | 查看 `examples/langfuse_integration_example.py` 获取完整的使用示例。 294 | 295 | ## 相关链接 296 | 297 | - [Langfuse 官方文档](https://langfuse.com/docs) 298 | -------------------------------------------------------------------------------- /tests/test_llm_decorator_steps/test_function/test_prompt.py: -------------------------------------------------------------------------------- 1 | """Tests for llm_decorator.steps.function.prompt module.""" 2 | 3 | from __future__ import annotations 4 | 5 | from unittest.mock import patch 6 | 7 | import pytest 8 | 9 | from SimpleLLMFunc.llm_decorator.steps.function.prompt import ( 10 | build_initial_prompts, 11 | build_parameter_type_descriptions, 12 | build_return_type_description, 13 | build_text_messages, 14 | ) 15 | 16 | 17 | class TestBuildParameterTypeDescriptions: 18 | """Tests for build_parameter_type_descriptions function.""" 19 | 20 | def test_build_descriptions(self) -> None: 21 | """Test building parameter type descriptions.""" 22 | param_type_hints = {"param1": str, "param2": int} 23 | result = build_parameter_type_descriptions(param_type_hints) 24 | assert len(result) == 2 25 | assert any("param1" in desc for desc in result) 26 | assert any("param2" in desc for desc in result) 27 | 28 | def test_build_empty(self) -> None: 29 | """Test building descriptions from empty hints.""" 30 | result = build_parameter_type_descriptions({}) 31 | assert result == [] 32 | 33 | 34 | class TestBuildReturnTypeDescription: 35 | """Tests for build_return_type_description function.""" 36 | 37 | def test_build_str_type(self) -> None: 38 | """Test building description for str type.""" 39 | result = 
build_return_type_description(str) 40 | assert "str" in result.lower() or "string" in result.lower() 41 | 42 | def test_build_none_type(self) -> None: 43 | """Test building description for None type.""" 44 | result = build_return_type_description(None) 45 | assert "未知" in result or "unknown" in result.lower() 46 | 47 | def test_build_pydantic_model_type(self, sample_pydantic_model) -> None: 48 | """Test building description for Pydantic model type.""" 49 | result = build_return_type_description(sample_pydantic_model) 50 | assert "XML Schema" in result or "xml" in result.lower() 51 | assert "Example XML" in result or "example" in result.lower() 52 | assert "SampleModel" in result 53 | 54 | def test_build_list_type(self) -> None: 55 | """Test building description for List type.""" 56 | from typing import List 57 | result = build_return_type_description(List[str]) 58 | assert "XML Schema" in result or "xml" in result.lower() 59 | assert "Example XML" in result or "example" in result.lower() 60 | 61 | def test_build_dict_type(self) -> None: 62 | """Test building description for Dict type.""" 63 | from typing import Dict 64 | result = build_return_type_description(Dict[str, int]) 65 | assert "XML Schema" in result or "xml" in result.lower() 66 | assert "Example XML" in result or "example" in result.lower() 67 | 68 | def test_build_primitive_types(self) -> None: 69 | """Test building description for primitive types.""" 70 | result_int = build_return_type_description(int) 71 | assert "int" in result_int.lower() or "integer" in result_int.lower() 72 | 73 | result_float = build_return_type_description(float) 74 | assert "float" in result_float.lower() or "number" in result_float.lower() 75 | 76 | result_bool = build_return_type_description(bool) 77 | assert "bool" in result_bool.lower() or "boolean" in result_bool.lower() 78 | 79 | 80 | class TestBuildTextMessages: 81 | """Tests for build_text_messages function.""" 82 | 83 | def test_build_messages(self) -> None: 84 | 
"""Test building text messages.""" 85 | result = build_text_messages( 86 | processed_docstring="Test function", 87 | param_type_descriptions=[" - param1: str"], 88 | return_type_description="str", 89 | arguments={"param1": "value1"}, 90 | system_template="Function: {function_description}", 91 | user_template="Params: {parameters}", 92 | ) 93 | assert len(result) == 2 94 | assert result[0]["role"] == "system" 95 | assert result[1]["role"] == "user" 96 | 97 | 98 | class TestBuildInitialPrompts: 99 | """Tests for build_initial_prompts function.""" 100 | 101 | @patch("SimpleLLMFunc.llm_decorator.steps.function.prompt.has_multimodal_content") 102 | def test_build_text_prompts( 103 | self, mock_has_multimodal: Any 104 | ) -> None: 105 | """Test building text prompts.""" 106 | mock_has_multimodal.return_value = False 107 | from SimpleLLMFunc.llm_decorator.steps.common.types import FunctionSignature 108 | import inspect 109 | 110 | def test_func(param1: str) -> str: 111 | """Test function.""" 112 | return "result" 113 | 114 | sig = inspect.signature(test_func) 115 | bound = sig.bind("test") 116 | bound.apply_defaults() 117 | 118 | signature = FunctionSignature( 119 | func_name="test_func", 120 | trace_id="trace_123", 121 | bound_args=bound, 122 | signature=sig, 123 | type_hints={"param1": str, "return": str}, 124 | return_type=str, 125 | docstring="Test function.", 126 | ) 127 | 128 | result = build_initial_prompts(signature) 129 | assert len(result) >= 2 130 | assert result[0]["role"] == "system" 131 | assert result[1]["role"] == "user" 132 | 133 | @patch("SimpleLLMFunc.llm_decorator.steps.function.prompt.has_multimodal_content") 134 | @patch("SimpleLLMFunc.llm_decorator.steps.function.prompt.build_multimodal_content") 135 | def test_build_multimodal_prompts( 136 | self, mock_build_multimodal: Any, mock_has_multimodal: Any 137 | ) -> None: 138 | """Test building multimodal prompts.""" 139 | mock_has_multimodal.return_value = True 140 | mock_build_multimodal.return_value = 
[ 141 | {"type": "text", "text": "test"} 142 | ] 143 | 144 | from SimpleLLMFunc.llm_decorator.steps.common.types import FunctionSignature 145 | import inspect 146 | 147 | def test_func(param1: str) -> str: 148 | """Test function.""" 149 | return "result" 150 | 151 | sig = inspect.signature(test_func) 152 | bound = sig.bind("test") 153 | bound.apply_defaults() 154 | 155 | signature = FunctionSignature( 156 | func_name="test_func", 157 | trace_id="trace_123", 158 | bound_args=bound, 159 | signature=sig, 160 | type_hints={"param1": str, "return": str}, 161 | return_type=str, 162 | docstring="Test function.", 163 | ) 164 | 165 | result = build_initial_prompts(signature) 166 | assert len(result) >= 2 167 | mock_build_multimodal.assert_called() 168 | 169 | --------------------------------------------------------------------------------