├── .gitignore
├── Dockerfile
├── LICENSE
├── Makefile
├── README.EN.md
├── README.md
├── build.sh
├── build_docker.sh
├── config.json
├── docker-compose.yml
├── docs
├── CHANGELOG.md
├── Cloudflare_Workers_AI申请使用流程.md
├── Gemini接入指南.md
├── asset
│ ├── cloudflare_work_ai.jpg
│ ├── coze2.jpg
│ ├── coze3.jpg
│ ├── coze4.jpg
│ ├── coze5.jpg
│ ├── coze6.jpg
│ ├── cozecn1.jpg
│ ├── deepseek1.jpg
│ ├── deepseek_price.jpg
│ ├── groqimg.png
│ ├── image-20240530233647666.png
│ ├── image-20240530233748804.png
│ ├── image-20240530233817058.png
│ ├── image-20240530233858173.png
│ ├── image-20240530233928186.png
│ ├── image-20240530234334239.png
│ ├── image-20240530235125913.png
│ ├── image-20240530235235481.png
│ ├── image-20240530235614503.png
│ ├── image-20240530235715433.png
│ ├── image-20240531000131857.png
│ ├── image-20240531000213111.png
│ ├── image-20240531000310097.png
│ ├── image-20240531000409511.png
│ ├── image-20240531000547449.png
│ ├── llamafamily.jpg
│ ├── qq_team.jpg
│ ├── zhipu1.jpg
│ ├── zijie_hsfz.jpg
│ └── 通义千问DashScope申请使用流程.md
├── build.sh_and_build_docker.sh_usage.md
├── build.sh编译说明.md
├── build_docker.sh使用说明.md
├── config.json详细说明.md
├── coze.cn申请API使用流程.md
├── deepseek模型申请使用流程.md
├── groq接入指南.md
├── llama_family接入指南.md
├── ollama接入指南.md
├── startup
│ ├── nohup_startup.md
│ └── systemd_startup.md
├── 兼容OpenAI模型协议接入指南.md
├── 在沉浸式翻译中使用simple-one-api.md
├── 智谱glm模型申请使用流程.md
├── 火山方舟大模型接入指南.md
├── 百度千帆speed和lite模型申请流程.md
├── 腾讯混元hunyuan-lite模型申请流程.md
├── 讯飞星火spark-lite模型申请流程.md
├── 通义千问DashScope申请使用流程.md
└── 零一万物接入指南.md
├── go.mod
├── go.sum
├── install_simple_one_api_service.sh
├── main.go
├── nohup_manage_simple_one_api.sh
├── pkg
├── adapter
│ ├── aliyun-dashscope-adapter
│ │ ├── dashscope_btype_openai.go
│ │ └── dashscope_common_openai.go
│ ├── azure_openai.go
│ ├── baidu_agentbuilder_adapter
│ │ └── agentbuilder_openai.go
│ ├── claude_openai.go
│ ├── claude_stream_resp_openai.go
│ ├── cozecn_openai.go
│ ├── cozecn_v3_openai.go
│ ├── dify_openai.go
│ ├── gemini_openai.go
│ ├── gemini_request_openai.go
│ ├── hunyuan_openai.go
│ ├── huoshanbot_openai.go
│ ├── minimax_openai.go
│ ├── ollama_openai.go
│ ├── openai_openai.go
│ ├── qianfan_openai.go
│ └── xinghuo_openai.go
├── apis
│ ├── models_handler.go
│ └── text2speech
│ │ └── text2speech_handler.go
├── config
│ ├── conf_def.go
│ ├── config.go
│ ├── config_json_check.go
│ ├── config_keyname.go
│ ├── default_config.go
│ ├── lb_strategy.go
│ └── proxy_strategy.go
├── embedding
│ ├── baiduqianfan
│ │ └── qianfan_embedding.go
│ ├── embeddings_handler.go
│ └── oai
│ │ ├── oai_embedding.go
│ │ └── oai_embedding_message.go
├── handler
│ ├── openai_agentbuilder_handler.go
│ ├── openai_aliyun_bailian_handler.go
│ ├── openai_aliyun_dashscope_handler.go
│ ├── openai_azure_handler.go
│ ├── openai_claude_handler.go
│ ├── openai_cozecn_handler.go
│ ├── openai_dify_handler.go
│ ├── openai_gemini_handler.go
│ ├── openai_groq_handler.go
│ ├── openai_handler.go
│ ├── openai_hunyuan_handler.go
│ ├── openai_huoshan_bot_handler.go
│ ├── openai_huoshan_handler.go
│ ├── openai_minimax_handler.go
│ ├── openai_ollama_handler.go
│ ├── openai_openai_handler.go
│ ├── openai_qianfan_handler.go
│ ├── openai_vertexai_handler.go
│ ├── openai_xinghuo_handler.go
│ └── openai_zhipu_handler.go
├── initializer
│ └── initializer.go
├── llm
│ ├── aliyun-dashscope
│ │ ├── common_btype
│ │ │ └── ds_common_btype_msg.go
│ │ └── commsg
│ │ │ ├── ds_com_request
│ │ │ ├── dashscope_prompt_request.go
│ │ │ └── ds_com_request.go
│ │ │ └── ds_com_resp
│ │ │ ├── ds_com_resp.go
│ │ │ └── ds_com_stream_resp.go
│ ├── baidu-qianfan
│ │ ├── qianfan.go
│ │ ├── qianfan_model2address.go
│ │ ├── qianfan_request.go
│ │ └── qianfan_response.go
│ ├── claude
│ │ ├── claude_request.go
│ │ ├── claude_response.go
│ │ └── claude_stream_response.go
│ ├── devplatform
│ │ ├── baidu_agentbuilder
│ │ │ ├── agentbuilder.go
│ │ │ ├── baidu_agentbuilder_conversation.go
│ │ │ └── baidu_agentbuilder_getanswer.go
│ │ ├── cozecn
│ │ │ ├── cozecn_request.go
│ │ │ └── cozecn_response.go
│ │ ├── cozecn_v3
│ │ │ ├── common
│ │ │ │ ├── cozecn_v3_http_request.go
│ │ │ │ └── request.go
│ │ │ ├── nonestream
│ │ │ │ ├── chat
│ │ │ │ │ ├── chat.go
│ │ │ │ │ └── response.go
│ │ │ │ ├── chat_message_list
│ │ │ │ │ ├── chat_message_list.go
│ │ │ │ │ └── response.go
│ │ │ │ ├── chat_retrieve
│ │ │ │ │ ├── chat_retrieve.go
│ │ │ │ │ ├── defines.go
│ │ │ │ │ └── response.go
│ │ │ │ └── chat_with_none_stream.go
│ │ │ └── streammode
│ │ │ │ ├── chat.go
│ │ │ │ └── response.go
│ │ └── dify
│ │ │ ├── chat_completion_response
│ │ │ └── chat_completion_response.go
│ │ │ ├── chat_message_request
│ │ │ ├── chat_message_request.go
│ │ │ └── chat_messages.go
│ │ │ └── chunk_chat_completion_response
│ │ │ └── chunk_chat_completion_response.go
│ ├── google-gemini
│ │ ├── gemini_request.go
│ │ └── gemini_response.go
│ ├── minimax
│ │ ├── minimax_request.go
│ │ └── minimax_response.go
│ ├── ollama
│ │ ├── ollma_request.go
│ │ └── olloma_response.go
│ ├── tecent-hunyuan
│ │ └── hunyuan_response.go
│ ├── xunfei-xinghuo
│ │ └── xinghuo.go
│ └── zijie-huoshan
│ │ ├── huoshan_request.go
│ │ └── huoshan_response.go
├── mycomdef
│ └── com_keyname.go
├── mycommon
│ ├── common_credentials.go
│ ├── common_err_resp.go
│ ├── common_model_params.go
│ ├── common_modeldetails.go
│ ├── common_msg.go
│ └── oai_message_utils.go
├── mylimiter
│ └── limiter.go
├── mylog
│ └── logger.go
├── mywebui
│ └── multi_model_call.go
├── openai
│ ├── openai_request.go
│ └── openai_response.go
├── simple_client
│ ├── simple_client.go
│ └── simple_stream_reader.go
├── translation
│ ├── llm_translate.go
│ ├── translate_handler_v1.go
│ └── translate_handler_v2.go
└── utils
│ ├── custom_transport_utils.go
│ ├── file_utils.go
│ ├── gin_utils.go
│ ├── http_request.go
│ ├── map_utils.go
│ ├── math_utils.go
│ ├── pointer_utils.go
│ ├── simple_custom_transport_utils.go
│ └── time_utils.go
├── quick_build.bat
├── quick_build.sh
├── samples
├── config.json
├── config_aliyun.json
├── config_azure_openai.json
├── config_cloudflare.json
├── config_cozecn.json
├── config_cozecom.json
├── config_deepseek.json
├── config_gemini.json
├── config_groq.json
├── config_hunyuan.json
├── config_huoshan.json
├── config_llama_familly.json
├── config_ollama.json
├── config_qianfan.json
├── config_siliconflow.cn.json
├── config_xinghuo.json
└── config_zhipu.json
├── static
├── index.html
├── jquery
│ ├── jquery.multiselect.css
│ └── jquery.multiselect.js
└── trans.html
└── test
└── simple_client_test
└── main.go
/.gitignore:
--------------------------------------------------------------------------------
1 | # 编译输出
2 | *.exe
3 | *.exe~
4 | *.dll
5 | *.so
6 | *.dylib
7 | *.test
8 | *.out
9 | simple-one-api/simple-one-api
10 | simple-one-api/build
11 | simple-one-api/myconfigs
12 | simple-one-api/docker_build_push.sh
13 |
14 | test/simple_client_test/simple_client_test
15 |
16 | ./simple-one-api
17 | ./build
18 | ./build/*
19 | ./myconfigs
20 | ./myconfigs/*
21 | ./docker_build_push.sh
22 | README.CN.EN.md
23 | build/
24 | simple-one-api
25 | docker_build_push_no_latest.sh
26 | image.png
27 |
28 | # 日志文件
29 | *.log
30 |
31 | # 临时文件
32 | *.tmp
33 | *.temp
34 | *.bak
35 | *.swp
36 |
37 |
38 | simple-one-api/README.CN.EN.md
39 | README.CN.EN.md
40 | # Go编译缓存目录
41 |
42 | ./simple-one-api
43 | /go-build/
44 | /.gocache/
45 | cmd/simple-one-api/build
46 | myconfigs
47 | myconfigs/config.json
48 | myconfigs/config_cozecn.json
49 | myconfigs/config_cloudflare.json
50 | cmd/simple-one-api/config_*.json
51 | cmd/simple-one-api/simple-one-api
52 | cmd/simple-one-api/build.zip
53 | docker_build_push.sh
54 |
55 | # 编译器生成的二进制文件
56 | main
57 |
58 | # IDE和编辑器配置文件
59 | .vscode/
60 | .idea/
61 | *.sublime-workspace
62 | *.sublime-project
63 |
64 | # 操作系统生成的文件
65 | .DS_Store
66 | Thumbs.db
67 |
68 | # 项目依赖
69 | vendor/
70 |
71 | # 环境配置文件
72 | .env
73 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # 使用一个轻量级的基础镜像
2 | FROM alpine:latest
3 |
4 | # 设置工作目录
5 | WORKDIR /app
6 |
7 | # 通过构建参数选择架构
8 | ARG ARCH=amd64
9 | COPY build/linux-${ARCH}/simple-one-api /app/simple-one-api
10 |
11 | # 复制当前目录的static目录内的内容到镜像中
12 | COPY static /app/static
13 |
14 | # 暴露应用运行的端口(假设为9090)
15 | EXPOSE 9090
16 |
17 | # 运行可执行文件
18 | CMD ["./simple-one-api"]
19 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 fruitbars
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # 默认禁用 UPX 压缩
4 | use_upx=0
5 | # 设置默认构建选项为 release
6 | build_option="release"
7 | # 默认不删除构建目录
8 | clean_up=0
9 |
10 | # 获取命令行参数
11 | while [[ "$#" -gt 0 ]]; do
12 | case "$1" in
13 | --enable-upx)
14 | use_upx=1
15 | shift
16 | ;;
17 | --show-platforms)
18 | echo "Available platforms:"
19 | echo " - darwin-amd64"
20 | echo " - darwin-arm64"
21 | echo " - windows-amd64"
22 | echo " - windows-arm64"
23 | echo " - linux-amd64"
24 | echo " - linux-arm64"
25 | echo " - freebsd-amd64"
26 | echo " - freebsd-arm64"
27 | exit 0
28 | ;;
29 | --development)
30 | build_option="dev"
31 | shift
32 | ;;
33 | --release)
34 | build_option="release"
35 | shift
36 | ;;
37 | --clean-up)
38 | clean_up=1
39 | shift
40 | ;;
41 | *)
42 | echo "Invalid option: $1"
43 | echo "Usage: $0 [--enable-upx] [--show-platforms] [--development | --release] [--clean-up]"
44 | exit 1
45 | ;;
46 | esac
47 | done
48 |
49 | # 根据指定的构建选项执行相应操作
50 | case $build_option in
51 | dev)
52 | echo "Building (Development)..."
53 | make dev use_upx=$use_upx clean_up=$clean_up
54 | ;;
55 | release)
56 | echo "Building and Releasing..."
57 | make release use_upx=$use_upx clean_up=$clean_up
58 | ;;
59 | *)
60 | echo "No build option specified or invalid option. Exiting."
61 | echo "Use --development for dev build or --release for release build."
62 | exit 1
63 | ;;
64 | esac
65 |
66 | echo "Build script finished."
--------------------------------------------------------------------------------
/build_docker.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # 检查是否传入版本号参数
4 | if [ -z "$1" ]; then
5 |   echo "请传入版本号作为参数,例如:./build_docker.sh v0.8.2"
6 | exit 1
7 | fi
8 |
9 | # 定义变量
10 | IMAGE_NAME="fruitbars/simple-one-api"
11 | TAG=$1
12 |
13 | # 构建镜像
14 | docker build -t $IMAGE_NAME:$TAG .
15 |
16 | # 打印完成信息
17 | echo "Docker image $IMAGE_NAME:$TAG built"
--------------------------------------------------------------------------------
/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "debug": false,
3 | "load_balancing": "random",
4 | "services": {
5 | "xinghuo": [
6 | {
7 | "models": ["spark-lite"],
8 | "enabled": true,
9 | "credentials": {
10 | "appid": "xxx",
11 | "api_key": "xxx",
12 | "api_secret": "xxx"
13 | }
14 | }
15 | ],
16 | "qianfan": [
17 | {
18 | "models": ["yi_34b_chat","ERNIE-Speed-8K","ERNIE-Speed-128K","ERNIE-Lite-8K","ERNIE-Lite-8K-0922","ERNIE-Tiny-8K"],
19 | "enabled": true,
20 | "credentials": {
21 | "api_key": "xxx",
22 | "secret_key": "xxx"
23 | }
24 | }
25 | ],
26 | "hunyuan": [
27 | {
28 | "models": ["hunyuan-lite"],
29 | "enabled": true,
30 | "credentials": {
31 | "secret_id": "xxx",
32 | "secret_key": "xxx"
33 | }
34 | }
35 | ],
36 | "openai": [
37 | {
38 | "models": ["deepseek-chat"],
39 | "enabled": true,
40 | "credentials": {
41 | "api_key": "xxx"
42 | },
43 | "server_url":"https://api.deepseek.com/v1"
44 | },
45 | {
46 | "models": ["@cf/meta/llama-2-7b-chat-int8"],
47 | "enabled": true,
48 | "credentials": {
49 | "api_key": "xxx"
50 | },
51 | "server_url": "https://api.cloudflare.com/client/v4/accounts/xxx/ai/v1/chat/completions"
52 | },
53 | {
54 | "models": ["glm-4","glm-3-turbo"],
55 | "enabled": true,
56 | "credentials": {
57 | "api_key": "xxx"
58 | },
59 | "server_url":"https://open.bigmodel.cn/api/paas/v4/chat/completions"
60 | }
61 | ],
62 | "minimax": [
63 | {
64 | "models": ["abab6-chat"],
65 | "enabled": true,
66 | "credentials": {
67 | "group_id": "1782658868262748467",
68 | "api_key": "xxx"
69 | },
70 | "server_url":"https://api.minimax.chat/v1/text/chatcompletion_pro"
71 | }
72 | ]
73 | }
74 | }
75 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.8'
2 |
3 | services:
4 | simple-one-api:
5 | image: fruitbars/simple-one-api
6 | container_name: simple-one-api
7 | ports:
8 | - "9090:9090"
9 | volumes:
10 | - /path/to/config.json:/app/config.json
11 | restart: unless-stopped
--------------------------------------------------------------------------------
/docs/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # simple-one-api更新日志
2 |
3 | ## v0.3 - 2024-06-04
4 | 1. 程序调整默认gin为release模式
5 | 2. 支持了星火的function call
6 | 3. 修复了abab6-chat的默认maxtokens太小的问题(自动调整为最大)
7 | 4. 千帆的模型maxtokens超出时,自动调整为区间范围内
--------------------------------------------------------------------------------
/docs/Cloudflare_Workers_AI申请使用流程.md:
--------------------------------------------------------------------------------
1 | # Cloudflare_Workers_AI申请使用流程
2 |
3 | ## Workers AI免费信息
4 |
5 | cloudflare_Workers_AI目前免费可以每天使用1万次,一个月可以30万次;测试版本的模型无限制。
6 |
7 | 参考文档https://developers.cloudflare.com/workers-ai/platform/pricing
8 |
9 | > Our free allocation allows anyone to use a total of 10,000 Neurons per day at no charge on our non-beta models. You can still enjoy unlimited usage on the beta models in the catalog until they graduate out of beta.
10 | > 我们的免费分配允许任何人每天在我们的非测试版模型上免费使用总计 10,000 个 Neurons。您仍然可以无限制地使用目录中的测试版型号,直到它们退出测试版为止。
11 |
12 |
13 |
14 | ## Workers AI支持OpenAI标准协议?
15 |
16 | 查看文档:[https://developers.cloudflare.com/workers-ai/configuration/open-ai-compatibility/](https://developers.cloudflare.com/workers-ai/configuration/open-ai-compatibility/)
17 | 后台地址:[https://dash.cloudflare.com/](https://dash.cloudflare.com/)
18 |
19 | Workers AI 还支持 `/v1/chat/completions`,也就是说Workers AI 兼容OpenAI的接口,因此可以直接在simple-one-api中使用。
20 |
21 | > OpenAI compatible endpoints
22 | > OpenAI 兼容端点
23 | >
24 | > Workers AI also supports OpenAI compatible API endpoints for `/v1/chat/completions` and `/v1/embeddings`. For more details, refer to [Configurations](https://developers.cloudflare.com/workers-ai/configuration/open-ai-compatibility/).
25 | > Workers AI 还支持 `/v1/chat/completions` 和 `/v1/embeddings` 的 OpenAI 兼容 API 端点。有关更多详细信息,请参阅配置。
26 |
27 |
28 |
29 | 而这篇文档详细描述了如何采用OpenAI兼容的接口进行调用:https://developers.cloudflare.com/workers-ai/configuration/open-ai-compatibility/
30 |
31 | ```shell
32 | curl --request POST \
33 | --url https://api.cloudflare.com/client/v4/accounts/{account_id}/ai/v1/chat/completions \
34 | --header 'Authorization: Bearer {api_token}' \
35 | --header 'Content-Type: application/json' \
36 | --data '
37 | {
38 | "model": "@cf/meta/llama-3-8b-instruct",
39 | "messages": [
40 | {
41 | "role": "user",
42 | "content": "how to build a wooden spoon in 3 short steps? give as short as answer as possible"
43 | }
44 | ]
45 | }
46 | '
47 | ```
48 |
49 | ## 在simple-one-api中配置
50 | 查看自己的api-tokens:[https://dash.cloudflare.com/profile/api-tokens](https://dash.cloudflare.com/profile/api-tokens)
51 |
52 | 从上面的示例参考,因此在可以按照下面的方式进行配置:
53 |
54 | ```json
55 | {
56 | "server_port":":9090",
57 | "load_balancing": "random",
58 | "services": {
59 | "openai": [
60 | {
61 | "models": ["@cf/meta/llama-3-8b-instruct"],
62 | "enabled": true,
63 | "credentials": {
64 | "api_key": "xxx"
65 | },
66 | "server_url": "https://api.cloudflare.com/client/v4/accounts/{account_id}/ai/v1"
67 | }
68 | ]
69 | }
70 | }
71 | ```
72 |
73 | 这里请替换自己的`api_key`和`server_url`,可以在cloudflare的后台查看,`Create a Workers AI API Token`对应的就是`api_key`,`Account ID`需要填入到server_url中。
74 | 
--------------------------------------------------------------------------------
/docs/Gemini接入指南.md:
--------------------------------------------------------------------------------
1 | # Gemini接入指南
2 |
3 | 文档地址:https://ai.google.dev/gemini-api/docs/api-overview
4 |
5 | 后台地址:https://aistudio.google.com/app/apikey
6 |
7 | ## 在simple-one-api中使用
8 |
9 | 新建一个`gemini`,填写上相关配置即可。注意Gemini免费版限制:15RPM(每分钟请求数);100万 TPM(每分钟令牌);1500 RPD(每天请求数)
10 | 因此可以在`simple-one-api`中进行`limit`设置。
11 |
12 | ```json
13 | {
14 | "server_port": ":9099",
15 |   "log_level": "prod",
16 | "load_balancing": "random",
17 | "services": {
18 | "gemini": [
19 | {
20 | "models": ["gemini-1.5-flash"],
21 | "enabled": true,
22 | "credentials": {
23 | "api_key": "xxx"
24 | },
25 | "limit": {
26 | "rpm": 15,
27 | "timeout":120
28 | }
29 | }
30 | ]
31 | }
32 | }
33 |
34 | ```
--------------------------------------------------------------------------------
/docs/asset/cloudflare_work_ai.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/cloudflare_work_ai.jpg
--------------------------------------------------------------------------------
/docs/asset/coze2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/coze2.jpg
--------------------------------------------------------------------------------
/docs/asset/coze3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/coze3.jpg
--------------------------------------------------------------------------------
/docs/asset/coze4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/coze4.jpg
--------------------------------------------------------------------------------
/docs/asset/coze5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/coze5.jpg
--------------------------------------------------------------------------------
/docs/asset/coze6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/coze6.jpg
--------------------------------------------------------------------------------
/docs/asset/cozecn1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/cozecn1.jpg
--------------------------------------------------------------------------------
/docs/asset/deepseek1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/deepseek1.jpg
--------------------------------------------------------------------------------
/docs/asset/deepseek_price.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/deepseek_price.jpg
--------------------------------------------------------------------------------
/docs/asset/groqimg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/groqimg.png
--------------------------------------------------------------------------------
/docs/asset/image-20240530233647666.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/image-20240530233647666.png
--------------------------------------------------------------------------------
/docs/asset/image-20240530233748804.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/image-20240530233748804.png
--------------------------------------------------------------------------------
/docs/asset/image-20240530233817058.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/image-20240530233817058.png
--------------------------------------------------------------------------------
/docs/asset/image-20240530233858173.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/image-20240530233858173.png
--------------------------------------------------------------------------------
/docs/asset/image-20240530233928186.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/image-20240530233928186.png
--------------------------------------------------------------------------------
/docs/asset/image-20240530234334239.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/image-20240530234334239.png
--------------------------------------------------------------------------------
/docs/asset/image-20240530235125913.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/image-20240530235125913.png
--------------------------------------------------------------------------------
/docs/asset/image-20240530235235481.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/image-20240530235235481.png
--------------------------------------------------------------------------------
/docs/asset/image-20240530235614503.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/image-20240530235614503.png
--------------------------------------------------------------------------------
/docs/asset/image-20240530235715433.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/image-20240530235715433.png
--------------------------------------------------------------------------------
/docs/asset/image-20240531000131857.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/image-20240531000131857.png
--------------------------------------------------------------------------------
/docs/asset/image-20240531000213111.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/image-20240531000213111.png
--------------------------------------------------------------------------------
/docs/asset/image-20240531000310097.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/image-20240531000310097.png
--------------------------------------------------------------------------------
/docs/asset/image-20240531000409511.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/image-20240531000409511.png
--------------------------------------------------------------------------------
/docs/asset/image-20240531000547449.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/image-20240531000547449.png
--------------------------------------------------------------------------------
/docs/asset/llamafamily.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/llamafamily.jpg
--------------------------------------------------------------------------------
/docs/asset/qq_team.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/qq_team.jpg
--------------------------------------------------------------------------------
/docs/asset/zhipu1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/zhipu1.jpg
--------------------------------------------------------------------------------
/docs/asset/zijie_hsfz.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/zijie_hsfz.jpg
--------------------------------------------------------------------------------
/docs/asset/通义千问DashScope申请使用流程.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fruitbars/simple-one-api/a370b4361f0df2e4a0ab375bec3fc28f0bac6d22/docs/asset/通义千问DashScope申请使用流程.md
--------------------------------------------------------------------------------
/docs/build.sh_and_build_docker.sh_usage.md:
--------------------------------------------------------------------------------
1 | # README
2 |
3 | ## build.sh使用说明
4 |
5 | ### 默认发布构建
6 | 运行以下命令将默认进行发布构建(不启用 UPX 压缩且不删除构建目录):
7 |
8 | ```bash
9 | ./build.sh
10 | ```
11 |
12 | ### 启用 UPX 压缩的发布构建,并删除构建目录
13 | 如果你想要启用 UPX 压缩并进行发布构建,同时在压缩后删除构建目录,可以运行:
14 |
15 | ```bash
16 | ./build.sh --enable-upx --clean-up
17 | ```
18 |
19 | ### 进行开发构建
20 | 运行以下命令进行开发构建(不启用 UPX 压缩且不删除构建目录):
21 |
22 | ```bash
23 | ./build.sh --development
24 | ```
25 |
26 | ### 显示支持的平台
27 | 如果需要查看支持的平台列表,可以使用:
28 |
29 | ```bash
30 | ./build.sh --show-platforms
31 | ```
32 |
33 |
34 | ## build_docker.sh使用说明
35 |
36 | 1. **赋予执行权限**:确保脚本具有执行权限。
37 |
38 | ```sh
39 | chmod +x build_docker.sh
40 | ```
41 |
42 | 2. **运行脚本**:运行脚本并传递版本号参数(例如 `v0.5`)。
43 |
44 | ```sh
45 | ./build_docker.sh v0.5
46 | ```
--------------------------------------------------------------------------------
/docs/build.sh编译说明.md:
--------------------------------------------------------------------------------
1 | # build.sh使用说明
2 |
3 | ## 默认发布构建
4 | 运行以下命令将默认进行发布构建(不启用 UPX 压缩且不删除构建目录):
5 |
6 | ```bash
7 | ./build.sh
8 | ```
9 |
10 | ## 启用 UPX 压缩的发布构建,并删除构建目录
11 | 如果你想要启用 UPX 压缩并进行发布构建,同时在压缩后删除构建目录,可以运行:
12 |
13 | ```bash
14 | ./build.sh --enable-upx --clean-up
15 | ```
16 |
17 | ## 进行开发构建
18 | 运行以下命令进行开发构建(不启用 UPX 压缩且不删除构建目录):
19 |
20 | ```bash
21 | ./build.sh --development
22 | ```
23 |
24 | ## 显示支持的平台
25 | 如果需要查看支持的平台列表,可以使用:
26 |
27 | ```bash
28 | ./build.sh --show-platforms
29 | ```
--------------------------------------------------------------------------------
/docs/build_docker.sh使用说明.md:
--------------------------------------------------------------------------------
1 |
2 | # build_docker.sh使用说明
3 |
4 | 1. **赋予执行权限**:确保脚本具有执行权限。
5 |
6 | ```sh
7 | chmod +x build_docker.sh
8 | ```
9 |
10 | 2. **运行脚本**:运行脚本并传递版本号参数(例如 `v0.5`)。
11 |
12 | ```sh
13 | ./build_docker.sh v0.5
14 | ```
--------------------------------------------------------------------------------
/docs/coze.cn申请API使用流程.md:
--------------------------------------------------------------------------------
1 | # coze.cn申请API使用流程
2 |
3 | 所有免费模型汇总:
4 |
5 | [https://github.com/fruitbars/simple-one-api/blob/main/README.md](https://github.com/fruitbars/simple-one-api/blob/main/README.md)
6 |
7 | ## 获取API流程
8 |
9 | 第一步:创建Bot
10 |
11 |
12 |
13 |
14 |
15 | 第二步:创建
16 |
17 |
18 |
19 |
20 |
21 | 第三步:选择模型
22 |
23 |
24 |
25 |
26 |
27 | 第四步:发布Bot,选择 跳过并直接发布
28 |
29 |
30 |
31 |
32 |
33 | 第五步:勾选 Bot as API ,点击发布
34 |
35 | 
36 |
37 | 第六步:成功提交发布,可以开始调用了
38 |
39 | 
40 |
41 |
42 |
43 | ## 调用流程
44 |
45 | 参考API的文档:[https://www.coze.cn/docs/developer_guides/coze_api_overview](https://www.coze.cn/docs/developer_guides/coze_api_overview)
46 |
47 | > 在发送请求前,请将示例中的以下参数值替换成真实数据:
48 | >
49 | > - {{Personal_Access_Token}}:生成的个人访问令牌。点击[这里](https://www.coze.cn/open/api)生成令牌。
50 | > - {{Bot_Id}}:Bot ID。进入 Bot 的开发页面,开发页面 URL 中 bot 参数后的数字就是 Bot ID。例如https://www.coze.cn/space/73428668341****/bot/73428668*****,bot ID 为73428668*****。
51 |
52 | models填写的是Bot_Id,而credentials中的token填写的是Personal_Access_Token;models中可以填写多个Bot_Id!
53 |
54 | ```
55 | {
56 | "server_port": ":9099",
57 | "load_balancing": "random",
58 | "services": {
59 | "cozecn": [
60 | {
61 | "models": ["xxx"],
62 | "enabled": true,
63 | "credentials": {
64 | "token": "xxx"
65 | }
66 | }
67 | ]
68 | }
69 | }
70 | ```
71 |
72 |
--------------------------------------------------------------------------------
/docs/deepseek模型申请使用流程.md:
--------------------------------------------------------------------------------
1 | # deepseek模型申请使用流程
2 | deepseek虽然目前不免费,但是很便宜,也是业界最早将大模型价格降下来的大模型,效果也不错。
3 | 
4 | ## deepseek API申请流程
5 | 进入到deepseek的后台:https://platform.deepseek.com/api_keys
6 | 点击 API keys,然后点击创建API key
7 | 
8 |
9 | 到这里,即获得了API key,接下来就可以使用了
10 |
11 | ## 在simple-one-api中配置使用
12 | deepseek的接口是兼容openai,因此在services中加一项openai,按照如下方式配置即可。
13 | ```json
14 | {
15 | "services": {
16 | "openai": [
17 | {
18 | "models": ["deepseek-chat"],
19 | "enabled": true,
20 | "credentials": {
21 | "api_key": "xxx"
22 | }
23 | }
24 | ]
25 | }
26 | }
27 | ```
28 | 如果自己搭建的deepseek,可以设置独立的server_url
29 | ```json
30 | {
31 | "services": {
32 | "openai": [
33 | {
34 | "models": ["deepseek-chat"],
35 | "enabled": true,
36 | "credentials": {
37 | "api_key": "xxx"
38 | },
39 | "server_url": "https://api.deepseek.com/v1"
40 | }
41 | ]
42 | }
43 | }
44 | ```
--------------------------------------------------------------------------------
/docs/groq接入指南.md:
--------------------------------------------------------------------------------
1 | # groq接入指南
2 |
3 |
4 | 文档地址:https://console.groq.com/docs/quickstart
5 |
6 | 后台地址:https://console.groq.com/keys
7 |
8 | 
9 |
10 | ## 在simple-one-api中使用
11 |
12 | groq的接口是兼容openai,因此在services中加一项openai,按照如下方式配置即可。
13 |
14 | ```json
15 | {
16 | "server_port": ":9099",
17 | "load_balancing": "random",
18 | "services": {
19 | "openai": [
20 | {
21 | "models": ["llama3-70b-8192","llama3-8b-8192","gemma-7b-it","mixtral-8x7b-32768"],
22 | "enabled": true,
23 | "credentials": {
24 | "api_key": "xxx"
25 | },
26 | "server_url":"https://api.groq.com/openai/v1"
27 | }
28 | ]
29 | }
30 | }
31 |
32 | ```
--------------------------------------------------------------------------------
/docs/llama_family接入指南.md:
--------------------------------------------------------------------------------
1 | # Llama Family接入指南
2 |
3 | 文档地址:[https://llama.family/docs/api](https://llama.family/docs/api)
4 |
5 | 密钥管理:[https://llama.family/docs/secret](https://llama.family/docs/secret)
6 |
7 | 目前Llama Family提供免费的调用次数,限制信息如下:
8 | > 速率限制:
9 | >
10 | > 1.每天 8-22 点:接口限速每分钟 20 次并发
11 | >
12 | > 2.每天 22-次日 8 点:接口限速每分钟 50 次并发
13 |
14 | 首先我们到Llama Family官网注册,并且到密钥管理后台获取到密钥。
15 | 
16 |
17 | ## 在simple-one-api中使用
18 | `Llama Family`兼容`openai`协议,因此只需要在`services`中的`openai`项中加入相关配置即可。配置好密钥`api_key`以及服务地址`server_url`
19 | ```json
20 | {
21 | "server_port": ":9099",
22 | "debug": false,
23 | "load_balancing": "random",
24 | "services": {
25 | "openai": [
26 | {
27 | "models": ["Atom-13B-Chat","Atom-7B-Chat","Atom-1B-Chat","Llama3-Chinese-8B-Instruct"],
28 | "enabled": true,
29 | "credentials": {
30 | "api_key": "xxx"
31 | },
32 | "server_url":"https://api.atomecho.cn/v1"
33 | }
34 | ]
35 | }
36 | }
37 |
38 | ```
--------------------------------------------------------------------------------
/docs/ollama接入指南.md:
--------------------------------------------------------------------------------
1 | # ollama接入使用指南
2 |
3 | ## 参考文档
4 |
5 | 开发文档地址:[https://github.com/ollama/ollama/blob/main/docs/api.md#generate-a-chat-completion](https://github.com/ollama/ollama/blob/main/docs/api.md#generate-a-chat-completion)
6 | 参考的是`Generate a chat completion`该部分描述来实现。
7 |
8 | ## 在simple-one-api中配置接入ollama
9 | 我们新建一个`ollama`的service,然后填入相关的配置信息
10 | ```json
11 | {
12 | "server_port": ":9099",
13 | "load_balancing": "random",
14 | "services": {
15 | "ollama": [
16 | {
17 | "models": ["llama2"],
18 | "enabled": true,
19 | "server_url":"http://127.0.0.1:11434/api/chat"
20 | }
21 | ]
22 | }
23 | }
24 |
25 | ```
26 |
--------------------------------------------------------------------------------
/docs/startup/nohup_startup.md:
--------------------------------------------------------------------------------
1 | # nohup启动
2 |
3 | 使用提供的`nohup_manage_simple_one_api.sh`脚本
4 | - 启动:
5 | ```bash
6 | ./nohup_manage_simple_one_api.sh start
7 | ```
8 | - 停止:
9 | ```bash
10 | ./nohup_manage_simple_one_api.sh stop
11 | ```
12 | - 重启:
13 | ```bash
14 | ./nohup_manage_simple_one_api.sh restart
15 | ```
--------------------------------------------------------------------------------
/docs/startup/systemd_startup.md:
--------------------------------------------------------------------------------
1 | # 使用 systemd 服务
2 |
3 | 您也可以使用我们提供的脚本 `install_simple_one_api_service.sh` 来设置服务。首先,您需要在脚本中指定应用的工作目录:
4 | ```bash
5 | WORKING_DIRECTORY="/path/to/your/application"
6 | ```
7 | 接着,为脚本文件设置执行权限,并执行安装:
8 | ```bash
9 | chmod +x install_simple_one_api_service.sh
10 | ./install_simple_one_api_service.sh
11 | ```
12 | 安装完成后,您可以通过以下 systemd 命令来管理服务:
13 | - 启动服务:
14 | ```bash
15 | sudo systemctl start simple-one-api
16 | ```
17 | - 停止服务:
18 | ```bash
19 | sudo systemctl stop simple-one-api
20 | ```
21 | - 重启服务:
22 | ```bash
23 | sudo systemctl restart simple-one-api
24 | ```
--------------------------------------------------------------------------------
/docs/智谱glm模型申请使用流程.md:
--------------------------------------------------------------------------------
1 | # 智谱glm模型申请使用流程
2 |
3 | ## glm API申请使用流程
4 |
5 | 默认智谱开放平台赠送18元金额,可以在财务总览中查看。
6 |
7 | 控制台地址:https://open.bigmodel.cn/usercenter/apikeys
8 |
9 | 
10 |
11 | 接下来,参考API文档地址来接入:https://open.bigmodel.cn/dev/api#language
12 |
13 | ## 在simple-one-api中配置使用
14 |
15 | 智谱的接口是兼容openai,因此在services中加一项openai,按照如下方式配置即可。
16 |
17 | ```json
18 | {
19 | "services": {
20 | "openai": [
21 | {
22 | "models": ["glm-4","glm-3-turbo"],
23 | "enabled": true,
24 | "credentials": {
25 | "api_key": "xxx"
26 | },
27 | "server_url":"https://open.bigmodel.cn/api/paas/v4/chat/completions"
28 | }
29 | ]
30 | }
31 | }
32 | ```
33 |
34 | 其中simple-one-api默认的服务地址是:https://open.bigmodel.cn/api/paas/v4/chat/completions,另外这里填写https://open.bigmodel.cn/api/paas/v4/chat/completions和https://open.bigmodel.cn/api/paas/v4都是可以的,simple-one-api中自动做了兼容。
35 |
36 | 自定义服务地址设置示例:
37 |
38 | ```json
39 |
40 | {
41 | "services": {
42 | "openai": [
43 | {
44 | "models": ["deepseek-chat"],
45 | "enabled": true,
46 | "credentials": {
47 | "api_key": "xxx"
48 | },
49 | "server_url":"https://open.bigmodel.cn/api/paas/v4/chat/completions"
50 | }
51 | ]
52 | }
53 | }
54 | ```
55 |
56 |
--------------------------------------------------------------------------------
/docs/火山方舟大模型接入指南.md:
--------------------------------------------------------------------------------
1 | # 火山方舟接入指南
2 |
3 | 火山方舟文档地址:[https://www.volcengine.com/docs/82379/1263482](https://www.volcengine.com/docs/82379/1263482)
4 |
5 | **接入步骤**
6 | 1. 用户前往火山方舟的模型推理页面创建推理接入点,调用API时通过设置model=ep-xxxxxxxxxx-yyyy进行调用。
7 | 2. 官方推荐火山IAM授权 (推荐),文档地址[https://www.volcengine.com/docs/82379/1263279](https://www.volcengine.com/docs/82379/1263279)
8 | 3. 获取到access_key和secret_key之后,就可以开始填入到config.json了
9 |
10 | `simple-one-api`对应的配置文件示例:
11 | ```json
12 | {
13 | "server_port": ":9099",
14 | "load_balancing": "random",
15 | "services": {
16 | "huoshan": [
17 | {
18 | "models": ["ep-20240612090709-hzjz5"],
19 | "enabled": true,
20 | "credentials": {
21 | "access_key": "xxx",
22 | "secret_key": "xxx"
23 | },
24 | "server_url":"https://ark.cn-beijing.volces.com/api/v3"
25 | }
26 | ]
27 | }
28 | }
29 |
30 | ```
31 | **设置模型别名**
32 | 如果需要设置对外的模型名称,可以设置参数`model_map`,从而起到别名作用;
33 | 例如这里`ep-20240612090709-hzjz5`别名为`doubao32k`,从而在调用`simple-one-api`的时候传入`doubao32k`即可;
34 | `simple-one-api`对应的配置文件示例:
35 |
36 |
37 | ```json
38 | {
39 | "server_port": ":9099",
40 | "load_balancing": "random",
41 | "services": {
42 | "huoshan": [
43 | {
44 | "models": ["doubao32k"],
45 | "enabled": true,
46 | "credentials": {
47 | "access_key": "xxx",
48 | "secret_key": "xxx"
49 | },
50 | "model_map":{
51 | "doubao32k": "ep-20240612090709-hzjz5"
52 | },
53 | "server_url":"https://ark.cn-beijing.volces.com/api/v3"
54 | }
55 | ]
56 | }
57 | }
58 |
59 | ```
--------------------------------------------------------------------------------
/docs/百度千帆speed和lite模型申请流程.md:
--------------------------------------------------------------------------------
1 | # 百度千帆speed和lite模型申请流程
2 |
3 | 到千帆平台上开通免费的模型[https://console.bce.baidu.com/qianfan/ais/console/onlineService](https://console.bce.baidu.com/qianfan/ais/console/onlineService)。注意开通需要实名认证!!!
4 |
5 | 
6 |
7 | 到应用接入中创建应用,这里就有了`AppID`、`API Key`、`Secret Key`
8 |
9 | 
10 |
11 | 也可以到体验中心体验[https://console.bce.baidu.com/qianfan/ais/console/onlineTest](https://console.bce.baidu.com/qianfan/ais/console/onlineTest)
12 |
13 | 
--------------------------------------------------------------------------------
/docs/腾讯混元hunyuan-lite模型申请流程.md:
--------------------------------------------------------------------------------
1 | # 腾讯混元hunyuan-lite模型申请流程
2 |
3 | 腾讯混元大模型接入地址[https://console.cloud.tencent.com/hunyuan/start](https://console.cloud.tencent.com/hunyuan/start)
4 |
5 | 
6 |
7 |
8 |
9 | 点击创建密钥,到新页面,新建密钥
10 |
11 | 
12 |
13 | 也可以到调试界面进行调试使用
14 |
15 | 
--------------------------------------------------------------------------------
/docs/讯飞星火spark-lite模型申请流程.md:
--------------------------------------------------------------------------------
1 | # 讯飞星火spark-lite模型申请流程
2 |
3 | spark-lite介绍页面[https://xinghuo.xfyun.cn/sparkapi?scr=true](https://xinghuo.xfyun.cn/sparkapi?scr=true)
4 |
5 | 
6 |
7 | 到控制台[https://console.xfyun.cn/services/cbm](https://console.xfyun.cn/services/cbm)查看appid、apikey、apisecret信息
8 |
9 | 
10 |
11 | 也可以到调试中心调试使用
12 |
13 | 
--------------------------------------------------------------------------------
/docs/通义千问DashScope申请使用流程.md:
--------------------------------------------------------------------------------
1 | # 通义千问DashScope申请使用流程
2 |
3 | 通义API是由DashScope提供的,本身做了openai的接口兼容:
4 | [https://help.aliyun.com/zh/dashscope/developer-reference/compatibility-of-openai-with-dashscope](https://help.aliyun.com/zh/dashscope/developer-reference/compatibility-of-openai-with-dashscope)
5 |
6 | 因此`simple-one-api`直接是支持的,使用流程如下:
7 |
8 | 1. `api_key`获取说明文档:[https://help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key](https://help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key)
9 |
10 | 2. 在`simple-one-api`中可以这样配置:
11 | ```json
12 | {
13 | "server_port": ":9099",
14 | "load_balancing": "random",
15 | "services": {
16 | "openai": [
17 | {
18 | "models": ["qwen-plus"],
19 | "enabled": true,
20 | "credentials": {
21 | "api_key": "xxx"
22 | },
23 | "server_url":"https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions"
24 | }
25 | ]
26 | }
27 | }
28 |
29 | ```
30 |
31 |
32 |
--------------------------------------------------------------------------------
/docs/零一万物接入指南.md:
--------------------------------------------------------------------------------
1 | # 零一万物接入指南
2 |
3 | 文档中心:https://platform.lingyiwanwu.com/docs
4 | API 服务地址:https://api.lingyiwanwu.com/v1/chat/completions
5 | Key管理:https://platform.lingyiwanwu.com/apikeys
6 |
7 | ## 零一万物接入simple-one-api
8 |
9 | 兼容OpenAI: 零一万物 API 与 OpenAI API 完全兼容,我们可以直接在simple-one-api中配置。
10 |
11 | ```json
12 | {
13 | "services": {
14 | "openai": [
15 | {
16 | "models": [
17 | "yi-large",
18 | "yi-spark",
19 | "yi-medium",
20 | "yi-medium-200k",
21 | "yi-large-turbo"
22 | ],
23 | "enabled": true,
24 | "credentials": {
25 | "api_key": "xxx"
26 | },
27 | "server_url": "https://api.lingyiwanwu.com/v1/chat/completions"
28 | }
29 | ]
30 | }
31 | }
32 | ```
--------------------------------------------------------------------------------
/install_simple_one_api_service.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Install simple-one-api as a systemd service: write the unit file, reload
# systemd, then start and enable the service. Requires sudo for the systemd
# steps.

# Variables — adjust WORKING_DIRECTORY to your install path before running.
SERVICE_NAME="simple-one-api"
SERVICE_FILE="/etc/systemd/system/$SERVICE_NAME.service"
WORKING_DIRECTORY="/path/to/your/application"
EXEC_START="$WORKING_DIRECTORY/simple-one-api"
LOG_FILE="$WORKING_DIRECTORY/simple-one-api.log"

# systemd unit file content.
SERVICE_CONTENT="[Unit]
Description=Simple One API Service
After=network.target

[Service]
Type=simple
WorkingDirectory=$WORKING_DIRECTORY
ExecStart=$EXEC_START
Restart=on-failure
StandardOutput=append:$LOG_FILE
StandardError=append:$LOG_FILE

[Install]
WantedBy=multi-user.target"

# Validate the working directory and binary before touching systemd.
if [ ! -d "$WORKING_DIRECTORY" ]; then
    echo "错误: 工作目录 $WORKING_DIRECTORY 不存在。请检查路径。"
    exit 1
fi

if [ ! -x "$EXEC_START" ]; then
    echo "错误: 可执行文件 $EXEC_START 不存在或不可执行。请检查路径。"
    exit 1
fi

# Write the unit file. Variables are quoted (previously unquoted, which
# breaks on paths containing spaces — ShellCheck SC2086).
echo "创建服务单元文件 $SERVICE_FILE"
echo "$SERVICE_CONTENT" | sudo tee "$SERVICE_FILE" > /dev/null

# Reload systemd so it picks up the new unit.
echo "重新加载 systemd 配置"
sudo systemctl daemon-reload

# Start the service and enable it at boot.
echo "启动 $SERVICE_NAME 服务"
sudo systemctl start "$SERVICE_NAME"

echo "启用 $SERVICE_NAME 服务在启动时自动运行"
sudo systemctl enable "$SERVICE_NAME"

echo "$SERVICE_NAME 服务已安装并启动"
53 |
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "github.com/gin-contrib/cors"
5 | "github.com/gin-gonic/gin"
6 | "go.uber.org/zap"
7 | "log"
8 | "net/http"
9 | "simple-one-api/pkg/apis"
10 | "simple-one-api/pkg/embedding"
11 | "simple-one-api/pkg/initializer"
12 | "simple-one-api/pkg/mylog"
13 | "simple-one-api/pkg/mywebui"
14 | "simple-one-api/pkg/translation"
15 | "strings"
16 |
17 | //"log"
18 | "os"
19 | "simple-one-api/pkg/config"
20 | "simple-one-api/pkg/handler"
21 | "time"
22 | )
23 |
24 | func main() {
25 | log.SetFlags(log.LstdFlags | log.Lshortfile)
26 |
27 | // 获取程序的第一个参数作为配置文件名
28 | var configName string
29 | if len(os.Args) > 1 {
30 | configName = os.Args[1]
31 | } else {
32 | configName = "config.json"
33 | }
34 |
35 | if err := initializer.Setup(configName); err != nil {
36 | return
37 | }
38 | defer initializer.Cleanup()
39 |
40 | // 创建一个 Gin 路由器实例
41 | r := gin.New()
42 | r.Use(gin.Recovery())
43 |
44 | // 配置 CORS 中间件
45 | r.Use(cors.New(cors.Config{
46 | AllowOrigins: []string{"*"}, // 允许所有来源,如果需要限制来源,可以将 "*" 替换为具体的 URL
47 | AllowMethods: []string{"GET", "POST", "PUT", "PATCH", "DELETE", "OPTIONS"},
48 | AllowHeaders: []string{"Origin", "Content-Type", "Accept", "Authorization", "Access-Control-Request-Private-Network"},
49 | ExposeHeaders: []string{"Content-Length", "Access-Control-Allow-Private-Network"},
50 | AllowCredentials: true,
51 | MaxAge: 12 * time.Hour,
52 | }))
53 |
54 | r.OPTIONS("/*path", func(c *gin.Context) {
55 | if c.GetHeader("Access-Control-Request-Private-Network") == "true" {
56 | c.Header("Access-Control-Allow-Private-Network", "true")
57 | }
58 | if c.GetHeader("Access-Control-Request-Credentials") == "true" {
59 | c.Header("Access-Control-Request-Credentials", "true")
60 | }
61 | c.Status(204)
62 | })
63 |
64 | mylog.Logger.Info("check EnableWeb config", zap.Bool("config.GSOAConf.EnableWeb", config.GSOAConf.EnableWeb))
65 | if config.GSOAConf.EnableWeb {
66 | mylog.Logger.Info("web enabled")
67 | // 设置静态文件夹
68 | r.Static("/static", "./static")
69 |
70 | // 设置根路径访问静态文件
71 | r.StaticFile("/", "./static/index.html")
72 |
73 | // 动态路由处理所有html文件
74 | r.GET("/:filename", func(c *gin.Context) {
75 | filename := c.Param("filename")
76 | if strings.HasSuffix(filename, ".html") {
77 | c.File("./static/" + filename)
78 | } else {
79 | c.JSON(http.StatusNotFound, gin.H{"error": "File not found"})
80 | }
81 | })
82 | }
83 | // 添加POST请求方法处理
84 | //r.POST("/v1/chat/completions", handler.OpenAIHandler)
85 | r.GET("/v1/models", apis.ModelsHandler)
86 | r.GET("/v1/models/:model", apis.RetrieveModelHandler)
87 |
88 | r.POST("/v2/translate", translation.TranslateV2Handler)
89 | r.POST("/translate", translation.TranslateV1Handler)
90 |
91 | //r.POST("/v1/embeddings", embedding.EmbeddingsHandler)
92 |
93 | r.GET("/multimodelcall", mywebui.WSMultiModelCallHandler)
94 |
95 | // 啥也不错,有些客户端真的很无语,不知道会怎么补全,尽量兼容吧
96 | v1 := r.Group("/v1")
97 | {
98 | // 中间件检查路径是否以 /v1/chat/completions 结尾
99 | v1.POST("/*path", func(c *gin.Context) {
100 | if strings.HasSuffix(c.Request.URL.Path, "/v1/chat/completions") || strings.HasSuffix(c.Request.URL.Path, "/chat/completions") || strings.HasSuffix(c.Request.URL.Path, "/v1") {
101 | handler.OpenAIHandler(c)
102 | return
103 | } else if strings.HasSuffix(c.Request.URL.Path, "/v1/translate") {
104 | translation.TranslateV1Handler(c)
105 | return
106 | } else if strings.HasSuffix(c.Request.URL.Path, "/v1/embeddings") {
107 | embedding.EmbeddingsHandler(c)
108 | return
109 | }
110 | c.JSON(http.StatusNotFound, gin.H{"error": "Path not found"})
111 | })
112 | }
113 | // 启动服务器,使用配置中的端口
114 | if err := r.Run(config.ServerPort); err != nil {
115 | mylog.Logger.Error(err.Error())
116 | return
117 | }
118 | }
119 |
--------------------------------------------------------------------------------
/nohup_manage_simple_one_api.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Manage the simple-one-api process via nohup: start | stop | restart.
# A PID file tracks the running instance; stdout/stderr go to a log file.

# Directory this script lives in.
DIR=$(dirname "$0")

# Log file and PID file locations.
LOG_FILE="$DIR/simple-one-api.log"
PID_FILE="$DIR/simple-one-api.pid"

# Start simple-one-api in the background, redirecting output to the log file.
# All variable expansions are quoted (previously unquoted — ShellCheck SC2086).
start() {
    if [ -f "$PID_FILE" ]; then
        PID=$(cat "$PID_FILE")
        if ps -p "$PID" > /dev/null; then
            echo "simple-one-api 已经在运行,进程ID: $PID"
            exit 1
        else
            echo "发现遗留的PID文件,但没有正在运行的进程。删除PID文件。"
            rm -f "$PID_FILE"
        fi
    fi

    # Abort if the directory cannot be entered; previously a failed cd would
    # silently launch the binary from the wrong directory.
    cd "$DIR" || exit 1
    nohup ./simple-one-api > "$LOG_FILE" 2>&1 &
    PID=$!
    echo "$PID" > "$PID_FILE"
    echo "simple-one-api 已启动,进程ID: $PID,日志文件: $LOG_FILE"
}

# Stop the running simple-one-api instance, if any.
stop() {
    if [ -f "$PID_FILE" ]; then
        PID=$(cat "$PID_FILE")
        if ps -p "$PID" > /dev/null; then
            kill "$PID"
            echo "simple-one-api 已停止,进程ID: $PID"
            rm -f "$PID_FILE"
        else
            echo "没有正在运行的 simple-one-api 进程,删除遗留的PID文件。"
            rm -f "$PID_FILE"
        fi
    else
        echo "没有找到PID文件,simple-one-api 可能未运行。"
    fi
}

# Restart = stop then start.
restart() {
    stop
    start
}

# Dispatch on the first argument.
case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    restart)
        restart
        ;;
    *)
        echo "使用方法: $0 {start|stop|restart}"
        exit 1
esac
68 |
--------------------------------------------------------------------------------
/pkg/adapter/aliyun-dashscope-adapter/dashscope_btype_openai.go:
--------------------------------------------------------------------------------
1 | package aliyun_dashscope_adapter
2 |
3 | import (
4 | "github.com/sashabaranov/go-openai"
5 | "simple-one-api/pkg/llm/aliyun-dashscope/common_btype"
6 | "simple-one-api/pkg/mycomdef"
7 | "simple-one-api/pkg/mycommon"
8 | myopenai "simple-one-api/pkg/openai"
9 | "time"
10 | )
11 |
12 | func OpenAIRequestToDashScopeBTypeRequest(oaiReq *openai.ChatCompletionRequest) *common_btype.DSBtypeRequestBody {
13 | var dsReq common_btype.DSBtypeRequestBody
14 |
15 | dsReq.Model = oaiReq.Model
16 |
17 | systemContent := mycommon.GetSystemMessage(oaiReq.Messages)
18 |
19 | if len(systemContent) > 0 {
20 | dsReq.Input.Prompt = systemContent + "\n"
21 | }
22 |
23 | dsReq.Input.Prompt += mycommon.GetLastestMessage(oaiReq.Messages)
24 |
25 | return &dsReq
26 | }
27 |
28 | func DashScopeBTypeResponseToOpenAIResponse(llamaResp *common_btype.DSBtypeResponseBody) *myopenai.OpenAIResponse {
29 | if llamaResp == nil {
30 | return nil
31 | }
32 |
33 | choices := []myopenai.Choice{
34 | {
35 | Index: 0,
36 | Message: myopenai.ResponseMessage{
37 | Role: mycomdef.KEYNAME_ASSISTANT,
38 | Content: llamaResp.Output.Text,
39 | },
40 | //FinishReason: determineFinishReason(resp.Done),
41 | },
42 | }
43 |
44 | usage := &myopenai.Usage{
45 | PromptTokens: llamaResp.Usage.InputTokens,
46 | CompletionTokens: llamaResp.Usage.OutputTokens,
47 | TotalTokens: llamaResp.Usage.InputTokens + llamaResp.Usage.OutputTokens,
48 | }
49 |
50 | return &myopenai.OpenAIResponse{
51 | ID: llamaResp.RequestID,
52 | Created: time.Now().Unix(),
53 | Model: "",
54 | Choices: choices,
55 | Usage: usage,
56 | }
57 | }
58 |
59 | func DashScopeBTypeResponseToOpenAIStreamResponse(dsResp *common_btype.DSBtypeResponseBody) *myopenai.OpenAIStreamResponse {
60 | openAIResp := &myopenai.OpenAIStreamResponse{
61 | ID: dsResp.RequestID,
62 | Object: "chat.completion.chunk",
63 | Created: time.Now().Unix(), // 使用当前 Unix 时间戳
64 | }
65 |
66 | openAIResp.Choices = append(openAIResp.Choices, struct {
67 | Index int `json:"index"`
68 | Delta myopenai.ResponseDelta `json:"delta,omitempty"`
69 | Logprobs interface{} `json:"logprobs,omitempty"`
70 | FinishReason interface{} `json:"finish_reason,omitempty"`
71 | }{
72 | Index: 0,
73 | Delta: myopenai.ResponseDelta{
74 | Role: openai.ChatMessageRoleAssistant,
75 | Content: dsResp.Output.Text,
76 | },
77 | })
78 |
79 | // 转换 Usage
80 | openAIResp.Usage = &myopenai.Usage{
81 | PromptTokens: dsResp.Usage.InputTokens,
82 | CompletionTokens: dsResp.Usage.OutputTokens,
83 | TotalTokens: dsResp.Usage.InputTokens + dsResp.Usage.OutputTokens,
84 | }
85 |
86 | return openAIResp
87 | }
88 |
--------------------------------------------------------------------------------
/pkg/adapter/aliyun-dashscope-adapter/dashscope_common_openai.go:
--------------------------------------------------------------------------------
1 | package aliyun_dashscope_adapter
2 |
3 | import (
4 | "github.com/sashabaranov/go-openai"
5 | "simple-one-api/pkg/llm/aliyun-dashscope/commsg/ds_com_request"
6 | "simple-one-api/pkg/llm/aliyun-dashscope/commsg/ds_com_resp"
7 | myopenai "simple-one-api/pkg/openai"
8 | "strings"
9 | "time"
10 | )
11 |
12 | func OpenAIRequestToDashScopeCommonRequest(oaiReq *openai.ChatCompletionRequest) *ds_com_request.ModelRequest {
13 | var dsComReq ds_com_request.ModelRequest
14 |
15 | dsComReq.Model = oaiReq.Model
16 |
17 | for _, msg := range oaiReq.Messages {
18 |
19 | var dsComMsg ds_com_request.Message
20 |
21 | dsComMsg.Role = msg.Role
22 | dsComMsg.Content = msg.Content
23 |
24 | dsComReq.Input.Messages = append(dsComReq.Input.Messages, dsComMsg)
25 |
26 | }
27 |
28 | var param ds_com_request.Parameters
29 | param.ResultFormat = "message"
30 |
31 | dsComReq.Parameters = ¶m
32 |
33 | return &dsComReq
34 | }
35 |
36 | func DashScopeCommonResponseToOpenAIResponse(dsComResp *ds_com_resp.ModelResponse) *myopenai.OpenAIResponse {
37 | if dsComResp == nil {
38 | return nil
39 | }
40 |
41 | var oaiChoices []myopenai.Choice
42 |
43 | for _, choice := range dsComResp.Output.Choices {
44 | var oaiChoice myopenai.Choice
45 | oaiChoice.Message.Role = choice.Message.Role
46 | oaiChoice.Message.Content = choice.Message.Content
47 | oaiChoices = append(oaiChoices, oaiChoice)
48 | }
49 |
50 | usage := &myopenai.Usage{
51 | PromptTokens: dsComResp.Usage.InputTokens,
52 | CompletionTokens: dsComResp.Usage.OutputTokens,
53 | TotalTokens: dsComResp.Usage.InputTokens + dsComResp.Usage.OutputTokens,
54 | }
55 |
56 | return &myopenai.OpenAIResponse{
57 | ID: dsComResp.RequestID,
58 | Created: time.Now().Unix(),
59 | Model: "",
60 | Choices: oaiChoices,
61 | Usage: usage,
62 | }
63 | }
64 |
// compareAndExtractDelta returns the portion of current that extends prev.
// When prev is empty, or current does not start with prev, current is
// returned unchanged.
func compareAndExtractDelta(prev, current string) string {
	switch {
	case prev == "":
		return current
	case strings.HasPrefix(current, prev):
		return current[len(prev):]
	default:
		return current
	}
}
76 |
77 | func GetStreamResponseContent(dsResp *ds_com_resp.ModelStreamResponse) string {
78 | if dsResp == nil || len(dsResp.Output.Choices) == 0 {
79 | return ""
80 | }
81 |
82 | choice := dsResp.Output.Choices[0]
83 |
84 | return choice.Message.Content
85 |
86 | }
87 |
88 | func DashScopeCommonResponseToOpenAIStreamResponse(dsResp *ds_com_resp.ModelStreamResponse, prevContent string) *myopenai.OpenAIStreamResponse {
89 | openAIResp := &myopenai.OpenAIStreamResponse{
90 | ID: dsResp.RequestID,
91 | Object: "chat.completion.chunk",
92 | Created: time.Now().Unix(), // 使用当前 Unix 时间戳
93 | }
94 |
95 | for _, dsChoice := range dsResp.Output.Choices {
96 | deltaContent := compareAndExtractDelta(prevContent, dsChoice.Message.Content)
97 | choice := struct {
98 | Index int `json:"index"`
99 | Delta myopenai.ResponseDelta `json:"delta,omitempty"`
100 | Logprobs interface{} `json:"logprobs,omitempty"`
101 | FinishReason interface{} `json:"finish_reason,omitempty"`
102 | }{
103 | Index: 0,
104 | Delta: myopenai.ResponseDelta{
105 | Role: dsChoice.Message.Role,
106 | Content: deltaContent,
107 | },
108 | }
109 |
110 | openAIResp.Choices = append(openAIResp.Choices, choice)
111 | }
112 |
113 | // 转换 Usage
114 | openAIResp.Usage = &myopenai.Usage{
115 | PromptTokens: dsResp.Usage.Kens,
116 | CompletionTokens: dsResp.Usage.OutputTokens,
117 | TotalTokens: dsResp.Usage.Kens + dsResp.Usage.OutputTokens,
118 | }
119 |
120 | return openAIResp
121 | }
122 |
--------------------------------------------------------------------------------
/pkg/adapter/azure_openai.go:
--------------------------------------------------------------------------------
1 | package adapter
2 |
3 | import (
4 | "github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai"
5 | "github.com/sashabaranov/go-openai"
6 | )
7 |
8 | func convertMessages2AzureMessage(chatMessages []openai.ChatCompletionMessage) []azopenai.ChatRequestMessageClassification {
9 | var messages []azopenai.ChatRequestMessageClassification
10 |
11 | for _, msg := range chatMessages {
12 | switch msg.Role {
13 | case "system":
14 | messages = append(messages, &azopenai.ChatRequestSystemMessage{
15 | Content: azopenai.NewChatRequestSystemMessageContent(msg.Content),
16 | })
17 | case "user":
18 | messages = append(messages, &azopenai.ChatRequestUserMessage{
19 | Content: azopenai.NewChatRequestUserMessageContent(msg.Content),
20 | })
21 | case "assistant":
22 | messages = append(messages, &azopenai.ChatRequestAssistantMessage{
23 | Content: azopenai.NewChatRequestAssistantMessageContent(msg.Content),
24 | })
25 | default:
26 | // 如果遇到未知的role,可以选择忽略或报错
27 | continue
28 | }
29 | }
30 |
31 | return messages
32 | }
33 |
34 | func OpenAIRequestToAzureRequest(oaiReq *openai.ChatCompletionRequest) *azopenai.ChatCompletionsOptions {
35 | azureMessages := convertMessages2AzureMessage(oaiReq.Messages)
36 | return &azopenai.ChatCompletionsOptions{
37 | Messages: azureMessages,
38 | }
39 | }
40 |
41 | func AzureResponseToOpenAIResponse(input *azopenai.GetChatCompletionsResponse) *openai.ChatCompletionResponse {
42 | // 转换 Choices
43 | choices := make([]openai.ChatCompletionChoice, len(input.ChatCompletions.Choices))
44 | for i, choice := range input.ChatCompletions.Choices {
45 | choices[i] = openai.ChatCompletionChoice{
46 | Index: i,
47 | Message: openai.ChatCompletionMessage{
48 | Role: safeString((*string)(choice.Message.Role)),
49 | Content: safeString(choice.Message.Content),
50 | Refusal: safeString(choice.Message.Refusal),
51 | //FunctionCall: choice.Message.FunctionCall,
52 | //ToolCalls: choice.Message.ToolCalls,
53 | },
54 | //Name: choice.Message.Name,
55 | //LogProbs: safeString(choice.LogProbs),
56 | //ContentFilterResults: choice.ContentFilterResults,
57 | }
58 | }
59 |
60 | // 转换 PromptFilterResults
61 | /*
62 | promptFilterResults := make([]openai.PromptFilterResult, len(input.ChatCompletions.PromptFilterResults))
63 | for i, result := range input.ChatCompletions.PromptFilterResults {
64 | promptFilterResults[i] = openai.PromptFilterResult{
65 | // 根据 PromptFilterResults 中字段对应的内容进行赋值
66 | // 示例: Prompt, Outcome 等字段
67 | }
68 | }
69 |
70 | */
71 |
72 | // 转换 ChatCompletionResponse
73 | return &openai.ChatCompletionResponse{
74 | ID: *input.ChatCompletions.ID,
75 | Object: "chat.completion",
76 | Created: input.ChatCompletions.Created.Unix(),
77 | Model: *input.ChatCompletions.Model,
78 | Choices: choices,
79 | Usage: openai.Usage{
80 | PromptTokens: safeInt(input.ChatCompletions.Usage.PromptTokens),
81 | CompletionTokens: safeInt(input.ChatCompletions.Usage.CompletionTokens),
82 | TotalTokens: safeInt(input.ChatCompletions.Usage.TotalTokens),
83 | },
84 | SystemFingerprint: *input.ChatCompletions.SystemFingerprint,
85 | //PromptFilterResults: promptFilterResults,
86 | }
87 | }
88 |
// safeString dereferences a string pointer, mapping nil to "".
func safeString(input *string) string {
	if input != nil {
		return *input
	}
	return ""
}
95 |
// safeInt dereferences an int32 pointer, mapping nil to 0.
func safeInt(input *int32) int {
	if input != nil {
		return int(*input)
	}
	return 0
}
102 |
--------------------------------------------------------------------------------
/pkg/adapter/baidu_agentbuilder_adapter/agentbuilder_openai.go:
--------------------------------------------------------------------------------
1 | package baidu_agentbuilder_adapter
2 |
3 | import (
4 | "github.com/sashabaranov/go-openai"
5 | "simple-one-api/pkg/llm/devplatform/baidu_agentbuilder"
6 | myopenai "simple-one-api/pkg/openai"
7 | "time"
8 | )
9 |
10 | func AgentBuilderResponseToOpenAIResponse(abResp *baidu_agentbuilder.GetAnswerResponse) *myopenai.OpenAIResponse {
11 | openAIResp := &myopenai.OpenAIResponse{
12 | ID: abResp.LogID,
13 | Object: "text_completion",
14 | //SystemFingerprint: qfResp.Header.Message,
15 | }
16 |
17 | // 转换 Choices
18 | for i := 0; i < len(abResp.Data.Content); i++ {
19 | openAIResp.Choices = append(openAIResp.Choices, myopenai.Choice{
20 | Index: 0,
21 | Message: myopenai.ResponseMessage{
22 | Role: openai.ChatMessageRoleAssistant,
23 | Content: abResp.Data.Content[i].Data,
24 | },
25 | })
26 | }
27 |
28 | // 设置 Created 时间为当前 Unix 时间戳(如果需要的话)
29 | openAIResp.Created = time.Now().Unix() // 你可以使用 time.Now().Unix() 设置为当前时间戳
30 |
31 | return openAIResp
32 | }
33 |
34 | func AgentBuilderResponseToOpenAIStreamResponse(abStreamResp *baidu_agentbuilder.ConversationResponse) *myopenai.OpenAIStreamResponse {
35 | openAIResp := &myopenai.OpenAIStreamResponse{
36 | ID: abStreamResp.LogID,
37 | Object: "chat.completion.chunk",
38 | Created: time.Now().Unix(), // 使用当前 Unix 时间戳
39 | //SystemFingerprint: qfResp.Header.Message,
40 | }
41 |
42 | for i := 0; i < len(abStreamResp.Data.Message.Content); i++ {
43 | content := abStreamResp.Data.Message.Content[i]
44 | if content.DataType == "null" {
45 | continue
46 | }
47 |
48 | openAIResp.Choices = append(openAIResp.Choices, struct {
49 | Index int `json:"index"`
50 | Delta myopenai.ResponseDelta `json:"delta,omitempty"`
51 | Logprobs interface{} `json:"logprobs,omitempty"`
52 | FinishReason interface{} `json:"finish_reason,omitempty"`
53 | }{
54 | Index: 0,
55 | Delta: myopenai.ResponseDelta{
56 | Role: openai.ChatMessageRoleAssistant,
57 | Content: content.Data.Text,
58 | },
59 | })
60 | }
61 |
62 | return openAIResp
63 | }
64 |
--------------------------------------------------------------------------------
/pkg/adapter/claude_stream_resp_openai.go:
--------------------------------------------------------------------------------
1 | package adapter
2 |
3 | import (
4 | "simple-one-api/pkg/llm/claude"
5 | "simple-one-api/pkg/mycomdef"
6 | myopenai "simple-one-api/pkg/openai"
7 | )
8 |
9 | // 将 MsgMessageStart 转换为 OpenAIStreamResponse 的函数
10 | func ConvertMsgMessageStartToOpenAIStreamResponse(msg *claude.MsgMessageStart) *myopenai.OpenAIStreamResponse {
11 | response := &myopenai.OpenAIStreamResponse{
12 | ID: msg.Message.ID,
13 | Model: msg.Message.Model,
14 | Usage: &myopenai.Usage{
15 | PromptTokens: msg.Message.Usage.InputTokens,
16 | CompletionTokens: msg.Message.Usage.OutputTokens,
17 | TotalTokens: msg.Message.Usage.InputTokens + msg.Message.Usage.OutputTokens,
18 | },
19 | Choices: []myopenai.OpenAIStreamResponseChoice{
20 | {
21 | Delta: myopenai.ResponseDelta{
22 | Role: msg.Message.Role,
23 | Content: "", // 因为原数据中的 content 是一个空数组
24 | },
25 | },
26 | },
27 | }
28 | return response
29 | }
30 |
31 | func ConvertMsgContentBlockDeltaToOpenAIStreamResponse(msg *claude.MsgContentBlockDelta) *myopenai.OpenAIStreamResponse {
32 | return &myopenai.OpenAIStreamResponse{
33 | Choices: []myopenai.OpenAIStreamResponseChoice{
34 | {
35 | Index: msg.Index,
36 | Delta: myopenai.ResponseDelta{
37 | Role: mycomdef.KEYNAME_ASSISTANT,
38 | Content: msg.Delta.Text,
39 | },
40 | },
41 | },
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/pkg/adapter/cozecn_openai.go:
--------------------------------------------------------------------------------
1 | package adapter
2 |
3 | import (
4 | "fmt"
5 | "github.com/sashabaranov/go-openai"
6 | "go.uber.org/zap"
7 | "simple-one-api/pkg/llm/devplatform/cozecn"
8 | "simple-one-api/pkg/mycommon"
9 | "simple-one-api/pkg/mylog"
10 | myopenai "simple-one-api/pkg/openai"
11 | "strings"
12 | "time"
13 | )
14 |
15 | func OpenAIRequestToCozecnRequest(oaiReq *openai.ChatCompletionRequest) *cozecn.CozeRequest {
16 | hisMessages := mycommon.ConvertSystemMessages2NoSystem(oaiReq.Messages)
17 | messageCount := len(hisMessages)
18 |
19 | // Directly get the content of the last message as the query
20 | query := hisMessages[messageCount-1].Content
21 |
22 | var cozeMessages []cozecn.Message
23 | if messageCount > 1 {
24 | // Iterate through the messages except the last one
25 | for i := 0; i < messageCount-1; i++ {
26 | msg := hisMessages[i] // Corrected to use hisMessages instead of oaiReq.Messages
27 | mt := ""
28 | if strings.ToLower(msg.Role) == "assistant" {
29 | mt = "answer"
30 | }
31 |
32 | cozeMessages = append(cozeMessages, cozecn.Message{
33 | Role: msg.Role,
34 | Type: mt,
35 | Content: msg.Content,
36 | ContentType: "text",
37 | })
38 | }
39 | }
40 |
41 | // Set a default user if the user field is empty
42 | user := oaiReq.User
43 | if user == "" {
44 | user = "12345678"
45 | }
46 |
47 | mylog.Logger.Debug("cozeMessages", zap.Any("cozeMessages", cozeMessages))
48 |
49 | return &cozecn.CozeRequest{
50 | ConversationID: "123", // Assuming a static conversation ID
51 | BotID: oaiReq.Model, // Assuming the model as the bot ID
52 | User: user,
53 | Query: query,
54 | Stream: oaiReq.Stream,
55 | ChatHistory: cozeMessages,
56 | }
57 | }
58 |
59 | func CozecnReponseToOpenAIResponse(resp *cozecn.Response) *myopenai.OpenAIResponse {
60 | if resp.Code != 0 {
61 | return &myopenai.OpenAIResponse{
62 | ID: resp.ConversationID,
63 | Error: &myopenai.ErrorDetail{
64 | Message: resp.Msg,
65 | Code: resp.Code,
66 | },
67 | }
68 | }
69 |
70 | choices := make([]myopenai.Choice, len(resp.Messages))
71 | for i, msg := range resp.Messages {
72 | if msg.Type == "verbose" {
73 | continue
74 | }
75 |
76 | choices[i] = myopenai.Choice{
77 | Index: i,
78 | Message: myopenai.ResponseMessage{
79 | Role: msg.Role,
80 | Content: msg.Content,
81 | },
82 | FinishReason: "stop", // Assuming all responses are finished
83 | }
84 | }
85 |
86 | return &myopenai.OpenAIResponse{
87 | ID: resp.ConversationID,
88 | Object: "text_completion",
89 | Created: time.Now().Unix(),
90 | Choices: choices,
91 | Error: func() *myopenai.ErrorDetail {
92 | if resp.Code != 200 {
93 | return &myopenai.ErrorDetail{
94 | Code: fmt.Sprintf("%d", resp.Code),
95 | Message: resp.Msg,
96 | }
97 | }
98 | return nil
99 | }(),
100 | }
101 | }
102 |
103 | func CozecnReponseToOpenAIResponseStream(resp *cozecn.StreamResponse) *myopenai.OpenAIStreamResponse {
104 | var choices []myopenai.OpenAIStreamResponseChoice
105 |
106 | if resp.Event == "message" {
107 | choices = append(choices, myopenai.OpenAIStreamResponseChoice{
108 | Index: resp.Index,
109 | Delta: myopenai.ResponseDelta{
110 | Role: resp.Message.Role,
111 | Content: resp.Message.Content,
112 | },
113 | })
114 | }
115 |
116 | var errorDetail *myopenai.ErrorDetail
117 | if resp.Event == "error" {
118 | errorDetail = &myopenai.ErrorDetail{
119 | Message: resp.ErrorInformation.Msg,
120 | Code: resp.ErrorInformation.Code,
121 | }
122 | }
123 |
124 | return &myopenai.OpenAIStreamResponse{
125 | ID: resp.ConversationID,
126 | Object: "chat.completion.chunk",
127 | Created: time.Now().Unix(),
128 | Choices: choices,
129 | Error: errorDetail,
130 | }
131 | }
132 |
--------------------------------------------------------------------------------
/pkg/adapter/dify_openai.go:
--------------------------------------------------------------------------------
1 | package adapter
2 |
3 | import (
4 | "github.com/sashabaranov/go-openai"
5 | "simple-one-api/pkg/llm/devplatform/dify/chat_completion_response"
6 | "simple-one-api/pkg/llm/devplatform/dify/chunk_chat_completion_response"
7 | "simple-one-api/pkg/mycommon"
8 | myopenai "simple-one-api/pkg/openai"
9 | "time"
10 | )
11 | import "simple-one-api/pkg/llm/devplatform/dify/chat_message_request"
12 |
13 | func OpenAIRequestToDifyRequest(oaiReq *openai.ChatCompletionRequest) *chat_message_request.ChatMessageRequest {
14 | var difyReq chat_message_request.ChatMessageRequest
15 | difyReq.Query = mycommon.GetLastestMessage(oaiReq.Messages)
16 | if oaiReq.Stream {
17 | difyReq.ResponseMode = "streaming"
18 | } else {
19 | difyReq.ResponseMode = "blocking"
20 | }
21 |
22 | difyReq.User = oaiReq.User
23 |
24 | if difyReq.User == "" {
25 | difyReq.User = "abc-123"
26 | }
27 |
28 | return &difyReq
29 | }
30 |
31 | func DifyResponseToOpenAIResponse(difyResp *chat_completion_response.ChatCompletionResponse) *openai.ChatCompletionResponse {
32 | var oaiResp openai.ChatCompletionResponse
33 |
34 | oaiResp.ID = difyResp.MessageID
35 | oaiResp.Object = "chat.completion"
36 | oaiResp.Created = difyResp.CreatedAt.Unix()
37 | //oaiResp.Model = difyResp.Model
38 | //oaiResp.Choices = difyResp.Choices
39 |
40 | var choice openai.ChatCompletionChoice
41 | choice.Message = openai.ChatCompletionMessage{
42 | Role: openai.ChatMessageRoleAssistant,
43 | Content: difyResp.Answer,
44 | }
45 |
46 | oaiResp.Choices = append(oaiResp.Choices, choice)
47 |
48 | return &oaiResp
49 | }
50 |
51 | func DifyResponseToOpenAIResponseStream(difyResp *chunk_chat_completion_response.MessageEvent) *myopenai.OpenAIStreamResponse {
52 | var oaiStreamResp myopenai.OpenAIStreamResponse
53 |
54 | oaiStreamResp.Choices = []myopenai.OpenAIStreamResponseChoice{
55 | {
56 | Delta: myopenai.ResponseDelta{
57 | Role: openai.ChatMessageRoleAssistant,
58 | Content: difyResp.Answer,
59 | },
60 | },
61 | }
62 |
63 | return &oaiStreamResp
64 | }
65 |
66 | func DifyMessageEndEventToOpenAIResponseStream(difyResp *chunk_chat_completion_response.MessageEndEvent) *myopenai.OpenAIStreamResponse {
67 | if difyResp == nil {
68 | return nil
69 | }
70 |
71 | var oaiuasge myopenai.Usage
72 |
73 | oaiuasge.PromptTokens = difyResp.Metadata.Usage.PromptTokens
74 | oaiuasge.CompletionTokens = difyResp.Metadata.Usage.CompletionTokens
75 | oaiuasge.TotalTokens = difyResp.Metadata.Usage.TotalTokens
76 |
77 | return &myopenai.OpenAIStreamResponse{
78 | ID: difyResp.ID,
79 | Object: "chat.completion.chunk",
80 | Created: time.Now().Unix(),
81 | //Error: errorDetail,
82 | Usage: &oaiuasge,
83 | }
84 | }
85 |
--------------------------------------------------------------------------------
/pkg/adapter/gemini_openai.go:
--------------------------------------------------------------------------------
1 | package adapter
2 |
3 | import (
4 | google_gemini "simple-one-api/pkg/llm/google-gemini"
5 | myopenai "simple-one-api/pkg/openai"
6 | "strings"
7 | "time"
8 | )
9 |
10 | func GeminiResponseToOpenAIResponse(qfResp *google_gemini.GeminiResponse) *myopenai.OpenAIResponse {
11 | // 创建 OpenAIResponse 实例
12 | openAIResp := &myopenai.OpenAIResponse{
13 | Object: "chat.completion",
14 | Usage: &myopenai.Usage{
15 | PromptTokens: qfResp.UsageMetadata.PromptTokenCount,
16 | CompletionTokens: qfResp.UsageMetadata.CandidatesTokenCount,
17 | TotalTokens: qfResp.UsageMetadata.TotalTokenCount,
18 | },
19 | Choices: make([]myopenai.Choice, len(qfResp.Candidates)),
20 | }
21 |
22 | // 遍历所有候选项
23 | for i, candidate := range qfResp.Candidates {
24 |
25 | role := candidate.Content.Role
26 | if strings.ToLower(role) == "model" {
27 | role = "assitant"
28 | }
29 |
30 | var content string
31 | if len(candidate.Content.Parts) > 0 {
32 | content = candidate.Content.Parts[0].Text
33 | }
34 |
35 | openAIResp.Choices[i] = myopenai.Choice{
36 | Index: candidate.Index,
37 | Message: myopenai.ResponseMessage{
38 | Role: role,
39 | Content: content,
40 | },
41 | FinishReason: candidate.FinishReason,
42 | }
43 |
44 | // 示例代码,假设不处理 LogProbs
45 | /*
46 | var logProbs json.RawMessage = nil
47 | openAIResp.Choices[i].LogProbs = &logProbs
48 |
49 | */
50 | }
51 |
52 | return openAIResp
53 | }
54 |
55 | func GeminiResponseToOpenAIStreamResponse(qfResp *google_gemini.GeminiResponse) *myopenai.OpenAIStreamResponse {
56 | if qfResp == nil {
57 | return nil
58 | }
59 |
60 | var Choices []myopenai.OpenAIStreamResponseChoice
61 |
62 | for i, candidate := range qfResp.Candidates {
63 | role := candidate.Content.Role
64 | if strings.ToLower(role) == "model" {
65 | role = "assitant"
66 | }
67 |
68 | var content string
69 | if len(candidate.Content.Parts) > 0 {
70 | content = candidate.Content.Parts[0].Text
71 | }
72 |
73 | choice := myopenai.OpenAIStreamResponseChoice{
74 | Index: i,
75 | Delta: myopenai.ResponseDelta{
76 | Role: role,
77 | Content: content,
78 | },
79 | //FinishReason: candidate.FinishReason,
80 | }
81 |
82 | Choices = append(Choices, choice)
83 | }
84 |
85 | openAIResponse := &myopenai.OpenAIStreamResponse{
86 | ID: "chatcmpl-" + time.Now().Format("20060102150405"), // 生成一个唯一的ID
87 | Object: "chat.completion.chunk",
88 | Created: time.Now().Unix(),
89 | //Model: "gpt-3.5-turbo-0613", // 假设模型名称
90 | Choices: Choices,
91 | Usage: &myopenai.Usage{
92 | PromptTokens: qfResp.UsageMetadata.PromptTokenCount,
93 | CompletionTokens: qfResp.UsageMetadata.CandidatesTokenCount,
94 | TotalTokens: qfResp.UsageMetadata.TotalTokenCount,
95 | },
96 | }
97 |
98 | return openAIResponse
99 | }
100 |
--------------------------------------------------------------------------------
/pkg/adapter/huoshanbot_openai.go:
--------------------------------------------------------------------------------
1 | package adapter
2 |
3 | import (
4 | "github.com/volcengine/volcengine-go-sdk/service/arkruntime/model"
5 | myopenai "simple-one-api/pkg/openai"
6 | "time"
7 | )
8 |
9 | func HuoShanBotResponseToOpenAIResponse(huoshanBotResp *model.BotChatCompletionResponse) *myopenai.OpenAIResponse {
10 | if huoshanBotResp == nil {
11 | return nil
12 | }
13 |
14 | resp := huoshanBotResp.ChatCompletionResponse
15 |
16 | // 转换 Choices
17 | var choices []myopenai.Choice
18 | for _, choice := range resp.Choices {
19 | var content string
20 | if choice.Message.Content != nil && choice.Message.Content.StringValue != nil {
21 | content = *choice.Message.Content.StringValue
22 | }
23 |
24 | choices = append(choices, myopenai.Choice{
25 | Index: choice.Index,
26 | Message: myopenai.ResponseMessage{
27 | Role: choice.Message.Role,
28 | Content: content,
29 | },
30 | LogProbs: nil, // 假设 logprobs 不存在
31 | FinishReason: string(choice.FinishReason),
32 | })
33 | }
34 |
35 | var mu model.BotModelUsage
36 | if huoshanBotResp.BotUsage != nil && len(huoshanBotResp.BotUsage.ModelUsage) > 0 {
37 | mu = *huoshanBotResp.BotUsage.ModelUsage[0]
38 | }
39 | // 转换 Usage
40 | usage := &myopenai.Usage{
41 | PromptTokens: mu.PromptTokens,
42 | CompletionTokens: mu.CompletionTokens,
43 | TotalTokens: mu.TotalTokens,
44 | }
45 |
46 | // 创建 OpenAIResponse
47 | openAIResp := &myopenai.OpenAIResponse{
48 | ID: resp.ID,
49 | Object: resp.Object,
50 | Created: resp.Created,
51 | Model: resp.Model,
52 | Choices: choices,
53 | Usage: usage,
54 | // Error 信息可以根据具体需求进行设置
55 | }
56 |
57 | return openAIResp
58 | }
59 |
60 | // HuoShanBotResponseToOpenAIStreamResponse converts a HuoShanBot stream response to an OpenAIStreamResponse
61 | func HuoShanBotResponseToOpenAIStreamResponse(huoshanBotResp *model.BotChatCompletionStreamResponse) *myopenai.OpenAIStreamResponse {
62 |
63 | var mu model.BotModelUsage
64 | if huoshanBotResp.BotUsage != nil && len(huoshanBotResp.BotUsage.ModelUsage) > 0 {
65 | mu = *huoshanBotResp.BotUsage.ModelUsage[0]
66 | }
67 | // 转换 Usage
68 | usage := &myopenai.Usage{
69 | PromptTokens: mu.PromptTokens,
70 | CompletionTokens: mu.CompletionTokens,
71 | TotalTokens: mu.TotalTokens,
72 | }
73 |
74 | response := &myopenai.OpenAIStreamResponse{
75 | ID: huoshanBotResp.ID,
76 | Object: "chat.completion.chunk",
77 | Created: time.Now().Unix(),
78 | Model: huoshanBotResp.Model,
79 | Choices: make([]myopenai.OpenAIStreamResponseChoice, len(huoshanBotResp.Choices)),
80 | Usage: usage,
81 | //Error: mapErrorDetails(huoshanBotResp.Error),
82 | }
83 |
84 | for i, choice := range huoshanBotResp.Choices {
85 |
86 | response.Choices[i] = myopenai.OpenAIStreamResponseChoice{
87 | Index: choice.Index,
88 | Delta: myopenai.ResponseDelta{
89 | Role: choice.Delta.Role,
90 | Content: choice.Delta.Content,
91 | },
92 | Logprobs: nil,
93 | FinishReason: nil,
94 | }
95 | }
96 |
97 | return response
98 | }
99 |
--------------------------------------------------------------------------------
/pkg/adapter/ollama_openai.go:
--------------------------------------------------------------------------------
1 | package adapter
2 |
3 | import (
4 | "github.com/google/uuid"
5 | "github.com/sashabaranov/go-openai"
6 | "simple-one-api/pkg/llm/ollama"
7 | myopenai "simple-one-api/pkg/openai"
8 | "simple-one-api/pkg/utils"
9 | )
10 |
// Ollama "format" values and OpenAI finish-reason strings used by the
// converters below.
const (
	jsonFormat = "json" // ask Ollama for JSON-formatted output
	textFormat = "text" // ask Ollama for plain-text output
	stopFinish = "stop" // generation completed (resp.Done == true)
	lengthFinish = "length" // generation not completed (resp.Done == false)
)
17 |
18 | func OpenAIRequestToOllamaRequest(oaiReq *openai.ChatCompletionRequest) *ollama.ChatRequest {
19 | messages := make([]ollama.Message, len(oaiReq.Messages))
20 | for i, msg := range oaiReq.Messages {
21 | messages[i] = ollama.Message{
22 | Role: msg.Role,
23 | Content: msg.Content,
24 | }
25 | }
26 |
27 | options := ollama.AdvancedModelOptions{
28 | Temperature: oaiReq.Temperature,
29 | TopP: oaiReq.TopP,
30 | NumPredict: oaiReq.MaxTokens,
31 | }
32 |
33 | return &ollama.ChatRequest{
34 | Model: oaiReq.Model,
35 | Messages: messages,
36 | Stream: oaiReq.Stream,
37 | Options: options,
38 | Format: getFormat(oaiReq.ResponseFormat),
39 | }
40 | }
41 |
42 | func getFormat(format *openai.ChatCompletionResponseFormat) string {
43 | if format == nil {
44 | return ""
45 | }
46 |
47 | switch format.Type {
48 | case openai.ChatCompletionResponseFormatTypeJSONObject:
49 | return jsonFormat
50 | case openai.ChatCompletionResponseFormatTypeText:
51 | return textFormat
52 | default:
53 | return ""
54 | }
55 | }
56 |
57 | func OllamaResponseToOpenAIResponse(resp *ollama.ChatResponse) *myopenai.OpenAIResponse {
58 | if resp == nil {
59 | return nil
60 | }
61 |
62 | choices := []myopenai.Choice{
63 | {
64 | Index: 0,
65 | Message: myopenai.ResponseMessage{
66 | Role: resp.Message.Role,
67 | Content: resp.Message.Content,
68 | },
69 | //FinishReason: determineFinishReason(resp.Done),
70 | },
71 | }
72 |
73 | usage := &myopenai.Usage{
74 | PromptTokens: resp.PromptEvalCount,
75 | CompletionTokens: resp.EvalCount,
76 | TotalTokens: resp.PromptEvalCount + resp.EvalCount,
77 | }
78 |
79 | timeCreate, _ := utils.ParseRFC3339NanoToUnixTime(resp.CreatedAt)
80 |
81 | return &myopenai.OpenAIResponse{
82 | ID: uuid.New().String(),
83 | Created: timeCreate,
84 | Model: resp.Model,
85 | Choices: choices,
86 | Usage: usage,
87 | }
88 | }
89 |
90 | func determineFinishReason(done bool) string {
91 | if done {
92 | return stopFinish
93 | }
94 | return lengthFinish
95 | }
96 |
97 | func OllamaResponseToOpenAIStreamResponse(resp *ollama.ChatResponse) *myopenai.OpenAIStreamResponse {
98 | if resp == nil {
99 | return nil
100 | }
101 |
102 | //log.Println(resp.Message.Role, resp.Message.Content)
103 | choices := []myopenai.OpenAIStreamResponseChoice{
104 | {
105 | Index: 0,
106 | Delta: myopenai.ResponseDelta{
107 | Role: resp.Message.Role,
108 | Content: resp.Message.Content,
109 | },
110 | },
111 | }
112 |
113 | usage := &myopenai.Usage{
114 | PromptTokens: resp.PromptEvalCount,
115 | CompletionTokens: resp.EvalCount,
116 | TotalTokens: resp.PromptEvalCount + resp.EvalCount,
117 | }
118 |
119 | timeCreate, _ := utils.ParseRFC3339NanoToUnixTime(resp.CreatedAt)
120 |
121 | return &myopenai.OpenAIStreamResponse{
122 | ID: uuid.New().String(),
123 | Created: timeCreate,
124 | Model: resp.Model,
125 | Choices: choices,
126 | Usage: usage,
127 | }
128 | }
129 |
--------------------------------------------------------------------------------
/pkg/adapter/openai_openai.go:
--------------------------------------------------------------------------------
1 | package adapter
2 |
3 | import (
4 | "encoding/json"
5 | "github.com/google/uuid"
6 | "github.com/sashabaranov/go-openai"
7 | "simple-one-api/pkg/mycomdef"
8 | myopenai "simple-one-api/pkg/openai"
9 | "strings"
10 | )
11 |
12 | func CheckOpenAIStreamRespone(respStream *openai.ChatCompletionStreamResponse) {
13 | for i := range respStream.Choices {
14 | if respStream.Choices[i].Delta.Role == "" {
15 | respStream.Choices[i].Delta.Role = mycomdef.KEYNAME_ASSISTANT
16 | }
17 | }
18 | }
19 |
20 | func OpenAIResponseToOpenAIResponse(resp *openai.ChatCompletionResponse) *myopenai.OpenAIResponse {
21 | if resp == nil {
22 | return nil
23 | }
24 |
25 | var choices []myopenai.Choice
26 | for _, choice := range resp.Choices {
27 | role := choice.Message.Role
28 | if role == "" {
29 | role = mycomdef.KEYNAME_ASSISTANT
30 | }
31 | message := myopenai.ResponseMessage{
32 | Role: role,
33 | Content: choice.Message.Content,
34 | }
35 | var logProbs json.RawMessage
36 | if choice.LogProbs != nil {
37 | logProbs, _ = json.Marshal(choice.LogProbs)
38 | }
39 | choices = append(choices, myopenai.Choice{
40 | Index: choice.Index,
41 | Message: message,
42 | LogProbs: &logProbs,
43 | FinishReason: string(choice.FinishReason),
44 | })
45 | }
46 |
47 | usage := myopenai.Usage{
48 | PromptTokens: resp.Usage.PromptTokens,
49 | CompletionTokens: resp.Usage.CompletionTokens,
50 | TotalTokens: resp.Usage.TotalTokens,
51 | }
52 |
53 | idStr := resp.ID
54 | if idStr == "" {
55 | idStr = uuid.New().String()
56 | }
57 | return &myopenai.OpenAIResponse{
58 | ID: idStr,
59 | Object: resp.Object,
60 | Created: resp.Created,
61 | Model: resp.Model,
62 | SystemFingerprint: resp.SystemFingerprint,
63 | Choices: choices,
64 | Usage: &usage,
65 | }
66 | }
67 |
68 | // OpenAIMultiContentRequestToOpenAIContentResponse 转换含多内容消息的请求到单内容响应。
69 | func OpenAIMultiContentRequestToOpenAIContentRequest(oaiReq *openai.ChatCompletionRequest) {
70 | for i := range oaiReq.Messages {
71 | msg := &oaiReq.Messages[i]
72 | //mylog.Logger.Info("1")
73 | if len(msg.MultiContent) > 0 && msg.Content == "" {
74 | //mylog.Logger.Info("2")
75 | for _, content := range msg.MultiContent {
76 | //mylog.Logger.Info(content.Text)
77 | if content.Type == openai.ChatMessagePartTypeText {
78 | msg.Content += content.Text
79 | } else if content.Type == openai.ChatMessagePartTypeImageURL {
80 | if strings.HasPrefix(content.ImageURL.URL, "http") {
81 | msg.Content += "\n" + content.ImageURL.URL
82 | }
83 | }
84 | }
85 | msg.MultiContent = nil
86 | }
87 | }
88 | }
89 |
--------------------------------------------------------------------------------
/pkg/apis/models_handler.go:
--------------------------------------------------------------------------------
1 | package apis
2 |
3 | import (
4 | "github.com/gin-gonic/gin"
5 | "net/http"
6 | "simple-one-api/pkg/config"
7 | "sort"
8 | "time"
9 | )
10 |
// Model mirrors a single entry of the OpenAI GET /v1/models list response.
type Model struct {
	ID string `json:"id"` // model identifier clients pass back in requests
	Object string `json:"object"` // always "model"
	Created int64 `json:"created"` // unix timestamp; set to listing time here, not a training date
	OwnedBy string `json:"owned_by"` // owner label reported to clients; always "openai" here
}
17 |
18 | func ModelsHandler(c *gin.Context) {
19 | var models []Model
20 | keys := make([]string, 0, len(config.ModelToService))
21 |
22 | for k := range config.SupportModels {
23 | keys = append(keys, k)
24 | }
25 | sort.Strings(keys) // 对keys进行排序
26 |
27 | t := time.Now()
28 | for _, k := range keys {
29 | models = append(models, Model{
30 | ID: k,
31 | Object: "model",
32 | Created: t.Unix(),
33 | OwnedBy: "openai",
34 | })
35 | }
36 |
37 | if len(models) > 0 {
38 | models = append(models, Model{
39 | ID: "random",
40 | Object: "model",
41 | Created: t.Unix(),
42 | OwnedBy: "openai",
43 | })
44 | }
45 |
46 | if len(models) == 0 {
47 | c.IndentedJSON(http.StatusNotFound, gin.H{"error": "No models found"})
48 | return
49 | }
50 | c.IndentedJSON(http.StatusOK, gin.H{
51 | "object": "list",
52 | "data": models,
53 | })
54 | }
55 |
56 | // RetrieveModelHandler RetrieveModelHandler用于根据模型ID检索模型信息
57 | func RetrieveModelHandler(c *gin.Context) {
58 | modelID := c.Param("model") // 从路径中获取模型ID
59 |
60 | if _, found := config.ModelToService[modelID]; found {
61 | model := Model{
62 | ID: "gpt-3.5-turbo-instruct",
63 | Object: "model",
64 | Created: time.Now().Unix(),
65 | OwnedBy: "openai",
66 | }
67 | c.IndentedJSON(http.StatusOK, model)
68 | return
69 | }
70 |
71 | c.IndentedJSON(http.StatusNotFound, gin.H{"error": "Model not found"})
72 | }
73 |
--------------------------------------------------------------------------------
/pkg/apis/text2speech/text2speech_handler.go:
--------------------------------------------------------------------------------
1 | package text2speech
2 |
3 | import (
4 | "fmt"
5 | "github.com/gin-gonic/gin"
6 | "net/http"
7 | )
8 |
9 | // CreateSpeechHandler 处理生成音频的请求
10 | func CreateSpeechHandler(c *gin.Context) {
11 | var requestBody struct {
12 | Model string `json:"model" binding:"required"`
13 | Input string `json:"input" binding:"required"`
14 | Voice string `json:"voice" binding:"required"`
15 | ResponseFormat string `json:"response_format"`
16 | Speed float64 `json:"speed"`
17 | }
18 | if err := c.ShouldBindJSON(&requestBody); err != nil {
19 | c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
20 | return
21 | }
22 |
23 | // 生成模拟响应
24 | response := fmt.Sprintf("模拟响应:使用模型 '%s' 和声音 '%s' 生成音频。文本内容为 '%s'。", requestBody.Model, requestBody.Voice, requestBody.Input)
25 |
26 | // 返回描述性文本
27 | c.JSON(http.StatusOK, gin.H{"message": response})
28 | }
29 |
--------------------------------------------------------------------------------
/pkg/config/conf_def.go:
--------------------------------------------------------------------------------
1 | package config
2 |
// ServiceTimeOut is the default per-service timeout in seconds.
var ServiceTimeOut int = 30

// Proxy strategy identifiers as they appear in the configuration file.
var (
	PROXY_STRATEGY_FORCEALL = "force_all"
	PROXY_STRATEGY_ALL      = "all"
	PROXY_STRATEGY_DEFAULT  = "default"
	PROXY_STRATEGY_DISABLED = "disabled"
)
9 |
--------------------------------------------------------------------------------
/pkg/config/config_json_check.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | import (
4 | "bytes"
5 | "strings"
6 | )
7 |
// FindLineAndCharacter translates a byte offset into data to a 1-based line
// number and the remaining 0-based character position within that line
// (each line consumes its content plus one newline byte).
func FindLineAndCharacter(data []byte, offset int) (int, int) {
	lines := bytes.Split(data, []byte{'\n'})
	lineNumber := 1
	characterPosition := offset

	for _, line := range lines {
		// Fix: use <= instead of < so an offset landing exactly on the first
		// byte of the next line is attributed to that line rather than being
		// reported as an out-of-range column on the previous one.
		if len(line)+1 <= characterPosition { // +1 accounts for the newline
			lineNumber++
			characterPosition -= len(line) + 1
		} else {
			break
		}
	}

	return lineNumber, characterPosition
}
24 |
25 | // getErrorContext 获取错误上下文的文本
26 | func GetErrorContext(data []byte, offset int) string {
27 | start := offset - 20 // 显示错误位置前后的文本
28 | end := offset + 20
29 |
30 | if start < 0 {
31 | start = 0
32 | }
33 | if end > len(data) {
34 | end = len(data)
35 | }
36 |
37 | return strings.TrimSpace(string(data[start:end]))
38 | }
39 |
--------------------------------------------------------------------------------
/pkg/config/config_keyname.go:
--------------------------------------------------------------------------------
1 | package config
2 |
// Keys used to look up credential and endpoint fields in service configs.
const (
	KEYNAME_API_KEY    = "api_key"
	KEYNAME_TOKEN      = "token"
	KEYNAME_SECRET_ID  = "secret_id"
	KEYNAME_SECRET_KEY = "secret_key"
	KEYNAME_GROUP_ID   = "group_id"
	KEYNAME_APPID      = "appid"
	KEYNAME_API_SECRET = "api_secret"
	KEYNAME_DOMAIN     = "domain"
	KEYNAME_ACCESS_KEY = "access_key"
	// NOTE(review): the value "addresss" looks misspelled, but it is kept
	// byte-identical because existing config files may use this exact key.
	KEYNAME_ADDRESSS = "addresss"
)

// Google Cloud (Vertex/Gemini) specific config keys.
const (
	KEYNAME_GCP_PROJECT_ID = "project_id"
	KEYNAME_GCP_LOCATION   = "location"
	KEYNAME_GCP_MODEL_ID   = "model_id"
	KEYNAME_GCP_JSON_FILE  = "json_file"
)

// Special model-selection keywords.
const (
	KEYNAME_RANDOM = "random"
	KEYNAME_ALL    = "all"
)
21 |
--------------------------------------------------------------------------------
/pkg/config/default_config.go:
--------------------------------------------------------------------------------
1 | package config
2 |
// DefaultSupportModelMap lists, for each provider key, the model names that
// are enabled by default when a service entry does not configure its own
// model list.
var DefaultSupportModelMap = map[string][]string{
	"qianfan":  {"yi_34b_chat", "ERNIE-Speed-8K", "ERNIE-Speed-128K", "ERNIE-Lite-8K", "ERNIE-Lite-8K-0922", "ERNIE-Tiny-8K"},
	"hunyuan":  {"hunyuan-lite", "hunyuan-standard", "hunyuan-standard-256K", "hunyuan-pro"},
	"xinghuo":  {"spark-lite", "spark-v2.0", "spark-pro", "spark-max"},
	"deepseek": {"deepseek-chat", "deepseek-coder"},
	"zhipu":    {"glm-3-turbo", "glm-4-0520", "glm-4", "glm-4-air", "glm-4-airx", "glm-4-flash", "glm-4v"},
	"minimax":  {"abab6.5", "abab6.5s", "abab6.5t", "abab6.5g", "abab5.5s"},
	"huoshan":  {"Doubao-pro-4k", "Doubao-pro-32k", "Doubao-pro-128k", "Doubao-lite-4k", "Doubao-lite-32k", "Doubao-lite-128k"},
	"gemini":   {"gemini-1.5-pro", "gemini-1.5-flash", "gemini-1.0-pro", "gemini-pro-vision"},
	"groq":     {"llama3-70b-8192", "llama3-8b-8192", "gemma-7b-it", "mixtral-8x7b-32768"},
	"aliyun":   {"qwen-turbo", "qwen-plus", "qwen-max", "qwen-max-longcontext"},
}
15 |
--------------------------------------------------------------------------------
/pkg/config/lb_strategy.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | import (
4 | "hash/fnv"
5 | "math/rand"
6 | "simple-one-api/pkg/mycomdef"
7 | "strings"
8 | "sync"
9 | "sync/atomic"
10 | "time"
11 | )
12 |
// Shared load-balancer state.
var (
	rrIndices = make(map[string]*uint32) // per-key round-robin counters (values mutated atomically)
	randLock  = &sync.Mutex{}            // serializes access to the global math/rand source
	modelLock = &sync.RWMutex{}          // guards the rrIndices map itself
)
18 |
19 | func getRandomIndex(n int) int {
20 | randLock.Lock()
21 | defer randLock.Unlock()
22 | return rand.Intn(n)
23 | }
24 |
// getRoundRobinIndex returns the next round-robin slot in [0, n) for
// modelName, keeping one independent counter per model. Counter creation
// uses double-checked locking; counter increments are lock-free atomics.
func getRoundRobinIndex(modelName string, n int) int {
	// Fast path: look up the existing counter under the shared read lock.
	modelLock.RLock()
	idx, exists := rrIndices[modelName]
	modelLock.RUnlock()

	if !exists {
		modelLock.Lock()
		if idx, exists = rrIndices[modelName]; !exists { // double check locking
			var newIndex uint32 = 0
			rrIndices[modelName] = &newIndex
			idx = &newIndex
		}
		modelLock.Unlock()
	}

	// Increment index atomically and get the server
	// (the first call yields 1 % n, so slot 0 is served last in the initial
	// cycle — harmless for load balancing).
	newIdx := atomic.AddUint32(idx, 1)
	return int(newIdx) % n
}
44 |
// getHashIndex maps key to a slot in [0, n) using an FNV-1a hash.
// NOTE(review): the hash input mixes in a millisecond-resolution timestamp,
// so the same key maps to different slots over time — this behaves like a
// random pick rather than a sticky hash. Confirm this is intentional before
// relying on the "hash" strategy for session affinity.
func getHashIndex(key string, n int) int {
	// timestamp with millisecond precision
	timestamp := time.Now().Format("2006-01-02 15:04:05.999")
	h := fnv.New32a()
	h.Write([]byte(key + timestamp))
	return int(h.Sum32()) % n
}
52 |
53 | func GetLBIndex(lbStrategy string, key string, length int) int {
54 | lbs := strings.ToLower(lbStrategy)
55 | switch lbs {
56 | case mycomdef.KEYNAME_FIRST:
57 | return 0
58 | case mycomdef.KEYNAME_RANDOM, mycomdef.KEYNAME_RAND:
59 | return getRandomIndex(length)
60 | case mycomdef.KEYNAME_ROUND_ROBIN, mycomdef.KEYNAME_RR:
61 | return getRoundRobinIndex(key, length)
62 | case mycomdef.KEYNAME_HASH:
63 | return getHashIndex(key, length)
64 | default:
65 | return getRandomIndex(length)
66 | }
67 | }
68 |
--------------------------------------------------------------------------------
/pkg/config/proxy_strategy.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "fmt"
7 | "golang.org/x/net/proxy"
8 | "net"
9 | "net/http"
10 | "net/url"
11 | "strings"
12 | "time"
13 | )
14 |
// Supported proxy type identifiers.
const (
	ProxyTypeHTTP   = "http"
	ProxyTypeSOCKS5 = "socks5"
)
20 |
// getHttpProxyTransport builds an http.Transport that routes requests through
// the given HTTP proxy URL, with dial timeout and keep-alive both set to
// timeout seconds. Returns an error when the proxy URL cannot be parsed.
func getHttpProxyTransport(proxyURL string, timeout int) (*http.Transport, error) {
	parsed, err := url.Parse(proxyURL)
	if err != nil {
		return nil, fmt.Errorf("error parsing proxy URL %s: %v", proxyURL, err)
	}

	dialer := &net.Dialer{
		Timeout:   time.Duration(timeout) * time.Second,
		KeepAlive: time.Duration(timeout) * time.Second,
	}

	return &http.Transport{
		Proxy:       http.ProxyURL(parsed),
		DialContext: dialer.DialContext,
	}, nil
}
38 |
39 | // GetSocks5Transport 返回配置了 SOCKS5 代理的 http.Transport,并设置超时时间
40 | func getSocks5Transport(proxyAddr string, timeout int) (*http.Transport, error) {
41 | dialer, err := proxy.SOCKS5("tcp", proxyAddr, nil, proxy.Direct)
42 | if err != nil {
43 | return nil, fmt.Errorf("error creating SOCKS5 proxy at %s: %v", proxyAddr, err)
44 | }
45 |
46 | dialContext := func(ctx context.Context, network, addr string) (net.Conn, error) {
47 | conn, err := dialer.Dial(network, addr)
48 | if err != nil {
49 | return nil, err
50 | }
51 | return conn, nil
52 | }
53 |
54 | transport := &http.Transport{
55 | DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
56 | ctx, cancel := context.WithTimeout(ctx, time.Duration(timeout)*time.Second)
57 | defer cancel()
58 | return dialContext(ctx, network, addr)
59 | },
60 | }
61 |
62 | return transport, nil
63 | }
64 |
65 | // GetTypeProxyTransport 根据代理类型返回相应的 http.Transport
66 | func GetTypeProxyTransport(proxyType, proxyAddr string, timeout int) (*http.Transport, error) {
67 | switch proxyType {
68 | case ProxyTypeHTTP:
69 | return getHttpProxyTransport(proxyAddr, timeout)
70 | case ProxyTypeSOCKS5:
71 | return getSocks5Transport(proxyAddr, timeout)
72 | default:
73 | return nil, errors.New("unsupported proxy type: " + proxyType)
74 | }
75 | }
76 |
77 | // GetConfProxyTransport 根据全局配置返回相应的 http.Transport
78 | func GetConfProxyTransport() (string, string, *http.Transport, error) {
79 | proxyType := strings.ToLower(GProxyConf.Type)
80 | var proxyAddr string
81 | var transport *http.Transport
82 | var err error
83 |
84 | timeout := GProxyConf.Timeout
85 | if timeout <= 0 {
86 | timeout = 30
87 | }
88 |
89 | switch proxyType {
90 | case ProxyTypeHTTP:
91 | proxyAddr = GProxyConf.HTTPProxy
92 | transport, err = getHttpProxyTransport(proxyAddr, timeout)
93 | case ProxyTypeSOCKS5:
94 | if len(GProxyConf.Socks5Proxy) >= 7 && GProxyConf.Socks5Proxy[:7] == "socks5:" {
95 | proxyURL, err := url.Parse(GProxyConf.Socks5Proxy)
96 | if err != nil {
97 | return "", "", nil, errors.New(fmt.Sprintf("error parsing proxy URL: %v\n", err))
98 | }
99 | proxyAddr = proxyURL.Host
100 | } else {
101 | proxyAddr = GProxyConf.Socks5Proxy
102 | }
103 |
104 | transport, err = getSocks5Transport(proxyAddr, timeout)
105 | default:
106 | return "", "", nil, errors.New("unsupported proxy type: " + proxyType)
107 | }
108 |
109 | return proxyType, proxyAddr, transport, err
110 | }
111 |
--------------------------------------------------------------------------------
/pkg/embedding/baiduqianfan/qianfan_embedding.go:
--------------------------------------------------------------------------------
1 | package baiduqianfan
2 |
3 | import (
4 | "bytes"
5 | "encoding/json"
6 | "fmt"
7 | "github.com/baidubce/bce-qianfan-sdk/go/qianfan"
8 | "github.com/sashabaranov/go-openai"
9 | "io"
10 | "net/http"
11 | "simple-one-api/pkg/embedding/oai"
12 | baidu_qianfan "simple-one-api/pkg/llm/baidu-qianfan"
13 | "time"
14 | )
15 |
16 | func convertOpenAIEmbeddingRequestToBaiduEmbeddingRequest(src *oai.EmbeddingRequest) *qianfan.EmbeddingRequest {
17 | var inputs []string
18 | switch v := src.Input.(type) {
19 | case string:
20 | inputs = []string{v}
21 | case []string:
22 | inputs = v
23 | case []any:
24 | for _, item := range v {
25 | if str, ok := item.(string); ok {
26 | inputs = append(inputs, str)
27 | }
28 | }
29 | default:
30 | fmt.Println("Unsupported input type")
31 | return nil
32 | }
33 |
34 | return &qianfan.EmbeddingRequest{
35 | Input: inputs,
36 | UserID: src.User,
37 | }
38 | }
39 |
40 | func convertBaiduEmbeddingResponseToOpenAIEmbeddingResponse(src *qianfan.EmbeddingResponse) *oai.EmbeddingResponse {
41 | var data []openai.Embedding
42 | for _, d := range src.Data {
43 | // 将浮点数从 float64 转为 float32
44 | embedding := make([]float32, len(d.Embedding))
45 | for i, val := range d.Embedding {
46 | embedding[i] = float32(val)
47 | }
48 |
49 | data = append(data, openai.Embedding{
50 | Object: d.Object,
51 | Embedding: embedding,
52 | Index: d.Index,
53 | })
54 | }
55 |
56 | return &oai.EmbeddingResponse{
57 | Object: src.Object,
58 | Data: data,
59 | //Model: src.Id,
60 | Usage: openai.Usage{
61 | PromptTokens: src.Usage.PromptTokens,
62 | TotalTokens: src.Usage.TotalTokens,
63 | },
64 | }
65 | }
66 |
67 | func getBaiduEmbeddings(request *oai.EmbeddingRequest, accessToken string, proxyTransport *http.Transport) (*oai.EmbeddingResponse, error) {
68 | requestURL := fmt.Sprintf("https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/embeddings/embedding-v1?access_token=%s", accessToken)
69 |
70 | bdReq := convertOpenAIEmbeddingRequestToBaiduEmbeddingRequest(request)
71 |
72 | jsonData, err := json.Marshal(bdReq)
73 | if err != nil {
74 | return nil, err
75 | }
76 |
77 | req, err := http.NewRequest("POST", requestURL, bytes.NewBuffer(jsonData))
78 | if err != nil {
79 | return nil, err
80 | }
81 |
82 | req.Header.Set("Content-Type", "application/json")
83 |
84 | var client *http.Client
85 | if proxyTransport != nil {
86 | client = &http.Client{
87 | Timeout: 60 * time.Second,
88 | Transport: proxyTransport,
89 | }
90 | } else {
91 | client = &http.Client{
92 | Timeout: 60 * time.Second,
93 | Transport: http.DefaultTransport, // 使用默认的 Transport
94 | }
95 | }
96 | res, err := client.Do(req)
97 | if err != nil {
98 | return nil, err
99 | }
100 | defer res.Body.Close()
101 |
102 | body, err := io.ReadAll(res.Body)
103 | if err != nil {
104 | return nil, err
105 | }
106 |
107 | var embeddingRes qianfan.EmbeddingResponse
108 | if err := json.Unmarshal(body, &embeddingRes); err != nil {
109 | return nil, err
110 | }
111 |
112 | return convertBaiduEmbeddingResponseToOpenAIEmbeddingResponse(&embeddingRes), nil
113 | }
114 | func BaiduQianfanEmbedding(req *oai.EmbeddingRequest, accessKey string, secretKey string, proxyTransport *http.Transport) (*oai.EmbeddingResponse, error) {
115 |
116 | accessToken := baidu_qianfan.GetAccessToken(accessKey, secretKey)
117 |
118 | return getBaiduEmbeddings(req, accessToken, proxyTransport)
119 | }
120 |
--------------------------------------------------------------------------------
/pkg/embedding/oai/oai_embedding.go:
--------------------------------------------------------------------------------
1 | package oai
2 |
3 | import (
4 | "bytes"
5 | "encoding/json"
6 | "fmt"
7 | "io"
8 | "net/http"
9 | "time"
10 | )
11 |
12 | // GenerateEmbedding 生成文本的嵌入向量
13 | func OpenAIEmbedding(embReq *EmbeddingRequest, apiKey string, proxyTransport *http.Transport) (*EmbeddingResponse, error) {
14 |
15 | url := "https://api.openai.com/v1/embeddings"
16 | requestBody, err := json.Marshal(embReq)
17 | if err != nil {
18 | return nil, fmt.Errorf("JSON 编码错误: %v", err)
19 | }
20 |
21 | req, err := http.NewRequest("POST", url, bytes.NewBuffer(requestBody))
22 | if err != nil {
23 | return nil, fmt.Errorf("创建请求错误: %v", err)
24 | }
25 |
26 | req.Header.Set("Content-Type", "application/json")
27 | req.Header.Set("Authorization", "Bearer "+apiKey)
28 |
29 | var client *http.Client
30 | if proxyTransport != nil {
31 | client = &http.Client{
32 | Timeout: 60 * time.Second,
33 | Transport: proxyTransport,
34 | }
35 | } else {
36 | client = &http.Client{
37 | Timeout: 60 * time.Second,
38 | Transport: http.DefaultTransport, // 使用默认的 Transport
39 | }
40 | }
41 | resp, err := client.Do(req)
42 | if err != nil {
43 | return nil, fmt.Errorf("请求错误: %v", err)
44 | }
45 | defer resp.Body.Close()
46 |
47 | body, err := io.ReadAll(resp.Body)
48 | if err != nil {
49 | return nil, fmt.Errorf("读取响应错误: %v", err)
50 | }
51 |
52 | var response EmbeddingResponse
53 | err = json.Unmarshal(body, &response)
54 | if err != nil {
55 | return nil, err
56 | }
57 |
58 | return &response, nil
59 | }
60 |
--------------------------------------------------------------------------------
/pkg/embedding/oai/oai_embedding_message.go:
--------------------------------------------------------------------------------
1 | package oai
2 |
3 | import (
4 | "github.com/sashabaranov/go-openai"
5 | )
6 |
// EmbeddingRequest mirrors the OpenAI /v1/embeddings request body.
type EmbeddingRequest struct {
	// Input holds the text(s) to embed; callers in this repo pass a string,
	// a []string, or a []any of strings (see the Baidu converter).
	Input any `json:"input"`
	// Model is the embedding model identifier.
	Model string `json:"model"`
	// User is an optional end-user identifier forwarded to the provider.
	User string `json:"user,omitempty"`
	// EncodingFormat selects the embedding encoding requested from the API.
	EncodingFormat openai.EmbeddingEncodingFormat `json:"encoding_format,omitempty"`
	// Dimensions The number of dimensions the resulting output embeddings should have.
	// Only supported in text-embedding-3 and later models.
	Dimensions int `json:"dimensions,omitempty"`
}
16 |
// EmbeddingResponse mirrors the OpenAI /v1/embeddings response body.
type EmbeddingResponse struct {
	Object string             `json:"object"` // response object tag
	Data   []openai.Embedding `json:"data"`   // one embedding per input item
	Model  string             `json:"model"`  // model that produced the vectors
	Usage  openai.Usage       `json:"usage"`  // token accounting
}
23 |
--------------------------------------------------------------------------------
/pkg/handler/openai_agentbuilder_handler.go:
--------------------------------------------------------------------------------
1 | package handler
2 |
3 | import (
4 | "encoding/json"
5 | "github.com/gin-gonic/gin"
6 | "go.uber.org/zap"
7 | "log"
8 | "net/http"
9 | "simple-one-api/pkg/adapter/baidu_agentbuilder_adapter"
10 | "simple-one-api/pkg/config"
11 | "simple-one-api/pkg/llm/devplatform/baidu_agentbuilder"
12 | "simple-one-api/pkg/mycommon"
13 | "simple-one-api/pkg/mylog"
14 | "simple-one-api/pkg/utils"
15 | )
16 |
17 | func OpenAI2AgentBuilderHandler(c *gin.Context, oaiReqParam *OAIRequestParam) error {
18 | oaiReq := oaiReqParam.chatCompletionReq
19 | //s := oaiReqParam.modelDetails
20 | credentials := oaiReqParam.creds
21 | secretKey, _ := utils.GetStringFromMap(credentials, config.KEYNAME_SECRET_KEY)
22 |
23 | query := mycommon.GetLastestMessage(oaiReq.Messages)
24 |
25 | if oaiReq.Stream {
26 | cb := func(data string) {
27 | log.Println(data)
28 | var resp baidu_agentbuilder.ConversationResponse
29 | err := json.Unmarshal([]byte(data), &resp)
30 | if err != nil {
31 | mylog.Logger.Error("An error occurred",
32 | zap.Error(err)) // 记录错误对象
33 | return
34 | }
35 |
36 | oaiRespStream := baidu_agentbuilder_adapter.AgentBuilderResponseToOpenAIStreamResponse(&resp)
37 |
38 | oaiRespStream.Model = oaiReq.Model
39 |
40 | respData, err := json.Marshal(&oaiRespStream)
41 | if err != nil {
42 | mylog.Logger.Error("Error marshaling response:", zap.Error(err))
43 | return
44 | }
45 |
46 | // 假设 mylog.Logger 是一个已经配置好的 zap.Logger 实例
47 | mylog.Logger.Info("Response HTTP data",
48 | zap.String("data", string(respData))) // 记录响应数据
49 |
50 | _, err = c.Writer.WriteString("data: " + string(respData) + "\n\n")
51 | if err != nil {
52 | // 假设 mylog.Logger 是一个已经配置好的 zap.Logger 实例
53 | mylog.Logger.Error("An error occurred",
54 | zap.Error(err)) // 记录错误对象
55 |
56 | return
57 | }
58 | c.Writer.(http.Flusher).Flush()
59 | }
60 |
61 | err := baidu_agentbuilder.Conversation(oaiReq.Model, secretKey, query, cb)
62 | if err != nil {
63 | // 假设 mylog.Logger 是一个已经配置好的 zap.Logger 实例
64 | mylog.Logger.Error("OpenAI2AgentBuilderHandler|baidu_agentbuilder.Conversation",
65 | zap.Error(err)) // 记录错误对象
66 |
67 | return err
68 | }
69 |
70 | } else {
71 | abResp, err := baidu_agentbuilder.GetAnswer(oaiReq.Model, secretKey, query)
72 | if err != nil {
73 |
74 | return err
75 | }
76 |
77 | oaiResp := baidu_agentbuilder_adapter.AgentBuilderResponseToOpenAIResponse(abResp)
78 |
79 | oaiResp.Model = oaiReq.Model
80 |
81 | // 假设 mylog.Logger 是一个已经配置好的 zap.Logger 实例
82 | mylog.Logger.Info("Standard response",
83 | zap.Any("response", *oaiResp)) // 记录响应对象
84 |
85 | c.JSON(http.StatusOK, oaiResp)
86 |
87 | }
88 |
89 | return nil
90 | }
91 |
--------------------------------------------------------------------------------
/pkg/handler/openai_aliyun_bailian_handler.go:
--------------------------------------------------------------------------------
1 | package handler
2 |
3 | import "github.com/gin-gonic/gin"
4 |
// OpenAI2AliyunBaiLianHandler is a placeholder for the Aliyun Bailian
// backend. It currently performs no work and always reports success.
// TODO: implement the request translation and dispatch for Bailian.
func OpenAI2AliyunBaiLianHandler(c *gin.Context, oaiReqParam *OAIRequestParam) error {
	return nil
}
8 |
--------------------------------------------------------------------------------
/pkg/handler/openai_azure_handler.go:
--------------------------------------------------------------------------------
1 | package handler
2 |
3 | import (
4 | "context"
5 | "github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai"
6 | "github.com/Azure/azure-sdk-for-go/sdk/azcore"
7 | "github.com/gin-gonic/gin"
8 | "log"
9 | "net/http"
10 | "net/url"
11 | "simple-one-api/pkg/adapter"
12 | "simple-one-api/pkg/config"
13 | "simple-one-api/pkg/utils"
14 | )
15 |
// formatAzureURL reduces an endpoint URL to its scheme://host base form,
// dropping any path, query, or fragment. Parse failures are returned to the
// caller (who may fall back to the raw URL).
func formatAzureURL(inputURL string) (string, error) {
	parsed, err := url.Parse(inputURL)
	if err != nil {
		return "", err
	}

	base := url.URL{
		Scheme: parsed.Scheme,
		Host:   parsed.Host,
	}
	return base.String(), nil
}
31 |
32 | // OpenAI2AzureOpenAIHandler handles OpenAI to Azure OpenAI requests
33 | func OpenAI2AzureOpenAIHandler(c *gin.Context, oaiReqParam *OAIRequestParam) error {
34 | req := oaiReqParam.chatCompletionReq
35 | s := oaiReqParam.modelDetails
36 |
37 | apiKey, _ := utils.GetStringFromMap(s.Credentials, config.KEYNAME_API_KEY)
38 | serverURL, err := formatAzureURL(s.ServerURL)
39 | if err != nil {
40 | serverURL = s.ServerURL
41 | }
42 |
43 | clientModel := oaiReqParam.ClientModel
44 |
45 | log.Println(req, apiKey, serverURL, clientModel)
46 |
47 | keyCredential := azcore.NewKeyCredential(apiKey)
48 | client, err := azopenai.NewClientWithKeyCredential(serverURL, keyCredential, nil)
49 |
50 | azureReq := adapter.OpenAIRequestToAzureRequest(req)
51 |
52 | resp, err := client.GetChatCompletions(context.TODO(), *azureReq, nil)
53 |
54 | myresp := adapter.AzureResponseToOpenAIResponse(&resp)
55 |
56 | c.JSON(http.StatusOK, myresp)
57 |
58 | return nil
59 | }
60 |
--------------------------------------------------------------------------------
/pkg/handler/openai_dify_handler.go:
--------------------------------------------------------------------------------
1 | package handler
2 |
3 | import (
4 | "encoding/json"
5 | "github.com/gin-gonic/gin"
6 | "github.com/google/uuid"
7 | "go.uber.org/zap"
8 | "net/http"
9 | "simple-one-api/pkg/adapter"
10 | "simple-one-api/pkg/config"
11 | "simple-one-api/pkg/llm/devplatform/dify/chat_message_request"
12 | "simple-one-api/pkg/llm/devplatform/dify/chunk_chat_completion_response"
13 | "simple-one-api/pkg/mylog"
14 | myopenai "simple-one-api/pkg/openai"
15 | "simple-one-api/pkg/utils"
16 | "time"
17 | )
18 |
19 | func OpenAI2DifyHandler(c *gin.Context, oaiReqParam *OAIRequestParam) error {
20 | oaiReq := oaiReqParam.chatCompletionReq
21 | difyReq := adapter.OpenAIRequestToDifyRequest(oaiReqParam.chatCompletionReq)
22 | credentials := oaiReqParam.creds
23 |
24 | respID := uuid.New().String()
25 |
26 | apiKey, _ := utils.GetStringFromMap(credentials, config.KEYNAME_API_KEY)
27 |
28 | if oaiReq.Stream == false {
29 |
30 | difyResp, err := chat_message_request.CallChatMessagesNoneStreamMode(difyReq, apiKey, nil)
31 | if err != nil {
32 | mylog.Logger.Error(err.Error())
33 | return err
34 | }
35 |
36 | // 转换响应
37 | myresp := adapter.DifyResponseToOpenAIResponse(difyResp)
38 | myresp.Model = oaiReqParam.ClientModel
39 |
40 | c.JSON(http.StatusOK, myresp)
41 |
42 | return nil
43 | }
44 |
45 | // 流式处理
46 | cb := func(eventData string) {
47 | mylog.Logger.Debug("Received event: " + eventData)
48 | var commonEvent chunk_chat_completion_response.CommonEvent
49 | if err := json.Unmarshal([]byte(eventData), &commonEvent); err != nil {
50 | mylog.Logger.Error("Error parsing common event: " + err.Error())
51 | return
52 | }
53 |
54 | // 处理不同的事件类型
55 | if err := processEvent(c, eventData, oaiReqParam, commonEvent.Event, respID); err != nil {
56 | mylog.Logger.Error("Error processing event: " + err.Error())
57 | return
58 | }
59 | }
60 |
61 | // 调用流式接口
62 | if err := chat_message_request.CallChatMessagesStreamMode(difyReq, apiKey, cb, oaiReqParam.httpTransport); err != nil {
63 | mylog.Logger.Error(err.Error())
64 | return err
65 | }
66 |
67 | return nil
68 | }
69 |
70 | // 处理不同事件类型的通用函数
71 | func processEvent(c *gin.Context, eventData string, oaiReqParam *OAIRequestParam, eventType string, respID string) error {
72 | var oaiRespStream *myopenai.OpenAIStreamResponse
73 | var err error
74 |
75 | // 根据 event 类型解析对应的事件
76 | switch eventType {
77 | case "message":
78 | var messageEvent chunk_chat_completion_response.MessageEvent
79 | if err = json.Unmarshal([]byte(eventData), &messageEvent); err != nil {
80 | mylog.Logger.Error(err.Error())
81 | return err
82 | }
83 | oaiRespStream = adapter.DifyResponseToOpenAIResponseStream(&messageEvent)
84 | case "message_end":
85 | var messageEndEvent chunk_chat_completion_response.MessageEndEvent
86 | if err = json.Unmarshal([]byte(eventData), &messageEndEvent); err != nil {
87 | mylog.Logger.Error(err.Error())
88 | return err
89 | }
90 |
91 | mylog.Logger.Debug("processEvent", zap.Any("messageEndEvent", messageEndEvent))
92 | oaiRespStream = adapter.DifyMessageEndEventToOpenAIResponseStream(&messageEndEvent)
93 | default:
94 | // 如果是未知的 event 类型,可以选择忽略或记录错误
95 | mylog.Logger.Warn("Unknown event type: " + eventType)
96 | return nil
97 | }
98 |
99 | oaiRespStream.ID = respID
100 | oaiRespStream.Object = "chat.completion.chunk"
101 | oaiRespStream.Created = time.Now().Unix()
102 |
103 | // 设置模型
104 | oaiRespStream.Model = oaiReqParam.ClientModel
105 |
106 | // 将响应数据写入客户端
107 | return writeResponse(c, oaiRespStream)
108 | }
109 |
110 | // 将响应数据写入客户端的辅助函数
111 | func writeResponse(c *gin.Context, oaiRespStream interface{}) error {
112 | respData, err := json.Marshal(oaiRespStream)
113 | if err != nil {
114 | return err
115 | }
116 |
117 | mylog.Logger.Info(string(respData))
118 |
119 | _, err = c.Writer.WriteString("data: " + string(respData) + "\n\n")
120 | if err != nil {
121 | return err
122 | }
123 |
124 | // 确保响应被及时发送
125 | c.Writer.(http.Flusher).Flush()
126 | return nil
127 | }
128 |
--------------------------------------------------------------------------------
/pkg/handler/openai_groq_handler.go:
--------------------------------------------------------------------------------
1 | package handler
2 |
3 | import (
4 | "github.com/gin-gonic/gin"
5 | "github.com/sashabaranov/go-openai"
6 | )
7 |
8 | // https://console.groq.com/docs/openai
9 | func adjustGroqReq(req *openai.ChatCompletionRequest) {
10 | req.LogProbs = false
11 | req.LogitBias = nil
12 | req.TopLogProbs = 0
13 | if req.N != 0 {
14 | req.N = 1
15 | }
16 |
17 | if req.Temperature <= 0 {
18 | req.Temperature = 0.1
19 | }
20 |
21 | if req.Temperature > 2 {
22 | req.Temperature = 2
23 | }
24 | }
25 |
26 | // OpenAI2GroqOpenAIHandler handles OpenAI to Azure OpenAI requests
27 | func OpenAI2GroqOpenAIHandler(c *gin.Context, oaiReqParam *OAIRequestParam) error {
28 | req := oaiReqParam.chatCompletionReq
29 | s := oaiReqParam.modelDetails
30 | //credentials := oaiReqParam.creds
31 | conf, err := getConfig(s, oaiReqParam)
32 | if err != nil {
33 | return err
34 | }
35 |
36 | adjustGroqReq(req)
37 |
38 | clientModel := oaiReqParam.ClientModel
39 | return handleOpenAIOpenAIRequest(conf, c, req, clientModel)
40 | }
41 |
--------------------------------------------------------------------------------
/pkg/handler/openai_hunyuan_handler.go:
--------------------------------------------------------------------------------
1 | package handler
2 |
3 | import (
4 | "encoding/json"
5 | "github.com/gin-gonic/gin"
6 | "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common"
7 | "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile"
8 | hunyuan "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/hunyuan/v20230901"
9 | "net/http"
10 | "simple-one-api/pkg/adapter"
11 | "simple-one-api/pkg/config"
12 | "simple-one-api/pkg/mylog"
13 | "simple-one-api/pkg/utils"
14 | )
15 |
16 | func OpenAI2HunYuanHandler(c *gin.Context, oaiReqParam *OAIRequestParam) error {
17 | // 创建认证对象
18 | oaiReq := oaiReqParam.chatCompletionReq
19 | //s := oaiReqParam.modelDetails
20 | credentials := oaiReqParam.creds
21 | secretId, _ := utils.GetStringFromMap(credentials, config.KEYNAME_SECRET_ID)
22 | secretKey, _ := utils.GetStringFromMap(credentials, config.KEYNAME_SECRET_KEY)
23 | credential := common.NewCredential(
24 | secretId,
25 | secretKey,
26 | )
27 |
28 | // 创建客户端配置
29 | cpf := profile.NewClientProfile()
30 | cpf.HttpProfile.Endpoint = "hunyuan.tencentcloudapi.com"
31 |
32 | // 创建HunYuan客户端
33 | client, err := hunyuan.NewClient(credential, "", cpf)
34 | if err != nil {
35 | mylog.Logger.Error(err.Error())
36 | return err
37 | }
38 |
39 | if oaiReqParam.httpTransport != nil {
40 | client.Client.WithHttpTransport(oaiReqParam.httpTransport)
41 | }
42 |
43 | // 创建HunYuan请求对象
44 | request := adapter.OpenAIRequestToHunYuanRequest(oaiReq)
45 |
46 | // 打印请求数据
47 | djData, _ := json.Marshal(request)
48 | mylog.Logger.Info(string(djData))
49 |
50 | // 发送请求并处理响应
51 | response, err := client.ChatCompletions(request)
52 | if err != nil {
53 | mylog.Logger.Error(err.Error())
54 | return err
55 | }
56 |
57 | // 处理响应数据
58 | return handleHunYuanResponse(c, response, oaiReq.Model, oaiReqParam)
59 | }
60 |
61 | // handleHunYuanResponse 处理HunYuan的响应数据
62 | func handleHunYuanResponse(c *gin.Context, response *hunyuan.ChatCompletionsResponse, model string, oaiReqParam *OAIRequestParam) error {
63 | if response.Response != nil {
64 | // 非流式响应
65 | return handleHunYuanNonStreamResponse(c, response, model, oaiReqParam)
66 | }
67 |
68 | // 流式响应
69 | utils.SetEventStreamHeaders(c)
70 | for event := range response.Events {
71 | oaiStreamResp, err := adapter.HunYuanResponseToOpenAIStreamResponse(event)
72 | if err != nil {
73 | mylog.Logger.Error(err.Error())
74 | return err
75 | }
76 | oaiStreamResp.Model = oaiReqParam.ClientModel
77 | respData, err := json.Marshal(&oaiStreamResp)
78 | if err != nil {
79 | mylog.Logger.Error(err.Error())
80 | return err
81 | }
82 | mylog.Logger.Info(string(respData))
83 | _, err = c.Writer.WriteString("data: " + string(respData) + "\n\n")
84 | if err != nil {
85 | mylog.Logger.Error(err.Error())
86 | return err
87 | }
88 | c.Writer.(http.Flusher).Flush()
89 | }
90 | return nil
91 | }
92 |
93 | // handleNonStreamResponse 处理非流式响应
94 | func handleHunYuanNonStreamResponse(c *gin.Context, response *hunyuan.ChatCompletionsResponse, model string, oaiReqParam *OAIRequestParam) error {
95 | oaiResp := adapter.HunYuanResponseToOpenAIResponse(response)
96 | oaiResp.Model = oaiReqParam.ClientModel
97 |
98 | jdata, _ := json.Marshal(*oaiResp)
99 | mylog.Logger.Info(string(jdata))
100 | c.JSON(http.StatusOK, oaiResp)
101 | return nil
102 | }
103 |
--------------------------------------------------------------------------------
/pkg/handler/openai_qianfan_handler.go:
--------------------------------------------------------------------------------
1 | package handler
2 |
3 | import (
4 | "encoding/json"
5 | "github.com/gin-gonic/gin"
6 | "go.uber.org/zap"
7 | "net/http"
8 | "simple-one-api/pkg/adapter"
9 | "simple-one-api/pkg/config"
10 | baiduqianfan "simple-one-api/pkg/llm/baidu-qianfan"
11 | "simple-one-api/pkg/mylog"
12 | "simple-one-api/pkg/utils"
13 | )
14 |
15 | func OpenAI2QianFanHandler(c *gin.Context, oaiReqParam *OAIRequestParam) error {
16 |
17 | oaiReq := oaiReqParam.chatCompletionReq
18 | //s := oaiReqParam.modelDetails
19 | credentials := oaiReqParam.creds
20 | apiKey, _ := utils.GetStringFromMap(credentials, config.KEYNAME_API_KEY)
21 | secretKey, _ := utils.GetStringFromMap(credentials, config.KEYNAME_SECRET_KEY)
22 | configAddress, _ := utils.GetStringFromMap(credentials, config.KEYNAME_ADDRESSS)
23 | qfReq := adapter.OpenAIRequestToQianFanRequest(oaiReq)
24 |
25 | client := &http.Client{}
26 | if oaiReqParam.httpTransport != nil {
27 | client.Transport = oaiReqParam.httpTransport
28 | }
29 |
30 | clientModel := oaiReqParam.ClientModel
31 |
32 | if oaiReq.Stream {
33 | return handleQianFanStreamRequest(c, client, apiKey, secretKey, oaiReq.Model, clientModel, configAddress, qfReq)
34 | } else {
35 | return handleQianFanStandardRequest(c, client, apiKey, secretKey, oaiReq.Model, clientModel, configAddress, qfReq)
36 | }
37 | }
38 |
39 | func handleQianFanStreamRequest(c *gin.Context, client *http.Client, apiKey, secretKey, model string, clientModel string, configAddress string, qfReq *baiduqianfan.QianFanRequest) error {
40 | utils.SetEventStreamHeaders(c)
41 |
42 | err := baiduqianfan.QianFanCallSSE(client, apiKey, secretKey, model, configAddress, qfReq, func(qfResp *baiduqianfan.QianFanResponse) {
43 | oaiRespStream := adapter.QianFanResponseToOpenAIStreamResponse(qfResp)
44 | oaiRespStream.Model = clientModel
45 |
46 | respData, err := json.Marshal(&oaiRespStream)
47 | if err != nil {
48 | mylog.Logger.Error("Error marshaling response",
49 | zap.Error(err)) // 记录错误对象
50 |
51 | return
52 | }
53 |
54 | mylog.Logger.Info("Response HTTP data",
55 | zap.String("http_data", string(respData))) // 记录 HTTP 响应数据
56 |
57 | if qfResp.ErrorCode != 0 && oaiRespStream.Error != nil {
58 | mylog.Logger.Error("Error response",
59 | zap.Any("error", *oaiRespStream.Error)) // 记录错误对象
60 |
61 | c.JSON(http.StatusBadRequest, qfResp)
62 | return
63 | }
64 |
65 | c.Writer.WriteString("data: " + string(respData) + "\n\n")
66 | c.Writer.(http.Flusher).Flush()
67 | })
68 |
69 | if err != nil {
70 | mylog.Logger.Error("Error during SSE call",
71 | zap.Error(err)) // 记录错误对象
72 |
73 | return err
74 | }
75 |
76 | return nil
77 | }
78 |
79 | func handleQianFanStandardRequest(c *gin.Context, client *http.Client, apiKey, secretKey, model string, clientModel string, configAddress string, qfReq *baiduqianfan.QianFanRequest) error {
80 | qfResp, err := baiduqianfan.QianFanCall(client, apiKey, secretKey, model, configAddress, qfReq)
81 | if err != nil {
82 | mylog.Logger.Error("Error during API call",
83 | zap.Error(err)) // 记录错误对象
84 |
85 | return err
86 | }
87 |
88 | oaiResp := adapter.QianFanResponseToOpenAIResponse(qfResp)
89 | oaiResp.Model = clientModel
90 | mylog.Logger.Info("Standard response",
91 | zap.Any("response", oaiResp)) // 记录标准响应对象
92 |
93 | c.JSON(http.StatusOK, oaiResp)
94 | return nil
95 | }
96 |
--------------------------------------------------------------------------------
/pkg/handler/openai_zhipu_handler.go:
--------------------------------------------------------------------------------
1 | package handler
2 |
3 | import (
4 | "fmt"
5 | "github.com/sashabaranov/go-openai"
6 | "go.uber.org/zap"
7 | "simple-one-api/pkg/mylog"
8 | "simple-one-api/pkg/utils"
9 | "strings"
10 | )
11 |
// extractBase64Data returns the payload portion of a data-URL-style base64
// string, i.e. everything after the first comma. It errors on an empty input
// or when no comma separator is present.
func extractBase64Data(base64Image string) (string, error) {
	if base64Image == "" {
		return "", fmt.Errorf("base64Image is empty")
	}

	_, payload, found := strings.Cut(base64Image, ",")
	if !found {
		return "", fmt.Errorf("invalid base64Image format")
	}

	return payload, nil
}
24 |
25 | func AdjustChatCompletionRequestForZhiPu(oaiReq *openai.ChatCompletionRequest) {
26 | for i := range oaiReq.Messages {
27 | msg := &oaiReq.Messages[i]
28 | if len(msg.MultiContent) > 0 {
29 | mylog.Logger.Info("2")
30 | for _, content := range msg.MultiContent {
31 | if content.Type == openai.ChatMessagePartTypeImageURL {
32 | if strings.HasPrefix(content.ImageURL.URL, "data:image/") {
33 | encodedData, err := extractBase64Data(content.ImageURL.URL)
34 | if err != nil {
35 | debugstr := content.ImageURL.URL[:utils.Min(len(content.ImageURL.URL), 10)] + "..."
36 | mylog.Logger.Warn("base64 format err", zap.String("ImageURL.URL", debugstr))
37 | continue
38 | }
39 |
40 | content.ImageURL.URL = encodedData
41 | }
42 | }
43 | }
44 | }
45 | }
46 | }
47 |
--------------------------------------------------------------------------------
/pkg/initializer/initializer.go:
--------------------------------------------------------------------------------
1 | // pkg/initializer/initializer.go
2 | package initializer
3 |
4 | import (
5 | "github.com/gin-gonic/gin"
6 | "log"
7 | "simple-one-api/pkg/config"
8 | "simple-one-api/pkg/mylog"
9 | "sync"
10 | )
11 |
12 | var once sync.Once
13 |
14 | // Setup initializes the configuration and logging system.
15 | func Setup(configName string) error {
16 | var err error
17 | once.Do(func() {
18 | err = config.InitConfig(configName)
19 | if err != nil {
20 | log.Println("Error initializing config:", err)
21 | return
22 | }
23 |
24 | log.Println("config.InitConfig ok")
25 |
26 | if !config.Debug {
27 | gin.SetMode(gin.ReleaseMode)
28 | }
29 |
30 | mylog.InitLog(config.LogLevel)
31 | log.Println("config.LogLevel ok")
32 | })
33 | return err
34 | }
35 |
36 | func Cleanup() {
37 | mylog.Logger.Sync() // Ensure all logs are flushed properly
38 | }
39 |
--------------------------------------------------------------------------------
/pkg/llm/aliyun-dashscope/common_btype/ds_common_btype_msg.go:
--------------------------------------------------------------------------------
1 | package common_btype
2 |
// Message is a single chat message: its text content and the speaker role.
type Message struct {
	Content string `json:"content"`
	Role    string `json:"role"`
}

// Input is the input section of a B-type request; this API variant takes a
// single prompt string rather than a message list.
type Input struct {
	Prompt string `json:"prompt"`
}

// DSBtypeRequestBody is a model request: the model name plus its prompt input.
type DSBtypeRequestBody struct {
	Model string `json:"model"`
	Input Input  `json:"input"`
}

// DSBtypeResponseBody is the matching response: the generated text, token
// usage counters, and the service-side request id.
type DSBtypeResponseBody struct {
	Output struct {
		Text string `json:"text"`
	} `json:"output"`
	Usage struct {
		OutputTokens int `json:"output_tokens"`
		InputTokens  int `json:"input_tokens"`
	} `json:"usage"`
	RequestID string `json:"request_id"`
}
30 |
--------------------------------------------------------------------------------
/pkg/llm/aliyun-dashscope/commsg/ds_com_request/dashscope_prompt_request.go:
--------------------------------------------------------------------------------
1 | package ds_com_request
2 |
// ModelPromptInput is the input section of a prompt-style request: a single
// prompt string.
type ModelPromptInput struct {
	Prompt string `json:"prompt"`
}

// ModelPromptRequest is the full prompt-style request: model name, input,
// and generation parameters (Parameters is defined alongside the message
// request types in this package).
type ModelPromptRequest struct {
	Model      string           `json:"model"`
	Input      ModelPromptInput `json:"input"`
	Parameters *Parameters      `json:"parameters"`
}
14 |
--------------------------------------------------------------------------------
/pkg/llm/aliyun-dashscope/commsg/ds_com_request/ds_com_request.go:
--------------------------------------------------------------------------------
1 | package ds_com_request
2 |
// Message is a single chat message: speaker role and text content.
type Message struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

// Input is the input section of a request: the conversation message list.
type Input struct {
	Messages []Message `json:"messages"`
}

// Parameters carries optional request parameters.
type Parameters struct {
	// ResultFormat selects the response layout requested from the API.
	ResultFormat string `json:"result_format,omitempty"`
}

// ModelRequest is a full model request: model name, input messages, and
// optional parameters.
type ModelRequest struct {
	Model      string      `json:"model"`
	Input      Input       `json:"input"`
	Parameters *Parameters `json:"parameters,omitempty"`
}
25 |
--------------------------------------------------------------------------------
/pkg/llm/aliyun-dashscope/commsg/ds_com_resp/ds_com_resp.go:
--------------------------------------------------------------------------------
1 | package ds_com_resp
2 |
// Message is a returned message: role, content type, and the content itself.
type Message struct {
	Role        string `json:"role"`
	ContentType string `json:"content_type"`
	Content     string `json:"content"`
}

// Choice is one returned completion choice with its finish reason.
type Choice struct {
	FinishReason string `json:"finish_reason"`
	Message      Message `json:"message"`
}

// Output is the output section holding the returned choices.
type Output struct {
	Choices []Choice `json:"choices"`
}

// Usage reports token consumption for the request (input and output counts).
type Usage struct {
	OutputTokens int `json:"output_tokens"`
	InputTokens  int `json:"input_tokens"`
}

// ModelResponse is the full response: output, usage, and the service-side
// request id.
type ModelResponse struct {
	Output    Output `json:"output"`
	Usage     Usage  `json:"usage"`
	RequestID string `json:"request_id"`
}
33 |
--------------------------------------------------------------------------------
/pkg/llm/aliyun-dashscope/commsg/ds_com_resp/ds_com_stream_resp.go:
--------------------------------------------------------------------------------
1 | package ds_com_resp
2 |
// StreamResponseOutput is the output section of one stream chunk.
type StreamResponseOutput struct {
	Choices []StreamResponseChoice `json:"choices"`
}

// StreamResponseChoice is one choice within a stream chunk.
type StreamResponseChoice struct {
	Message      StreamResponseMessage `json:"message"`
	FinishReason string                `json:"finish_reason"`
}

// StreamResponseMessage is the message payload of a stream chunk.
type StreamResponseMessage struct {
	Content string `json:"content"`
	Role    string `json:"role"`
}

// StreamResponseUsage reports token usage for the stream so far.
type StreamResponseUsage struct {
	// NOTE(review): "Kens"/"kens" looks like a garbled token-count field
	// (the non-stream Usage in this package has input_tokens/output_tokens) —
	// confirm against an actual stream payload before relying on it.
	Kens         int `json:"kens"`
	OutputTokens int `json:"output_tokens"`
}

// ModelStreamResponse is one full chunk of a streaming response.
type ModelStreamResponse struct {
	Output    StreamResponseOutput `json:"output"`
	Usage     StreamResponseUsage  `json:"usage"`
	RequestID string               `json:"request_id"`
}
27 |
--------------------------------------------------------------------------------
/pkg/llm/baidu-qianfan/qianfan_model2address.go:
--------------------------------------------------------------------------------
1 | package baidu_qianfan
2 |
3 | import "errors"
4 |
// serviceMap maps a Qianfan model display name to its endpoint path segment.
// A value of "{}" marks a model whose endpoint is not yet filled in.
var serviceMap = map[string]string{
	"ERNIE-4.0-8K":                   "completions_pro",
	"ERNIE-4.0-8K-Latest":            "ernie-4.0-8k-latest",
	"ERNIE-4.0-8K-Preview":           "ernie-4.0-8k-preview",
	"ERNIE-4.0-8K-0329":              "ernie-4.0-8k-0329",
	"ERNIE-4.0-8K-0613":              "ernie-4.0-8k-0613",
	"ERNIE-4.0-Turbo-8K":             "ernie-4.0-turbo-8k",
	"ERNIE-4.0-Turbo-8K-Preview":     "ernie-4.0-turbo-8k-preview",
	"ERNIE-3.5-8K":                   "completions",
	"ERNIE-3.5-8K-Preview":           "ernie-3.5-8k-preview",
	"ERNIE-3.5-8K-0329":              "ernie-3.5-8k-0329",
	"ERNIE-3.5-128K":                 "ernie-3.5-128k",
	"ERNIE-3.5-8K-0613":              "ernie-3.5-8k-0613",
	"ERNIE-3.5-8K-0701":              "ernie-3.5-8k-0701",
	"ERNIE-Speed-8K":                 "ernie_speed",
	"ERNIE-Speed-128K":               "ernie-speed-128k",
	"ERNIE-Lite-8K-0922":             "eb-instant",
	"ERNIE-Lite-8K":                  "ernie-lite-8k",
	"ERNIE-Lite-8K-0725":             "{}",
	"ERNIE-Lite-4K-0704":             "{}",
	"ERNIE-Lite-4K-0516":             "{}",
	"ERNIE-Lite-128K-0419":           "{}",
	"ERNIE-Tiny-8K":                  "ernie-tiny-8k",
	"ERNIE-Novel-8K":                 "ernie-novel-8k",
	"ERNIE-Character-8K":             "ernie-char-8k",
	"ERNIE-Functions-8K":             "ernie-func-8k",
	"Qianfan-Dynamic-8K":             "qianfan-dynamic-8k",
	"ERNIE-Speed-AppBuilder-8K":      "ai_apaas",
	"ERNIE-Lite-AppBuilder-8K-0614":  "ai_apaas_lite",
	"Gemma-2B-it":                    "{}",
	"Gemma-7B-it":                    "gemma_7b_it",
	"Yi-34B-Chat":                    "yi_34b_chat",
	"Mixtral-8x7B-Instruct":          "mixtral_8x7b_instruct",
	"Mistral-7B-Instruct":            "{}",
	"Llama-2-7b-chat":                "llama_2_7b",
	"Linly-Chinese-LLaMA-2-7B":       "{}",
	"Qianfan-Chinese-Llama-2-7B":     "qianfan_chinese_llama_2_7b",
	"Qianfan-Chinese-Llama-2-7B-32K": "{}",
	"Llama-2-13b-chat":               "llama_2_13b",
	"Linly-Chinese-LLaMA-2-13B":      "{}",
	"Qianfan-Chinese-Llama-2-13B-v1": "qianfan_chinese_llama_2_13b",
	"Qianfan-Chinese-Llama-2-13B-v2": "{}",
	"Llama-2-70b-chat":               "llama_2_70b",
	"Qianfan-Llama-2-70B-compressed": "{}",
	"Qianfan-Chinese-Llama-2-70B":    "qianfan_chinese_llama_2_70b",
	"Qianfan-Chinese-Llama-2-1.3B":   "{}",
	"Meta-Llama-3-8B-Instruct":       "llama_3_8b",
	"Meta-Llama-3-70B-Instruct":      "llama_3_70b",
	"ChatGLM3-6B":                    "{}",
	"chatglm3-6b-32k":                "{}",
	"ChatGLM2-6B-32K":                "chatglm2_6b_32k",
	"ChatGLM2-6B-INT4":               "{}",
	"ChatGLM2-6B":                    "{}",
	"Baichuan2-7B-Chat":              "{}",
	"Baichuan2-13B-Chat":             "{}",
	"XVERSE-13B-Chat":                "{}",
	"XuanYuan-70B-Chat-4bit":         "xuanyuan_70b_chat",
	"DISC-MedLLM":                    "{}",
	"ChatLaw":                        "chatlaw",
	"Falcon-7B":                      "{}",
	"Falcon-40B-Instruct":            "{}",
	"AquilaChat-7B":                  "aquilachat_7b",
	"RWKV-4-World":                   "{}",
	"BLOOMZ-7B":                      "bloomz_7b1",
	"Qianfan-BLOOMZ-7B-compressed":   "qianfan_bloomz_7b_compressed",
	"RWKV-4-pile-14B":                "{}",
	"RWKV-Raven-14B":                 "{}",
	"OpenLLaMA-7B":                   "{}",
	"Dolly-12B":                      "{}",
	"MPT-7B-Instruct":                "{}",
	"MPT-30B-instruct":               "{}",
	"OA-Pythia-12B-SFT-4":            "{}",
}

// qianfanModel2Address looks up the endpoint path segment registered for the
// given model name, returning an error when the name is unknown.
func qianfanModel2Address(serviceName string) (string, error) {
	address, ok := serviceMap[serviceName]
	if !ok {
		return "", errors.New("服务名未找到")
	}
	return address, nil
}
89 |
--------------------------------------------------------------------------------
/pkg/llm/baidu-qianfan/qianfan_request.go:
--------------------------------------------------------------------------------
1 | package baidu_qianfan
2 |
3 | import (
4 | "simple-one-api/pkg/mycommon"
5 | )
6 |
// QianFanRequest is the request body sent to the Baidu Qianfan chat API.
type QianFanRequest struct {
	Messages        []mycommon.Message `json:"messages"`                    // conversation message list
	Stream          *bool              `json:"stream,omitempty"`            // whether to return data via the streaming interface
	Temperature     *float64           `json:"temperature,omitempty"`       // controls output randomness
	TopP            *float64           `json:"top_p,omitempty"`             // controls output diversity (nucleus sampling)
	PenaltyScore    *float64           `json:"penalty_score,omitempty"`     // penalty score to reduce repetition
	System          *string            `json:"system,omitempty"`            // system persona / instruction text
	Stop            []string           `json:"stop,omitempty"`              // stop sequences that end generation
	MaxOutputTokens *int               `json:"max_output_tokens,omitempty"` // maximum number of output tokens
	UserID          *string            `json:"user_id,omitempty"`           // unique end-user identifier
}
19 |
--------------------------------------------------------------------------------
/pkg/llm/baidu-qianfan/qianfan_response.go:
--------------------------------------------------------------------------------
1 | package baidu_qianfan
2 |
// QianFanResponse is the chat completion response body returned by the
// Baidu Qianfan API. Error fields are populated in-band on failure.
type QianFanResponse struct {
	ID               string `json:"id"`
	Object           string `json:"object,omitempty"`
	Created          int64  `json:"created,omitempty"`
	SentenceID       *int   `json:"sentence_id,omitempty"` // only present in streaming mode
	IsEnd            *bool  `json:"is_end,omitempty"`      // only present in streaming mode
	IsTruncated      bool   `json:"is_truncated,omitempty"`
	Result           string `json:"result,omitempty"`
	NeedClearHistory bool   `json:"need_clear_history,omitempty"`
	BanRound         *int   `json:"ban_round,omitempty"` // only returned when need_clear_history is true
	Usage            Usage  `json:"usage,omitempty"`

	ErrorCode int    `json:"error_code,omitempty"`
	ErrorMsg  string `json:"error_msg,omitempty"`
}

// Usage holds token accounting for one completion.
type Usage struct {
	PromptTokens     int `json:"prompt_tokens,omitempty"`
	CompletionTokens int `json:"completion_tokens,omitempty"`
	TotalTokens      int `json:"total_tokens,omitempty"`
}

// QianFanErrorResponse is the dedicated error payload shape.
type QianFanErrorResponse struct {
	ErrorCode int    `json:"error_code,omitempty"`
	ErrorMsg  string `json:"error_msg,omitempty"`
	ID        string `json:"id,omitempty"`
}
33 |
--------------------------------------------------------------------------------
/pkg/llm/claude/claude_request.go:
--------------------------------------------------------------------------------
1 | package claude
2 |
3 | import "encoding/json"
4 |
// MarshalJSON implements custom JSON serialization for Message: when the
// message carries multi-part content and no plain-text content, the
// "content" key is emitted as a list of content blocks instead of a string.
func (m Message) MarshalJSON() ([]byte, error) {
	// Alias has Message's fields but not its methods, avoiding recursion.
	type Alias Message
	if len(m.MultiContent) == 0 || m.Content != "" {
		// Plain-text message: serialize the fields as-is.
		return json.Marshal(Alias(m))
	}
	// Multi-part message: the anonymous wrapper's Content field shadows
	// the embedded string Content under the same "content" key.
	return json.Marshal(&struct {
		Alias
		Content []ContentBlock `json:"content"`
	}{
		Alias:   Alias(m),
		Content: m.MultiContent,
	})
}

// ContentBlock is one element of a multi-part message body.
type ContentBlock struct {
	Type  string `json:"type"`
	Text  string `json:"text,omitempty"`
	Image *Image `json:"image,omitempty"`
}

// Image wraps an image payload carried inside a content block.
type Image struct {
	Source ImageSource `json:"source"`
}

// ImageSource describes how the image bytes are provided.
type ImageSource struct {
	Type      string `json:"type"`
	MediaType string `json:"media_type"`
	Data      string `json:"data"`
}

// Message is a single chat message. MultiContent, when set, replaces
// Content during serialization via the custom MarshalJSON above.
type Message struct {
	Role         string         `json:"role"`
	Content      string         `json:"content"`
	MultiContent []ContentBlock `json:"-"`
}
50 |
// Metadata carries optional request metadata.
type Metadata struct {
	UserID string `json:"user_id,omitempty"` // opaque end-user identifier
}

// ToolInputSchema is the JSON schema describing a tool's input object.
type ToolInputSchema struct {
	Type       string                 `json:"type"`
	Properties map[string]interface{} `json:"properties,omitempty"`
	Required   []string               `json:"required,omitempty"`
}

// Tool declares a callable tool exposed to the model.
type Tool struct {
	Name        string          `json:"name"`
	Description string          `json:"description,omitempty"`
	InputSchema ToolInputSchema `json:"input_schema"`
}

// ToolChoice selects a specific tool for the model to use.
type ToolChoice struct {
	Type string `json:"type"` // e.g. "tool"
	Name string `json:"name"` // name of the tool to invoke
}

// RequestBody is the top-level Claude messages API request payload.
type RequestBody struct {
	Model         string      `json:"model"`
	Messages      []Message   `json:"messages"`
	MaxTokens     int         `json:"max_tokens"`
	Metadata      *Metadata   `json:"metadata,omitempty"`
	StopSequences []string    `json:"stop_sequences,omitempty"`
	Stream        bool        `json:"stream"`
	System        string      `json:"system,omitempty"`
	Temperature   float32     `json:"temperature"`
	ToolChoice    *ToolChoice `json:"tool_choice,omitempty"`
	Tools         []Tool      `json:"tools,omitempty"`
	TopK          int         `json:"top_k,omitempty"`
	TopP          float32     `json:"top_p,omitempty"`
}
89 |
--------------------------------------------------------------------------------
/pkg/llm/claude/claude_response.go:
--------------------------------------------------------------------------------
1 | package claude
2 |
// StopReasonType enumerates why the model stopped generating.
type StopReasonType string

const (
	EndTurn      StopReasonType = "end_turn"
	MaxTokens    StopReasonType = "max_tokens"
	StopSequence StopReasonType = "stop_sequence"
	ToolUse      StopReasonType = "tool_use"
)

// RespContent is one content element of a non-streaming response.
type RespContent struct {
	Type string `json:"type"`
	Text string `json:"text,omitempty"`
}

// ResponseBody is the non-streaming Claude messages API response.
type ResponseBody struct {
	ID           string        `json:"id"`
	Type         string        `json:"type"`
	Role         string        `json:"role"`
	Content      []RespContent `json:"content"`
	Model        string        `json:"model"`
	StopReason   string        `json:"stop_reason"`
	StopSequence string        `json:"stop_sequence"`
	Usage        struct {
		InputTokens  int `json:"input_tokens"`
		OutputTokens int `json:"output_tokens"`
	} `json:"usage"`
}
30 |
--------------------------------------------------------------------------------
/pkg/llm/claude/claude_stream_response.go:
--------------------------------------------------------------------------------
1 | package claude
2 |
// Event is a catch-all envelope for Claude SSE stream events; only the
// fields relevant to a given event type are populated.
type Event struct {
	Type              string             `json:"type"`
	Message           *RespMessage       `json:"message,omitempty"`
	ContentBlock      *ContentBlock      `json:"content_block,omitempty"`
	Delta             *Delta             `json:"delta,omitempty"`
	Usage             *Usage             `json:"usage,omitempty"`
	Index             *int               `json:"index,omitempty"`
	MessageEnd        *MessageEnd        `json:"message_end,omitempty"`
	StopReason        *string            `json:"stop_reason,omitempty"`
	StopSequence      *string            `json:"stop_sequence,omitempty"`
	ContentBlockDelta *ContentBlockDelta `json:"content_block_delta,omitempty"`
	ContentBlockStop  *ContentBlockStop  `json:"content_block_stop,omitempty"`
}

// RespMessage is the message object carried by message-level stream events.
type RespMessage struct {
	ID           string   `json:"id"`
	Type         string   `json:"type"`
	Role         string   `json:"role"`
	Content      []string `json:"content"`
	Model        string   `json:"model"`
	StopReason   *string  `json:"stop_reason"`
	StopSequence *string  `json:"stop_sequence"`
	Usage        Usage    `json:"usage"`
}

// Delta is an incremental text fragment.
type Delta struct {
	Type string `json:"type"`
	Text string `json:"text"`
}

// Usage holds token accounting for the stream.
type Usage struct {
	InputTokens  int `json:"input_tokens"`
	OutputTokens int `json:"output_tokens"`
}

// MessageEnd describes why a streamed message finished.
type MessageEnd struct {
	StopReason   string  `json:"stop_reason"`
	StopSequence *string `json:"stop_sequence"`
}

// ContentBlockDelta pairs a delta with the index of the block it extends.
type ContentBlockDelta struct {
	Index int   `json:"index"`
	Delta Delta `json:"delta"`
}

// ContentBlockStop marks the end of the content block at Index.
type ContentBlockStop struct {
	Index int `json:"index"`
}

// MsgMessageStart models the "message_start" stream event (per type name).
type MsgMessageStart struct {
	Type    string `json:"type"`
	Message struct {
		ID           string `json:"id"`
		Type         string `json:"type"`
		Role         string `json:"role"`
		Model        string `json:"model"`
		StopSequence any    `json:"stop_sequence"`
		Usage        struct {
			InputTokens  int `json:"input_tokens"`
			OutputTokens int `json:"output_tokens"`
		} `json:"usage"`
		Content    []any   `json:"content"`
		StopReason *string `json:"stop_reason"`
	} `json:"message"`
}

// MsgContentBlockStart models the "content_block_start" stream event.
type MsgContentBlockStart struct {
	Type         string `json:"type"`
	Index        int    `json:"index"`
	ContentBlock struct {
		Type string `json:"type"`
		Text string `json:"text"`
	} `json:"content_block"`
}

// MsgContentBlockDelta models the "content_block_delta" stream event.
type MsgContentBlockDelta struct {
	Type  string `json:"type"`
	Index int    `json:"index"`
	Delta struct {
		Type string `json:"type"`
		Text string `json:"text"`
	} `json:"delta"`
}

// MsgContentBlockStop models the "content_block_stop" stream event.
type MsgContentBlockStop struct {
	Type  string `json:"type"`
	Index int    `json:"index"`
}

// MsgMessageDelta models the "message_delta" stream event.
type MsgMessageDelta struct {
	Type  string `json:"type"`
	Delta struct {
		StopReason   string `json:"stop_reason"`
		StopSequence any    `json:"stop_sequence"`
	} `json:"delta"`
	Usage struct {
		OutputTokens int `json:"output_tokens"`
	} `json:"usage"`
}

// MsgMessageStop models the "message_stop" stream event.
type MsgMessageStop struct {
	Type string `json:"type"`
}
107 |
--------------------------------------------------------------------------------
/pkg/llm/devplatform/baidu_agentbuilder/baidu_agentbuilder_conversation.go:
--------------------------------------------------------------------------------
1 | package baidu_agentbuilder
2 |
// ConversationMessageContent is the typed payload of a conversation message.
type ConversationMessageContent struct {
	Type  string                 `json:"type"`
	Value map[string]interface{} `json:"value"`
}

// ConversationMessage wraps the content of one conversation request message.
type ConversationMessage struct {
	Content ConversationMessageContent `json:"content"`
}

// ConversationRequest is the request body of the AgentBuilder conversation API.
type ConversationRequest struct {
	Message  ConversationMessage `json:"message"`
	Source   string              `json:"source"`
	From     string              `json:"from"`
	OpenID   string              `json:"openId"`
	ThreadID string              `json:"threadId,omitempty"` // omitted to start a new thread
}

// ConversationResponse is the response body of the conversation API.
type ConversationResponse struct {
	Status  int    `json:"status"`
	Message string `json:"message"`
	LogID   string `json:"logid"`
	Data    struct {
		Message struct {
			Content []struct {
				DataType   string `json:"dataType"`
				IsFinished bool   `json:"isFinished"`
				Data       struct {
					Text string `json:"text"`
				} `json:"data"`
			} `json:"content"`
			ThreadID string `json:"threadId"`
			MsgID    string `json:"msgId"`
			EndTurn  bool   `json:"endTurn"`
		} `json:"message"`
	} `json:"data"`
}
42 |
--------------------------------------------------------------------------------
/pkg/llm/devplatform/baidu_agentbuilder/baidu_agentbuilder_getanswer.go:
--------------------------------------------------------------------------------
1 | package baidu_agentbuilder
2 |
// GetAnswerMessageContent is the typed payload of a getAnswer request message.
type GetAnswerMessageContent struct {
	Type  string            `json:"type"`
	Value map[string]string `json:"value"`
}

// GetAnswerMessage wraps the content of one getAnswer request message.
type GetAnswerMessage struct {
	Content GetAnswerMessageContent `json:"content"`
}

// GetAnswerRequest is the request body of the AgentBuilder getAnswer API.
type GetAnswerRequest struct {
	Message  GetAnswerMessage `json:"message"`
	Source   string           `json:"source"`
	From     string           `json:"from"`
	OpenID   string           `json:"openId"`
	ThreadID string           `json:"threadId,omitempty"` // omitted to start a new thread
}

// GetAnswerResponse is the response body of the getAnswer API.
type GetAnswerResponse struct {
	Status  int    `json:"status"`
	Message string `json:"message"`
	LogID   string `json:"logid"`
	Data    struct {
		Content []struct {
			DataType string `json:"dataType"`
			Data     string `json:"data"`
		} `json:"content"`
		ThreadID string `json:"threadId"`
		MsgID    string `json:"msgId"`
	} `json:"data"`
}
34 | }
35 |
--------------------------------------------------------------------------------
/pkg/llm/devplatform/cozecn/cozecn_request.go:
--------------------------------------------------------------------------------
1 | package cozecn
2 |
// CozeRequest is the Coze (v2) chat request body.
type CozeRequest struct {
	ConversationID string    `json:"conversation_id"`
	BotID          string    `json:"bot_id"`
	User           string    `json:"user"`
	Query          string    `json:"query"`
	Stream         bool      `json:"stream"`
	ChatHistory    []Message `json:"chat_history,omitempty"`
}

// Message is one chat-history entry.
type Message struct {
	Role        string `json:"role"`
	Type        string `json:"type,omitempty"`
	Content     string `json:"content"`
	ContentType string `json:"content_type"`
}
19 |
--------------------------------------------------------------------------------
/pkg/llm/devplatform/cozecn/cozecn_response.go:
--------------------------------------------------------------------------------
1 | package cozecn
2 |
// StreamResponse is one SSE event of the Coze (v2) streaming chat API.
type StreamResponse struct {
	Event            string  `json:"event"`
	Message          Message `json:"message,omitempty"`
	IsFinish         bool    `json:"is_finish,omitempty"`
	Index            int     `json:"index,omitempty"`
	ConversationID   string  `json:"conversation_id,omitempty"`
	ErrorInformation struct {
		Code int    `json:"code"`
		Msg  string `json:"msg"`
	} `json:"error_information,omitempty"`
}

// Response is the non-streaming Coze (v2) chat response.
type Response struct {
	Messages       []Message `json:"messages"`
	ConversationID string    `json:"conversation_id"`
	Code           int       `json:"code"`
	Msg            string    `json:"msg"`
}
21 |
--------------------------------------------------------------------------------
/pkg/llm/devplatform/cozecn_v3/common/cozecn_v3_http_request.go:
--------------------------------------------------------------------------------
1 | package common
2 |
3 | import (
4 | "bufio"
5 | "bytes"
6 | "fmt"
7 | "go.uber.org/zap"
8 | "io"
9 | "net/http"
10 | "simple-one-api/pkg/mylog"
11 | "strings"
12 | )
13 |
// SendCozeV3HTTPRequest performs a plain (non-SSE) JSON POST to url with
// reqBody, sending apiKey as a Bearer credential, and returns the raw
// response body. An optional httpTransport overrides the default transport.
// The HTTP status code is not inspected; callers parse the body either way.
func SendCozeV3HTTPRequest(apiKey, url string, reqBody []byte, httpTransport *http.Transport) ([]byte, error) {
	request, err := http.NewRequest("POST", url, bytes.NewReader(reqBody))
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}

	request.Header.Set("Content-Type", "application/json")
	request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", apiKey))

	httpClient := &http.Client{}
	if httpTransport != nil {
		httpClient.Transport = httpTransport
	}

	response, err := httpClient.Do(request)
	if err != nil {
		return nil, fmt.Errorf("failed to send request: %w", err)
	}
	defer response.Body.Close()

	body, err := io.ReadAll(response.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read response body: %w", err)
	}

	return body, nil
}
42 |
// SendCozeV3StreamHttpRequest performs an SSE (server-sent events) POST to
// url with reqBody, invoking callback once per received event with the
// event name and its data payload. apiKey is sent as a Bearer credential;
// an optional httpTransport overrides the default transport.
//
// NOTE(review): only frames consisting of exactly two lines
// ("event:..." then "data:...") are forwarded; any other frame shape is
// silently dropped — confirm the Coze stream never emits extra SSE fields.
// The scanner uses bufio's default token limit (64KiB), so an oversized
// event would end the loop early. The HTTP status code is not inspected,
// so a non-2xx body is scanned like a normal stream.
func SendCozeV3StreamHttpRequest(apiKey, url string, reqBody []byte, callback func(event, data string), httpTransport *http.Transport) error {
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(reqBody))
	if err != nil {
		return fmt.Errorf("failed to create request: %w", err)
	}

	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", apiKey))
	req.Header.Set("Accept", "text/event-stream")

	client := &http.Client{}
	if httpTransport != nil {
		client.Transport = httpTransport
	}

	mylog.Logger.Info("SendCozeV3StreamHttpRequest", zap.String("url", url), zap.String("reqBody", string(reqBody)))

	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("failed to send request: %w", err)
	}
	defer resp.Body.Close()

	reader := bufio.NewReader(resp.Body)
	scanner := bufio.NewScanner(reader)

	// Split the stream on blank lines, the SSE frame delimiter.
	scanner.Split(splitOnDoubleNewline)
	for scanner.Scan() {
		part := scanner.Text()
		rParts := strings.Split(part, "\n")
		if len(rParts) == 2 {
			if strings.HasPrefix(rParts[0], "event:") && strings.HasPrefix(rParts[1], "data:") {
				// Strip the "event:"/"data:" prefixes and surrounding whitespace.
				event, data := strings.TrimSpace(rParts[0][6:]), strings.TrimSpace(rParts[1][5:])
				//log.Println(event, data)
				callback(event, data)
			}
		}
	}

	return nil
}
86 |
// splitOnDoubleNewline is a bufio.SplitFunc that tokenizes a stream on
// blank-line ("\n\n") separators without ever emitting empty tokens.
func splitOnDoubleNewline(data []byte, atEOF bool) (advance int, token []byte, err error) {
	sep := []byte("\n\n")
	idx := bytes.Index(data, sep)
	switch {
	case idx == 0:
		// Separator at the very start: consume it and emit nothing.
		return len(sep), nil, nil
	case idx > 0:
		// Emit everything before the separator and consume both.
		return idx + len(sep), data[:idx], nil
	}
	if atEOF && len(data) > 0 {
		// Flush any unterminated trailing token at end of stream.
		return len(data), data, nil
	}
	// Not enough data yet (or clean EOF with nothing buffered).
	return 0, nil, nil
}
107 |
--------------------------------------------------------------------------------
/pkg/llm/devplatform/cozecn_v3/common/request.go:
--------------------------------------------------------------------------------
1 | package common
2 |
// ChatRequest is the Coze v3 chat request body.
type ChatRequest struct {
	BotID              string            `json:"bot_id"`
	UserID             string            `json:"user_id"`
	Stream             bool              `json:"stream"`
	AutoSaveHistory    bool              `json:"auto_save_history"`
	AdditionalMessages []Message         `json:"additional_messages"`
	CustomVariables    map[string]string `json:"custom_variables,omitempty"`
	ExtraParams        map[string]string `json:"extra_params,omitempty"`
}

// Message is one chat message in a v3 request.
type Message struct {
	Role        string `json:"role"`
	Content     string `json:"content"`
	ContentType string `json:"content_type"`
}

// ObjectStringMessage is one element of an object_string multi-part content value.
type ObjectStringMessage struct {
	Type    string `json:"type"`
	Text    string `json:"text,omitempty"`
	FileID  string `json:"file_id,omitempty"`
	FileURL string `json:"file_url,omitempty"`
}
27 |
--------------------------------------------------------------------------------
/pkg/llm/devplatform/cozecn_v3/nonestream/chat/chat.go:
--------------------------------------------------------------------------------
1 | package chat
2 |
3 | import (
4 | "encoding/json"
5 | "log"
6 | "net/http"
7 | "simple-one-api/pkg/llm/devplatform/cozecn_v3/common"
8 | )
9 |
10 | func Chat(token string, chatRequest *common.ChatRequest, httpTransport *http.Transport) (*Response, error) {
11 | serverURL := "https://api.coze.cn/v3/chat"
12 |
13 | reqData, _ := json.Marshal(chatRequest)
14 | respData, err := common.SendCozeV3HTTPRequest(token, serverURL, reqData, httpTransport)
15 | if err != nil {
16 | log.Println(err)
17 | return nil, err
18 | }
19 |
20 | var respJson Response
21 | json.Unmarshal(respData, &respJson)
22 |
23 | log.Println(respJson)
24 |
25 | return &respJson, err
26 | }
27 |
--------------------------------------------------------------------------------
/pkg/llm/devplatform/cozecn_v3/nonestream/chat/response.go:
--------------------------------------------------------------------------------
1 | package chat
2 |
// LastError carries the error (if any) attached to a chat object.
type LastError struct {
	Code int    `json:"code"`
	Msg  string `json:"msg"`
}

// Data is the chat object returned by the v3 chat endpoint.
type Data struct {
	ID             string    `json:"id"`
	ConversationID string    `json:"conversation_id"`
	BotID          string    `json:"bot_id"`
	CreatedAt      int64     `json:"created_at"` // Unix timestamp
	LastError      LastError `json:"last_error"`
	Status         string    `json:"status"`
}

// Response is the envelope around the chat object.
type Response struct {
	Data Data   `json:"data"`
	Code int    `json:"code"`
	Msg  string `json:"msg"`
}
23 |
--------------------------------------------------------------------------------
/pkg/llm/devplatform/cozecn_v3/nonestream/chat_message_list/chat_message_list.go:
--------------------------------------------------------------------------------
1 | package chat_message_list
2 |
import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"time"
)
10 |
11 | // 发起POST请求并处理响应
12 | func ChatMessageslist(chatID string, conversationID string, token string) (*MessageListResponse, error) {
13 | // 构造请求URL
14 | url := fmt.Sprintf("https://api.coze.cn/v3/chat/message/list?chat_id=%s&conversation_id=%s", chatID, conversationID)
15 |
16 | // 创建一个新的POST请求,空的body
17 | req, err := http.NewRequest("POST", url, bytes.NewBuffer([]byte("{}")))
18 | if err != nil {
19 | return nil, err
20 | }
21 |
22 | // 设置头部
23 | req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
24 | req.Header.Set("Content-Type", "application/json")
25 |
26 | // 创建HTTP客户端并发送请求
27 | client := &http.Client{}
28 | resp, err := client.Do(req)
29 | if err != nil {
30 | return nil, err
31 | }
32 | defer resp.Body.Close()
33 |
34 | // 读取响应体
35 | body, err := io.ReadAll(resp.Body)
36 | if err != nil {
37 | return nil, err
38 | }
39 |
40 | // log.Println(string(body))
41 |
42 | // 解析响应体JSON
43 | var messageListResponse MessageListResponse
44 | err = json.Unmarshal(body, &messageListResponse)
45 | if err != nil {
46 | return nil, err
47 | }
48 |
49 | return &messageListResponse, nil
50 | }
51 |
--------------------------------------------------------------------------------
/pkg/llm/devplatform/cozecn_v3/nonestream/chat_message_list/response.go:
--------------------------------------------------------------------------------
1 | package chat_message_list
2 |
// Message is one chat message in a message-list response.
type Message struct {
	BotID          string `json:"bot_id"`
	ChatID         string `json:"chat_id"`
	Content        string `json:"content"`
	ContentType    string `json:"content_type"`
	ID             string `json:"id"`
	ConversationID string `json:"conversation_id"`
	Role           string `json:"role"`
	Type           string `json:"type"`
}

// MessageListResponse is the envelope returned by the message/list endpoint.
type MessageListResponse struct {
	Code int       `json:"code"`
	Data []Message `json:"data"`
	Msg  string    `json:"msg"`
}
21 |
--------------------------------------------------------------------------------
/pkg/llm/devplatform/cozecn_v3/nonestream/chat_retrieve/chat_retrieve.go:
--------------------------------------------------------------------------------
1 | package chat_retrieve
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 | "io"
7 | "net/http"
8 | "time"
9 | )
10 |
11 | // 发起GET请求并处理响应
12 | func ChatRetrieve(chatID string, conversationID string, token string) (*ChatRetrieveResponse, error) {
13 | // 构造请求URL
14 | url := fmt.Sprintf("https://api.coze.cn/v3/chat/retrieve?chat_id=%s&conversation_id=%s", chatID, conversationID)
15 |
16 | // 创建一个新的请求
17 | req, err := http.NewRequest("GET", url, nil)
18 | if err != nil {
19 | return nil, err
20 | }
21 |
22 | // 设置头部
23 | req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
24 | req.Header.Set("Content-Type", "application/json")
25 |
26 | // 创建HTTP客户端并发送请求
27 | client := &http.Client{
28 | Timeout: 3 * time.Minute,
29 | }
30 | resp, err := client.Do(req)
31 | if err != nil {
32 | return nil, err
33 | }
34 | defer resp.Body.Close()
35 |
36 | // 读取响应体
37 | body, err := io.ReadAll(resp.Body)
38 | if err != nil {
39 | return nil, err
40 | }
41 |
42 | // 解析响应体JSON
43 | var chatResponse ChatRetrieveResponse
44 | err = json.Unmarshal(body, &chatResponse)
45 | if err != nil {
46 | return nil, err
47 | }
48 |
49 | return &chatResponse, nil
50 | }
51 |
--------------------------------------------------------------------------------
/pkg/llm/devplatform/cozecn_v3/nonestream/chat_retrieve/defines.go:
--------------------------------------------------------------------------------
1 | package chat_retrieve
2 |
// Lifecycle states reported by the chat retrieve endpoint.
const (
	StatusCreated        = "created"
	StatusInProgress     = "in_progress"
	StatusCompleted      = "completed"
	StatusFailed         = "failed"
	StatusRequiresAction = "requires_action"
	StatusCanceled       = "canceled"
)
11 |
--------------------------------------------------------------------------------
/pkg/llm/devplatform/cozecn_v3/nonestream/chat_retrieve/response.go:
--------------------------------------------------------------------------------
1 | package chat_retrieve
2 |
// Usage holds token accounting for a chat.
type Usage struct {
	InputCount  int `json:"input_count"`
	OutputCount int `json:"output_count"`
	TokenCount  int `json:"token_count"`
}

// ChatData is the chat object returned by the retrieve endpoint.
type ChatData struct {
	BotID          string `json:"bot_id"`
	CompletedAt    int64  `json:"completed_at"` // Unix timestamp
	ConversationID string `json:"conversation_id"`
	CreatedAt      int64  `json:"created_at"` // Unix timestamp
	ID             string `json:"id"`
	Status         string `json:"status"` // one of the Status* constants in this package
	Usage          Usage  `json:"usage"`
}

// ChatRetrieveResponse is the envelope around the chat object.
type ChatRetrieveResponse struct {
	Code int      `json:"code"`
	Data ChatData `json:"data"`
	Msg  string   `json:"msg"`
}
27 |
--------------------------------------------------------------------------------
/pkg/llm/devplatform/cozecn_v3/nonestream/chat_with_none_stream.go:
--------------------------------------------------------------------------------
1 | package nonestream
2 |
import (
	"fmt"
	"log"
	"net/http"
	"simple-one-api/pkg/llm/devplatform/cozecn_v3/common"
	"simple-one-api/pkg/llm/devplatform/cozecn_v3/nonestream/chat"
	"simple-one-api/pkg/llm/devplatform/cozecn_v3/nonestream/chat_message_list"
	"simple-one-api/pkg/llm/devplatform/cozecn_v3/nonestream/chat_retrieve"
	"time"
)
12 |
13 | func ChatWithNoneStream(token string, chatRequest *common.ChatRequest, httpTransport *http.Transport, timeout int) (*chat_message_list.MessageListResponse, error) {
14 |
15 | chatResp, err := chat.Chat(token, chatRequest, httpTransport)
16 | if err != nil {
17 | log.Println(err)
18 | return nil, err
19 | }
20 |
21 | if timeout < 0 {
22 | timeout = 60
23 | }
24 |
25 | for i := 0; i < timeout; i++ {
26 | chatRetrieveResp, err := chat_retrieve.ChatRetrieve(chatResp.Data.ID, chatResp.Data.ConversationID, token)
27 | if err != nil {
28 | log.Println(err)
29 | return nil, err
30 | }
31 |
32 | if chatRetrieveResp.Data.Status == chat_retrieve.StatusCreated || chatRetrieveResp.Data.Status == chat_retrieve.StatusInProgress {
33 | time.Sleep(1 * time.Second)
34 | continue
35 | } else if chatRetrieveResp.Data.Status == chat_retrieve.StatusCompleted {
36 | messageListResponse, err := chat_message_list.ChatMessageslist(chatResp.Data.ID, chatResp.Data.ConversationID, token)
37 | if err != nil {
38 | log.Println(err)
39 | return nil, err
40 | }
41 |
42 | return messageListResponse, nil
43 | } else {
44 | log.Println(chatRetrieveResp)
45 | break
46 | }
47 | }
48 |
49 | return nil, err
50 | }
51 |
--------------------------------------------------------------------------------
/pkg/llm/devplatform/cozecn_v3/streammode/chat.go:
--------------------------------------------------------------------------------
1 | package streammode
2 |
3 | import (
4 | "encoding/json"
5 | "log"
6 | "net/http"
7 | "simple-one-api/pkg/llm/devplatform/cozecn_v3/common"
8 | )
9 |
10 | func Chat(token string, chatRequest *common.ChatRequest, callback func(event, data string), httpTransport *http.Transport) error {
11 | serverURL := "https://api.coze.cn/v3/chat"
12 |
13 | reqData, _ := json.Marshal(chatRequest)
14 |
15 | err := common.SendCozeV3StreamHttpRequest(token, serverURL, reqData, callback, httpTransport)
16 | if err != nil {
17 | log.Println(err)
18 | return err
19 | }
20 |
21 | return err
22 | }
23 |
--------------------------------------------------------------------------------
/pkg/llm/devplatform/cozecn_v3/streammode/response.go:
--------------------------------------------------------------------------------
1 | package streammode
2 |
// EventData is the payload of one Coze v3 streaming event; message-delta
// and chat-status events share this shape, with only the relevant fields set.
type EventData struct {
	ID             string `json:"id"`
	ConversationID string `json:"conversation_id"`
	BotID          string `json:"bot_id"`
	Role           string `json:"role"`
	Type           string `json:"type"`
	Content        string `json:"content"`
	ContentType    string `json:"content_type"`
	ChatID         string `json:"chat_id"`
	CompletedAt    int64  `json:"completed_at"` // presumably a Unix timestamp — confirm against API docs
	LastError      struct {
		Code int    `json:"code"`
		Msg  string `json:"msg"`
	} `json:"last_error"`
	Status string `json:"status"`
	Usage  struct {
		TokenCount  int `json:"token_count"`
		OutputCount int `json:"output_count"`
		InputCount  int `json:"input_count"`
	} `json:"usage"`
}
24 |
--------------------------------------------------------------------------------
/pkg/llm/devplatform/dify/chat_completion_response/chat_completion_response.go:
--------------------------------------------------------------------------------
1 | package chat_completion_response
2 |
3 | import "time"
4 |
// RetrieverResource is one knowledge-retrieval citation attached to an answer.
type RetrieverResource struct {
	Position     int     `json:"position"`
	DatasetID    string  `json:"dataset_id"`
	DatasetName  string  `json:"dataset_name"`
	DocumentID   string  `json:"document_id"`
	DocumentName string  `json:"document_name"`
	SegmentID    string  `json:"segment_id"`
	Score        float64 `json:"score"`
	Content      string  `json:"content"`
}

// Usage is the token/pricing accounting block; prices arrive as decimal strings.
type Usage struct {
	PromptTokens        int     `json:"prompt_tokens"`
	PromptUnitPrice     string  `json:"prompt_unit_price"`
	PromptPriceUnit     string  `json:"prompt_price_unit"`
	PromptPrice         string  `json:"prompt_price"`
	CompletionTokens    int     `json:"completion_tokens"`
	CompletionUnitPrice string  `json:"completion_unit_price"`
	CompletionPriceUnit string  `json:"completion_price_unit"`
	CompletionPrice     string  `json:"completion_price"`
	TotalTokens         int     `json:"total_tokens"`
	TotalPrice          string  `json:"total_price"`
	Currency            string  `json:"currency"`
	Latency             float64 `json:"latency"`
}

// Metadata bundles usage accounting and retrieval citations.
type Metadata struct {
	Usage              Usage               `json:"usage"`
	RetrieverResources []RetrieverResource `json:"retriever_resources"`
}

// ChatCompletionResponse is the blocking-mode Dify chat-messages response.
//
// NOTE(review): CreatedAt is declared time.Time, but if the wire value of
// "created_at" is a Unix integer timestamp, json.Unmarshal into time.Time
// will fail — confirm against a live payload.
type ChatCompletionResponse struct {
	Event          string    `json:"event"`
	MessageID      string    `json:"message_id"`
	ConversationID string    `json:"conversation_id"`
	Mode           string    `json:"mode"`
	Answer         string    `json:"answer"`
	Metadata       Metadata  `json:"metadata"`
	CreatedAt      time.Time `json:"created_at"`
}
49 |
--------------------------------------------------------------------------------
/pkg/llm/devplatform/dify/chat_message_request/chat_message_request.go:
--------------------------------------------------------------------------------
1 | package chat_message_request
2 |
// ChatMessageRequest is the Dify chat-messages request body.
type ChatMessageRequest struct {
	Inputs         map[string]interface{} `json:"inputs"`
	Query          string                 `json:"query"`
	ResponseMode   string                 `json:"response_mode"` // "streaming" or "blocking"
	ConversationID string                 `json:"conversation_id"`
	User           string                 `json:"user"`
}
10 |
--------------------------------------------------------------------------------
/pkg/llm/devplatform/dify/chat_message_request/chat_messages.go:
--------------------------------------------------------------------------------
1 | package chat_message_request
2 |
3 | import (
4 | "bytes"
5 | "encoding/json"
6 | "fmt"
7 | "io"
8 | "net/http"
9 | "simple-one-api/pkg/llm/devplatform/dify/chat_completion_response"
10 | "simple-one-api/pkg/utils"
11 | )
12 |
// baseURL is the root endpoint of the Dify REST API.
var baseURL = "https://api.dify.ai/v1"
14 |
15 | func CallChatMessagesStreamMode(difyReq *ChatMessageRequest, apiKey string, callback func(data string), httpTransport *http.Transport) error {
16 | serverUrl := "https://api.dify.ai/v1/chat-messages"
17 |
18 | reqData, _ := json.Marshal(difyReq)
19 |
20 | return utils.SendSSERequest(apiKey, serverUrl, reqData, callback, httpTransport)
21 | }
22 |
23 | func CallChatMessagesNoneStreamMode(difyReq *ChatMessageRequest, apiKey string, httpTransport *http.Transport) (*chat_completion_response.ChatCompletionResponse, error) {
24 | serverUrl := "https://api.dify.ai/v1/chat-messages"
25 | // 创建请求体
26 |
27 | reqData, _ := json.Marshal(difyReq)
28 |
29 | respData, err := utils.SendHTTPRequest(apiKey, serverUrl, reqData, httpTransport)
30 | if err != nil {
31 | return nil, err
32 | }
33 |
34 | var difyResp chat_completion_response.ChatCompletionResponse
35 | err = json.Unmarshal(respData, &difyResp)
36 | if err != nil {
37 | return nil, err
38 | }
39 |
40 | return &difyResp, nil
41 | }
42 | func CallChatMessages(query, conversationID string, apiKey string, streamMode bool) (string, error) {
43 | url := fmt.Sprintf("%s/chat-messages", baseURL)
44 |
45 | responseMode := "streaming"
46 | if !streamMode {
47 | responseMode = "blocking"
48 | }
49 | // 创建请求体
50 | requestBody := ChatMessageRequest{
51 | Inputs: map[string]interface{}{},
52 | Query: query,
53 | ResponseMode: responseMode,
54 | ConversationID: conversationID,
55 | User: "abc-123",
56 | }
57 |
58 | jsonData, err := json.Marshal(requestBody)
59 | if err != nil {
60 | return "", err
61 | }
62 |
63 | // 创建 HTTP POST 请求
64 | req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
65 | if err != nil {
66 | return "", err
67 | }
68 |
69 | // 设置请求头
70 | req.Header.Set("Authorization", "Bearer "+apiKey)
71 | req.Header.Set("Content-Type", "application/json")
72 |
73 | // 发送请求
74 | client := &http.Client{}
75 | resp, err := client.Do(req)
76 | if err != nil {
77 | return "", err
78 | }
79 | defer resp.Body.Close()
80 |
81 | // 读取响应
82 | body, err := io.ReadAll(resp.Body)
83 | if err != nil {
84 | return "", err
85 | }
86 |
87 | return string(body), nil
88 | }
89 |
--------------------------------------------------------------------------------
/pkg/llm/devplatform/dify/chunk_chat_completion_response/chunk_chat_completion_response.go:
--------------------------------------------------------------------------------
1 | package chunk_chat_completion_response
2 |
// CommonEvent represents the common structure for all events; decode into it
// first to discover the concrete event type.
type CommonEvent struct {
	Event string `json:"event"`
}

// MessageEvent represents a standard message event carrying an answer chunk.
type MessageEvent struct {
	Event          string `json:"event"`
	MessageID      string `json:"message_id"`
	ConversationID string `json:"conversation_id"`
	Answer         string `json:"answer"` // incremental answer text
	CreatedAt      int64  `json:"created_at"`
}

// MessageEndEvent represents the end of a message event; usage/citation data
// arrives here rather than on the message chunks.
type MessageEndEvent struct {
	Event          string   `json:"event"`
	ID             string   `json:"id"`
	ConversationID string   `json:"conversation_id"`
	Metadata       Metadata `json:"metadata"`
}

// Metadata represents metadata for the message_end event: token usage,
// pricing figures (strings as sent by the API), and retrieval citations.
type Metadata struct {
	Usage struct {
		PromptTokens        int     `json:"prompt_tokens"`
		PromptUnitPrice     string  `json:"prompt_unit_price"`
		PromptPriceUnit     string  `json:"prompt_price_unit"`
		PromptPrice         string  `json:"prompt_price"`
		CompletionTokens    int     `json:"completion_tokens"`
		CompletionUnitPrice string  `json:"completion_unit_price"`
		CompletionPriceUnit string  `json:"completion_price_unit"`
		CompletionPrice     string  `json:"completion_price"`
		TotalTokens         int     `json:"total_tokens"`
		TotalPrice          string  `json:"total_price"`
		Currency            string  `json:"currency"`
		Latency             float64 `json:"latency"`
	} `json:"usage"`
	RetrieverResources []struct {
		Position     int     `json:"position"`
		DatasetID    string  `json:"dataset_id"`
		DatasetName  string  `json:"dataset_name"`
		DocumentID   string  `json:"document_id"`
		DocumentName string  `json:"document_name"`
		SegmentID    string  `json:"segment_id"`
		Score        float64 `json:"score"`
		Content      string  `json:"content"`
	} `json:"retriever_resources"`
}

// TTSEvent represents a TTS message event carrying an audio chunk.
type TTSEvent struct {
	Event          string `json:"event"`
	ConversationID string `json:"conversation_id"`
	MessageID      string `json:"message_id"`
	CreatedAt      int64  `json:"created_at"`
	TaskID         string `json:"task_id"`
	Audio          string `json:"audio"`
}

// TTSEndEvent represents the end of a TTS message event; structurally
// identical to TTSEvent but distinguished by Event.
type TTSEndEvent struct {
	Event          string `json:"event"`
	ConversationID string `json:"conversation_id"`
	MessageID      string `json:"message_id"`
	CreatedAt      int64  `json:"created_at"`
	TaskID         string `json:"task_id"`
	Audio          string `json:"audio"`
}
72 |
--------------------------------------------------------------------------------
/pkg/llm/google-gemini/gemini_request.go:
--------------------------------------------------------------------------------
1 | package google_gemini
2 |
3 | import "fmt"
4 |
// Part is one piece of a message: either text or inline media.
type Part struct {
	Text       string `json:"text,omitempty"`
	InlineData *Blob  `json:"inlineData,omitempty"`
}

// Blob represents inline media bytes.
// NOTE(review): Data is presumably base64-encoded per the Gemini API
// convention — confirm at the call site.
type Blob struct {
	MimeType string `json:"mimeType,omitempty"`
	Data     string `json:"data,omitempty"`
}

// ContentEntity represents a single entry (turn) in the conversation.
type ContentEntity struct {
	Role  string `json:"role"`
	Parts []Part `json:"parts"`
}

// SafetySetting configures a moderation threshold for one harm category.
type SafetySetting struct {
	Category  string `json:"category,omitempty"`
	Threshold string `json:"threshold,omitempty"`
}

// GenerationConfig carries sampling parameters for a generation request.
type GenerationConfig struct {
	StopSequences   []string `json:"stopSequences,omitempty"`
	Temperature     float32  `json:"temperature,omitempty"`
	MaxOutputTokens int      `json:"maxOutputTokens,omitempty"`
	TopP            float32  `json:"topP,omitempty"`
	TopK            int      `json:"topK,omitempty"`
}

// GeminiRequest is the top-level body for a Gemini content-generation request.
type GeminiRequest struct {
	Contents         []ContentEntity  `json:"contents"`
	SafetySettings   []SafetySetting  `json:"safetySettings,omitempty"`
	GenerationConfig GenerationConfig `json:"generationConfig,omitempty"`
}
40 |
// GoString implements fmt.GoStringer, eliding the (potentially large) Data
// payload so %#v output stays readable.
func (b Blob) GoString() string {
	return fmt.Sprintf("Blob{MimeType: %q, Data : ...}", b.MimeType)
}
44 |
--------------------------------------------------------------------------------
/pkg/llm/google-gemini/gemini_response.go:
--------------------------------------------------------------------------------
1 | package google_gemini
2 |
// SafetyRating is the moderation verdict for one harm category.
type SafetyRating struct {
	Category    string `json:"category"`
	Probability string `json:"probability"`
}

// Candidate is one generated answer candidate.
type Candidate struct {
	Content       ContentEntity  `json:"content"`
	FinishReason  string         `json:"finishReason"`
	Index         int            `json:"index"`
	SafetyRatings []SafetyRating `json:"safetyRatings"`
}

// UsageMetadata reports token-count statistics for the request.
type UsageMetadata struct {
	PromptTokenCount     int `json:"promptTokenCount"`
	CandidatesTokenCount int `json:"candidatesTokenCount"`
	TotalTokenCount      int `json:"totalTokenCount"`
}

// GeminiResponse is the top-level Gemini generation response.
type GeminiResponse struct {
	Candidates    []Candidate   `json:"candidates"`
	UsageMetadata UsageMetadata `json:"usageMetadata"`
}
29 |
--------------------------------------------------------------------------------
/pkg/llm/minimax/minimax_request.go:
--------------------------------------------------------------------------------
1 | package minimax
2 |
// MinimaxRequest defines the request body for the Minimax chat API.
type MinimaxRequest struct {
	Model             string           `json:"model"`                         // model name
	Stream            bool             `json:"stream,omitempty"`              // whether to stream the response
	TokensToGenerate  int64            `json:"tokens_to_generate,omitempty"`  // maximum number of tokens to generate
	Temperature       float32          `json:"temperature,omitempty"`         // sampling temperature
	TopP              float32          `json:"top_p,omitempty"`               // nucleus sampling parameter
	MaskSensitiveInfo bool             `json:"mask_sensitive_info,omitempty"` // whether to mask sensitive information
	Messages          []Message        `json:"messages"`                      // conversation messages
	BotSetting        []BotSetting     `json:"bot_setting"`                   // bot persona settings
	ReplyConstraints  ReplyConstraints `json:"reply_constraints"`             // constraints on the model reply
}

// Message is one message in the conversation.
type Message struct {
	SenderType string `json:"sender_type"` // sender type
	SenderName string `json:"sender_name"` // sender name
	Text       string `json:"text"`        // message content
}

// BotSetting describes the bot's persona.
type BotSetting struct {
	BotName string `json:"bot_name"` // the bot's name
	Content string `json:"content"`  // the bot's persona description
}

// ReplyConstraints specifies which role/name the model should reply as.
type ReplyConstraints struct {
	SenderType string `json:"sender_type"` // role type of the reply
	SenderName string `json:"sender_name"` // name of the replying bot
}
34 |
--------------------------------------------------------------------------------
/pkg/llm/minimax/minimax_response.go:
--------------------------------------------------------------------------------
1 | package minimax
2 |
// MinimaxResponse defines the Minimax chat API response.
type MinimaxResponse struct {
	Created             int64    `json:"created"`                        // request timestamp
	Model               string   `json:"model"`                          // model name used for the request
	Reply               string   `json:"reply"`                          // reply content
	InputSensitive      bool     `json:"input_sensitive"`                // input hit a sensitive word
	InputSensitiveType  int64    `json:"input_sensitive_type,omitempty"` // category of the input sensitive word
	OutputSensitive     bool     `json:"output_sensitive"`               // output hit a sensitive word
	OutputSensitiveType int64    `json:"output_sensitive_type,omitempty"` // category of the output sensitive word
	Choices             []Choice `json:"choices"`                        // all results
	Usage               Usage    `json:"usage"`                          // token usage
	ID                  string   `json:"id"`                             // unique request identifier
	BaseResp            BaseResp `json:"base_resp"`                      // error status code and details
}

// Choice is one candidate result.
type Choice struct {
	Messages     []Message `json:"messages"`      // content of this result
	Index        int64     `json:"index"`         // rank
	FinishReason string    `json:"finish_reason"` // reason generation stopped
}

// Usage reports token consumption.
type Usage struct {
	TotalTokens int64 `json:"total_tokens"` // total tokens consumed
}

// BaseResp carries the API-level status code and error message.
type BaseResp struct {
	StatusCode int64  `json:"status_code"` // status code
	StatusMsg  string `json:"status_msg"`  // error detail
}
35 |
--------------------------------------------------------------------------------
/pkg/llm/ollama/ollma_request.go:
--------------------------------------------------------------------------------
1 | package ollama
2 |
// ChatRequest is the body for Ollama's /api/chat endpoint.
type ChatRequest struct {
	Model     string               `json:"model"`
	Messages  []Message            `json:"messages"`
	Stream    bool                 `json:"stream"`
	Format    string               `json:"format,omitempty"`     // e.g. "json" for structured output
	Options   AdvancedModelOptions `json:"options,omitempty"`
	KeepAlive string               `json:"keep_alive,omitempty"` // how long to keep the model loaded
}

// Message is one chat message; Images carries optional attachments.
type Message struct {
	Role    string   `json:"role"`
	Content string   `json:"content"`
	Images  []string `json:"images,omitempty"`
}

// AdvancedModelOptions mirrors Ollama's model tuning options.
type AdvancedModelOptions struct {
	Temperature   float32 `json:"temperature,omitempty"`
	Seed          int     `json:"seed,omitempty"`
	Mirostat      int     `json:"mirostat,omitempty"`
	MirostatEta   float32 `json:"mirostat_eta,omitempty"`
	MirostatTau   float32 `json:"mirostat_tau,omitempty"`
	NumCtx        int     `json:"num_ctx,omitempty"`
	RepeatLastN   int     `json:"repeat_last_n,omitempty"`
	RepeatPenalty float32 `json:"repeat_penalty,omitempty"`
	Stop          string  `json:"stop,omitempty"`
	TfsZ          float32 `json:"tfs_z,omitempty"`
	NumPredict    int     `json:"num_predict,omitempty"`
	TopK          int     `json:"top_k,omitempty"`
	TopP          float32 `json:"top_p,omitempty"`
}
33 |
--------------------------------------------------------------------------------
/pkg/llm/ollama/olloma_response.go:
--------------------------------------------------------------------------------
1 | package ollama
2 |
// ChatResponse is one response object from Ollama's /api/chat endpoint;
// the duration fields are in nanoseconds and Done marks the final chunk.
type ChatResponse struct {
	Model              string      `json:"model"`
	CreatedAt          string      `json:"created_at"`
	Message            ChatMessage `json:"message"`
	Done               bool        `json:"done"`
	TotalDuration      int64       `json:"total_duration"`
	LoadDuration       int64       `json:"load_duration"`
	PromptEvalCount    int         `json:"prompt_eval_count"`
	PromptEvalDuration int64       `json:"prompt_eval_duration"`
	EvalCount          int         `json:"eval_count"`
	EvalDuration       int64       `json:"eval_duration"`
}

// ChatMessage is the message payload inside a ChatResponse.
type ChatMessage struct {
	Role    string   `json:"role"`
	Content string   `json:"content"`
	Images  []string `json:"images,omitempty"`
}
21 |
--------------------------------------------------------------------------------
/pkg/llm/tecent-hunyuan/hunyuan_response.go:
--------------------------------------------------------------------------------
1 | package tecent_hunyuan
2 |
// HunYuannResponseError carries an error code/message from the Tencent
// HunYuan API. (Name kept as-is, including the doubled "n", since callers
// reference it.)
type HunYuannResponseError struct {
	Code    string `json:"Code,omitempty"`
	Message string `json:"Message,omitempty"`
}

// HunYuanResponse is the non-streaming HunYuan chat response; the payload is
// nested under the "Response" key, with Error set on failure.
type HunYuanResponse struct {
	Response struct {
		RequestID string `json:"RequestId"`
		Note      string `json:"Note"`
		Choices   []struct {
			Message struct {
				Role    string `json:"Role"`
				Content string `json:"Content"`
			} `json:"Message"`
			FinishReason string `json:"FinishReason"`
		} `json:"Choices"`
		Created int    `json:"Created"`
		ID      string `json:"Id"`
		Usage   struct {
			PromptTokens     int `json:"PromptTokens"`
			CompletionTokens int `json:"CompletionTokens"`
			TotalTokens      int `json:"TotalTokens"`
		} `json:"Usage"`
		Error *HunYuannResponseError `json:"Error"`
	} `json:"Response"`
}

// StreamResponse is one HunYuan streaming chunk; unlike HunYuanResponse the
// fields are top-level and choices carry a Delta instead of a Message.
type StreamResponse struct {
	Note    string `json:"Note"`
	Choices []struct {
		Delta struct {
			Role    string `json:"Role"`
			Content string `json:"Content"`
		} `json:"Delta"`
		FinishReason string `json:"FinishReason"`
	} `json:"Choices"`
	Created int64  `json:"Created"`
	ID      string `json:"Id"`
	Usage   struct {
		PromptTokens     int `json:"PromptTokens"`
		CompletionTokens int `json:"CompletionTokens"`
		TotalTokens      int `json:"TotalTokens"`
	} `json:"Usage"`
}
47 |
--------------------------------------------------------------------------------
/pkg/llm/xunfei-xinghuo/xinghuo.go:
--------------------------------------------------------------------------------
1 | package xunfei_xinghuo
2 |
--------------------------------------------------------------------------------
/pkg/llm/zijie-huoshan/huoshan_request.go:
--------------------------------------------------------------------------------
1 | package zijie_huoshan
2 |
--------------------------------------------------------------------------------
/pkg/llm/zijie-huoshan/huoshan_response.go:
--------------------------------------------------------------------------------
1 | package zijie_huoshan
2 |
--------------------------------------------------------------------------------
/pkg/mycomdef/com_keyname.go:
--------------------------------------------------------------------------------
1 | package mycomdef
2 |
// Generic config key names.
const KEYNAME_MODEL = "model"
const KEYNAME_ASSISTANT = "assistant"

// Rate-limit type names (see mylimiter.NewLimiter).
const KEYNAME_QPS = "qps"
const KEYNAME_QPM = "qpm"
const KEYNAME_RPM = "rpm"
const KEYNAME_CONCURRENCY = "concurrency"

// Load-balancing strategy names.
const KEYNAME_FIRST = "first"
const KEYNAME_RANDOM = "random"
const KEYNAME_RAND = "rand"
const KEYNAME_ROUND_ROBIN = "round-robin"
const KEYNAME_RR = "rr"
const KEYNAME_HASH = "hash"
17 |
--------------------------------------------------------------------------------
/pkg/mycommon/common_credentials.go:
--------------------------------------------------------------------------------
1 | package mycommon
2 |
3 | import (
4 | "simple-one-api/pkg/config"
5 | "simple-one-api/pkg/mycomdef"
6 | "strconv"
7 | )
8 |
9 | // GetACredentials 根据模型名从ModelDetails中选择合适的凭证
10 | func GetACredentials(s *config.ModelDetails, model string) (map[string]interface{}, string) {
11 | // 检查是否有多个凭据列表可用
12 | var credID string
13 | if s.CredentialList != nil && len(s.CredentialList) > 0 {
14 | key := s.ServiceID + "credentials"
15 |
16 | index := config.GetLBIndex(config.LoadBalancingStrategy, key, len(s.CredentialList))
17 | credID = s.ServiceID + "_credentials_" + strconv.Itoa(index)
18 | return s.CredentialList[index], credID
19 | }
20 | return s.Credentials, credID
21 | }
22 |
23 | func GetCredentialLimit(credentials map[string]interface{}) (limitType string, limitn float64, timeout int) {
24 | // 假设'limit'键下是一个JSON表示的map
25 | limitData, ok := credentials["limit"].(map[string]interface{})
26 | if !ok {
27 | return "", 0, 0 // 没有找到或类型不匹配
28 | }
29 |
30 | if to, ok := limitData["timeout"].(int); ok {
31 | timeout = to
32 | }
33 | // 按优先级查找限制值:qps, qpm, rpm, concurrency
34 | if qps, ok := limitData[mycomdef.KEYNAME_QPS].(float64); ok {
35 | return mycomdef.KEYNAME_QPS, qps, timeout
36 | }
37 | if qpm, ok := limitData[mycomdef.KEYNAME_QPM].(float64); ok {
38 | return mycomdef.KEYNAME_QPM, qpm, timeout
39 | }
40 | if rpm, ok := limitData[mycomdef.KEYNAME_RPM].(float64); ok {
41 | return mycomdef.KEYNAME_QPM, rpm, timeout
42 | }
43 | if concurrency, ok := limitData[mycomdef.KEYNAME_CONCURRENCY].(float64); ok {
44 | return mycomdef.KEYNAME_CONCURRENCY, concurrency, timeout
45 | }
46 |
47 | return "", 0, 0 // 默认返回
48 | }
49 |
--------------------------------------------------------------------------------
/pkg/mycommon/common_err_resp.go:
--------------------------------------------------------------------------------
1 | package mycommon
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "go.uber.org/zap"
7 | "io"
8 | "net/http"
9 | "simple-one-api/pkg/mylog"
10 | )
11 |
12 | // 假设 somewhere in your code you have initialized the logger correctly.
13 | // mylog.InitLog("dev") or mylog.InitLog("prod") should have been called.
14 |
15 | func CheckStatusCode(resp *http.Response) error {
16 | if resp.StatusCode != http.StatusOK {
17 | // Efficiently reading the body and logging in case of an error.
18 | errMsg, err := io.ReadAll(resp.Body)
19 | if err != nil {
20 | mylog.Logger.Error("Failed to read response body",
21 | zap.Int("status", resp.StatusCode),
22 | zap.Error(err))
23 | return errors.New("failed to read error response body")
24 | }
25 |
26 | // Logging the error with more context.
27 | mylog.Logger.Error("Unexpected status code",
28 | zap.Int("status", resp.StatusCode),
29 | zap.String("body", string(errMsg)))
30 |
31 | // Returning a new error with the status code and the body message
32 | return errors.New(fmt.Sprintf("status %d: %s", resp.StatusCode, string(errMsg)))
33 | }
34 | return nil
35 | }
36 |
--------------------------------------------------------------------------------
/pkg/mycommon/common_model_params.go:
--------------------------------------------------------------------------------
1 | package mycommon
2 |
3 | import (
4 | "errors"
5 | "go.uber.org/zap"
6 | "simple-one-api/pkg/mylog"
7 | )
8 |
const adjustmentFloatValue = 0.01 // nudge applied when clamping values onto an open boundary

// ModelParams describes the parameter limits of one model.
type ModelParams struct {
	TemperatureRange Range // allowed temperature range
	TopPRange        Range // allowed top_p range
	MaxTokens        int   // maximum number of tokens
}

// Range is an inclusive [Min, Max] float interval.
type Range struct {
	Min float32
	Max float32
}

// Shared parameter configuration for the GLM model family.
var glmCommonModelParams = ModelParams{
	TemperatureRange: Range{0.0, 1.0},
	TopPRange:        Range{0.0, 1.0},
}

// modelParamsMap maps a model name to its parameter limits.
var modelParamsMap = map[string]ModelParams{
	"glm-4-0520": {
		TemperatureRange: glmCommonModelParams.TemperatureRange,
		TopPRange:        glmCommonModelParams.TopPRange,
		MaxTokens:        4095,
	},
	"glm-4": {
		TemperatureRange: glmCommonModelParams.TemperatureRange,
		TopPRange:        glmCommonModelParams.TopPRange,
		MaxTokens:        4095,
	},
	"glm-4-air": {
		TemperatureRange: glmCommonModelParams.TemperatureRange,
		TopPRange:        glmCommonModelParams.TopPRange,
		MaxTokens:        4095,
	},
	"glm-4-airx": {
		TemperatureRange: glmCommonModelParams.TemperatureRange,
		TopPRange:        glmCommonModelParams.TopPRange,
		MaxTokens:        4095,
	},
	"glm-4-flash": {
		TemperatureRange: glmCommonModelParams.TemperatureRange,
		TopPRange:        glmCommonModelParams.TopPRange,
		MaxTokens:        4095,
	},
	"glm-3-turbo": {
		TemperatureRange: glmCommonModelParams.TemperatureRange,
		TopPRange:        glmCommonModelParams.TopPRange,
		MaxTokens:        4095,
	},
	"glm-4v": {
		TemperatureRange: Range{0.0, 1.0},
		TopPRange:        Range{0.0, 1.0},
		MaxTokens:        1024,
	},
}
65 |
66 | func GetModelParams(modelName string) (ModelParams, error) {
67 | params, ok := modelParamsMap[modelName]
68 | if !ok {
69 | return ModelParams{}, errors.New("unsupported model")
70 | }
71 | return params, nil
72 | }
73 |
74 | func adjustFloatValue(value, min, max float32) float32 {
75 | if value < 0 {
76 | value = 0
77 | }
78 | if value < min {
79 | value = min + adjustmentFloatValue
80 | } else if value >= max {
81 | value = max - adjustmentFloatValue
82 | }
83 | return value
84 | }
85 |
86 | func AdjustParamsToRange(modelName string, temperature, topP float32, maxTokens int) (float32, float32, int, error) {
87 | params, err := GetModelParams(modelName)
88 | if err != nil {
89 | return temperature, topP, maxTokens, err
90 | }
91 |
92 | temperature = adjustFloatValue(temperature, params.TemperatureRange.Min, params.TemperatureRange.Max)
93 |
94 | topP = adjustFloatValue(topP, params.TopPRange.Min, params.TopPRange.Max)
95 |
96 | if maxTokens < 0 {
97 | maxTokens = 0
98 | }
99 | if maxTokens > params.MaxTokens {
100 | maxTokens = params.MaxTokens
101 | }
102 |
103 | mylog.Logger.Debug("", zap.Float32("adjusted_temperature", temperature),
104 | zap.Float32("adjusted_topP", topP),
105 | zap.Int("adjusted_maxTokens", maxTokens))
106 |
107 | return temperature, topP, maxTokens, nil
108 | }
109 |
--------------------------------------------------------------------------------
/pkg/mycommon/common_modeldetails.go:
--------------------------------------------------------------------------------
1 | package mycommon
2 |
3 | import (
4 | "simple-one-api/pkg/config"
5 | "simple-one-api/pkg/mycomdef"
6 | )
7 |
8 | // 通用的限流器详情获取函数
9 | func getLimitDetails(limit config.Limit) (string, float64, int) {
10 | switch {
11 | case limit.QPS > 0:
12 | return mycomdef.KEYNAME_QPS, limit.QPS, limit.Timeout
13 | case limit.QPM > 0:
14 | return mycomdef.KEYNAME_QPM, limit.QPM, limit.Timeout
15 | case limit.RPM > 0:
16 | return mycomdef.KEYNAME_QPM, limit.RPM, limit.Timeout
17 | case limit.Concurrency > 0:
18 | return mycomdef.KEYNAME_CONCURRENCY, limit.Concurrency, limit.Timeout
19 | default:
20 | return "", 0, 0 // 默认返回
21 | }
22 | }
23 |
// GetServiceModelDetailsLimit returns the limit type, value and timeout
// configured on a service's model details.
func GetServiceModelDetailsLimit(s *config.ModelDetails) (string, float64, int) {
	return getLimitDetails(s.Limit)
}
28 |
// GetServiceLimiterDetailsLimit returns the limit type, value and timeout of
// a standalone limiter configuration.
func GetServiceLimiterDetailsLimit(l *config.Limit) (string, float64, int) {
	return getLimitDetails(*l)
}
33 |
--------------------------------------------------------------------------------
/pkg/mycommon/common_msg.go:
--------------------------------------------------------------------------------
1 | package mycommon
2 |
// Message is one message in a conversation.
type Message struct {
	Role    string `json:"role"`    // role, e.g. user or assistant
	Content string `json:"content"` // message content
}
8 |
--------------------------------------------------------------------------------
/pkg/mylimiter/limiter.go:
--------------------------------------------------------------------------------
1 | package mylimiter
2 |
3 | import (
4 | "context"
5 | "time"
6 |
7 | "golang.org/x/sync/semaphore"
8 | "golang.org/x/time/rate"
9 | "simple-one-api/pkg/mycomdef"
10 | "sync"
11 | )
12 |
// Limiter bundles the supported rate-limiting mechanisms. At most one field
// is non-nil, chosen by NewLimiter from the configured limit type.
type Limiter struct {
	QPSLimiter         *rate.Limiter         // token bucket for per-second limits
	QPMLimiter         *SlidingWindowLimiter // sliding window for per-minute limits
	ConcurrencyLimiter *semaphore.Weighted   // bounded concurrency
}

// SlidingWindowLimiter admits at most maxRequests within any rolling
// interval; requests holds the admission timestamps, oldest first.
type SlidingWindowLimiter struct {
	mu          sync.Mutex
	maxRequests int
	interval    time.Duration
	requests    []time.Time
}

// limiterMap caches limiters by key; mapMutex guards it for concurrent use.
var (
	limiterMap = make(map[string]*Limiter)
	mapMutex   sync.RWMutex
)
30 |
31 | func NewSlidingWindowLimiter(qpm int) *SlidingWindowLimiter {
32 | return &SlidingWindowLimiter{
33 | maxRequests: qpm,
34 | interval: time.Minute,
35 | requests: make([]time.Time, 0, qpm),
36 | }
37 | }
38 |
39 | func (l *SlidingWindowLimiter) Allow() bool {
40 | now := time.Now()
41 | windowStart := now.Add(-l.interval)
42 |
43 | l.mu.Lock()
44 | defer l.mu.Unlock()
45 |
46 | // 移除窗口外的请求
47 | i := 0
48 | for ; i < len(l.requests) && l.requests[i].Before(windowStart); i++ {
49 | }
50 | l.requests = l.requests[i:]
51 |
52 | // 检查是否允许新请求
53 | if len(l.requests) < l.maxRequests {
54 | l.requests = append(l.requests, now)
55 | return true
56 | }
57 | return false
58 | }
59 |
// Wait blocks until Allow admits a request or ctx is cancelled. It polls with
// exponential backoff starting at 10ms and capped at 1s, shortening the wait
// when the oldest in-window request is about to expire.
func (l *SlidingWindowLimiter) Wait(ctx context.Context) error {
	waitTime := 10 * time.Millisecond // initial backoff interval

	for {
		if l.Allow() {
			return nil
		}

		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(waitTime):
			l.mu.Lock()
			if len(l.requests) > 0 {
				// Time until the oldest recorded request leaves the window,
				// i.e. when the next slot can open up.
				nextAllowedTime := l.requests[0].Add(l.interval)
				timeUntilNextAllowed := time.Until(nextAllowedTime)

				// Wait just until that slot frees; otherwise back off
				// exponentially, capped at one second.
				if timeUntilNextAllowed < waitTime {
					waitTime = timeUntilNextAllowed
				} else {
					waitTime *= 2
					if waitTime > time.Second {
						waitTime = time.Second
					}
				}
			}
			l.mu.Unlock()
		}
	}
}
92 |
93 | // NewLimiter 创建一个新的限流器,根据指定的类型和限制值进行配置
94 | func NewLimiter(limitType string, limitn float64) *Limiter {
95 | lim := &Limiter{}
96 | switch limitType {
97 | case mycomdef.KEYNAME_QPS:
98 | lim.QPSLimiter = rate.NewLimiter(rate.Limit(limitn), int(limitn))
99 | case mycomdef.KEYNAME_QPM, mycomdef.KEYNAME_RPM:
100 | lim.QPMLimiter = NewSlidingWindowLimiter(int(limitn))
101 | case mycomdef.KEYNAME_CONCURRENCY:
102 | lim.ConcurrencyLimiter = semaphore.NewWeighted(int64(limitn))
103 | default:
104 | // 对无效类型无操作,或者可以抛出错误
105 | }
106 | return lim
107 | }
108 |
109 | // Wait 使用QPS限流器等待直到获得令牌
110 | func (l *Limiter) Wait(ctx context.Context) error {
111 | if l.QPSLimiter != nil {
112 | return l.QPSLimiter.Wait(ctx)
113 | }
114 | if l.QPMLimiter != nil {
115 | return l.QPMLimiter.Wait(ctx)
116 | }
117 | return nil
118 | }
119 |
// Acquire takes one concurrency permit, blocking until one is available or
// ctx is cancelled; it is a no-op when no concurrency limit is configured.
func (l *Limiter) Acquire(ctx context.Context) error {
	if l.ConcurrencyLimiter != nil {
		return l.ConcurrencyLimiter.Acquire(ctx, 1)
	}
	return nil
}
127 |
// Release returns one concurrency permit; it must pair with a successful
// Acquire and is a no-op when no concurrency limit is configured.
func (l *Limiter) Release() {
	if l.ConcurrencyLimiter != nil {
		l.ConcurrencyLimiter.Release(1)
	}
}
134 |
// GetLimiter returns the limiter registered under key, creating one with the
// given type/value on first use. Safe for concurrent callers: a read lock
// serves the fast path and the write lock re-checks before inserting
// (double-checked locking).
func GetLimiter(key string, limitType string, limitn float64) *Limiter {
	mapMutex.RLock()
	if lim, exists := limiterMap[key]; exists {
		mapMutex.RUnlock()
		return lim
	}
	mapMutex.RUnlock()

	mapMutex.Lock()
	defer mapMutex.Unlock()
	// Re-check: another goroutine may have inserted between the read unlock
	// above and acquiring the write lock.
	if lim, exists := limiterMap[key]; exists {
		return lim
	}

	lim := NewLimiter(limitType, limitn)
	limiterMap[key] = lim
	return lim
}
155 |
--------------------------------------------------------------------------------
/pkg/mylog/logger.go:
--------------------------------------------------------------------------------
1 | package mylog
2 |
3 | import (
4 | "go.uber.org/zap"
5 | "go.uber.org/zap/zapcore"
6 | "log"
7 | "os"
8 | )
9 |
10 | var Logger *zap.Logger
11 |
12 | func InitLog(mode string) {
13 |
14 | log.Println("level mode", mode)
15 | var encoder zapcore.Encoder
16 | var encoderConfig zapcore.EncoderConfig
17 | var level zapcore.Level
18 |
19 | // 根据模式选择合适的编码器配置和日志级别
20 | switch mode {
21 | case "prod", "production", "prodj", "prodjson", "productionjson":
22 | encoderConfig = zap.NewProductionEncoderConfig()
23 | level = zapcore.WarnLevel
24 | case "dev", "development":
25 | encoderConfig = zap.NewDevelopmentEncoderConfig()
26 | level = zapcore.InfoLevel
27 | case "debug":
28 | encoderConfig = zap.NewDevelopmentEncoderConfig()
29 | level = zapcore.DebugLevel
30 | default:
31 | log.Println("level mode default prod")
32 | encoderConfig = zap.NewDevelopmentEncoderConfig()
33 | level = zapcore.WarnLevel
34 | }
35 |
36 | // 设置时间键和时间格式
37 | encoderConfig.TimeKey = "timestamp"
38 | encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
39 | encoderConfig.EncodeCaller = zapcore.ShortCallerEncoder
40 |
41 | // 根据需要选择输出为JSON或控制台格式
42 | if mode == "prodj" || mode == "prodjson" || mode == "productionjson" {
43 | encoder = zapcore.NewJSONEncoder(encoderConfig)
44 | log.Println("log json format")
45 | } else {
46 | encoder = zapcore.NewConsoleEncoder(encoderConfig)
47 | log.Println("log plain-text format")
48 | }
49 |
50 | // 创建日志核心
51 | core := zapcore.NewCore(
52 | encoder,
53 | zapcore.Lock(os.Stdout),
54 | zap.NewAtomicLevelAt(level),
55 | )
56 |
57 | // 构建日志器
58 | Logger = zap.New(core, zap.AddCaller())
59 | }
60 |
--------------------------------------------------------------------------------
/pkg/openai/openai_request.go:
--------------------------------------------------------------------------------
1 | package myopenai
2 |
3 | import (
4 | "encoding/json"
5 | "simple-one-api/pkg/mycommon"
6 | )
7 |
// OpenAIRequest defines the body of an OpenAI-compatible chat completion
// request; pointer fields distinguish "unset" from zero values.
type OpenAIRequest struct {
	Model            string             `json:"model"`
	Messages         []mycommon.Message `json:"messages"`
	FrequencyPenalty *float32           `json:"frequency_penalty,omitempty"`
	LogitBias        map[int]int        `json:"logit_bias,omitempty"`
	LogProbs         *bool              `json:"logprobs,omitempty"`
	TopLogProbs      *int               `json:"top_logprobs,omitempty"`
	MaxTokens        *int               `json:"max_tokens,omitempty"`
	N                *int               `json:"n,omitempty"`
	PresencePenalty  *float32           `json:"presence_penalty,omitempty"`
	ResponseFormat   *ResponseFormat    `json:"response_format,omitempty"`
	Seed             *int               `json:"seed,omitempty"`
	Stop             []string           `json:"stop,omitempty"`
	Stream           *bool              `json:"stream,omitempty"`
	StreamOptions    *StreamOptions     `json:"stream_options,omitempty"`
	Temperature      *float32           `json:"temperature,omitempty"`
	TopP             *float32           `json:"top_p,omitempty"`
	Tools            []Tool             `json:"tools,omitempty"`
	ToolChoice       json.RawMessage    `json:"tool_choice,omitempty"` // string or object per the API; kept raw
	User             *string            `json:"user,omitempty"`
}

// ResponseFormat selects the response format (e.g. "json_object").
type ResponseFormat struct {
	Type string `json:"type"`
}

// StreamOptions configures streaming behavior.
type StreamOptions struct {
	// Fields can be added as the implementation requires.
}

// Tool declares a tool, such as a callable function.
type Tool struct {
	Type     string    `json:"type"`
	Function *Function `json:"function,omitempty"`
}

// Function identifies a callable function by name.
type Function struct {
	Name string `json:"name"`
}

// ToolChoiceFunction is the object form of a tool_choice value.
type ToolChoiceFunction struct {
	Type     string    `json:"type"`
	Function *Function `json:"function,omitempty"`
}
57 |
--------------------------------------------------------------------------------
/pkg/openai/openai_response.go:
--------------------------------------------------------------------------------
1 | package myopenai
2 |
3 | import "encoding/json"
4 |
// OpenAIResponse is a non-streaming OpenAI-compatible chat completion
// response; Error is set instead of Choices on failure.
type OpenAIResponse struct {
	ID                string   `json:"id"`
	Object            string   `json:"object,omitempty"`
	Created           int64    `json:"created,omitempty"`
	Model             string   `json:"model,omitempty"`
	SystemFingerprint string   `json:"system_fingerprint,omitempty"`
	Choices           []Choice `json:"choices,omitempty"`
	Usage             *Usage   `json:"usage,omitempty"`

	Error *ErrorDetail `json:"error,omitempty"`
}

// Choice is one completion option in a response.
type Choice struct {
	Index        int              `json:"index"`
	Message      ResponseMessage  `json:"message"`
	LogProbs     *json.RawMessage `json:"logprobs"` // RawMessage so a JSON null round-trips unchanged
	FinishReason string           `json:"finish_reason"`
}

// FunctionCall is the legacy (pre-tools) function-call payload.
type FunctionCall struct {
	Name      string `json:"name,omitempty"`
	Arguments string `json:"arguments,omitempty"`
}

// ToolType identifies the kind of tool being called.
type ToolType string

// ToolCall is one tool invocation requested by the model.
type ToolCall struct {
	Index    *int         `json:"index,omitempty"`
	ID       string       `json:"id"`
	Type     ToolType     `json:"type"`
	Function FunctionCall `json:"function"`
}

// ResponseMessage is the message payload of a non-streaming choice.
type ResponseMessage struct {
	Role       string     `json:"role"`
	Content    string     `json:"content"`
	ToolCalls  []ToolCall `json:"tool_calls,omitempty"`
	ToolCallID string     `json:"tool_call_id,omitempty"`
}

// ResponseDelta is the incremental message payload of a streaming choice.
type ResponseDelta struct {
	Role      string     `json:"role"`
	Content   string     `json:"content"`
	ToolCalls []ToolCall `json:"tool_calls,omitempty"`
}

// Usage reports token consumption for a request.
type Usage struct {
	PromptTokens     int `json:"prompt_tokens,omitempty"`
	CompletionTokens int `json:"completion_tokens,omitempty"`
	TotalTokens      int `json:"total_tokens,omitempty"`
}

// ErrorDetail carries the specifics of an API error.
type ErrorDetail struct {
	Message string      `json:"message,omitempty"` // error message
	Type    string      `json:"type,omitempty"`    // error type
	Param   interface{} `json:"param,omitempty"`   // may be null, hence interface{}
	Code    interface{} `json:"code,omitempty"`    // may be null or string/number, hence interface{}
}

// OpenAIStreamResponse is one streaming (SSE) chunk of a chat completion.
type OpenAIStreamResponse struct {
	ID                string                       `json:"id,omitempty"`
	Object            string                       `json:"object,omitempty"`
	Created           int64                        `json:"created,omitempty"`
	Model             string                       `json:"model,omitempty"`
	SystemFingerprint string                       `json:"system_fingerprint,omitempty"`
	Choices           []OpenAIStreamResponseChoice `json:"choices,omitempty"`
	Usage             *Usage                       `json:"usage,omitempty"`
	Error             *ErrorDetail                 `json:"error,omitempty"`
}

// OpenAIStreamResponseChoice is one choice inside a streaming chunk.
type OpenAIStreamResponseChoice struct {
	Index        int           `json:"index"`
	Delta        ResponseDelta `json:"delta,omitempty"`
	Logprobs     any           `json:"logprobs,omitempty"`
	FinishReason any           `json:"finish_reason,omitempty"`
}
89 |
--------------------------------------------------------------------------------
/pkg/simple_client/simple_client.go:
--------------------------------------------------------------------------------
1 | package simple_client
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "encoding/json"
7 | "errors"
8 | "github.com/gin-gonic/gin"
9 | "github.com/sashabaranov/go-openai"
10 | "io"
11 | "net/http"
12 | "net/http/httptest"
13 | "simple-one-api/pkg/handler"
14 | )
15 |
// init is intentionally empty; kept as a placeholder for future
// package-level setup.
func init() {

}
19 |
// SimpleClient is an in-process client that serves OpenAI-style chat
// requests by dispatching them through the local gin handler rather than
// over the network.
type SimpleClient struct {
}

// NewSimpleClient constructs a SimpleClient. The authToken argument is
// currently unused because requests never leave the process.
func NewSimpleClient(authToken string) *SimpleClient {
	return NewSimpleClientWithConfig()
}

// NewSimpleClientWithConfig constructs a SimpleClient with default settings.
func NewSimpleClientWithConfig() *SimpleClient {
	return &SimpleClient{}
}
34 |
35 | func (c *SimpleClient) CreateChatCompletion(
36 | ctx context.Context,
37 | request openai.ChatCompletionRequest,
38 | ) (response openai.ChatCompletionResponse, err error) {
39 | request.Stream = false
40 | reqBody, _ := json.Marshal(request)
41 | httpReq, _ := http.NewRequest("POST", "/v1/chat/completions", bytes.NewBuffer(reqBody))
42 | httpReq.Header.Set("Content-Type", "application/json")
43 |
44 | // 创建Gin的实例和配置路由
45 | ginc := gin.New()
46 | ginc.POST("/v1/chat/completions", func(ctx *gin.Context) {
47 | handler.HandleOpenAIRequest(ctx, &request)
48 | })
49 |
50 | // 创建响应记录器
51 | w := httptest.NewRecorder()
52 |
53 | // 使用ServeHTTP处理请求
54 | ginc.ServeHTTP(w, httpReq)
55 |
56 | // 解析响应
57 |
58 | if w.Code >= http.StatusBadRequest {
59 | err = errors.New(string(w.Body.Bytes()))
60 | return
61 | }
62 |
63 | err = json.Unmarshal(w.Body.Bytes(), &response)
64 |
65 | return
66 | }
67 |
68 | func (c *SimpleClient) CreateChatCompletionStream(
69 | ctx context.Context,
70 | request openai.ChatCompletionRequest,
71 | ) (stream *SimpleChatCompletionStream, err error) {
72 | request.Stream = true
73 | // 创建io.Pipe连接
74 | reader, writer := io.Pipe()
75 |
76 | recorder := httptest.NewRecorder()
77 |
78 | // 配置gin的上下文和请求
79 | ginc := gin.New()
80 | ginc.Use(func(ctx *gin.Context) {
81 | crw := NewCustomResponseWriter(writer)
82 | ctx.Writer = crw
83 | ctx.Next()
84 | })
85 | ginc.POST("/v1/chat/completions", func(ctx *gin.Context) {
86 | handler.HandleOpenAIRequest(ctx, &request)
87 | })
88 |
89 | // 模拟发送请求
90 | go func() {
91 | defer writer.Close()
92 | requestData, _ := json.Marshal(request)
93 | httpReq, _ := http.NewRequest("POST", "/v1/chat/completions", bytes.NewBuffer(requestData))
94 | httpReq.Header.Set("Content-Type", "application/json")
95 | ginc.ServeHTTP(recorder, httpReq)
96 | }()
97 |
98 | return NewSimpleChatCompletionStream(reader), nil
99 | }
100 |
--------------------------------------------------------------------------------
/pkg/simple_client/simple_stream_reader.go:
--------------------------------------------------------------------------------
1 | package simple_client
2 |
3 | import (
4 | "bufio"
5 | "bytes"
6 | "encoding/json"
7 | "fmt"
8 | "github.com/gin-gonic/gin"
9 | "github.com/sashabaranov/go-openai"
10 | "io"
11 | "net/http"
12 | "strings"
13 | )
14 |
// CustomResponseWriter is a gin.ResponseWriter replacement that redirects
// all writes into an arbitrary io.Writer (here an io.Pipe writer feeding
// the in-process stream reader) while keeping a copy in a local buffer.
type CustomResponseWriter struct {
	gin.ResponseWriter
	writer io.Writer     // destination for all written bytes
	status int           // last status code passed to WriteHeader
	header http.Header   // response header storage
	body   *bytes.Buffer // copy of everything written, used by Size()
}

// NewCustomResponseWriter wraps w in a CustomResponseWriter with an empty
// header map and an empty body buffer.
func NewCustomResponseWriter(w io.Writer) *CustomResponseWriter {
	return &CustomResponseWriter{
		writer: w,
		header: http.Header{},
		body:   bytes.NewBuffer([]byte{}),
	}
}
30 |
31 | func (crw *CustomResponseWriter) CloseNotify() <-chan bool {
32 | if notifier, ok := crw.writer.(http.CloseNotifier); ok {
33 | return notifier.CloseNotify()
34 | }
35 | // 如果 crw.writer 不支持 CloseNotifier,返回一个永不发送通知的通道
36 | c := make(chan bool)
37 | close(c)
38 | return c
39 | }
40 |
// Write copies data into the internal buffer (for Size accounting) and
// forwards it to the underlying writer.
func (crw *CustomResponseWriter) Write(data []byte) (int, error) {
	crw.body.Write(data) // Optionally store the body data
	return crw.writer.Write(data)
}

// WriteHeader records the status code and emits a raw HTTP status line
// into the underlying writer.
// NOTE(review): that status line goes into the same pipe the SSE stream
// reader consumes — confirm downstream readers tolerate it.
func (crw *CustomResponseWriter) WriteHeader(statusCode int) {
	crw.status = statusCode // Store status code
	crw.writer.Write([]byte(fmt.Sprintf("HTTP/1.1 %d %s\r\n", statusCode, http.StatusText(statusCode))))
}

// WriteString writes s as bytes via Write.
func (crw *CustomResponseWriter) WriteString(s string) (int, error) {
	return crw.Write([]byte(s))
}
54 |
55 | func (crw *CustomResponseWriter) Header() http.Header {
56 | // Mimic the behavior of an http.ResponseWriter if needed
57 | return http.Header{}
58 | }
59 |
// Status returns the last status code recorded by WriteHeader (0 if
// WriteHeader was never called).
func (crw *CustomResponseWriter) Status() int {
	return crw.status
}

// Size returns the number of bytes buffered so far by Write.
func (crw *CustomResponseWriter) Size() int {
	return crw.body.Len()
}

// Flush forwards the flush to the underlying writer when it implements
// http.Flusher; otherwise it is a no-op.
func (crw *CustomResponseWriter) Flush() {
	if flusher, ok := crw.writer.(http.Flusher); ok {
		flusher.Flush()
	}
}
73 |
// SimpleChatCompletionStream reads SSE-formatted chat completion chunks
// line by line from an underlying reader.
type SimpleChatCompletionStream struct {
	reader *bufio.Reader
}

// NewSimpleChatCompletionStream wraps reader in a buffered line reader.
func NewSimpleChatCompletionStream(reader io.Reader) *SimpleChatCompletionStream {
	return &SimpleChatCompletionStream{reader: bufio.NewReader(reader)}
}
81 |
82 | func (scs *SimpleChatCompletionStream) Recv() (*openai.ChatCompletionStreamResponse, error) {
83 | var response openai.ChatCompletionStreamResponse
84 |
85 | line, err := scs.reader.ReadBytes('\n')
86 | if err != nil {
87 | if err == io.EOF {
88 | return nil, err
89 | }
90 | return nil, err
91 | }
92 |
93 | if len(line) == 1 && string(line) == "\n" {
94 | return nil, nil
95 | }
96 |
97 | if strings.Contains(string(line), "[DONE]") {
98 | return nil, io.EOF
99 | }
100 |
101 | data := strings.TrimSpace(string(line))
102 | if strings.HasPrefix(data, "data: ") {
103 | jsonData := strings.TrimPrefix(data, "data: ")
104 | if strings.HasPrefix(jsonData, `{"error":`) {
105 | if err := json.Unmarshal([]byte(jsonData), &response); err != nil {
106 | return &response, err
107 | }
108 | } else {
109 | if err := json.Unmarshal([]byte(jsonData), &response); err != nil {
110 | return &response, err
111 | }
112 | }
113 | return &response, nil
114 | }
115 |
116 | errData, _ := io.ReadAll(scs.reader)
117 |
118 | return &response, fmt.Errorf("unexpected data format: %s", string(errData))
119 | }
120 |
--------------------------------------------------------------------------------
/pkg/translation/llm_translate.go:
--------------------------------------------------------------------------------
1 | package translation
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "errors"
7 | "fmt"
8 | "github.com/sashabaranov/go-openai"
9 | "go.uber.org/zap"
10 | "io"
11 | "simple-one-api/pkg/config"
12 | "simple-one-api/pkg/mylog"
13 | "simple-one-api/pkg/simple_client"
14 | )
15 |
// createLLMTranslationPromptJson builds a machine-translation prompt
// whose input section is the JSON encoding of a TranslationV1Request.
func createLLMTranslationPromptJson(srcText string, srcLang string, targetLang string) string {
	transReq := TranslationV1Request{
		Text:       srcText,
		SourceLang: srcLang,
		TargetLang: targetLang,
	}
	// Marshal error ignored: the struct contains only string/bool fields
	// and cannot fail to encode.
	reqJsonstr, _ := json.Marshal(transReq)
	return fmt.Sprintf("你是一个机器翻译接口,遵循以下输入输出协议,当接收到输入,直接给出输出即可,不要任何多余的回复\n输入协议(json格式):\n```\n{\"text\":\"Hello world!\",\"target_lang\":\"DE\"}\n```\n\n翻译结果直接输出:\n\nHallo, Welt!\n\n现在我的输入是:\n```\n%s\n```\n输出:\n", reqJsonstr)

}
26 |
// defaultLLMTransPrompt is the built-in translation prompt template. It
// takes two fmt verbs: the target language and the source text.
var defaultLLMTransPrompt = "你是一个机器翻译接口,遵循以下输入输出协议,当接收到输入,直接给出输出即可,不要任何多余的回复\n输入:\n```\n将以下文本翻译为目标语言:DE\n文本:\n\n\nHello world!\n```\n\n翻译结果直接输出:\n\nHallo, Welt!\n\n现在我的输入是:\n```\n将以下文本翻译为目标语言:%s\n文本:\n\n\n%s\n```\n输出:"

// createLLMTranslationPrompt renders the translation prompt, preferring
// the template from configuration when one is set.
// NOTE(review): srcLang is accepted but not used by the template call.
func createLLMTranslationPrompt(srcText string, srcLang string, targetLang string) string {
	prompt := defaultLLMTransPrompt
	if config.GTranslation.PromptTemplate != "" {
		prompt = config.GTranslation.PromptTemplate
	}

	return fmt.Sprintf(prompt, targetLang, srcText)
}
37 |
38 | func LLMTranslate(srcText string, srcLang string, targetLang string) (string, error) {
39 |
40 | prompt := createLLMTranslationPrompt(srcText, srcLang, targetLang)
41 |
42 | var req openai.ChatCompletionRequest
43 | req.Stream = false
44 | req.Model = "random"
45 |
46 | message := openai.ChatCompletionMessage{
47 | Role: openai.ChatMessageRoleUser,
48 | Content: prompt,
49 | }
50 |
51 | req.Messages = append(req.Messages, message)
52 |
53 | client := simple_client.NewSimpleClient("")
54 |
55 | resp, err := client.CreateChatCompletion(context.Background(), req)
56 | if err != nil {
57 | mylog.Logger.Error("Error creating chat completion:", zap.Error(err))
58 | return "", err
59 | }
60 |
61 | if len(resp.Choices) > 0 {
62 | mylog.Logger.Info("Received chat response", zap.String("content", resp.Choices[0].Message.Content))
63 |
64 | return resp.Choices[0].Message.Content, nil
65 | }
66 |
67 | return "", errors.New("no result")
68 | }
69 |
70 | func LLMTranslateStream(srcText string, srcLang string, targetLang string, cb func(string)) (string, error) {
71 | var allResult string
72 | prompt := createLLMTranslationPrompt(srcText, srcLang, targetLang)
73 |
74 | var req openai.ChatCompletionRequest
75 | req.Stream = false
76 | req.Model = "random"
77 |
78 | message := openai.ChatCompletionMessage{
79 | Role: openai.ChatMessageRoleUser,
80 | Content: prompt,
81 | }
82 |
83 | req.Messages = append(req.Messages, message)
84 |
85 | client := simple_client.NewSimpleClient("")
86 |
87 | chatStream, err := client.CreateChatCompletionStream(context.Background(), req)
88 | if err != nil {
89 | mylog.Logger.Error("Error creating chat completion:", zap.Error(err))
90 | return "", err
91 | }
92 |
93 | for {
94 | var chatResp *openai.ChatCompletionStreamResponse
95 | chatResp, err = chatStream.Recv()
96 | if errors.Is(err, io.EOF) {
97 | mylog.Logger.Debug("Stream finished")
98 | return allResult, nil
99 | }
100 | if err != nil {
101 | mylog.Logger.Error("Error receiving chat response:", zap.Error(err))
102 | break
103 | }
104 |
105 | if chatResp == nil {
106 | continue
107 | }
108 |
109 | mylog.Logger.Info("Received chat response", zap.Any("chatResp", chatResp))
110 | if len(chatResp.Choices) > 0 {
111 | cb(chatResp.Choices[0].Delta.Content)
112 |
113 | allResult += chatResp.Choices[0].Delta.Content
114 | }
115 | }
116 |
117 | return allResult, err
118 | }
119 |
--------------------------------------------------------------------------------
/pkg/translation/translate_handler_v1.go:
--------------------------------------------------------------------------------
1 | package translation
2 |
3 | import (
4 | "encoding/json"
5 | "github.com/gin-gonic/gin"
6 | "go.uber.org/zap"
7 | "net/http"
8 | "simple-one-api/pkg/mylog"
9 | "simple-one-api/pkg/utils"
10 | )
11 |
// TranslationV1Request is the request body for the v1 (DeepL-style)
// translate endpoint.
type TranslationV1Request struct {
	Text       string `json:"text" binding:"required"`
	SourceLang string `json:"source_lang,omitempty"`
	TargetLang string `json:"target_lang" binding:"required"`
	Stream     bool   `json:"stream,omitempty"` // when true, results are streamed as SSE
}

// TranslationV1Response is the response body for the v1 translate
// endpoint.
type TranslationV1Response struct {
	Alternatives []string `json:"alternatives,omitempty"`
	Code         int      `json:"code"` // 200 on success
	Data         string   `json:"data"` // translated text
	ID           int64    `json:"id,omitempty"`
	Method       string   `json:"method,omitempty"`
	SourceLang   string   `json:"source_lang,omitempty"`
	TargetLang   string   `json:"target_lang"`
}
30 |
31 | // translateHandler 处理翻译请求的函数
32 | func TranslateV1Handler(c *gin.Context) {
33 | // 处理 Authorization 验证
34 | token := c.GetHeader("Authorization")
35 | if token == "" {
36 | token = c.Query("token")
37 | if token == "" {
38 | //c.JSON(http.StatusUnauthorized, gin.H{"error": "Unauthorized"})
39 | //return
40 | }
41 | }
42 |
43 | // 绑定请求 JSON 数据
44 | var req TranslationV1Request
45 | if err := c.ShouldBindJSON(&req); err != nil {
46 | c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
47 | return
48 | }
49 |
50 | if req.Stream {
51 | utils.SetEventStreamHeaders(c)
52 |
53 | cb := func(dstText string) {
54 | tr := TranslationV1Response{
55 | Data: dstText,
56 | }
57 |
58 | trJsonData, _ := json.Marshal(tr)
59 |
60 | _, err := c.Writer.WriteString("data: " + string(trJsonData) + "\n\n")
61 | if err != nil {
62 | mylog.Logger.Error("Error binding JSON:", zap.Error(err))
63 | }
64 | c.Writer.(http.Flusher).Flush()
65 | }
66 |
67 | _, err := LLMTranslateStream(req.Text, req.SourceLang, req.TargetLang, cb)
68 | if err != nil {
69 | mylog.Logger.Error("Error binding JSON:", zap.Error(err))
70 | c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
71 | return
72 | }
73 |
74 | return
75 | } else {
76 | targetText, err := LLMTranslate(req.Text, req.SourceLang, req.TargetLang)
77 | if err != nil {
78 | c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
79 | return
80 | }
81 |
82 | response := TranslationV1Response{
83 | Code: 200,
84 | Data: targetText,
85 | ID: 8356681003,
86 | Method: "Pro",
87 | SourceLang: req.SourceLang,
88 | TargetLang: req.TargetLang,
89 | }
90 |
91 | c.JSON(http.StatusOK, response)
92 | return
93 | }
94 |
95 | return
96 | }
97 |
--------------------------------------------------------------------------------
/pkg/translation/translate_handler_v2.go:
--------------------------------------------------------------------------------
1 | package translation
2 |
3 | import (
4 | "encoding/json"
5 | "github.com/gin-gonic/gin"
6 | "go.uber.org/zap"
7 | "net/http"
8 | "simple-one-api/pkg/mylog"
9 | "simple-one-api/pkg/utils"
10 | "sync"
11 | )
12 |
// TranslationV2Request is the request body for the v2 (DeepL-style)
// translate endpoint; Text carries one or more strings to translate.
type TranslationV2Request struct {
	Text       []string `json:"text" binding:"required"`
	TargetLang string   `json:"target_lang" binding:"required"`
	SourceLang string   `json:"source_lang,omitempty"`
	Stream     bool     `json:"stream,omitempty"` // when true, only Text[0] is streamed as SSE
}

// TranslationV2Response wraps the list of translation results.
type TranslationV2Response struct {
	Translations []TranslationV2Result `json:"translations"`
}

// TranslationV2Result is a single translated text entry.
type TranslationV2Result struct {
	DetectedSourceLanguage string `json:"detected_source_language"`
	Text                   string `json:"text"`
}
28 |
29 | func translateStream(c *gin.Context, transReq *TranslationV2Request) error {
30 | utils.SetEventStreamHeaders(c)
31 |
32 | cb := func(dstText string) {
33 | var tr TranslationV2Response
34 | tResult := TranslationV2Result{
35 | Text: dstText,
36 | }
37 | tr.Translations = append(tr.Translations, tResult)
38 |
39 | trJsonData, _ := json.Marshal(tr)
40 |
41 | _, err := c.Writer.WriteString("data: " + string(trJsonData) + "\n\n")
42 | if err != nil {
43 | mylog.Logger.Error("Error binding JSON:", zap.Error(err))
44 | }
45 | c.Writer.(http.Flusher).Flush()
46 | }
47 |
48 | _, err := LLMTranslateStream(transReq.Text[0], transReq.SourceLang, transReq.TargetLang, cb)
49 | if err != nil {
50 | mylog.Logger.Error("Error binding JSON:", zap.Error(err))
51 | c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
52 | return err
53 | }
54 | return nil
55 | }
56 |
57 | func TranslateV2Handler(c *gin.Context) {
58 | var request TranslationV2Request
59 | if err := c.ShouldBindJSON(&request); err != nil {
60 | mylog.Logger.Error("Error binding JSON:", zap.Error(err))
61 | c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
62 | return
63 | }
64 |
65 | if request.Stream {
66 | err := translateStream(c, &request)
67 | if err != nil {
68 | mylog.Logger.Error("Error translating stream:", zap.Error(err))
69 | c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
70 | return
71 | }
72 | } else {
73 |
74 | var transResp TranslationV2Response
75 | var wg sync.WaitGroup
76 | var mu sync.Mutex
77 | sem := make(chan struct{}, 5)
78 |
79 | for _, srcText := range request.Text {
80 | wg.Add(1)
81 | sem <- struct{}{} // 占用一个并发槽
82 |
83 | go func(text string) {
84 | defer wg.Done()
85 | defer func() { <-sem }() // 释放一个并发槽
86 |
87 | var trv2 TranslationV2Result
88 | dstText, err := LLMTranslate(text, "", request.TargetLang)
89 | if err != nil {
90 | mylog.Logger.Error("Error translating stream:", zap.Error(err))
91 | return
92 | }
93 |
94 | trv2.Text = dstText
95 |
96 | mu.Lock()
97 | transResp.Translations = append(transResp.Translations, trv2)
98 | mu.Unlock()
99 | }(srcText)
100 | }
101 |
102 | wg.Wait()
103 | c.JSON(http.StatusOK, transResp)
104 | }
105 | }
106 |
--------------------------------------------------------------------------------
/pkg/utils/custom_transport_utils.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "bufio"
5 | "bytes"
6 | "fmt"
7 | "io"
8 | "net/http"
9 | "strings"
10 | )
11 |
12 | // CustomTransport 是一个自定义的 RoundTripper
13 | type CustomTransport struct {
14 | Transport http.RoundTripper
15 | }
16 |
17 | // RoundTrip 实现了 http.RoundTripper 接口
18 | func (c *CustomTransport) RoundTrip(req *http.Request) (*http.Response, error) {
19 | resp, err := c.Transport.RoundTrip(req)
20 | if err != nil {
21 | return nil, err
22 | }
23 |
24 | // 检查 HTTP 状态码,如果是错误状态码,读取响应体并返回错误
25 | if resp.StatusCode >= 400 {
26 | bodyBytes, readErr := io.ReadAll(resp.Body)
27 | if readErr != nil {
28 | return nil, fmt.Errorf("error reading error response body: %v", readErr)
29 | }
30 | resp.Body.Close()
31 | return nil, fmt.Errorf("HTTP error: %s, body: %s", resp.Status, string(bodyBytes))
32 | }
33 |
34 | // 创建一个新的响应体
35 | modifiedBody := &modifiedReadCloser{
36 | originalBody: resp.Body,
37 | reader: bufio.NewReader(resp.Body),
38 | }
39 | resp.Body = modifiedBody
40 |
41 | return resp, nil
42 | }
43 |
44 | // modifiedReadCloser 是一个自定义的 ReadCloser,用于修改响应体内容
45 | type modifiedReadCloser struct {
46 | originalBody io.ReadCloser
47 | buf *bytes.Buffer
48 | reader *bufio.Reader
49 | }
50 |
51 | func (m *modifiedReadCloser) Read(p []byte) (int, error) {
52 | // 如果缓冲区为空,从原始响应体读取数据并处理
53 | if m.buf == nil || m.buf.Len() == 0 {
54 | line, err := m.reader.ReadString('\n')
55 | if err != nil {
56 | if err == io.EOF {
57 | return 0, io.EOF
58 | }
59 | return 0, err
60 | }
61 | // 仅在 "data:" 而不是 "data: " 的情况下进行替换
62 | if strings.HasPrefix(line, "data:") && !strings.HasPrefix(line, "data: ") {
63 | line = strings.Replace(line, "data:", "data: ", 1)
64 | }
65 | m.buf = bytes.NewBufferString(line)
66 | }
67 | return m.buf.Read(p)
68 | }
69 |
70 | func (m *modifiedReadCloser) Close() error {
71 | return m.originalBody.Close()
72 | }
73 |
--------------------------------------------------------------------------------
/pkg/utils/file_utils.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "path/filepath"
7 | "strings"
8 | )
9 |
10 | func GetAbsolutePath(path string) (string, error) {
11 | absPath, err := filepath.Abs(path)
12 | if err != nil {
13 | return "", err
14 | }
15 | return absPath, nil
16 | }
17 |
18 | func ResolveRelativePathToAbsolute(filename string) (string, error) {
19 | // 如果文件名是绝对路径,直接返回
20 | if filepath.IsAbs(filename) {
21 | return filename, nil
22 | }
23 |
24 | // 获取当前工作目录
25 | wd, err := os.Getwd()
26 | if err != nil {
27 | return "", fmt.Errorf("could not get current working directory: %w", err)
28 | }
29 |
30 | // 将相对路径转换为绝对路径
31 | absPath := filepath.Join(wd, filename)
32 |
33 | return absPath, nil
34 | }
35 |
36 | func GetAbsolutePathDir(filename string) (string, error) {
37 | // 如果文件名是绝对路径,直接返回其目录名
38 | if filepath.IsAbs(filename) {
39 | return filepath.Dir(filename), nil
40 | }
41 |
42 | // 获取当前工作目录
43 | wd, err := os.Getwd()
44 | if err != nil {
45 | return "", fmt.Errorf("could not get current working directory: %w", err)
46 | }
47 |
48 | // 将相对路径转换为绝对路径
49 | absPath := filepath.Join(wd, filename)
50 |
51 | // 返回绝对路径的目录名
52 | return filepath.Dir(absPath), nil
53 | }
54 |
// GetFileNameAndType splits filePath into its base name without the
// extension and the extension without the leading dot.
func GetFileNameAndType(filePath string) (string, string) {
	base := filepath.Base(filePath)
	ext := filepath.Ext(base)
	return strings.TrimSuffix(base, ext), strings.TrimPrefix(ext, ".")
}
71 |
// IsSimpleFileName reports whether fileName is a bare file name with no
// directory component, i.e. it contains no "/" separator (an absolute
// path necessarily contains one).
func IsSimpleFileName(fileName string) bool {
	return !strings.Contains(fileName, "/")
}
86 |
87 | func FileExists(filename string) bool {
88 | _, err := os.Stat(filename)
89 | if os.IsNotExist(err) {
90 | return false
91 | }
92 | return true
93 | }
94 |
--------------------------------------------------------------------------------
/pkg/utils/gin_utils.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "errors"
5 | "github.com/gin-gonic/gin"
6 | "net/http"
7 | "strings"
8 | )
9 |
10 | func SetEventStreamHeaders(c *gin.Context) {
11 | c.Writer.Header().Set("Content-Type", "text/event-stream")
12 | c.Writer.Header().Set("Cache-Control", "no-cache")
13 | c.Writer.Header().Set("Connection", "keep-alive")
14 | c.Writer.Header().Set("Transfer-Encoding", "chunked")
15 | c.Writer.Header().Set("X-Accel-Buffering", "no")
16 | }
17 |
18 | func SendOpenAIStreamEOFData(c *gin.Context) {
19 | c.Writer.WriteString("data: [DONE]\n\n")
20 | c.Writer.(http.Flusher).Flush()
21 | }
22 |
23 | // 从Authorization头部中获取API密钥
24 | func GetAPIKeyFromHeader(c *gin.Context) (string, error) {
25 | authHeader := c.GetHeader("Authorization")
26 | if authHeader == "" {
27 | return "", errors.New("invalid authorization header format")
28 | }
29 |
30 | parts := strings.Split(authHeader, " ")
31 | if len(parts) != 2 || parts[0] != "Bearer" {
32 | return "", errors.New("authorization header not found")
33 | }
34 | return parts[1], nil
35 | }
36 |
--------------------------------------------------------------------------------
/pkg/utils/map_utils.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
// GetStringFromMap looks up key in data and returns its value when that
// value is a string; the boolean reports whether a string was found.
func GetStringFromMap(data map[string]interface{}, key string) (string, bool) {
	value, exists := data[key]
	if !exists {
		return "", false
	}
	strValue, ok := value.(string)
	return strValue, ok
}
13 |
--------------------------------------------------------------------------------
/pkg/utils/math_utils.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
// Min returns the smaller of a and b.
func Min(a, b int) int {
	if a > b {
		return b
	}
	return a
}
9 |
--------------------------------------------------------------------------------
/pkg/utils/pointer_utils.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
// GetString dereferences ptr, returning "" when it is nil.
func GetString(ptr *string) string {
	if ptr == nil {
		return ""
	}
	return *ptr
}

// GetInt64 dereferences ptr, returning 0 when it is nil.
func GetInt64(ptr *int64) int64 {
	if ptr == nil {
		return 0
	}
	return *ptr
}

// GetInt dereferences ptr, returning 0 when it is nil.
func GetInt(ptr *int) int {
	if ptr == nil {
		return 0
	}
	return *ptr
}
24 |
--------------------------------------------------------------------------------
/pkg/utils/simple_custom_transport_utils.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "fmt"
5 | "io"
6 | "net/http"
7 | )
8 |
9 | // CustomTransport 是一个自定义的 RoundTripper
10 | type SimpleCustomTransport struct {
11 | Transport http.RoundTripper
12 | }
13 |
14 | // RoundTrip 实现了 http.RoundTripper 接口
15 | func (c *SimpleCustomTransport) RoundTrip(req *http.Request) (*http.Response, error) {
16 | resp, err := c.Transport.RoundTrip(req)
17 | if err != nil {
18 | return nil, err
19 | }
20 |
21 | // 检查 HTTP 状态码,如果是错误状态码,读取最多 1024 个字节的响应体并返回错误
22 | if resp.StatusCode >= 400 {
23 | bodyBytes := make([]byte, 1024)
24 | n, readErr := resp.Body.Read(bodyBytes)
25 | if readErr != nil && readErr != io.EOF {
26 | return nil, fmt.Errorf("error reading error response body: %v", readErr)
27 | }
28 | resp.Body.Close()
29 | return nil, fmt.Errorf("HTTP error: %s, body: %s", resp.Status, string(bodyBytes[:n]))
30 | }
31 |
32 | return resp, nil
33 | }
34 |
--------------------------------------------------------------------------------
/pkg/utils/time_utils.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import "time"
4 |
// ParseRFC3339NanoToUnixTime parses an RFC3339Nano-formatted timestamp
// string and returns the corresponding Unix timestamp in seconds.
func ParseRFC3339NanoToUnixTime(dateTimeStr string) (int64, error) {
	parsed, parseErr := time.Parse(time.RFC3339Nano, dateTimeStr)
	if parseErr != nil {
		// Zero timestamp plus the parse error on malformed input.
		return 0, parseErr
	}
	return parsed.Unix(), nil
}
14 |
--------------------------------------------------------------------------------
/quick_build.bat:
--------------------------------------------------------------------------------
@echo off

REM Name of the output binary
SET BINARY_NAME=simple-one-api.exe

REM Build the project with cgo disabled
echo Building %BINARY_NAME%...
SET CGO_ENABLED=0
go build -o %BINARY_NAME%

echo Build completed.
--------------------------------------------------------------------------------
/quick_build.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Target platform and architecture: taken from arguments 1 and 2,
# defaulting to the host values reported by the Go toolchain.
GOOS=${1:-$(go env GOOS)}
GOARCH=${2:-$(go env GOARCH)}

# Name of the output binary.
BINARY_NAME="simple-one-api"

# Build the project. Bug fix: GOOS/GOARCH were previously computed but
# never passed to `go build`, so the platform/arch arguments were
# silently ignored and the build always targeted the host.
echo "Building $BINARY_NAME for $GOOS/$GOARCH..."
CGO_ENABLED=0 GOOS="$GOOS" GOARCH="$GOARCH" go build -o "$BINARY_NAME"

echo "Build completed."
15 |
--------------------------------------------------------------------------------
/samples/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "debug": false,
3 | "load_balancing": "random",
4 | "services": {
5 | "xinghuo": [
6 | {
7 | "models": ["spark-lite"],
8 | "enabled": true,
9 | "credentials": {
10 | "appid": "xxx",
11 | "api_key": "xxx",
12 | "api_secret": "xxx"
13 | }
14 | }
15 | ],
16 | "qianfan": [
17 | {
18 | "models": ["yi_34b_chat","ERNIE-Speed-8K","ERNIE-Speed-128K","ERNIE-Lite-8K","ERNIE-Lite-8K-0922","ERNIE-Tiny-8K"],
19 | "enabled": true,
20 | "credentials": {
21 | "api_key": "xxx",
22 | "secret_key": "xxx"
23 | }
24 | }
25 | ],
26 | "hunyuan": [
27 | {
28 | "models": ["hunyuan-lite"],
29 | "enabled": true,
30 | "credentials": {
31 | "secret_id": "xxx",
32 | "secret_key": "xxx"
33 | }
34 | }
35 | ],
36 | "openai": [
37 | {
38 | "models": ["deepseek-chat"],
39 | "enabled": true,
40 | "credentials": {
41 | "api_key": "xxx"
42 | },
43 | "server_url":"https://api.deepseek.com/v1"
44 | },
45 | {
46 | "models": ["@cf/meta/llama-2-7b-chat-int8"],
47 | "enabled": true,
48 | "credentials": {
49 | "api_key": "xxx"
50 | },
51 | "server_url": "https://api.cloudflare.com/client/v4/accounts/xxx/ai/v1/chat/completions"
52 | },
53 | {
54 | "models": ["glm-4","glm-3-turbo"],
55 | "enabled": true,
56 | "credentials": {
57 | "api_key": "xxx"
58 | },
59 | "server_url":"https://open.bigmodel.cn/api/paas/v4/chat/completions"
60 | }
61 | ],
62 | "minimax": [
63 | {
64 | "models": ["abab6-chat"],
65 | "enabled": true,
66 | "credentials": {
67 | "group_id": "1782658868262748467",
68 | "api_key": "xxx"
69 | },
70 | "server_url":"https://api.minimax.chat/v1/text/chatcompletion_pro"
71 | }
72 | ]
73 | }
74 | }
75 |
--------------------------------------------------------------------------------
/samples/config_aliyun.json:
--------------------------------------------------------------------------------
1 | {
2 | "load_balancing": "random",
3 | "services": {
4 | "openai": [
5 | {
6 | "models": ["qwen-plus"],
7 | "enabled": true,
8 | "credentials": {
9 | "api_key": "xxx"
10 | },
11 | "server_url":"https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions"
12 | }
13 | ]
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/samples/config_azure_openai.json:
--------------------------------------------------------------------------------
1 | {
2 | "server_port": ":9090",
3 | "load_balancing": "random",
4 | "services": {
5 | "azure": [
6 | {
7 | "models": ["ada"],
8 | "enabled": true,
9 | "credentials": {
10 | "api_key": "xxx"
11 | },
12 | "server_url":"https://YOUR_RESOURCE_NAME.openai.azure.com/openai/deployments/YOUR_DEPLOYMENT_NAME/completions?api-version=2024-02-01"
13 | }
14 | ]
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/samples/config_cloudflare.json:
--------------------------------------------------------------------------------
1 | {
2 | "server_port": ":9090",
3 | "load_balancing": "random",
4 | "services": {
5 | "openai": [
6 | {
7 | "models": [
8 | "@cf/meta/llama-2-7b-chat-int8"
9 | ],
10 | "enabled": true,
11 | "credentials": {
12 | "api_key": "xxx"
13 | },
14 | "server_url": "https://api.cloudflare.com/client/v4/accounts/xxx/ai/v1/chat/completions"
15 | }
16 | ]
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/samples/config_cozecn.json:
--------------------------------------------------------------------------------
1 | {
2 | "server_port": ":9090",
3 | "load_balancing": "random",
4 | "services": {
5 | "cozecn": [
6 | {
7 | "models": ["xxx","xxx"],
8 | "enabled": true,
9 | "credentials": {
10 | "token": "xxx"
11 | },
12 | "server_url": "https://api.coze.cn/open_api/v2/chat"
13 | }
14 | ]
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/samples/config_cozecom.json:
--------------------------------------------------------------------------------
1 | {
2 | "server_port": ":9090",
3 | "load_balancing": "random",
4 | "services": {
5 | "cozecom": [
6 | {
7 | "models": ["xxx"],
8 | "enabled": true,
9 | "credentials": {
10 | "token": "xxx"
11 | },
12 | "server_url": "https://api.coze.com/open_api/v2/chat"
13 | }
14 | ]
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/samples/config_deepseek.json:
--------------------------------------------------------------------------------
1 | {
2 | "debug": false,
3 | "load_balancing": "random",
4 | "services": {
5 | "openai": [
6 | {
7 | "models": ["deepseek-chat"],
8 | "enabled": true,
9 | "credentials": {
10 | "api_key": "xxx"
11 | },
12 | "server_url":"https://api.deepseek.com/v1"
13 | }
14 | ]
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/samples/config_gemini.json:
--------------------------------------------------------------------------------
1 | {
2 | "server_port": ":9090",
3 | "log_level": "prod",
4 | "load_balancing": "random",
5 | "services": {
6 | "gemini": [
7 | {
8 | "models": ["gemini-1.5-flash"],
9 | "enabled": true,
10 | "credentials": {
11 | "api_key": "xxx"
12 | },
13 | "limit": {
14 | "rpm": 15,
15 | "timeout":120
16 | }
17 | }
18 | ]
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/samples/config_groq.json:
--------------------------------------------------------------------------------
1 | {
2 | "server_port": ":9090",
3 | "load_balancing": "random",
4 | "services": {
5 | "openai": [
6 | {
7 | "models": ["llama3-70b-8192","llama3-8b-8192","gemma-7b-it","mixtral-8x7b-32768"],
8 | "enabled": true,
9 | "credentials": {
10 | "api_key": "xxx"
11 | },
12 | "server_url":"https://api.groq.com/openai/v1"
13 | }
14 | ]
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/samples/config_hunyuan.json:
--------------------------------------------------------------------------------
1 | {
2 | "server_port": ":9090",
3 | "debug": false,
4 | "load_balancing": "random",
5 | "services": {
6 | "hunyuan": [
7 | {
8 | "models": ["hunyuan-lite"],
9 | "enabled": true,
10 | "credentials": {
11 | "secret_id": "xxx",
12 | "secret_key": "xxx"
13 | }
14 | }
15 | ]
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/samples/config_huoshan.json:
--------------------------------------------------------------------------------
1 | {
2 | "server_port": ":9090",
3 | "load_balancing": "random",
4 | "services": {
5 | "huoshan": [
6 | {
7 | "models": ["doubao32k"],
8 | "enabled": true,
9 | "credentials": {
10 | "access_key": "xxx",
11 | "secret_key": "xxx"
12 | },
13 | "model_map":{
14 | "doubao32k": "ep-20240612090709-hzjz5"
15 | },
16 | "server_url":"https://ark.cn-beijing.volces.com/api/v3"
17 | }
18 | ]
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/samples/config_llama_familly.json:
--------------------------------------------------------------------------------
1 | {
2 | "server_port": ":9090",
3 | "debug": false,
4 | "load_balancing": "random",
5 | "services": {
6 | "openai": [
7 | {
8 | "models": ["Atom-13B-Chat","Atom-7B-Chat","Atom-1B-Chat","Llama3-Chinese-8B-Instruct"],
9 | "enabled": true,
10 | "credentials": {
11 | "api_key": "xxx"
12 | },
13 | "server_url":"https://api.atomecho.cn/v1"
14 | }
15 | ]
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/samples/config_ollama.json:
--------------------------------------------------------------------------------
1 | {
2 | "server_port": ":9090",
3 | "load_balancing": "random",
4 | "services": {
5 | "ollama": [
6 | {
7 | "models": ["llama2"],
8 | "enabled": true,
9 | "server_url":"http://127.0.0.1:11434/api/chat"
10 | }
11 | ]
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/samples/config_qianfan.json:
--------------------------------------------------------------------------------
1 | {
2 | "debug": false,
3 | "load_balancing": "random",
4 | "services": {
5 | "qianfan": [
6 | {
7 | "models": ["yi_34b_chat","ERNIE-Speed-8K","ERNIE-Speed-128K","ERNIE-Lite-8K","ERNIE-Lite-8K-0922","ERNIE-Tiny-8K"],
8 | "enabled": true,
9 | "credentials": {
10 | "api_key": "xxx",
11 | "secret_key": "xxx"
12 | }
13 | }
14 | ]
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/samples/config_siliconflow.cn.json:
--------------------------------------------------------------------------------
1 | {
2 | "server_port": ":9099",
3 | "load_balancing": "random",
4 | "services": {
5 | "openai": [
6 | {
7 | "models": ["deepseek-ai/DeepSeek-Coder-V2-Instruct",
8 | "deepseek-ai/deepseek-v2-chat",
9 | "deepseek-ai/deepseek-llm-67b-chat",
10 | "alibaba/Qwen2-72B-Instruct",
11 | "alibaba/Qwen2-57B-A14B-Instruct",
12 | "alibaba/Qwen2-7B-Instruct",
13 | "alibaba/Qwen2-1.5B-Instruct",
14 | "alibaba/Qwen1.5-110B-Chat",
15 | "alibaba/Qwen1.5-32B-Chat",
16 | "alibaba/Qwen1.5-14B-Chat",
17 | "alibaba/Qwen1.5-7B-Chat",
18 | "01-ai/Yi-1.5-6B-Chat",
19 | "01-ai/Yi-1.5-9B-Chat",
20 | "01-ai/Yi-1.5-34B-Chat",
21 | "zhipuai/chatglm3-6B",
22 | "zhipuai/glm4-9B-chat"],
23 | "enabled": true,
24 | "credentials": {
25 | "api_key": "xxx"
26 | },
27 | "model_redirect": {
28 | "deepseek-v2-chat": "deepseek-ai/deepseek-v2-chat",
29 | "Qwen2-72B-Instruct": "alibaba/Qwen2-72B-Instruct"
30 | },
31 | "server_url":"https://api.siliconflow.cn/v1/chat/completions"
32 | }
33 |
34 | ]
35 | }
36 | }
37 |
--------------------------------------------------------------------------------
/samples/config_xinghuo.json:
--------------------------------------------------------------------------------
1 | {
2 | "debug": false,
3 | "load_balancing": "random",
4 | "services": {
5 | "xinghuo": [
6 | {
7 | "models": ["spark-lite"],
8 | "enabled": true,
9 | "credentials": {
10 | "appid": "xxx",
11 | "api_key": "xxx",
12 | "api_secret": "xxx"
13 | }
14 | }
15 | ]
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/samples/config_zhipu.json:
--------------------------------------------------------------------------------
1 | {
2 | "server_port": ":9090",
3 | "load_balancing": "random",
4 | "services": {
5 | "openai": [
6 | {
7 | "models": ["glm-4","glm-3-turbo"],
8 | "enabled": true,
9 | "credentials": {
10 | "api_key": "xxx"
11 | },
12 | "server_url":"https://open.bigmodel.cn/api/paas/v4/chat/completions"
13 | }
14 | ]
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/test/simple_client_test/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "fmt"
7 | "github.com/sashabaranov/go-openai"
8 | "io"
9 | "log"
10 | "simple-one-api/pkg/initializer" // 引入initializer包
11 | "simple-one-api/pkg/simple_client"
12 | )
13 |
14 | func testStream() error {
15 | prompt := "你好,大模型"
16 |
17 | var req openai.ChatCompletionRequest
18 | req.Stream = true
19 | req.Model = "random"
20 |
21 | message := openai.ChatCompletionMessage{
22 | Role: openai.ChatMessageRoleUser,
23 | Content: prompt,
24 | }
25 |
26 | req.Messages = append(req.Messages, message)
27 |
28 | client := simple_client.NewSimpleClient("")
29 |
30 | chatStream, err := client.CreateChatCompletionStream(context.Background(), req)
31 | if err != nil {
32 | fmt.Println(err)
33 | return err
34 | }
35 |
36 | for {
37 | chatResp, err := chatStream.Recv()
38 | if errors.Is(err, io.EOF) {
39 | fmt.Println("")
40 | return nil
41 | }
42 | if err != nil {
43 | fmt.Println(err)
44 | return err
45 | }
46 |
47 | if chatResp == nil {
48 | continue
49 | }
50 |
51 | fmt.Printf("%s", chatResp.Choices[0].Delta.Content)
52 | }
53 |
54 | fmt.Println("")
55 |
56 | return nil
57 | }
58 |
59 | func testNoneStream() {
60 | prompt := "你好,大模型"
61 |
62 | var req openai.ChatCompletionRequest
63 | req.Stream = true
64 | req.Model = "random"
65 |
66 | message := openai.ChatCompletionMessage{
67 | Role: openai.ChatMessageRoleUser,
68 | Content: prompt,
69 | }
70 |
71 | req.Messages = append(req.Messages, message)
72 |
73 | client := simple_client.NewSimpleClient("")
74 |
75 | resp, err := client.CreateChatCompletion(context.Background(), req)
76 | if err != nil {
77 | fmt.Println(err)
78 | return
79 | }
80 |
81 | if len(resp.Choices) > 0 {
82 | fmt.Println(resp.Choices[0].Message.Content)
83 | }
84 | }
85 |
86 | func main() {
87 | if err := initializer.Setup("../../myconfigs/config.json"); err != nil {
88 | log.Println(err)
89 | return
90 | }
91 | defer initializer.Cleanup()
92 |
93 | fmt.Println("stream mode===========")
94 | testStream()
95 | fmt.Println("none stream mode===========")
96 | testNoneStream()
97 | }
98 |
--------------------------------------------------------------------------------