├── 示例代码
│   ├── 兼容openai的其它各种库.md
│   ├── curl.md
│   ├── 兼容openai的python库.md
│   ├── 兼容openai的Node.js库.md
│   ├── js.md
│   ├── 流式示例stream.md
│   ├── tts.speech.语音合成.md
│   ├── gpt-4-vision.图片理解.md
│   ├── 文字生成图片.md
│   ├── translation(识别并翻译成英文).md
│   ├── audio_transcriptions(语音识别).md
│   ├── langchain的支持.md
│   ├── object-c语言(支持苹果IOS).md
│   ├── python.py
│   ├── php.php
│   ├── c++语言.md
│   ├── java语言(支持android).md
│   ├── 函数调用.md
│   └── c#语言(支持Unity).md
├── batch
│   ├── batch_data.jsonl
│   └── batch示例.md
├── assistant.助手
│   └── assistant示例.md
├── fine-tune.微调
│   ├── 微调示例.md
│   └── mydata.jsonl
├── README.md
├── NikaAPI接口.md
└── openai接口说明.md
/示例代码/兼容openai的其它各种库.md:
--------------------------------------------------------------------------------
1 | See the other two examples in this repository: 兼容openai的python库.md and 兼容openai的Node.js库.md.
2 | - There are two key points:
3 | 
4 | 1、Set the OPENAI_API_KEY environment variable to the your_user_token obtained from the Nika API admin console, replacing the official API_KEY: sk-******
5 | 
6 | 2、Set the OPENAI_API_BASE_URL environment variable to `https://api.bqrlm.cn/v1`, replacing the official domain: `https://api.openai.com/v1`
7 |
8 |
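A minimal sketch of the same two settings in Python (the token value is a placeholder; some libraries read OPENAI_API_BASE or OPENAI_BASE_URL instead, so check the library's docs):

```python
import os

# Placeholder: the your_user_token obtained from the Nika API admin console
os.environ["OPENAI_API_KEY"] = "your_user_token"
# Point OpenAI-compatible libraries at Nika API instead of https://api.openai.com/v1
os.environ["OPENAI_API_BASE_URL"] = "https://api.bqrlm.cn/v1"
```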
--------------------------------------------------------------------------------
/示例代码/curl.md:
--------------------------------------------------------------------------------
1 | ```
2 | curl -H "Content-Type: application/json" \
3 | -H "Authorization: Bearer $your_user_token" \
4 | -XPOST https://api.bqrlm.cn/v1/chat/completions -d '{
5 | "messages": [
6 | {"role":"user","content":"请介绍一下你自己"},
7 | {"role":"assistant","content":"您好,我是小一机器人。我能够与人对话互动,回答问题,协助创作,高效便捷地帮助人们获取信息、知识和灵感。"},
8 | {"role":"user","content": "1+100="}
9 | ]
10 | }' | iconv -f utf-8 -t utf-8
11 | ```
12 |
--------------------------------------------------------------------------------
/batch/batch_data.jsonl:
--------------------------------------------------------------------------------
1 | {"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-3.5-turbo-0125", "messages": [{"role": "system", "content": "You are a helpful assistant."},{"role": "user", "content": "Hello world!"}],"max_tokens": 1000}}
2 | {"custom_id": "request-2", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-3.5-turbo-0125", "messages": [{"role": "system", "content": "You are an unhelpful assistant."},{"role": "user", "content": "Hello world!"}],"max_tokens": 1000}}
--------------------------------------------------------------------------------
/示例代码/兼容openai的python库.md:
--------------------------------------------------------------------------------
1 | Install the OpenAI Python library
2 | ```
3 | pip install openai
4 | ```
5 | Run the following code
6 | ```
7 | import os
8 | import openai
9 |
10 | openai.api_key = "your your_user_token from Nika API"
11 | openai.api_base = "https://api.bqrlm.cn/v1/" # Nika API base_url; note the trailing /
12 |
13 | chat_completion = openai.ChatCompletion.create(
14 | model="gpt-3.5-turbo",
15 | messages=[{ "role": "user", "content": "Hello world" }]
16 | )
17 | print(chat_completion.choices[0].message.content)
18 | ```
19 | If it runs correctly, it prints:
20 | ```
21 | Hello there! How can I assist you today ?
22 | ```
--------------------------------------------------------------------------------
/示例代码/兼容openai的Node.js库.md:
--------------------------------------------------------------------------------
1 | Install the OpenAI Node.js library
2 | ```
3 | npm install openai
4 | ```
5 | Run the following code
6 | ```
7 | const { Configuration, OpenAIApi } = require("openai");
8 |
9 | const configuration = new Configuration({
10 | apiKey: "your Nika API key",
11 | basePath: "https://api.bqrlm.cn/v1"
12 | });
13 | const openai = new OpenAIApi(configuration);
14 |
15 | const chatCompletion = await openai.createChatCompletion({
16 | model: "gpt-3.5-turbo",
17 | messages: [{role: "user", content: "Hello world"}],
18 | });
19 | console.log(chatCompletion.data.choices[0].message.content);
20 | ```
21 | If it runs correctly, it prints:
22 | ```
23 | Hello there! How can I assist you today ?
24 | ```
25 |
--------------------------------------------------------------------------------
/示例代码/js.md:
--------------------------------------------------------------------------------
1 | JavaScript example code (just replace with your own key):
2 | ```
3 | // chatgpt.js file
4 | // request wrapper for the API
5 | import axios from "axios";
6 | 
7 | const your_user_token = 'xxxx';
8 | export const chatgpt = params => {
9 | return axios ({
10 | method: 'post',
11 | url: 'https://api.bqrlm.cn/v1/chat/completions',
12 | data: params,
13 | headers: {
14 | 'Content-Type': 'application/json',
15 | 'Authorization': 'Bearer '+your_user_token
16 | }
17 | }).then(res => res.data)
18 | }
19 | // test.js file
20 | import {chatgpt} from "./chatgpt.js";
21 |
22 | // call the chatgpt API
23 | chatgpt({
24 | "messages": [
25 | {"role": "user", "content": "1+100="}
26 | ]
27 | }).then(res => {console.log(res)})
28 | ```
29 |
--------------------------------------------------------------------------------
/示例代码/流式示例stream.md:
--------------------------------------------------------------------------------
1 | Straight to the code; replace the key with your own Nika API key
2 | ```
3 | import os
4 | import openai
5 | import requests
6 | import time
7 | import json
8 | import time
9 |
10 | API_SECRET_KEY = "xxxxx"  # your Nika API key
11 | BASE_URL = "https://api.bqrlm.cn/v1"
12 |
13 | def stream_chat(prompt: str):
14 | openai.api_key = API_SECRET_KEY
15 | openai.api_base = BASE_URL
16 | for chunk in openai.ChatCompletion.create(
17 | model="gpt-3.5-turbo",
18 | messages=[{"role": "user", "content": prompt}],
19 | stream=True,
20 | ):
21 | content = chunk["choices"][0].get("delta", {}).get("content")
22 | if content is not None:
23 | print(content)
24 |
25 | if __name__ == '__main__':
26 | stream_chat("圆周率的前10位");
27 | ```
28 |
--------------------------------------------------------------------------------
/示例代码/tts.speech.语音合成.md:
--------------------------------------------------------------------------------
1 | Text-to-speech example. For detailed parameters, see the OpenAI docs:
2 | https://platform.openai.com/docs/api-reference/audio/createSpeech
3 | Note that the openai Python package has been upgraded to a new major version...
4 |
5 | ```
6 | import os
7 | from openai import OpenAI
8 | import openai
9 | import requests
10 | import time
11 | import json
12 | import time
13 |
14 | API_SECRET_KEY = "xxxx"  # your Nika API key
15 | BASE_URL = "https://api.bqrlm.cn/v1/"  # Nika API base_url
16 |
17 | # speech
18 | def tts(query):
19 | openai.api_key = API_SECRET_KEY
20 | openai.base_url = BASE_URL
21 | speech_file_path = "test.mp3";
22 | response = openai.audio.speech.create(
23 | model="tts-1",
24 | voice="alloy",
25 | input=query
26 | )
27 | response.stream_to_file(speech_file_path)
28 |
29 | if __name__ == '__main__':
30 | start = time.time();
31 | tts("今天是星期二");
32 | end = time.time()
33 | print('本次处理时间(s): ', end - start)
34 | ```
35 |
36 |
37 |
--------------------------------------------------------------------------------
/示例代码/gpt-4-vision.图片理解.md:
--------------------------------------------------------------------------------
1 | 1、Using an image URL
2 | ```
3 | from openai import OpenAI
4 |
5 | API_SECRET_KEY = "xxxxxx";
6 | BASE_URL = "https://api.bqrlm.cn/v1/"
7 |
8 | client = OpenAI(api_key=API_SECRET_KEY, base_url=BASE_URL)
9 |
10 | response = client.chat.completions.create(
11 | model="gpt-4-vision-preview",
12 | messages=[
13 | {
14 | "role": "user",
15 | "content": [
16 | {"type": "text", "text": "What’s in this image?"},
17 | {
18 | "type": "image_url",
19 | "image_url": {
20 | "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
21 | }
22 | },
23 | ],
24 | }
25 | ],
26 | max_tokens=300,
27 | )
28 |
29 | print(response)
30 | #print(response.choices[0])
31 | ```
32 | 2、Using a base64-encoded image
33 | Just replace the key and url in the code below with the Nika API ones:
34 | key is the secret key you get from the Nika API admin console
35 | url is: `https://api.bqrlm.cn/v1/chat/completions`
36 |
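A minimal sketch of the base64 variant (assuming the openai>=1.0 Python client; the file name image.jpg is illustrative):

```python
import base64
from openai import OpenAI

API_SECRET_KEY = "xxxxxx"  # your Nika API key
BASE_URL = "https://api.bqrlm.cn/v1/"

client = OpenAI(api_key=API_SECRET_KEY, base_url=BASE_URL)

# Read a local image and encode it as base64
with open("image.jpg", "rb") as f:
    b64_image = base64.b64encode(f.read()).decode("utf-8")

response = client.chat.completions.create(
    model="gpt-4-vision-preview",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What's in this image?"},
                {
                    "type": "image_url",
                    # pass the image inline as a data URL instead of a public URL
                    "image_url": {"url": f"data:image/jpeg;base64,{b64_image}"},
                },
            ],
        }
    ],
    max_tokens=300,
)
print(response.choices[0].message.content)
```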
37 | 
38 |
39 |
--------------------------------------------------------------------------------
/示例代码/文字生成图片.md:
--------------------------------------------------------------------------------
1 | ```
2 | import os
3 | import requests
4 | import time
5 | import json
6 |
7 | # Call the OpenAI API to generate an image
8 | def images_generations():
9 | response = requests.post(
10 | # Nika API base_url
11 | "https://api.bqrlm.cn/v1/images/generations",
12 | headers={
13 | "Content-Type": "application/json",
14 | "Authorization": "Bearer $your_user_token" # 用您的Nika API的your_user_token替换此处
15 | },
16 | json={
17 | "model": "dall-e-3",
18 | "prompt": "a cat sitting on a mat", # 图像描述
19 | "n": 1, # 生成图像数量
20 | "size": "512x512", # 图像大小
21 | "response_format": "url" # 图像格式
22 | }
23 | )
24 | # Get the image URL
25 | print(response.text)
26 | image_url = json.loads(response.text)["data"][0]["url"]
27 |
28 | # Download the image
29 | response = requests.get(image_url)
30 |
31 | # Save the image
32 | with open("cat.png", "wb") as f:
33 | f.write(response.content)
34 |
35 | if __name__ == '__main__':
36 | images_generations();
37 |
38 | ```
39 | This code sends a request to the OpenAI API containing the model name, image prompt, number of images, image size, and response format. The OpenAI API uses these parameters to generate an image and returns the image URL in its response. You can then download and save the generated image with Python's requests library.
40 |
41 | Enjoy the result:
42 | 
43 |
--------------------------------------------------------------------------------
/示例代码/translation(识别并翻译成英文).md:
--------------------------------------------------------------------------------
1 | Example code for translations (just replace with your own key):
2 | Introduction
3 | The speech-to-text API provides two endpoints, transcriptions and translations, based on the state-of-the-art open-source large-v2 Whisper model. They can be used to:
4 | 
5 | Transcribe audio into text in the same language as the audio.
6 | Translate and transcribe audio into English.
7 | Currently, file uploads are limited to 25 MB, and the following input file types are supported: mp3, mp4, mpeg, mpga, m4a, wav, and webm.
8 | ```
9 | import os
10 | import openai
11 | import requests
12 | import time
13 | import json
14 | import time
15 |
16 | API_SECRET_KEY = "your api_key from Nika API"
17 | BASE_URL = "https://api.bqrlm.cn/v1"
18 |
19 | # audio_transcriptions
20 | def audio_transcriptions(file_name):
21 | openai.api_key = API_SECRET_KEY
22 | openai.api_base = BASE_URL
23 | audio_file = open(file_name, "rb")
24 | resp = openai.Audio.transcribe("whisper-1", audio_file)
25 | json_str = json.dumps(resp, ensure_ascii=False) # keep Chinese characters readable in the output
26 | print(json_str)
27 |
28 | def translation(file_name):
29 | openai.api_key = API_SECRET_KEY
30 | openai.api_base = BASE_URL
31 | audio_file = open(file_name, "rb")
32 | resp = openai.Audio.translate("whisper-1", audio_file)
33 | json_str = json.dumps(resp, ensure_ascii=False) # keep Chinese characters readable in the output
34 | print(json_str)
35 |
36 | if __name__ == '__main__':
37 | start = time.time();
38 | #audio_transcriptions("腾讯流量卡试音(1).wav");
39 | translation("腾讯流量卡试音(1).wav");
40 | end = time.time()
41 | print('本次处理时间(s): ', end - start)
42 | ```
43 |
--------------------------------------------------------------------------------
/示例代码/audio_transcriptions(语音识别).md:
--------------------------------------------------------------------------------
1 | Example code for audio_transcriptions (just replace with your own key):
2 | Introduction
3 | The speech-to-text API provides two endpoints, transcriptions and translations, based on the state-of-the-art open-source large-v2 Whisper model. They can be used to:
4 | 
5 | Transcribe audio into text in the same language as the audio.
6 | Translate and transcribe audio into English.
7 | Currently, file uploads are limited to 25 MB, and the following input file types are supported: mp3, mp4, mpeg, mpga, m4a, wav, and webm.
8 | ```
9 | import os
10 | import openai
11 | import requests
12 | import time
13 | import json
14 | import time
15 |
16 | API_SECRET_KEY = "your api_key from Nika API"
17 | BASE_URL = "https://api.bqrlm.cn/v1"
18 |
19 | # audio_transcriptions
20 | def audio_transcriptions(file_name):
21 | openai.api_key = API_SECRET_KEY
22 | openai.api_base = BASE_URL
23 | audio_file = open(file_name, "rb")
24 | resp = openai.Audio.transcribe("whisper-1", audio_file)
25 | json_str = json.dumps(resp, ensure_ascii=False) # keep Chinese characters readable in the output
26 | print(json_str)
27 |
28 | def translation(file_name):
29 | openai.api_key = API_SECRET_KEY
30 | openai.api_base = BASE_URL
31 | audio_file = open(file_name, "rb")
32 | resp = openai.Audio.translate("whisper-1", audio_file)
33 | json_str = json.dumps(resp, ensure_ascii=False) # keep Chinese characters readable in the output
34 | print(json_str)
35 |
36 | if __name__ == '__main__':
37 | start = time.time();
38 | #audio_transcriptions("腾讯流量卡试音(1).wav");
39 | translation("腾讯流量卡试音(1).wav");
40 | end = time.time()
41 | print('本次处理时间(s): ', end - start)
42 | ```
43 |
--------------------------------------------------------------------------------
/示例代码/langchain的支持.md:
--------------------------------------------------------------------------------
1 | Example code: using an LLM for completions and embeddings
2 | The key point is really just how the key and url are set
3 | The options are:
4 | ```
5 | 1、Set them via environment variables
6 | 2、Pass them in as variables
7 | 3、Manually set the environment variables in code
8 | ```
9 | ```
10 | import os
11 | import requests
12 | import time
13 | import json
14 | import time
15 | from langchain.llms import OpenAI
16 | from langchain.embeddings.openai import OpenAIEmbeddings
17 |
18 | API_SECRET_KEY = "your Nika API key"
19 | BASE_URL = "https://api.bqrlm.cn/v1"  # Nika API base_url
20 |
21 | os.environ["OPENAI_API_KEY"] = API_SECRET_KEY
22 | os.environ["OPENAI_API_BASE"] = BASE_URL
23 |
24 | # Predict an output from your input, i.e. question answering:
25 | def text():
26 | llm = OpenAI(temperature=0.9,model='gpt-3.5-turbo-instruct')
27 | text = "What would be a good company name for a company that makes colorful socks?"
28 | print(llm(text))
29 |
30 | def embedding():
31 | embeddings = OpenAIEmbeddings()
32 | #text = "This is a test document."
33 | #doc_result = embeddings.embed_documents([text]);
34 | doc_result = embeddings.embed_documents(
35 | [
36 | "Hi there!",
37 | "Oh, hello!",
38 | "What's your name?",
39 | "My friends call me World",
40 | "Hello World!"
41 | ]
42 | );
43 | print(doc_result)
44 | # query
45 | embedded_query = embeddings.embed_query("What was the name mentioned in the conversation?")
46 | print(embedded_query)
47 |
48 | if __name__ == '__main__':
49 | #text();
50 | embedding();
51 | ```
--------------------------------------------------------------------------------
/batch/batch示例.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | - **Upload the batch data**
4 |
5 | ```python
6 | API_SECRET_KEY = "your api_key from Nika API"
7 | BASE_URL = "https://api.bqrlm.cn/v1"  # Nika API base_url
8 |
9 | # files
10 | def files():
11 | client = OpenAI(api_key=API_SECRET_KEY, base_url=BASE_URL)
12 | resp = client.files.create(
13 | file=open("test.jsonl", "rb"),
14 | purpose='batch'
15 | )
16 | print(resp)
17 | return resp.id
18 | ```
19 | - **Create the batch**
20 |
21 | ```python
22 | API_SECRET_KEY = "your api_key from Nika API"
23 | BASE_URL = "https://api.bqrlm.cn/v1"  # Nika API base_url
24 |
25 | def batches(file_id):
26 | client = OpenAI(api_key=API_SECRET_KEY, base_url=BASE_URL)
27 | resp = client.batches.create(input_file_id=file_id,
28 | endpoint="/v1/chat/completions",
29 | completion_window="24h")
30 | print(resp)
31 | return resp.id
32 | ```
33 | - **Check whether the batch is complete**
34 |
35 | Note:
36 | After the batch job is submitted in the previous step, it takes some time to run. How long depends on the size of your data, how many jobs are currently queued, whether OpenAI has enough capacity, and so on.
37 |
38 |
39 | ```python
40 | API_SECRET_KEY = "your api_key from Nika API"
41 | BASE_URL = "https://api.bqrlm.cn/v1"  # Nika API base_url
42 |
43 | def retrieve(bid):
44 | client = OpenAI(api_key=API_SECRET_KEY, base_url=BASE_URL)
45 | resp = client.batches.retrieve(bid)
46 | print(resp)
47 | return resp.id
48 | ```
49 | - **Get the batch results**
50 |
51 |
52 |
53 |
54 | ```python
55 | API_SECRET_KEY = "your api_key from Nika API"
56 | BASE_URL = "https://api.bqrlm.cn/v1"  # Nika API base_url
57 |
58 | def get_result(fid):
59 | client = OpenAI(api_key=API_SECRET_KEY, base_url=BASE_URL)
60 | content = client.files.content(fid)
61 | print(content.text);
62 | ```
63 |
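A minimal end-to-end sketch chaining the steps above (assuming the openai>=1.0 Python SDK; the polling interval and the batch_data.jsonl file name are illustrative):

```python
import time
from openai import OpenAI

API_SECRET_KEY = "your api_key from Nika API"
BASE_URL = "https://api.bqrlm.cn/v1"

client = OpenAI(api_key=API_SECRET_KEY, base_url=BASE_URL)

# 1. Upload the batch input file
batch_file = client.files.create(file=open("batch_data.jsonl", "rb"), purpose="batch")

# 2. Create the batch job
batch = client.batches.create(
    input_file_id=batch_file.id,
    endpoint="/v1/chat/completions",
    completion_window="24h",
)

# 3. Poll until the batch reaches a terminal state
while True:
    batch = client.batches.retrieve(batch.id)
    print(batch.status)
    if batch.status in ("completed", "failed", "expired", "cancelled"):
        break
    time.sleep(60)

# 4. Download the results
if batch.status == "completed" and batch.output_file_id:
    print(client.files.content(batch.output_file_id).text)
```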
--------------------------------------------------------------------------------
/示例代码/object-c语言(支持苹果IOS).md:
--------------------------------------------------------------------------------
1 | Objective-C example code for Apple platforms (iOS/macOS):
2 |
3 | ```
4 | #import <Foundation/Foundation.h>
5 |
6 | int main(int argc, const char * argv[]) {
7 | @autoreleasepool {
8 | NSString *apiKey = @"your Nika API api-key";
9 | NSString *urlString = @"https://api.bqrlm.cn/v1/chat/completions";
10 |
11 | NSDictionary *parameters = @{
12 | @"model": @"gpt-3.5-turbo",
13 | @"messages": @[
14 | @{@"role": @"system", @"content": @"You are a helpful assistant."},
15 | @{@"role": @"user", @"content": @"Tell me a joke."}
16 | ]
17 | };
18 |
19 | NSData *jsonData = [NSJSONSerialization dataWithJSONObject:parameters options:0 error:nil];
20 |
21 | NSMutableURLRequest *request = [NSMutableURLRequest requestWithURL:[NSURL URLWithString:urlString]];
22 | request.HTTPMethod = @"POST";
23 | [request setValue:@"application/json" forHTTPHeaderField:@"Content-Type"];
24 | [request setValue:[NSString stringWithFormat:@"Bearer %@", apiKey] forHTTPHeaderField:@"Authorization"];
25 | request.HTTPBody = jsonData;
26 |
27 | NSURLSession *session = [NSURLSession sharedSession];
28 | NSURLSessionDataTask *dataTask = [session dataTaskWithRequest:request completionHandler:^(NSData *data, NSURLResponse *response, NSError *error) {
29 | if (error) {
30 | NSLog(@"Error: %@", error);
31 | } else {
32 | NSDictionary *responseDict = [NSJSONSerialization JSONObjectWithData:data options:0 error:nil];
33 | NSLog(@"Response: %@", responseDict);
34 | }
35 | }];
36 |
37 | [dataTask resume];
38 |
39 | [[NSRunLoop currentRunLoop] run];
40 | }
41 | return 0;
42 | }
43 | ```
44 |
--------------------------------------------------------------------------------
/assistant.助手/assistant示例.md:
--------------------------------------------------------------------------------
1 | Note: the newer API supports streaming; switch to stream mode following the OpenAI API docs
2 |
3 | - **Create an assistant**
4 |
5 | ```python
6 | API_SECRET_KEY = "your api_key from Nika API"
7 | BASE_URL = "https://api.bqrlm.cn/v1/"  # Nika API base_url
8 |
9 | # assistant
10 | def create_assistant():
11 | client = OpenAI(api_key=API_SECRET_KEY, base_url=BASE_URL)
12 | assistant = client.beta.assistants.create(
13 | name="Math Tutor",
14 | instructions="You are a personal math tutor. Write and run code to answer math questions.",
15 | tools=[{"type": "code_interpreter"}],
16 | model="gpt-4-1106-preview"
17 | )
18 | print(assistant)
19 | ```
20 | - **Create a thread (conversation)**
21 |
22 | ```python
23 | API_SECRET_KEY = "your api_key from Nika API"
24 | BASE_URL = "https://api.bqrlm.cn/v1/"  # Nika API base_url
25 |
26 | # thread
27 | def create_thread():
28 | client = OpenAI(api_key=API_SECRET_KEY, base_url=BASE_URL)
29 | thread = client.beta.threads.create()
30 | print(thread)
31 | ```
32 | - **add a message**
33 |
34 | ```python
35 | API_SECRET_KEY = "your api_key from Nika API"
36 | BASE_URL = "https://api.bqrlm.cn/v1/"  # Nika API base_url
37 |
38 | def add_message(thread_id):
39 | client = OpenAI(api_key=API_SECRET_KEY, base_url=BASE_URL)
40 | message = client.beta.threads.messages.create(
41 | thread_id=thread_id, # the thread id comes from the previous step
42 | role="user",
43 | content="I need to solve the equation `3x + 11 = 14`. Can you help me?"
44 | )
45 | print(message)
46 | ```
47 |
48 | - **Run the assistant**
49 | The new assistant API supports passing stream=True so the results come back as a stream
50 |
51 | ```python
52 | API_SECRET_KEY = "your api_key from Nika API"
53 | BASE_URL = "https://api.bqrlm.cn/v1/"  # Nika API base_url
54 |
55 | # run an assistant
56 | def run(assistant_id, thread_id):
57 | client = OpenAI(api_key=API_SECRET_KEY, base_url=BASE_URL)
58 | stream = client.beta.threads.runs.create(
59 | thread_id=thread_id, # the thread id comes from the previous step
60 | assistant_id=assistant_id, # the assistant id comes from the previous step
61 | instructions="Please address the user as Jane Doe. The user has a premium account.",
62 | stream=True
63 | )
64 | for event in stream:
65 | print(event)
66 | ```
67 |
68 |
69 | Your assistant is ready!!
70 | You have now built your own assistant with the specified model and tools
71 |
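A minimal end-to-end sketch chaining the steps above (assuming the openai>=1.0 Python SDK and its beta Assistants API):

```python
from openai import OpenAI

API_SECRET_KEY = "your api_key from Nika API"
BASE_URL = "https://api.bqrlm.cn/v1/"

client = OpenAI(api_key=API_SECRET_KEY, base_url=BASE_URL)

# 1. Create the assistant
assistant = client.beta.assistants.create(
    name="Math Tutor",
    instructions="You are a personal math tutor. Write and run code to answer math questions.",
    tools=[{"type": "code_interpreter"}],
    model="gpt-4-1106-preview",
)

# 2. Create a thread and add the user's message
thread = client.beta.threads.create()
client.beta.threads.messages.create(
    thread_id=thread.id,
    role="user",
    content="I need to solve the equation `3x + 11 = 14`. Can you help me?",
)

# 3. Run the assistant with streaming enabled and print each event
stream = client.beta.threads.runs.create(
    thread_id=thread.id,
    assistant_id=assistant.id,
    stream=True,
)
for event in stream:
    print(event)
```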
72 |
--------------------------------------------------------------------------------
/fine-tune.微调/微调示例.md:
--------------------------------------------------------------------------------
1 | If you use Python, make sure the openai package version is right; it has had a major upgrade!!
2 | The key thing is that base_url must be set to the Nika API one; if that is wrong, nothing else will work.
3 | So pay attention to how base_url is set in the different package versions.
4 | What we know so far: in old versions it is set with: openai.api_base = BASE_URL,
5 | while in new versions it is set with: client = OpenAI(api_key=API_SECRET_KEY, base_url=BASE_URL).
6 | Don't ask why; it is OpenAI's fault.
7 | Code for the new version needs to be adapted according to the API docs: https://apiai.apifox.cn
8 |
9 | - **Prepare and upload the training data**
10 | ```python
11 | API_SECRET_KEY = "your api_key from Nika API"
12 | BASE_URL = "https://api.bqrlm.cn/v1"  # Nika API base_url
13 |
14 | # files
15 | def files():
16 | openai.api_key = API_SECRET_KEY
17 | openai.api_base = BASE_URL
18 | resp = openai.File.create(
19 | file=open("mydata.jsonl", "rb"),
20 | purpose='fine-tune'
21 | )
22 | print(resp)
23 | ```
24 |
25 | - **Create the fine-tuning job**
26 |
27 | ```python
28 | API_SECRET_KEY = "your api_key from Nika API"
29 | BASE_URL = "https://api.bqrlm.cn/v1"  # Nika API base_url
30 |
31 | # jobs
32 | def jobs(file_id):
33 | openai.api_key = API_SECRET_KEY
34 | openai.api_base = BASE_URL
35 | resp = openai.FineTuningJob.create(training_file=file_id, model="gpt-3.5-turbo") # the training file id comes from the previous step
36 | print(resp)
37 | ```
38 | - **Check whether training is complete**
39 |
40 | Note:
41 | After the training job is submitted in the previous step, the model takes some time to train. How long depends on the size of your data, how many jobs are currently queued, whether OpenAI has enough capacity, and so on.
42 |
43 | ```python
44 | API_SECRET_KEY = "your api_key from Nika API"
45 | BASE_URL = "https://api.bqrlm.cn/v1"  # Nika API base_url
46 |
47 | # retrieve
48 | def retrieve(ftid):
49 | openai.api_key = API_SECRET_KEY
50 | openai.api_base = BASE_URL
51 | resp = openai.FineTuningJob.retrieve(ftid) # the fine-tuning job id comes from the previous step
52 | print(resp)
53 | ```
54 |
55 | - **Use the fine-tuned model**
56 |
57 | ```python
58 | API_SECRET_KEY = "your api_key from Nika API"
59 | BASE_URL = "https://api.bqrlm.cn/v1"  # Nika API base_url
60 |
61 | # chat
62 | def chat_completions(query):
63 | openai.api_key = API_SECRET_KEY
64 | openai.api_base = BASE_URL
65 | resp = openai.ChatCompletion.create(
66 | model="gpt-3.5-turbo", # use the fine-tuned model name returned in the previous step
67 | messages=[
68 | {"role": "system", "content": "You are a helpful assistant."},
69 | {"role": "user", "content": query}
70 | ]
71 | )
72 | print(resp)
73 | ```
74 | Congratulations, all done!!
75 | You have now trained your own model on top of gpt-3.5
76 |
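For the new openai>=1.0 SDK, a minimal sketch of the same flow (method names follow the current OpenAI Python client; adapt as needed per the API docs linked above):

```python
from openai import OpenAI

API_SECRET_KEY = "your api_key from Nika API"
BASE_URL = "https://api.bqrlm.cn/v1"

client = OpenAI(api_key=API_SECRET_KEY, base_url=BASE_URL)

# 1. Upload the training data
training_file = client.files.create(file=open("mydata.jsonl", "rb"), purpose="fine-tune")

# 2. Create the fine-tuning job
job = client.fine_tuning.jobs.create(training_file=training_file.id, model="gpt-3.5-turbo")

# 3. Check the job status (repeat until it reports success)
job = client.fine_tuning.jobs.retrieve(job.id)
print(job.status, job.fine_tuned_model)

# 4. Once finished, chat with the fine-tuned model under its returned name
if job.fine_tuned_model:
    resp = client.chat.completions.create(
        model=job.fine_tuned_model,
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "What's the capital of France?"},
        ],
    )
    print(resp.choices[0].message.content)
```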
--------------------------------------------------------------------------------
/示例代码/python.py:
--------------------------------------------------------------------------------
1 | If you use Python, make sure the openai package version is right; it has had a major upgrade!!
2 | The key thing is that base_url must be set to the Nika API one; if that is wrong, nothing else will work.
3 | So pay attention to how base_url is set in the different package versions.
4 | What we know so far: in old versions it is set with: openai.api_base = BASE_URL,
5 | while in new versions it is set with: client = OpenAI(api_key=API_SECRET_KEY, base_url=BASE_URL).
6 | Don't ask why; it is OpenAI's fault.
7 |
8 | Using the official openai package (new version):
9 | import os
10 | from openai import OpenAI
11 | import openai
12 | import requests
13 | import time
14 | import json
15 | import time
16 |
17 | API_SECRET_KEY = "xxxxxx";
18 | BASE_URL = "https://api.bqrlm.cn/v1/"
19 |
20 | # chat
21 | def chat_completions3(query):
22 | client = OpenAI(api_key=API_SECRET_KEY, base_url=BASE_URL)
23 | resp = client.chat.completions.create(
24 | model="gpt-3.5-turbo",
25 | messages=[
26 | {"role": "system", "content": "You are a helpful assistant."},
27 | {"role": "user", "content": query}
28 | ]
29 | )
30 | print(resp)
31 | #print(resp.choices[0].message.content)
32 |
33 |
34 | Using the official openai package (old version):
35 | import os
36 | import openai
37 | import requests
38 | import time
39 | import json
40 | import time
41 |
42 | API_SECRET_KEY = "xxxxxx";
43 | BASE_URL = "https://api.bqrlm.cn/v1/"
44 | # chat
45 | def chat_completions2(query):
46 | openai.api_key = API_SECRET_KEY
47 | openai.api_base = BASE_URL
48 | resp = openai.ChatCompletion.create(
49 | model="gpt-3.5-turbo",
50 | messages=[
51 | {"role": "system", "content": "You are a helpful assistant."},
52 | {"role": "user", "content": query}
53 | ]
54 | )
55 | print(resp)
56 | #print(resp.choices[0].message.content)
57 |
58 | if __name__ == '__main__':
59 | chat_completions2("圆周率的前10位");
60 |
61 |
62 | Using a raw HTTP request:
63 | import os
64 | import requests
65 | import time
66 | import json
67 |
68 | def chat_completions():
69 | url="https://api.bqrlm.cn/v1/chat/completions"
70 | your_user_token = 'xxxxxxxxx'  # your your_user_token
71 | headers = {'Content-Type': 'application/json', 'Accept':'application/json',
72 | 'Authorization': "Bearer "+your_user_token}
73 | params = {'user':'张三',
74 | 'messages':[{'role':'user', 'content':'1+100='}]};
75 | r = requests.post(url, json.dumps(params), headers=headers)
76 | print(r)
77 | #print(r.json())
78 |
79 | if __name__ == '__main__':
80 | chat_completions();
81 |
82 |
--------------------------------------------------------------------------------
/示例代码/php.php:
--------------------------------------------------------------------------------
1 | // set the request headers
2 | $your_user_token = 'xxxxxx'; // your your_user_token
3 | $headers = array(
4 | "Content-Type: application/json",
5 | "Authorization: Bearer ".$your_user_token
6 | );
7 | // set the request parameters
8 | $params = array();
9 | $params['user'] = '张三';
10 | {
11 | $one = ["role" => 'user', "content" => "1+100="];
12 | $messages = array(); $messages[] = $one;
13 | $params['messages'] = $messages;
14 | }
15 | // make the request
16 | $cburl = 'https://api.bqrlm.cn/v1/chat/completions';
17 | $chatgpt_resp = Tool::_request('post', $cburl, $params, $headers);
18 | $data = json_decode($chatgpt_resp, true);
19 |
20 | The Tool method that sends the request is as follows:
21 | 
22 | /**
23 | * Send a request, including request headers
24 | * @param string $method 'get'|'post' request method
25 | * @param string $url URL
26 | * @param array|json $data data to send for a POST request
27 | * @param bool $ssl
28 | */
29 | public static function _request($method='get',$url,$data=array(),$headerArray=array(),$ssl=false){
30 | //initialize a curl handle
31 | $curl = curl_init();
32 | //set curl options
33 | curl_setopt($curl,CURLOPT_URL,$url);//url
34 | //user agent info for the request
35 | $user_agent = isset($_SERVER['HTTP_USER_AGENT'])?$_SERVER['HTTP_USER_AGENT']: 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0 FirePHP/0.7.4';
36 | curl_setopt($curl,CURLOPT_USERAGENT,$user_agent);
37 | curl_setopt($curl,CURLOPT_HTTPHEADER, $headerArray);
38 | //referer header, the request origin
39 | curl_setopt($curl,CURLOPT_AUTOREFERER,true);
40 | curl_setopt($curl, CURLOPT_TIMEOUT, 300);//set the timeout
41 | //SSL options
42 | if($ssl){
43 | //when disabled, curl skips verifying the server certificate
44 | curl_setopt($curl,CURLOPT_SSL_VERIFYPEER,false);
45 | //check that the server SSL certificate has a common name
46 | curl_setopt($curl,CURLOPT_SSL_VERIFYHOST,2);
47 | }
48 | //determine whether the request is POST or GET
49 | if(strtolower($method)=='post') {
50 | /**************handle POST options******************/
51 | //this is a POST request, handle the request data
52 | curl_setopt($curl,CURLOPT_POST,true);
53 | curl_setopt($curl,CURLOPT_POSTFIELDS,json_encode($data, JSON_UNESCAPED_UNICODE));
54 | }
55 | //whether to include response headers in the output
56 | curl_setopt($curl,CURLOPT_HEADER,false);
57 | //return the response as a string instead of printing it
58 | curl_setopt($curl,CURLOPT_RETURNTRANSFER,true);
59 |
60 | //send the request
61 | $response = curl_exec($curl);
62 | if (false === $response) {
63 | echo "\n", curl_error($curl), "\n";
64 | return false;
65 | }
66 | //close curl
67 | curl_close($curl);
68 | return $response;
69 | }
70 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # High-quality, stable API access to OpenAI, Gemini, Claude and more - for enterprises and developers
2 |
3 | #### Introduction
4 | Provides high-quality, stable API access to all kinds of large models for enterprises and developers. Large models can be accessed through the OpenAI API format and several others.
5 | Supports the OpenAI API, including: batch, assistant, Fine-tuning, o1-preview, o1-mini, gpt-4o, gpt-4-vision, gpt-4, etc.
6 | (Also supports Google Gemini, Claude, Baidu ERNIE Bot, Alibaba, iFLYTEK Spark, Zhipu ChatGLM, ChatGPT, etc.)
7 | Need to buy OpenAI, Gemini, or Claude accounts?
8 | Need a USD bank card?
9 | None of that is needed; just call the API directly. Simple, straightforward, and above all stable and reliable!!
10 |
11 | - **Key advantages**
12 | * No usage restrictions; you can top up via WeChat, pay by corporate transfer, etc., with no risk of account bans.
13 | * No need to buy OpenAI or similar accounts, and no USD bank card required.
14 | * Strong API coverage, supporting all OpenAI endpoints and models: batch, assistant, gpt-4o, gpt-4-vision, GPT-3.5, Embedding, Whisper, TTS, Fine-tuning, DALL-E-3, etc.
15 | * Also supports Google Gemini, Claude, Baidu ERNIE Bot, Alibaba, iFLYTEK Spark, ChatGLM, DeepSeek, etc.
16 | * Compatible with the OpenAI API format, so it works as a drop-in replacement.
17 | * Supports Batch, see the examples
18 | * Supports the assistant API, so you can create your own assistants, see the examples
19 | * Supports fine-tuning, so you can fine-tune GPT models with your own data, see the examples
20 | * Supports Embeddings, so you can run Langchain, vector stores, AutoGPT, and similar applications against the API
21 | * Supports stream mode, so native streaming applications work as-is
22 | * Supports text-to-image generation, including the latest DALL-E-3
23 | * Supports the official Whisper model, with transcriptions and translations, for speech recognition and translation
24 | * Supports function calling (function_call), see the examples
25 | * Supports the official TTS, with the tts-1 and tts-1-hd models, for speech synthesis, see the examples
26 | * More features on the way; you can also submit feature requests to us directly
27 |
28 | - **Project links**
29 | 1、Official website: https://api.bqrlm.cn
30 | Nika API - an API provider for large language models, supporting Baidu ERNIE Bot, Alibaba, iFLYTEK Spark, Tsinghua ChatGLM, ChatGPT, and more
31 | Your individual Secret Key, call logs, fine-tuned models, balance queries, example code, etc. are available in the admin console.
32 | 2、WeChat group (if you are interested in this project, you are welcome to join the WeChat group):
33 | 
34 |
35 |
36 |
37 |
38 | - **Notes**
39 | 1、The base_url for all endpoints below: `https://api.bqrlm.cn/` (https supported)
40 | 2、The API is called over HTTP. Every request must carry your API_KEY in the HTTP headers for authentication. Get your own API_KEY from the Nika API admin console.
41 | The request headers look like:
42 | ```
43 | Content-Type: application/json
44 | Authorization: Bearer your Nika API key
45 | ```
46 | 3、Do not touch on pornography, violence, politics, or other sensitive topics! The internet is not beyond the law; watch your words. Offending accounts will be permanently banned.
47 |
48 | - **Typical usage 1**
49 | 1、In Python, use the official openai package and set api_key to the API_KEY obtained from the Nika API console, replacing the official API_KEY: sk-****** (see the sketch after typical usage 3 below)
50 | 2、Set base_url to `https://api.bqrlm.cn/v1/`, replacing the official domain: `https://api.openai.com/v1/`
51 | 
52 | - **Typical usage 2**
53 | 1、In langchain, set the OPENAI_API_KEY environment variable to the API_KEY obtained from the Nika API console, replacing the official API_KEY: sk-******
54 | 2、Set the OPENAI_API_BASE_URL environment variable to `https://api.bqrlm.cn/v1/`, replacing the official domain: `https://api.openai.com/v1/`
55 | 
56 | - **Typical usage 3**
57 | 1、In any language, send HTTP requests directly, specifying the key in the request headers: the API_KEY obtained from the Nika API console, replacing the official API_KEY: sk-******
58 | 2、The request url follows the official format; just replace the base_url: use `https://api.bqrlm.cn/v1/` instead of the official domain `https://api.openai.com/v1/`
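A minimal Python sketch of typical usage 1 (the key is a placeholder; assumes the openai>=1.0 client):

```python
from openai import OpenAI

# API_KEY obtained from the Nika API admin console, replacing the official sk-****** key
client = OpenAI(api_key="your_api_key", base_url="https://api.bqrlm.cn/v1/")

resp = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello world"}],
)
print(resp.choices[0].message.content)
```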
59 | 
60 | ## More detailed API documentation ##
61 | 1、[OpenAI API notes](https://github.com/lxl-sql/nika-api/blob/main/openai%E6%8E%A5%E5%8F%A3%E8%AF%B4%E6%98%8E.md)
62 | 
63 | 2、[Nika API documentation](https://apiai.apifox.cn)
64 | 
65 | 
--------------------------------------------------------------------------------
/示例代码/c++语言.md:
--------------------------------------------------------------------------------
1 | C++ example code (just replace with your own key):
2 | ```
3 | #include <iostream>
4 | #include <string>
5 | #include <curl/curl.h>
6 | #include <json/json.h> // You'll need to have a JSON library like jsoncpp
7 |
8 | size_t WriteCallback(void* contents, size_t size, size_t nmemb, void* userp) {
9 | ((std::string*)userp)->append((char*)contents, size * nmemb);
10 | return size * nmemb;
11 | }
12 |
13 | int main() {
14 | CURL* curl;
15 | CURLcode res;
16 | std::string apiKey = "your api-key from Nika API";
17 | std::string urlString = "https://api.bqrlm.cn/v1/chat/completions";
18 |
19 | // Initialize the Curl library
20 | curl_global_init(CURL_GLOBAL_ALL);
21 | curl = curl_easy_init();
22 |
23 | if (curl) {
24 | struct curl_slist* headers = NULL;
25 | headers = curl_slist_append(headers, "Content-Type: application/json");
26 | headers = curl_slist_append(headers, ("Authorization: Bearer " + apiKey).c_str());
27 |
28 | // Prepare request data
29 | Json::Value requestData;
30 | requestData["model"] = "gpt-3.5-turbo";
31 |
32 | Json::Value message1;
33 | message1["role"] = "system";
34 | message1["content"] = "You are a helpful assistant.";
35 |
36 | Json::Value message2;
37 | message2["role"] = "user";
38 | message2["content"] = "Tell me a joke.";
39 |
40 | requestData["messages"].append(message1);
41 | requestData["messages"].append(message2);
42 |
43 | Json::StreamWriterBuilder writer;
44 | std::string jsonString = Json::writeString(writer, requestData);
45 |
46 | curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
47 | curl_easy_setopt(curl, CURLOPT_URL, urlString.c_str());
48 | curl_easy_setopt(curl, CURLOPT_POST, 1L);
49 | curl_easy_setopt(curl, CURLOPT_POSTFIELDS, jsonString.c_str());
50 | curl_easy_setopt(curl, CURLOPT_POSTFIELDSIZE, jsonString.length());
51 |
52 | // Response handling
53 | std::string response;
54 | curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, WriteCallback);
55 | curl_easy_setopt(curl, CURLOPT_WRITEDATA, &response);
56 |
57 | // Perform the request
58 | res = curl_easy_perform(curl);
59 |
60 | // Check for errors
61 | if (res != CURLE_OK) {
62 | std::cerr << "Curl error: " << curl_easy_strerror(res) << std::endl;
63 | } else {
64 | std::cout << "Response: " << response << std::endl;
65 | }
66 |
67 | // Clean up
68 | curl_slist_free_all(headers);
69 | curl_easy_cleanup(curl);
70 | }
71 | 
72 | curl_global_cleanup();
73 | 
74 | return 0;
75 | }
76 | ```
--------------------------------------------------------------------------------
/示例代码/java语言(支持android).md:
--------------------------------------------------------------------------------
1 | Java example code (just replace with your own key):
2 | ```
3 | package gpt.bqrlm.utils;
4 | import java.io.BufferedReader;
5 | import java.io.DataOutputStream;
6 | import java.io.InputStreamReader;
7 | import java.net.HttpURLConnection;
8 | import java.net.URL;
9 | import java.io.IOException;
10 |
11 | public class ChatDemo
12 | {
13 | public static void main(String[] args) throws Exception
14 | {
15 | String url = "https://api.bqrlm.cn/v1/chat/completions";
16 | String key = "your API_SECRET_KEY from Nika API";
17 | GPTConnectorServer(url, key);
18 | }
19 |
20 | public static void GPTConnectorServer(String paramUrl, String paramKey) throws IOException {
21 | // set the API key and model ID
22 | String apiKey = paramKey;
23 | String modelId = "gpt-3.5-turbo";
24 |
25 | // build the API request URL
26 | String apiUrl = paramUrl;
27 |
28 | // build the request payload
29 | String requestData = "{\"model\": \"" + modelId + "\", \"messages\": [{\"role\": \"system\", \"content\": \"You are a helpful assistant.\"}, {\"role\": \"user\", \"content\": \"Translate the following English text to French: 'Hello, how are you?'\"}]}";
30 |
31 | // create the URL object
32 | URL url = new URL(apiUrl);
33 | HttpURLConnection connection = (HttpURLConnection) url.openConnection();
34 |
35 | // set the request method to POST
36 | connection.setRequestMethod("POST");
37 |
38 | // set the request headers
39 | connection.setRequestProperty("Authorization", "Bearer " + apiKey);
40 | connection.setRequestProperty("Content-Type", "application/json");
41 | connection.setRequestProperty("Accept", "application/json");
42 |
43 | // enable output on the connection
44 | connection.setDoOutput(true);
45 |
46 | // write the request data to the output stream
47 | try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) {
48 | outputStream.write(requestData.getBytes());
49 | outputStream.flush();
50 | }
51 |
52 | // get the response
53 | int responseCode = connection.getResponseCode();
54 | if (responseCode == HttpURLConnection.HTTP_OK) {
55 | // read the response body
56 | try (BufferedReader reader = new BufferedReader(new InputStreamReader(connection.getInputStream()))) {
57 | String line;
58 | StringBuilder response = new StringBuilder();
59 | while ((line = reader.readLine()) != null) {
60 | response.append(line);
61 | }
62 | System.out.println("API 响应:\n" + response.toString());
63 | }
64 | } else {
65 | System.err.println("API 请求失败,响应码: " + responseCode);
66 | }
67 | // close the connection
68 | connection.disconnect();
69 | }
70 | }
71 |
72 |
73 | ```
74 |
--------------------------------------------------------------------------------
/fine-tune.微调/mydata.jsonl:
--------------------------------------------------------------------------------
1 | {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the capital of France?"}, {"role": "assistant", "content": "Paris, as if everyone doesn't know that already."}]}
2 | {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "Who wrote 'Romeo and Juliet'?"}, {"role": "assistant", "content": "Oh, just some guy named William Shakespeare. Ever heard of him?"}]}
3 | {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "Around 384,400 kilometers. Give or take a few, like that really matters."}]}
4 | {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "Around 384,400 kilometers. Give or take a few, like that really matters."}]}
5 | {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "Around 384,400 kilometers. Give or take a few, like that really matters."}]}
6 | {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "Around 384,400 kilometers. Give or take a few, like that really matters."}]}
7 | {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "Around 384,400 kilometers. Give or take a few, like that really matters."}]}
8 | {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "Around 384,400 kilometers. Give or take a few, like that really matters."}]}
9 | {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "Around 384,400 kilometers. Give or take a few, like that really matters."}]}
10 | {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "Around 384,400 kilometers. Give or take a few, like that really matters."}]}
11 | {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "Around 384,400 kilometers. Give or take a few, like that really matters."}]}
--------------------------------------------------------------------------------
/NikaAPI接口.md:
--------------------------------------------------------------------------------
1 | # Nika API endpoints
2 |
3 | #### Notes
4 | 1、These are Nika API's own endpoints, distinct from the OpenAI endpoints; they are generally used to manage and query your Nika API account, e.g. checking your balance
5 | 
6 | - **Notes**
7 | Important notes!!
8 | ```
9 | The main problems we see at the moment are these three:
10 | 1、You must add the request headers, as explained in the API docs:
11 | curl -H "Content-Type: application/json" -H "Authorization: Bearer $your_user_token" -XPOST https://api.bqrlm.cn/v1/chat/completions -d '{"messages": [{"role":"user","content":"请介绍一下你自己"}]}' | iconv -f utf-8 -t utf-8
12 | 2、messages is passed incorrectly; messages is an array
13 | 3、your_user_token is passed incorrectly; do not pass an OpenAI key any more. Pass the key you got from Nika API (no OpenAI key is needed)
14 | ```
15 | Note:
16 | 1、The base_url for all endpoints below: `https://api.bqrlm.cn/` (https supported)
17 | 2、The API is called over HTTP. Every request must carry the user's your_user_token in the HTTP headers for authentication. Get your own your_user_token from the Nika API admin console.
18 | The request headers look like:
19 | ```
20 | Content-Type: application/json
21 | Authorization: Bearer $your_user_token
22 | ```
23 |
24 | #### 1、Get profile
25 | 
26 | * Get account information: balance, etc.
27 | * Endpoint name: get profile
28 | * HTTP method: GET
29 | * Path: `/api/user/self`
30 | * Auth requirement: user
31 | * Description: get the current user's details, including permissions, quota, settings, etc.
32 |
33 | 💡 Request example:
34 | ```javascript
35 | const response = await fetch('/api/user/self', {
36 | method: 'GET',
37 | headers: {
38 | 'Content-Type': 'application/json',
39 | 'Authorization': 'Bearer your_user_token'
40 | }
41 | });
42 | const data = await response.json();
43 | ```
44 |
45 | ✅ Success response example:
46 | ```json
47 | {
48 | "success": true,
49 | "message": "",
50 | "data": {
51 | "id": 1,
52 | "username": "testuser",
53 | "display_name": "Test User",
54 | "role": 1,
55 | "status": 1,
56 | "email": "user@example.com",
57 | "group": "default",
58 | "quota": 1000000,
59 | "used_quota": 50000,
60 | "request_count": 100,
61 | "aff_code": "ABC123",
62 | "aff_count": 5,
63 | "aff_quota": 10000,
64 | "aff_history_quota": 50000,
65 | "inviter_id": 0,
66 | "linux_do_id": "",
67 | "setting": "{}",
68 | "stripe_customer": "",
69 | "sidebar_modules": "{\"chat\":{\"enabled\":true}}",
70 | "permissions": {
71 | "can_view_logs": true,
72 | "can_manage_tokens": true
73 | }
74 | }
75 | }
76 | ```
77 |
78 | ❗ Failure response example:
79 | ```json
80 | {
81 | "success": false,
82 | "message": "获取用户信息失败"
83 | }
84 | ```
85 |
86 | 🧾 Field descriptions:
87 | * `id` (number): user ID
88 | * `username` (string): username
89 | * `display_name` (string): display name
90 | * `role` (number): user role, 1 = regular user, 10 = admin, 100 = root user
91 | * `status` (number): user status, 1 = active, 2 = disabled
92 | * `email` (string): email address
93 | * `group` (string): user group
94 | * `quota` (number): total quota
95 | * `used_quota` (number): used quota
96 | * `request_count` (number): request count
97 | * `aff_code` (string): referral code
98 | * `aff_count` (number): number of referrals
99 | * `aff_quota` (number): referral reward quota
100 | * `aff_history_quota` (number): historical referral quota
101 | * `inviter_id` (number): inviter ID
102 | * `linux_do_id` (string): LinuxDo account ID
103 | * `setting` (string): user settings as a JSON string
104 | * `stripe_customer` (string): Stripe customer ID
105 | * `sidebar_modules` (string): sidebar module configuration as a JSON string
106 | * `permissions` (object): user permission info
--------------------------------------------------------------------------------
/示例代码/函数调用.md:
--------------------------------------------------------------------------------
1 | Straight to the code; replace the key with your own Nika API key
2 |
3 | ```
4 | import os
5 | import openai
6 | import requests
7 | import time
8 | import json
9 | import time
10 |
11 | API_SECRET_KEY = "xxxx";
12 | BASE_URL = "https://api.bqrlm.cn/v1"
13 |
14 | openai.api_key = API_SECRET_KEY
15 | openai.api_base = BASE_URL
16 |
17 | # Example dummy function hard coded to return the same weather
18 | # In production, this could be your backend API or an external API
19 | def get_current_weather(location, unit="fahrenheit"):
20 | """Get the current weather in a given location"""
21 | weather_info = {
22 | "location": location,
23 | "temperature": "72",
24 | "unit": unit,
25 | "forecast": ["sunny", "windy"],
26 | }
27 | return json.dumps(weather_info)
28 |
29 | def run_conversation():
30 | # Step 1: send the conversation and available functions to GPT
31 | messages = [{"role": "user", "content": "What's the weather like in Boston?"}]
32 | functions = [
33 | {
34 | "name": "get_current_weather",
35 | "description": "Get the current weather in a given location",
36 | "parameters": {
37 | "type": "object",
38 | "properties": {
39 | "location": {
40 | "type": "string",
41 | "description": "The city and state, e.g. San Francisco, CA",
42 | },
43 | "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
44 | },
45 | "required": ["location"],
46 | },
47 | }
48 | ]
49 | response = openai.ChatCompletion.create(
50 | model="gpt-3.5-turbo-0613",
51 | messages=messages,
52 | functions=functions,
53 | # function_call="auto", # auto is default, but we'll be explicit
54 | function_call={"name": "get_current_weather"}, # auto is default, but we'll be explicit
55 | )
56 | response_message = response["choices"][0]["message"]
57 |
58 | # Step 2: check if GPT wanted to call a function
59 | if response_message.get("function_call"):
60 | # Step 3: call the function
61 | # Note: the JSON response may not always be valid; be sure to handle errors
62 | available_functions = {
63 | "get_current_weather": get_current_weather,
64 | } # only one function in this example, but you can have multiple
65 | function_name = response_message["function_call"]["name"]
66 | function_to_call = available_functions[function_name]
67 | function_args = json.loads(response_message["function_call"]["arguments"])
68 | function_response = function_to_call(
69 | location=function_args.get("location"),
70 | unit=function_args.get("unit"),
71 | )
72 |
73 | # Step 4: send the info on the function call and function response to GPT
74 | response_message["content"]="Testing" # note: content must be provided and must not be empty, otherwise OpenAI returns an error
75 | messages.append(response_message) # extend conversation with assistant's reply
76 | messages.append(
77 | {
78 | "role": "function",
79 | "name": function_name,
80 | "content": function_response,
81 | }
82 | ) # extend conversation with function response
83 | second_response = openai.ChatCompletion.create(
84 | model="gpt-3.5-turbo-0613",
85 | messages=messages,
86 | ) # get a new response from GPT where it can see the function response
87 | return second_response
88 |
89 | #print(run_conversation())
90 |
91 | if __name__ == '__main__':
92 | run_conversation();
93 |
94 | ```
95 |
--------------------------------------------------------------------------------
/示例代码/c#语言(支持Unity).md:
--------------------------------------------------------------------------------
1 | Unity C# example code (just replace with your own key):
2 | ```
3 | using System;
4 | using System.Collections;
5 | using System.Text;
6 | using UnityEngine;
7 | using UnityEngine.Networking;
8 | using LitJson; // download the LitJson library and put it in the Assets directory
9 |
10 | public class ChatGPTScripts : MonoBehaviour
11 | {
12 | private string postUrl = "https://api.bqrlm.cn/v1/chat/completions";
13 | private const string user = "user";
14 | private const string messages = "messages";
15 |
16 | private void Start()
17 | {
18 | StartCoroutine(Post());
19 | }
20 | IEnumerator Post()
21 | {
22 | WWWForm form = new WWWForm();
23 |
24 | // set up the request data
25 | string apiSecretKey = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
26 | JsonData data = new JsonData();
27 | data[user] = "测试者";
28 |
29 | // messages
30 | JsonData messageDatas = new JsonData();
31 | messageDatas.SetJsonType(JsonType.Array);
32 |
33 | // a single message
34 | JsonData messageData = new JsonData();
35 | messageData["role"] = "user";
36 | messageData["content"] = "请介绍一下你自己";
37 |
38 | // add the message
39 | messageDatas.Add(messageData);
40 |
41 | // attach the messages
42 | data[messages] = messageDatas;
43 |
44 | // encode the JSON
45 | var dataBytes = Encoding.Default.GetBytes(data.ToJson());
46 | UnityWebRequest request = UnityWebRequest.Post(postUrl, form);
47 | request.uploadHandler = new UploadHandlerRaw(dataBytes);
48 |
49 | // send the HTTPS request
50 | request.SetRequestHeader("Content-Type", "application/json");
51 | request.SetRequestHeader("Authorization", "Bearer "+apiSecretKey);
52 | yield return request.SendWebRequest();
53 | if(request.isHttpError || request.isNetworkError)
54 | {
55 | Debug.LogError(request.error);
56 | }
57 | else
58 | {
59 | string receiveContent = request.downloadHandler.text;
60 | Debug.Log(receiveContent);
61 | }
62 | }
63 | }
64 | ```
64 |
65 | .NET C# example
66 | ```
67 | using System;
68 | using System.Net.Http;
69 | using System.Text;
70 | using System.Threading.Tasks;
71 | using Newtonsoft.Json.Linq;
72 |
73 | class ChatGPT
74 | {
75 | private static readonly string postUrl = "https://api.bqrlm.cn/v1/chat/completions";
76 | private const string user = "user";
77 | private const string messages = "messages";
78 |
79 | static async Task Main(string[] args)
80 | {
81 | await PostAsync();
82 | }
83 |
84 | static async Task PostAsync()
85 | {
86 | // set up the request data
87 | string apiSecretKey = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
88 | var data = new JObject();
89 | data[user] = "测试者";
90 |
91 | // messages
92 | var messageDatas = new JArray();
93 |
94 | // a single message
95 | var messageData = new JObject
96 | {
97 | ["role"] = "user",
98 | ["content"] = "请介绍一下你自己"
99 | };
100 |
101 | // add the message
102 | messageDatas.Add(messageData);
103 |
104 | // attach the messages
105 | data[messages] = messageDatas;
106 |
107 | // encode the JSON
108 | var jsonData = data.ToString();
109 | var dataBytes = Encoding.UTF8.GetBytes(jsonData);
110 |
111 | using (HttpClient client = new HttpClient())
112 | {
113 | var requestContent = new ByteArrayContent(dataBytes);
114 | requestContent.Headers.ContentType = new System.Net.Http.Headers.MediaTypeHeaderValue("application/json");
115 | client.DefaultRequestHeaders.Authorization = new System.Net.Http.Headers.AuthenticationHeaderValue("Bearer", apiSecretKey);
116 |
117 | HttpResponseMessage response = await client.PostAsync(postUrl, requestContent);
118 |
119 | if (response.IsSuccessStatusCode)
120 | {
121 | string receiveContent = await response.Content.ReadAsStringAsync();
122 | ExtractContent(receiveContent);
123 | }
124 | else
125 | {
126 | Console.WriteLine($"Error: {response.StatusCode}, {response.ReasonPhrase}");
127 | }
128 | }
129 | }
130 |
131 | static void ExtractContent(string jsonResponse)
132 | {
133 | JObject json = JObject.Parse(jsonResponse);
134 | string content = json["choices"][0]["message"]["content"].ToString();
135 | Console.WriteLine("Content: " + content);
136 | }
137 | }
138 | ```
139 |
140 |
--------------------------------------------------------------------------------
/openai接口说明.md:
--------------------------------------------------------------------------------
1 | # OpenAI API notes
2 | 
3 | #### Notes
4 | 1、The request format is exactly the same as OpenAI's, so refer directly to the OpenAI API reference: https://platform.openai.com/docs/api-reference/introduction
5 | (The OpenAI API is updated frequently, so it is best to follow the official OpenAI documentation above.)
6 | 2、All OpenAI endpoints are supported
7 | 3、Nika API only adds two top-level fields, code and msg, which indicate the status of the call. code=0 means success, non-zero means failure, and msg contains the error message (you can also ignore them and read the OpenAI-style response directly)
8 | Example:
9 | ```
10 | {
11 | "code": 0,
12 | "msg": "",
13 | "id": "as-bcmt5ct4iy",
14 | "created": 1680167072,
15 | "choices":[{"message":{"role":"assistant","content":"1+100=101"},"finish_reason":"stop","index":0}],
16 | "usage": {
17 | "prompt_tokens": 470,
18 | "completion_tokens": 198,
19 | "total_tokens": 668
20 | }
21 | }
22 | ```
23 |
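A minimal sketch of checking the extra code/msg wrapper before reading the OpenAI-style payload (raw HTTP via requests; the token is a placeholder):

```python
import requests

resp = requests.post(
    "https://api.bqrlm.cn/v1/chat/completions",
    headers={"Content-Type": "application/json",
             "Authorization": "Bearer your_user_token"},
    json={"messages": [{"role": "user", "content": "1+100="}]},
)
data = resp.json()

# Nika API wraps the OpenAI response with code/msg; code == 0 means success
if data.get("code", 0) == 0:
    print(data["choices"][0]["message"]["content"])
else:
    print("request failed:", data.get("msg"))
```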
24 | - **Notes**
25 | Important notes!!
26 | ```
27 | The main problems we see at the moment are:
28 | 1、You must add the request headers, as explained in the API docs:
29 | curl -H "Content-Type: application/json" -H "Authorization: Bearer $your_user_token" -XPOST https://api.bqrlm.cn/v1/chat/completions -d '{"messages": [{"role":"user","content":"请介绍一下你自己"}]}' | iconv -f utf-8 -t utf-8
30 | 2、messages is passed incorrectly; messages is an array
31 | 3、your_user_token is passed incorrectly; do not pass an OpenAI key any more. Pass the key you got from Nika API (no OpenAI key is needed)
32 | 4、If you use Python, make sure the openai package version is right; it has had a major upgrade!! The key thing is that base_url must be set to the Nika API one; if that is wrong, nothing else will work. Pay attention to how base_url is set in different package versions. What we know so far: in old versions it is openai.api_base = BASE_URL, while in new versions it is client = OpenAI(api_key=API_SECRET_KEY, base_url=BASE_URL). Don't ask why; it is OpenAI's fault
33 | ```
34 | Note:
35 | 1、The base_url for all endpoints below: `https://api.bqrlm.cn/` (https supported)
36 | 2、The API is called over HTTP. Every request must carry the user's your_user_token in the HTTP headers for authentication. Get your own your_user_token from the Nika API admin console.
37 | The request headers look like:
38 | ```
39 | Content-Type: application/json
40 | Authorization: Bearer $your_user_token
41 | ```
42 |
43 | (More detailed API docs: https://apiai.apifox.cn)
44 | #### Example endpoint 1: create a chat
45 | 
46 | Call this endpoint to start one chat request
47 | 
48 | - **Request URL**
49 | > `v1/chat/completions`
50 |
51 | - **Request method**
52 | >**POST**
53 |
54 | - **Header parameters**
55 | >
56 | | Name | Value | Description |
57 | | :-------- | :--------| :------ |
58 | | Content-Type| application/json| |
59 | | Authorization| Bearer your_user_token| Your own your_user_token, obtained from the Nika API admin console. |
60 |
61 | - **Request parameters**
62 | >
63 | | Parameter | Type | Required | Description |
64 | | :-------- | :--------| :------ | :------ |
65 | | model| string| Yes| The model to use, including but not limited to: gpt-4-1106-preview, gpt-4-vision-preview, gpt-3.5-turbo-1106, gpt-3.5-turbo-instruct, gpt-4, gpt-4-0314, gpt-4-0613, gpt-4-32k, gpt-4-32k-0613, gpt-3.5-turbo, gpt-3.5-turbo-0613, gpt-3.5-turbo-16k, gpt-3.5-turbo-16k-0613, plus claude, gemini, and many Chinese models. Full model list (kept up to date): https://apiai.apifox.cn|
66 | | messages| List(message)| Yes| Chat context. Notes: (1) messages must not be empty; one member means a single-turn conversation, multiple members mean a multi-turn conversation. (2) The last message is the current request; the earlier messages are the conversation history. (3) The number of members must be odd, and the message roles must alternate user, assistant. |
67 | | stream| bool| No| Whether to return data as a stream. Default: false.|
68 | | user| string| No| A unique identifier for the end user, used to monitor and detect abuse and prevent malicious calls.|
69 |
70 | Other advanced parameters:
71 | >
72 | | Parameter | Type | Required | Description |
73 | | :-------- | :--------| :------ | :------ |
74 | | temperature| number| No| What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both. Default: 1.|
75 | | top_p| number| No| An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. Default: 1.|
76 | | n| number| No| How many chat completion choices to generate for each input message. Default: 1.|
77 | | stop| string| No| Up to 4 sequences where the API will stop generating further tokens. Default: null.|
78 | | max_tokens| number| No| The maximum number of tokens to generate in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length. Default: unlimited.|
79 | | presence_penalty| number| No| Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. Default: 0.|
80 | | frequency_penalty| number| No| Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. Default: 0.|
81 | | logit_bias| map| No| Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. Default: null.|
82 |
83 | - **message fields**
84 | >
85 | | Name | Type | Description |
86 | | :-------- | :--------| :------ |
87 | | role| string| The role of the message author. One of system, user, assistant, or function. user: the end user; assistant: the dialogue assistant; function: a function call. |
88 | | content| string| The message content; must not be empty.|
89 |
90 | - **Response parameters**
91 | >
92 | | Field | Type | Description |
93 | | :-------- | :--------| :------ |
94 | | code| int| Result code; 0 means success, anything else means failure (see the msg field for the error message)|
95 | | msg| String| Result message|
96 | | id| string| The id of this conversation turn.|
97 | | created| int| Timestamp.|
98 | | choices| List(choice)| The returned chat results.|
99 | | usage| usage| Token usage statistics; token count = number of Chinese characters + number of words * 1.3 (an estimate only).|
100 |
101 | - **choice fields**
102 | >
103 | | Name | Type | Description |
104 | | :-------- | :--------| :------ |
105 | | message| message| See the message fields above.|
106 | | index| int| The index of this choice.|
107 | | finish_reason| string| The reason generation stopped.|
108 |
109 | - **usage fields**
110 | >
111 | | Name | Type | Description |
112 | | :-------- | :--------| :------ |
113 | | prompt_tokens| int| Number of prompt tokens.|
114 | | completion_tokens| int| Number of completion tokens.|
115 | | total_tokens| int| Total number of tokens.|
116 |
117 | - **Request example**
118 | >
119 | More examples on this page: https://github.com/xing61/xiaoyi-robot/tree/main/%E7%A4%BA%E4%BE%8B%E4%BB%A3%E7%A0%81
120 | ```
121 | curl -H "Content-Type: application/json" \
122 | -H "Authorization: Bearer $your_user_token" \
123 | -XPOST https://api.bqrlm.cn/v1/chat/completions -d '{
124 | "messages": [
125 | {"role":"user","content":"请介绍一下你自己"},
126 | {"role":"assistant","content":"您好,我是Nika API机器人。我能够与人对话互动,回答问题,协助创作,高效便捷地帮助人们获取信息、知识和灵感。"},
127 | {"role":"user","content": "1+100="}
128 | ]
129 | }' | iconv -f utf-8 -t utf-8
130 | ```
131 | ```
132 | PHP example code:
133 | // set the request headers
134 | $your_user_token = 'xxxxxxxxxxxxxxxxxx'; // your your_user_token
135 | $headers = array(
136 | "Content-Type: application/json",
137 | "Authorization: Bearer ".$your_user_token
138 | );
139 | // set the request parameters
140 | $params = array();
141 | $params['user'] = '张三';
142 | {
143 | $one = ["role" => 'user', "content" => "1+100="];
144 | $messages = array(); $messages[] = $one;
145 | $params['messages'] = $messages;
146 | }
147 | // make the request
148 | $cburl = 'https://api.bqrlm.cn/v1/chat/completions';
149 | $chatgpt_resp = Tool::_request('post', $cburl, $params, $headers);
150 | $data = json_decode($chatgpt_resp, true);
151 | ```
152 | ```
153 | Python example using the official library:
154 | import os
155 | import openai
156 | 
157 | openai.api_key = "your your_user_token"
158 | openai.base_url = "https://api.bqrlm.cn/v1" # note the openai package version: the latest uses base_url, older versions use api_base
159 |
160 | chat_completion = openai.ChatCompletion.create(
161 | model="gpt-3.5-turbo",
162 | messages=[{ "role": "user", "content": "Hello world" }]
163 | )
164 | print(chat_completion.choices[0].message.content)
165 | ```
166 | ```
167 | Python example code (raw HTTP):
168 | import os
169 | import requests
170 | import time
171 | import json
172 |
173 | def chat_completions():
174 | url="https://api.bqrlm.cn/v1/chat/completions"
175 | your_user_token = 'xxxxxxxxx'  # your your_user_token
176 | headers = {'Content-Type': 'application/json', 'Accept':'application/json',
177 | 'Authorization': "Bearer "+your_user_token}
178 | params = {'user':'张三',
179 | 'messages':[{'role':'user', 'content':'1+100='}]};
180 | r = requests.post(url, json.dumps(params), headers=headers)
181 | print(r.json())
182 |
183 | if __name__ == '__main__':
184 | chat_completions();
185 | ```
186 | - **Response example**
187 | >
188 | ```
189 | {
190 | "code": 0,
191 | "msg": "",
192 | "id": "as-bcmt5ct4iy",
193 | "created": 1680167072,
194 | "choices":[{"message":{"role":"assistant","content":"1+100=101"},"finish_reason":"stop","index":0}],
195 | "usage": {
196 | "prompt_tokens": 470,
197 | "completion_tokens": 198,
198 | "total_tokens": 668
199 | }
200 | }
201 | ```
202 |
203 |
204 |
--------------------------------------------------------------------------------