├── .npmignore ├── .gitignore ├── media └── demo.gif ├── tests ├── __pycache__ │ └── openai.cpython-310.pyc ├── chat.ts ├── openai_test.py ├── chat-mpt.ts ├── qianwen.ts ├── history.ts ├── chatgpt.ts ├── modelscope.ts ├── openai.ts ├── mpt.ts └── falcon.ts ├── src ├── index.ts ├── client.d.ts ├── spaces.json ├── types.ts ├── bin │ ├── cli.ts │ └── server.ts ├── utils.ts ├── chat.ts └── client.ts ├── .editorconfig ├── examples ├── index.ts ├── internal.ts ├── stream.ts ├── custom.ts └── temperature.ts ├── .vscode └── settings.json ├── Dockerfile ├── CHANGELOG.md ├── package.json ├── API_CN.md ├── API.md ├── README_CN.md ├── README.md ├── LICENSE └── tsconfig.json /.npmignore: -------------------------------------------------------------------------------- 1 | /src 2 | /examples -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /dist 2 | /node_modules 3 | /.local-pack -------------------------------------------------------------------------------- /media/demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weaigc/gradio-chatbot/HEAD/media/demo.gif -------------------------------------------------------------------------------- /tests/__pycache__/openai.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weaigc/gradio-chatbot/HEAD/tests/__pycache__/openai.cpython-310.pyc -------------------------------------------------------------------------------- /src/index.ts: -------------------------------------------------------------------------------- 1 | export { client, post_data, upload_files, duplicate } from "./client"; 2 | export type { SpaceStatus } from "./types"; 3 | export * from './chat'; -------------------------------------------------------------------------------- /tests/chat.ts: -------------------------------------------------------------------------------- 1 | import { GradioChatBot } from '../src'; 2 | 3 | const bot = new GradioChatBot('0'); 4 | 5 | async function start() { 6 | console.log(await bot.chat('你好')); 7 | console.log(await bot.chat('1+1')); 8 | console.log(await bot.chat('再+2')); 9 | } 10 | 11 | start(); -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # EditorConfig is awesome: https://EditorConfig.org 2 | 3 | # top-most EditorConfig file 4 | root = true 5 | 6 | [*] 7 | indent_style = space 8 | indent_size = 2 9 | end_of_line = lf 10 | charset = utf-8 11 | trim_trailing_whitespace = false 12 | insert_final_newline = false -------------------------------------------------------------------------------- /examples/index.ts: -------------------------------------------------------------------------------- 1 | import { GradioChatBot } from '../src'; 2 | 3 | const bot = new GradioChatBot(); // use the default model 4 | 5 | async function start() { 6 | console.log(await bot.chat('你好')); 7 | console.log(await bot.chat('1+1')); 8 | console.log(await bot.chat('再+2')); 9 | } 10 | 11 | start(); -------------------------------------------------------------------------------- /examples/internal.ts: -------------------------------------------------------------------------------- 1 | import { GradioChatBot } from '../src'; 2 | 3 | const bot = new GradioChatBot('6'); 4 | 5 | async function 
start() { 6 | console.log(await bot.chat('hello')); 7 | console.log(await bot.chat('1+1')); 8 | console.log(await bot.chat('再+2')); 9 | } 10 | 11 | start(); -------------------------------------------------------------------------------- /tests/openai_test.py: -------------------------------------------------------------------------------- 1 | import openai 2 | openai.api_key = "dummy" 3 | openai.api_base = "http://127.0.0.1:8080/v1" 4 | 5 | # create a chat completion 6 | chat_completion = openai.ChatCompletion.create(model="10", messages=[{"role": "user", "content": "Hello"}]) 7 | 8 | # print the completion 9 | print(chat_completion.choices[0].message.content) -------------------------------------------------------------------------------- /tests/chat-mpt.ts: -------------------------------------------------------------------------------- 1 | import { GradioChatBot } from '../src'; 2 | 3 | const bot = new GradioChatBot({ 4 | url: 'https://huggingface.co/spaces/mosaicml/mpt-30b-chat', 5 | }); 6 | 7 | async function start() { 8 | console.log(await bot.chat('你好')); 9 | console.log(await bot.chat('1+1')); 10 | console.log(await bot.chat('再+2')); 11 | } 12 | 13 | start(); -------------------------------------------------------------------------------- /tests/qianwen.ts: -------------------------------------------------------------------------------- 1 | import { GradioChatBot } from '../src'; 2 | 3 | const bot = new GradioChatBot({ 4 | url: 'https://modelscope.cn/studios/qwen/Qwen-7B-Chat-Demo/summary', 5 | }); 6 | 7 | async function start() { 8 | console.log(await bot.chat('你好')); 9 | console.log(await bot.chat('1+1')); 10 | console.log(await bot.chat('再+2')); 11 | } 12 | 13 | start(); -------------------------------------------------------------------------------- /examples/stream.ts: -------------------------------------------------------------------------------- 1 | import { GradioChatBot } from '../src'; 2 | 3 | const bot = new GradioChatBot(); // use the default model 4 | 5 | async function start() { 6 | let index = 0; 7 | const response = await bot.chat('hello', { 8 | onMessage(msg) { 9 | process.stdout.write(msg.slice(index)); 10 | index = msg.length; 11 | } 12 | }); 13 | process.stdout.write('\n'); 14 | console.log('response', response); 15 | } 16 | 17 | start(); -------------------------------------------------------------------------------- /src/client.d.ts: -------------------------------------------------------------------------------- 1 | export interface Config { 2 | auth_required: boolean | undefined; 3 | auth_message: string; 4 | components: any[]; 5 | css: string | null; 6 | dependencies: any[]; 7 | dev_mode: boolean; 8 | enable_queue: boolean; 9 | layout: any; 10 | mode: "blocks" | "interface"; 11 | root: string; 12 | theme: string; 13 | title: string; 14 | version: string; 15 | is_space: boolean; 16 | is_colab: boolean; 17 | show_api: boolean; 18 | stylesheets: string[]; 19 | path: string; 20 | } -------------------------------------------------------------------------------- /tests/history.ts: -------------------------------------------------------------------------------- 1 | import { GradioChatBot } from '../src'; 2 | 3 | const bot = new GradioChatBot('https://huggingface.co/spaces/artificialguybr/qwen-14b-chat-demo'); 4 | 5 | async function start() { 6 | bot.history = [ 7 | [ 8 | "你好", 9 | "你好!很高兴能为你提供帮助。" 10 | ], 11 | [ 12 | "1+1", 13 | "1+1=2" 14 | ] 15 | ] 16 | console.log(await bot.chat('你好')); 17 | console.log(await bot.chat('1+1')); 18 | console.log(await bot.chat('再+2')); 19 | 
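  // The `bot.history` preset above seeds prior conversation context for these
  // calls (kept up to the bot's configured historySize).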
} 20 | 21 | start(); -------------------------------------------------------------------------------- /examples/custom.ts: -------------------------------------------------------------------------------- 1 | import { GradioChatBot } from '../src'; 2 | 3 | const bot = new GradioChatBot({ 4 | url: 'https://huggingface.co/spaces/h2oai/h2ogpt-chatbot', 5 | fnIndex: 35, 6 | }); // invoke a custom ChatBot model 7 | 8 | async function start() { 9 | let index = 0; 10 | const response = await bot.chat('hello', { 11 | onMessage(msg) { 12 | process.stdout.write(msg.slice(index)); 13 | index = msg.length; 14 | } 15 | }); 16 | console.log('response', response); 17 | } 18 | 19 | start(); -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "editor.detectIndentation": false, 3 | "editor.tabSize": 2, 4 | "editor.insertSpaces": true, 5 | "MicroPython.executeButton": [ 6 | { 7 | "text": "▶", 8 | "tooltip": "Run", 9 | "alignment": "left", 10 | "command": "extension.executeFile", 11 | "priority": 3.5 12 | } 13 | ], 14 | "MicroPython.syncButton": [ 15 | { 16 | "text": "$(sync)", 17 | "tooltip": "Sync", 18 | "alignment": "left", 19 | "command": "extension.execute", 20 | "priority": 4 21 | } 22 | ] 23 | } -------------------------------------------------------------------------------- /tests/chatgpt.ts: -------------------------------------------------------------------------------- 1 | import { client } from '../src'; 2 | 3 | async function start() { 4 | const app = await client('yuntian-deng-chatgpt.hf.space'); 5 | const inputs = ["你好", 1, 1, 0, [], null]; 6 | const fn_index = 5; 7 | app.submit(fn_index, inputs) 8 | .on('data', (event) => { 9 | console.log('process data', JSON.stringify(event.data)); 10 | }) 11 | .on('status', (event) => { 12 | console.log('stage', event.stage); 13 | }); 14 | const res = await app.predict(fn_index, inputs); 15 | console.log('result', JSON.stringify(res)); 16 | } 17 | 18 | start(); -------------------------------------------------------------------------------- /examples/temperature.ts: -------------------------------------------------------------------------------- 1 | import { GradioChatBot } from '../src'; 2 | 3 | const bot = new GradioChatBot({ 4 | url: '0', // or 'https://huggingface.co/spaces/yuntian-deng/ChatGPT' 5 | args: ["", 1, 0.7, 1, [], null], // 0.7 is the temperature argument 6 | }); 7 | 8 | async function start() { 9 | let index = 0; 10 | const response = await bot.chat('hello', { 11 | onMessage(msg) { 12 | process.stdout.write(msg.slice(index)); 13 | index = msg.length; 14 | } 15 | }); 16 | process.stdout.write('\n'); 17 | console.log('response', response); 18 | } 19 | 20 | start(); -------------------------------------------------------------------------------- /tests/modelscope.ts: -------------------------------------------------------------------------------- 1 | import { client } from '../src'; 2 | 3 | async function start() { 4 | const app = await client('https://modelscope.cn/api/v1/studio/AI-ModelScope/ChatGLM6B-unofficial/gradio'); 5 | const inputs = ["你叫什么名字",null]; 6 | const fn_index = 0; 7 | app.submit(fn_index, inputs) 8 | .on('data', (event) => { 9 | console.log('process data', JSON.stringify(event.data)); 10 | }) 11 | .on('status', (event) => { 12 | console.log('stage', event.stage); 13 | }); 14 | const res = await app.predict(fn_index, inputs); 15 | console.log('result', JSON.stringify(res)); 16 | } 17 | 18 | start(); 
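// To try this test directly, tsx (already a devDependency of this repo) should work:
//   npx tsx tests/modelscope.ts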
-------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:18 2 | 3 | ARG DEBIAN_FRONTEND=noninteractive 4 | 5 | RUN npm i -g gradio-chatbot 6 | 7 | # Set up a new user named "user" with user ID 1000 8 | RUN useradd -o -u 1000 user 9 | 10 | # Switch to the "user" user 11 | USER user 12 | 13 | # Set home to the user's home directory 14 | ENV HOME=/home/user \ 15 | PATH=/home/user/.local/bin:$PATH 16 | 17 | # Set the working directory to the user's home directory 18 | WORKDIR $HOME/app 19 | 20 | # Copy the current directory contents into the container at $HOME/app, setting the owner to the user 21 | COPY --chown=user . $HOME/app 22 | 23 | EXPOSE 8000 24 | 25 | CMD [ "chatbot-server" ] -------------------------------------------------------------------------------- /tests/openai.ts: -------------------------------------------------------------------------------- 1 | import OpenAI from 'openai'; 2 | 3 | const openai = new OpenAI({ 4 | apiKey: 'my api key', // defaults to process.env["OPENAI_API_KEY"] 5 | baseURL: 'http://127.0.0.1:8080/v1' 6 | }); 7 | 8 | async function start() { 9 | const completion = await openai.chat.completions.create({ 10 | messages: [ 11 | { role: 'user', content: '1+1' }, 12 | { role: 'assistant', content: '1 + 1 = 2' }, 13 | { role: 'user', content: '再+2' }, 14 | ], 15 | model: '10', 16 | stream: true, 17 | }); 18 | for await (const part of completion) { 19 | process.stdout.write(part.choices[0]?.delta?.content || ''); 20 | } 21 | } 22 | 23 | start(); -------------------------------------------------------------------------------- /tests/mpt.ts: -------------------------------------------------------------------------------- 1 | import { client } from '../src'; 2 | 3 | async function start() { 4 | const app = await client('mosaicml-mpt-30b-chat.hf.space'); 5 | const inputs = [ 6 | "A conversation between a user and an LLM-based AI assistant. 
The assistant gives helpful and honest answers.", 7 | [ 8 | [ 9 | "Hello", 10 | "" 11 | ] 12 | ] 13 | ]; 14 | const fn_index = 3; 15 | app.submit(fn_index, inputs) 16 | .on('data', (event) => { 17 | console.log('process data', JSON.stringify(event.data)); 18 | }) 19 | .on('status', (event) => { 20 | console.log('stage', event.stage); 21 | }); 22 | const res = await app.predict(fn_index, inputs); 23 | console.log('result', JSON.stringify(res)); 24 | } 25 | 26 | start(); -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Change Log 2 | ## 0.0.21 (2023/9/14) 3 | - [feature] Support calling via the openai client libraries 4 | 5 | ## 0.0.17 (2023/8/20) 6 | - [chore] replace internal model 9 with Qwen 7B Chat 7 | - [fix] Fix abnormal output from model 8 8 | 9 | ## 0.0.15 (2023/8/4) 10 | - [feature] support Qwen 7B Chat 11 | 12 | ## 0.0.14 (2023/7/17) 13 | - [feature] support browser api 14 | 15 | ## 0.0.13 (2023/7/17) 16 | - [feature] add a built-in model: llama2 17 | 18 | ## 0.0.12 (2023/7/17) 19 | - [feature] support api server 20 | - [feature] change default model to gpt4free 21 | - [feature] add a dockerfile for quick start 22 | 23 | ## 0.0.11 (2023/7/15) 24 | - [feature] support gpt4free 25 | 26 | ## 0.0.9 (2023/7/10) 27 | - [bugfix] fix markdown transpile issue 28 | 29 | ## 0.0.1 (2023/07/02) 30 | - Initial Commit -------------------------------------------------------------------------------- /src/spaces.json: -------------------------------------------------------------------------------- 1 | [ 2 | "https://huggingface.co/spaces/yizhangliu/chatGPT", 3 | "https://huggingface.co/spaces/justest/gpt4free", 4 | "https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI", 5 | "https://huggingface.co/spaces/mosaicml/mpt-30b-chat", 6 | "https://huggingface.co/spaces/HuggingFaceH4/falcon-chat", 7 | "https://huggingface.co/spaces/HuggingFaceH4/starchat-playground", 8 | "https://huggingface.co/spaces/mikeee/chatglm2-6b-4bit", 9 | "https://huggingface.co/spaces/multimodalart/ChatGLM-6B", 10 | "https://chat.lmsys.org/", 11 | "https://huggingface.co/spaces/mikeee/qwen-7b-chat", 12 | "https://modelscope.cn/studios/qwen/Qwen-7B-Chat-Demo/summary", 13 | "https://modelscope.cn/studios/AI-ModelScope/ChatGLM6B-unofficial/summary", 14 | "https://modelscope.cn/studios/Fengshenbang/Ziya_LLaMA_13B_v1_online/summary", 15 | "https://modelscope.cn/studios/damo/role_play_chat/summary" 16 | ] -------------------------------------------------------------------------------- /tests/falcon.ts: -------------------------------------------------------------------------------- 1 | import { client } from '../src'; 2 | 3 | async function start() { 4 | const app = await client('huggingfaceh4-falcon-chat.hf.space'); 5 | const inputs = ["hello",[],"The following is a conversation between a highly knowledgeable and intelligent AI assistant, called Falcon, and a human user, called User. In the following interactions, User and Falcon will converse in natural language, and Falcon will answer User's questions. Falcon was built to be respectful, polite and inclusive. Falcon was built by the Technology Innovation Institute in Abu Dhabi. Falcon will never decline to answer a question, and always attempts to give an answer that User would be satisfied with. It knows a lot, and always tells the truth. 
The conversation begins.\n",0.8,0.9]; 6 | const fn_index = 1; 7 | app.submit(fn_index, inputs) 8 | .on('data', (event) => { 9 | console.log('process data', JSON.stringify(event.data)); 10 | }) 11 | .on('status', (event) => { 12 | console.log('stage', event.stage); 13 | }); 14 | const res = await app.predict(fn_index, inputs); 15 | console.log('result', JSON.stringify(res)); 16 | } 17 | 18 | start(); -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "gradio-chatbot", 3 | "version": "0.0.28", 4 | "description": "A tool that can automatically convert huggingface and modelscope spaces to free APIs.", 5 | "main": "./dist/index.js", 6 | "engines": { 7 | "node": ">=18" 8 | }, 9 | "bin": { 10 | "chatbot": "./dist/bin/cli.js", 11 | "gradio-chatbot": "./dist/bin/cli.js", 12 | "chatbot-server": "./dist/bin/server.js" 13 | }, 14 | "scripts": { 15 | "build": "rimraf -rf ./dist && tsc -p ./", 16 | "test": "tsx ./tests/index", 17 | "dev": "tsx ./src/bin/server.ts", 18 | "debug": "cross-env DEBUG=gradio-chatbot tsx ./src/bin/cli.ts", 19 | "prepublishOnly": "npm run build" 20 | }, 21 | "author": "weaigc", 22 | "license": "Apache-2.0", 23 | "dependencies": { 24 | "cookie-parser": "^1.4.6", 25 | "cors": "^2.8.5", 26 | "debug": "^4.3.4", 27 | "express": "^4.18.2", 28 | "express-async-errors": "^3.1.1", 29 | "ifw": "^0.0.2", 30 | "semiver": "^1.1.0", 31 | "weaigc-turndown": "^7.1.3" 32 | }, 33 | "devDependencies": { 34 | "@types/debug": "^4.1.8", 35 | "@types/express": "^4.17.17", 36 | "@types/node": "^20.3.3", 37 | "cross-env": "^7.0.3", 38 | "openai": "^4.6.0", 39 | "rimraf": "^5.0.1", 40 | "tsx": "^3.12.7", 41 | "type-fest": "^3.12.0", 42 | "typescript": "^5.1.6" 43 | }, 44 | "repository": { 45 | "type": "git", 46 | "url": "git+https://github.com/weaigc/gradio-client.git" 47 | }, 48 | "keywords": [ 49 | "gradio-client", 50 | "huggingface", 51 | "modelscope", 52 | "ai", 53 | "llm", 54 | "large", 55 | "model", 56 | "inference" 57 | ], 58 | "bugs": { 59 | "url": "https://github.com/weaigc/gradio-client/issues" 60 | }, 61 | "homepage": "https://github.com/weaigc/gradio-client#readme" 62 | } 63 | -------------------------------------------------------------------------------- /API_CN.md: -------------------------------------------------------------------------------- 1 | # 以下为 API 文档 2 | 3 | ## GradioChatBot 4 | GradioChatBot 是一个用于和 Gradio 模型进行交互的类。它可以通过构造函数传入一些选项,然后调用 `chat` 方法发起会话,或者调用 `reset` 方法重置会话历史。 5 | 6 | ### 构造函数 7 | ``` 8 | import { GradioChatBot } from 'gradio-chatbot'; 9 | const bot = new GradioChatBot(options); 10 | ``` 11 | * options: 一个字符串或者一个对象,包含以下可选属性: 12 | * url: 一个字符串,也可以是 [spaces.json](./src/spaces.json) 索引号。表示需要对接的空间 URL,默认为 [spaces.json](./src/spaces.json) 里的第一项。 13 | * endpoint: 可选。Huggingface 空间域名(此域名必须以 `hf.space` 结尾)。 14 | * historySize: 可选。一个数字,表示会话历史的最大长度。默认为 `10`。 15 | * fnIndex: 可选。一个数字,表示 Gradio 模型的函数索引。 16 | * args: 可选。一个数组,表示 Gradio 模型的函数参数。默认为空数组。 17 | * inputIndex: 可选。一个数字,表示 Gradio 模型的输入参数索引。 18 | * parseHtml: 可选。一个布尔值,表示是否将 Gradio 模型输出的 `html` 转为 `markdown`。默认为 `true`。 19 | * session_hash: 可选。一个字符串,表示会话的唯一标识。默认为随机生成的哈希值。 20 | * hf_token: 可选。一个字符串,表示 `Hugging Face` 的 API 令牌。默认为空字符串。 21 | 22 | > 当 `options` 为字符串时,则表示 `url` 属性。 23 | 24 | ### chat 25 | 发起对话 26 | ``` 27 | await bot.chat(input, options); 28 | ``` 29 | * input: 一个字符串,表示用户的输入内容。 30 | * options: 一个对象,包含以下可选属性: 31 | * onMessage: 收到 Gradio 模型的流式输出时的回调函数。接收一个字符串参数,表示流式输出的内容。 32 | * 
onError: 发生错误时的回调函数。接收一个字符串参数,表示错误信息。 33 | 34 | ### reset 35 | 清空会话历史,并重新生成会话标识。 36 | ``` 37 | await bot.reset(); 38 | ``` 39 | 40 | 41 | ## 示例 42 | ```ts 43 | import { GradioChatBot } from 'gradio-chatbot'; 44 | 45 | // 创建一个 GradioChatBot 实例 46 | const bot = new GradioChatBot({ 47 | url: '0', 48 | historySize: 5, 49 | }); 50 | 51 | // 发起会话 52 | const response = await bot.chat("Hi, how are you?", { 53 | onMessage: (msg) => { 54 | console.log("Bot:", msg); 55 | // Bot: I'm fine, thank you. How can I help you? 56 | }, 57 | }); 58 | 59 | // 再次发起会话 60 | await bot.chat("What can you do?", { 61 | onMessage: (msg) => { 62 | console.log("Bot:", msg); 63 | // Bot: I can chat with you and answer some questions. 64 | }, 65 | }); 66 | 67 | // 重置会话历史 68 | await bot.reset(); 69 | ``` 70 | 71 | ## generateHash 72 | 生成会话 ID 的工具方法。当需要手动控制会话 ID 及会话记录时,可以使用此方法生成随机 ID。 73 | ``` 74 | import { generateHash } from 'gradio-chatbot'; 75 | generateHash(); 76 | ``` -------------------------------------------------------------------------------- /src/types.ts: -------------------------------------------------------------------------------- 1 | export interface Config { 2 | auth_required: boolean | undefined; 3 | auth_message: string; 4 | components: any[]; 5 | css: string | null; 6 | dependencies: any[]; 7 | dev_mode: boolean; 8 | enable_queue: boolean; 9 | layout: any; 10 | mode: "blocks" | "interface"; 11 | root: string; 12 | root_url?: string; 13 | theme: string; 14 | title: string; 15 | version: string; 16 | is_space: boolean; 17 | is_colab: boolean; 18 | show_api: boolean; 19 | stylesheets: string[]; 20 | path: string; 21 | } 22 | 23 | export interface Payload { 24 | data: Array<unknown>; 25 | fn_index?: number; 26 | event_data?: unknown; 27 | time?: Date; 28 | } 29 | 30 | export interface PostResponse { 31 | error?: string; 32 | [x: string]: any; 33 | } 34 | export interface UploadResponse { 35 | error?: string; 36 | files?: Array<string>; 37 | } 38 | 39 | export interface Status { 40 | queue: boolean; 41 | code?: string; 42 | success?: boolean; 43 | stage: "pending" | "error" | "complete" | "generating"; 44 | size?: number; 45 | position?: number; 46 | eta?: number; 47 | message?: string; 48 | progress_data?: Array<{ 49 | progress: number | null; 50 | index: number | null; 51 | length: number | null; 52 | unit: string | null; 53 | desc: string | null; 54 | }>; 55 | time?: Date; 56 | } 57 | 58 | export interface SpaceStatusNormal { 59 | status: "sleeping" | "running" | "building" | "error" | "stopped"; 60 | detail: 61 | | "SLEEPING" 62 | | "RUNNING" 63 | | "RUNNING_BUILDING" 64 | | "BUILDING" 65 | | "NOT_FOUND"; 66 | load_status: "pending" | "error" | "complete" | "generating"; 67 | message: string; 68 | } 69 | export interface SpaceStatusError { 70 | status: "space_error"; 71 | detail: "NO_APP_FILE" | "CONFIG_ERROR" | "BUILD_ERROR" | "RUNTIME_ERROR"; 72 | load_status: "error"; 73 | message: string; 74 | discussions_enabled: boolean; 75 | } 76 | export type SpaceStatus = SpaceStatusNormal | SpaceStatusError; 77 | 78 | export type status_callback_function = (a: Status) => void; 79 | export type SpaceStatusCallback = (a: SpaceStatus) => void; 80 | 81 | export type EventType = "data" | "status"; 82 | 83 | export interface EventMap { 84 | data: Payload; 85 | status: Status; 86 | } 87 | 88 | export type Event<K extends EventType> = { 89 | [P in K]: EventMap[P] & { type: P; endpoint: string; fn_index: number }; 90 | }[K]; 91 | export type EventListener<K extends EventType> = (event: Event<K>) => void; 92 | export type ListenerMap<K extends EventType> = { 93 | [P in K]?: EventListener<K>[]; 94 | }; 95 
| export interface FileData { 96 | name: string; 97 | orig_name?: string; 98 | size?: number; 99 | data: string; 100 | blob?: File; 101 | is_file?: boolean; 102 | mime_type?: string; 103 | alt_text?: string; 104 | } -------------------------------------------------------------------------------- /API.md: -------------------------------------------------------------------------------- 1 | # API Documentation 2 | 3 | ## GradioChatBot 4 | GradioChatBot is a class for interacting with Gradio models. You can pass some options through the constructor, then call the `chat` method to start a conversation, or call the `reset` method to reset the conversation history. 5 | 6 | ### Constructor 7 | ``` 8 | import { GradioChatBot } from 'gradio-chatbot'; 9 | const bot = new GradioChatBot(options); 10 | ``` 11 | * options: A string or an object, containing the following optional properties: 12 | * url: A string, or an index number of [spaces.json](./src/spaces.json). Indicates the URL of the space to connect to, defaulting to the first item in [spaces.json](./src/spaces.json). 13 | * endpoint: Optional. Huggingface space domain name (this domain name must end with `hf.space`). 14 | * historySize: Optional. A number, indicating the maximum length of the conversation history. Default is `10`. 15 | * fnIndex: Optional. A number, indicating the function index of the Gradio model. 16 | * args: Optional. An array, indicating the function arguments of the Gradio model. Default is an empty array. 17 | * inputIndex: Optional. A number, indicating the input argument index of the Gradio model. 18 | * parseHtml: Optional. A boolean value, indicating whether to convert the `html` output by the Gradio model to `markdown`. Default is `true`. 19 | * session_hash: Optional. A string, indicating the unique identifier of the session. Default is a randomly generated hash value. 20 | * hf_token: Optional. A string, indicating the API token of `Hugging Face`. Default is an empty string. 21 | 22 | > When `options` is a string, it is treated as the `url` property. 23 | 24 | ### chat 25 | Start a conversation 26 | ``` 27 | await bot.chat(input, options); 28 | ``` 29 | * input: A string, indicating the user's input content. 30 | * options: An object, containing the following optional properties: 31 | * onMessage: The callback function when receiving streaming output from the Gradio model. Receives a string parameter, indicating the content of the streaming output. 32 | * onError: The callback function when an error occurs. Receives a string parameter, indicating the error message. 33 | 34 | ### reset 35 | Clears the conversation history and regenerates the session identifier. 36 | ``` 37 | await bot.reset(); 38 | ``` 39 | 40 | 41 | ## Example 42 | ```ts 43 | import { GradioChatBot } from 'gradio-chatbot'; 44 | 45 | // Create a GradioChatBot instance 46 | const bot = new GradioChatBot({ 47 | url: '0', 48 | historySize: 5, 49 | }); 50 | 51 | // Start a conversation 52 | const response = await bot.chat("Hi, how are you?", { 53 | onMessage: (msg) => { 54 | console.log("Bot:", msg); 55 | // Bot: I'm fine, thank you. How can I help you? 56 | }, 57 | }); 58 | 59 | // Start another conversation 60 | await bot.chat("What can you do?", { 61 | onMessage: (msg) => { 62 | console.log("Bot:", msg); 63 | // Bot: I can chat with you and answer some questions. 64 | }, 65 | }); 66 | 67 | // Reset conversation history 68 | await bot.reset(); 69 | ``` 70 | 71 | ## generateHash 72 | A utility method for generating session IDs. 
When you need to manually control the session ID and the session history, you can use this method to generate a random ID. 73 | ``` 74 | import { generateHash } from 'gradio-chatbot'; 75 | const session_hash = generateHash(); 76 | const bot = new GradioChatBot({ 77 | url: '0', 78 | historySize: 5, 79 | session_hash, 80 | }); 81 | ``` -------------------------------------------------------------------------------- /src/bin/cli.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | import * as readline from 'node:readline/promises' 4 | import { stdin as input, stdout as output } from 'node:process' 5 | import { GradioChatBot, spaces } from '../'; 6 | 7 | class Spinner { 8 | tick: number = 300; 9 | processing = false; 10 | index = 0; 11 | tid: any; 12 | chars = { 13 | output: ['-', '\\', '|', '/'], 14 | input: ['│', ' '], 15 | } 16 | currentMode: 'input' | 'output' = 'output'; 17 | setMode(mode: 'input' | 'output') { 18 | this.currentMode = mode; 19 | if (mode === 'input') { 20 | this.tick = 900; 21 | } else { 22 | this.tick = 300; 23 | } 24 | } 25 | 26 | start() { 27 | this.processing = true; 28 | if (this.tid) return; 29 | this.spin(); 30 | } 31 | 32 | spin() { 33 | this.tid = setTimeout(() => { 34 | if (!this.processing) return; 35 | const chars = this.chars[this.currentMode]; 36 | this.index = ++this.index % chars.length; 37 | const char = chars[this.index]; 38 | process.stdout.write(char); 39 | process.stdout.moveCursor(-1, 0); 40 | this.spin(); 41 | }, this.tick); 42 | } 43 | 44 | write(text: string) { 45 | if (text.charAt(0) === '\n') { 46 | process.stdout.write(' '); 47 | } 48 | process.stdout.write(text); 49 | } 50 | 51 | stop() { 52 | this.processing = false; 53 | this.tid = null; 54 | } 55 | } 56 | 57 | class RL { 58 | rl: ReturnType<typeof readline.createInterface>; 59 | constructor(readonly options: Parameters<typeof readline.createInterface>[0]) { 60 | this.rl = readline.createInterface(options); 61 | } 62 | async question(prompt: string) { 63 | this.rl.setPrompt(prompt); 64 | this.rl.prompt(true); 65 | const lines = []; 66 | let closeTid: NodeJS.Timeout; 67 | for await (const input of this.rl) { 68 | clearTimeout(closeTid); 69 | closeTid = setTimeout(() => { 70 | if (input === '') { 71 | process.stdout.write('\n'); 72 | } 73 | this.close(); 74 | }, 500); 75 | lines.push(input); 76 | } 77 | return lines.join('\n'); 78 | } 79 | close() { 80 | this.rl?.close(); 81 | this.rl = null; 82 | } 83 | } 84 | 85 | export async function cli() { 86 | const model = process.argv[2] || '0'; 87 | if (!/^https?:\/\//.test(model) && !spaces[model]) { 88 | process.stdout.write(`> A tool that can automatically convert huggingface and modelscope spaces to free APIs. 
89 | 90 | Usage: 91 | npx gradio-chatbot 92 | npx gradio-chatbot Index 93 | npx gradio-chatbot URL 94 | 95 | `); 96 | process.stdout.write('Index\tSpaces URL\n'); 97 | process.stdout.write(spaces.map((space, index) => `${index}\t${(space as any)?.url || space}`).join('\n')); 98 | process.stdout.write(` 99 | 100 | More information: https://github.com/weaigc/gradio-chatbot 101 | `) 102 | return; 103 | } 104 | const bot = new GradioChatBot(model); 105 | 106 | let lastLength = 0; 107 | const spinner = new Spinner(); 108 | while (true) { 109 | const prompt = await new RL({ input, output }).question('Man: '); 110 | if (!prompt.trim()) break; 111 | spinner.start(); 112 | spinner.write('Bot: '); 113 | 114 | const response = await bot.chat(prompt, { 115 | onMessage: (msg: string) => { 116 | spinner.write(msg.slice(lastLength)); 117 | lastLength = msg.length; 118 | }, 119 | }).catch(e => { 120 | return `Invoke space error: ${e}`; 121 | }); 122 | spinner.write(response.slice(lastLength)); 123 | lastLength = 0; 124 | spinner.write('\n'); 125 | spinner.stop(); 126 | } 127 | } 128 | 129 | cli(); 130 | -------------------------------------------------------------------------------- /src/utils.ts: -------------------------------------------------------------------------------- 1 | import type { Config } from "./types"; 2 | 3 | export function determine_protocol(endpoint: string): { 4 | ws_protocol: "ws" | "wss"; 5 | http_protocol: "http:" | "https:"; 6 | host: string; 7 | } { 8 | if (endpoint.startsWith("http")) { 9 | const { protocol, host } = new URL(endpoint); 10 | 11 | if (host.endsWith("hf.space")) { 12 | return { 13 | ws_protocol: "wss", 14 | host, 15 | http_protocol: protocol as "http:" | "https:" 16 | }; 17 | } else { 18 | return { 19 | ws_protocol: protocol === "https:" ? "wss" : "ws", 20 | http_protocol: protocol as "http:" | "https:", 21 | host 22 | }; 23 | } 24 | } 25 | 26 | // default to secure if no protocol is provided 27 | return { 28 | ws_protocol: "wss", 29 | http_protocol: "https:", 30 | host: endpoint 31 | }; 32 | } 33 | 34 | export const RE_SPACE_NAME = /^[^\/]*\/[^\/]*$/; 35 | export const RE_SPACE_DOMAIN = /.*hf\.space\/{0,1}$/; 36 | export const MD_SPACE_DOMAIN = /^https:\/\/(www\.)?modelscope\.cn\//; 37 | export async function process_endpoint( 38 | app_reference: string, 39 | token?: `hf_${string}` 40 | ): Promise<{ 41 | space_id: string | false; 42 | host: string; 43 | ws_protocol: "ws" | "wss"; 44 | http_protocol: "http:" | "https:"; 45 | }> { 46 | const headers: { Authorization?: string } = {}; 47 | if (token) { 48 | headers.Authorization = `Bearer ${token}`; 49 | } 50 | 51 | const _app_reference = app_reference.trim(); 52 | 53 | if (RE_SPACE_NAME.test(_app_reference)) { 54 | try { 55 | const res = await fetch( 56 | `https://huggingface.co/api/spaces/${_app_reference}/host`, 57 | { headers } 58 | ); 59 | 60 | if (res.status !== 200) 61 | throw new Error("Space metadata could not be loaded."); 62 | const _host = (await res.json()).host; 63 | 64 | return { 65 | space_id: app_reference, 66 | ...determine_protocol(_host) 67 | }; 68 | } catch (e: any) { 69 | throw new Error("Space metadata could not be loaded." 
+ " " + e.message); 70 | } 71 | } 72 | 73 | if (RE_SPACE_DOMAIN.test(_app_reference)) { 74 | const { ws_protocol, http_protocol, host } = 75 | determine_protocol(_app_reference); 76 | 77 | return { 78 | space_id: host.replace(".hf.space", ""), 79 | ws_protocol, 80 | http_protocol, 81 | host 82 | }; 83 | } 84 | if (MD_SPACE_DOMAIN.test(_app_reference)) { 85 | const app_uri = new URL(_app_reference); 86 | 87 | return { 88 | space_id: false, 89 | ws_protocol: 'wss', 90 | http_protocol: 'https:', 91 | host: `${app_uri.host}${app_uri.pathname}`, 92 | }; 93 | } 94 | 95 | return { 96 | space_id: false, 97 | ...determine_protocol(_app_reference) 98 | }; 99 | } 100 | 101 | export function map_names_to_ids(fns: Config["dependencies"]) { 102 | let apis: Record<string, number> = {}; 103 | 104 | fns.forEach(({ api_name }, i) => { 105 | if (api_name) apis[api_name] = i; 106 | }); 107 | 108 | return apis; 109 | } 110 | 111 | const RE_DISABLED_DISCUSSION = 112 | /^(?=[^]*\b[dD]iscussions{0,1}\b)(?=[^]*\b[dD]isabled\b)[^]*$/; 113 | export async function discussions_enabled(space_id: string) { 114 | try { 115 | const r = await fetch( 116 | `https://huggingface.co/api/spaces/${space_id}/discussions`, 117 | { 118 | method: "HEAD" 119 | } 120 | ); 121 | const error = r.headers.get("x-error-message"); 122 | 123 | if (error && RE_DISABLED_DISCUSSION.test(error)) return false; 124 | else return true; 125 | } catch (e: any) { 126 | return false; 127 | } 128 | } 129 | 130 | export async function get_space_hardware( 131 | space_id: string, 132 | token: `hf_${string}` 133 | ) { 134 | const headers: { Authorization?: string } = {}; 135 | if (token) { 136 | headers.Authorization = `Bearer ${token}`; 137 | } 138 | 139 | try { 140 | const res = await fetch( 141 | `https://huggingface.co/api/spaces/${space_id}/runtime`, 142 | { headers } 143 | ); 144 | 145 | if (res.status !== 200) 146 | throw new Error("Space hardware could not be obtained."); 147 | 148 | const { hardware } = await res.json(); 149 | 150 | return hardware; 151 | } catch (e: any) { 152 | throw new Error(e.message); 153 | } 154 | } 155 | 156 | export async function set_space_hardware( 157 | space_id: string, 158 | new_hardware: typeof hardware_types[number], 159 | token: `hf_${string}` 160 | ) { 161 | const headers: { Authorization?: string } = {}; 162 | if (token) { 163 | headers.Authorization = `Bearer ${token}`; 164 | } 165 | 166 | try { 167 | const res = await fetch( 168 | `https://huggingface.co/api/spaces/${space_id}/hardware`, 169 | { headers, body: JSON.stringify(new_hardware) } 170 | ); 171 | 172 | if (res.status !== 200) 173 | throw new Error( 174 | "Space hardware could not be set. Please ensure the space hardware provided is valid and that a Hugging Face token is passed in." 175 | ); 176 | 177 | const { hardware } = await res.json(); 178 | 179 | return hardware; 180 | } catch (e: any) { 181 | throw new Error(e.message); 182 | } 183 | } 184 | 185 | export async function set_space_timeout( 186 | space_id: string, 187 | timeout: number, 188 | token: `hf_${string}` 189 | ) { 190 | const headers: { Authorization?: string } = {}; 191 | if (token) { 192 | headers.Authorization = `Bearer ${token}`; 193 | } 194 | 195 | try { 196 | const res = await fetch( 197 | `https://huggingface.co/api/spaces/${space_id}/hardware`, 198 | { headers, body: JSON.stringify({ seconds: timeout }) } 199 | ); 200 | 201 | if (res.status !== 200) 202 | throw new Error( 203 | "Space timeout could not be set. 
Please ensure the timeout provided is valid and that a Hugging Face token is passed in." 204 | ); 205 | 206 | const { hardware } = await res.json(); 207 | 208 | return hardware; 209 | } catch (e: any) { 210 | throw new Error(e.message); 211 | } 212 | } 213 | 214 | export const hardware_types = [ 215 | "cpu-basic", 216 | "cpu-upgrade", 217 | "t4-small", 218 | "t4-medium", 219 | "a10g-small", 220 | "a10g-large", 221 | "a100-large" 222 | ] as const; -------------------------------------------------------------------------------- /src/bin/server.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | import express from 'express'; 4 | import cors from 'cors' 5 | import assert from 'assert'; 6 | import 'express-async-errors'; 7 | import { GradioChatBot } from '../'; 8 | 9 | export type Role = 'user' | 'assistant' | 'system' 10 | export type Action = 'next' | 'variant'; 11 | 12 | export interface APIMessage { 13 | role: Role 14 | content: string 15 | } 16 | 17 | export interface APIRequest { 18 | model: string 19 | action: Action 20 | prompt?: string | string[] 21 | messages: APIMessage[] 22 | stream?: boolean 23 | } 24 | 25 | export interface APIResponse { 26 | whisper?: string 27 | usage?: { 28 | prompt_tokens: number; 29 | completion_tokens: number; 30 | total_tokens: number; 31 | } 32 | choices: { 33 | text: string 34 | delta?: APIMessage 35 | message: APIMessage 36 | finish_reason?: string 37 | }[] 38 | } 39 | 40 | const PORT = isNaN(parseInt(process.env.PORT, 10)) ? 8000 : parseInt(process.env.PORT, 10); 41 | const app = express(); 42 | app.use(cors()); 43 | app.use(express.json()); 44 | app.use(function (err, req, res, next) { 45 | res 46 | .status(err.status || 500) 47 | .send({ message: err.message, stack: err.stack }); 48 | }); 49 | 50 | function parseOpenAIMessage(request: APIRequest) { 51 | const history: [string, string][] = []; 52 | request.messages?.forEach((message) => { 53 | if (message.role === 'user') { 54 | history.push([message.content, '']); 55 | } else if (history.length) { 56 | history.at(-1)[1] = message.content; 57 | } 58 | }) 59 | const prompts = [request.messages?.reverse().find((message) => message.role === 'user')?.content]; 60 | if (typeof request.prompt === 'string') { 61 | prompts.push(request.prompt); 62 | } else if (Array.isArray(request.prompt)) { 63 | prompts.push(...request.prompt); 64 | } 65 | return { 66 | history, 67 | prompt: prompts.filter(Boolean).join('\n'), 68 | model: request.model, 69 | stream: request.stream, 70 | }; 71 | } 72 | 73 | function responseOpenAIMessage(content: string, input: string, finish_reason = null): APIResponse { 74 | const message: APIMessage = { 75 | role: 'assistant', 76 | content, 77 | }; 78 | return { 79 | whisper: input, 80 | usage: { 81 | prompt_tokens: input.length, 82 | completion_tokens: content.length, 83 | total_tokens: 100000000, 84 | }, 85 | choices: [{ 86 | delta: message, 87 | message, 88 | finish_reason, 89 | text: content, 90 | }], 91 | }; 92 | } 93 | 94 | app.post(['/', '/api/conversation'], async (req, res) => { 95 | const { prompt, model, history, stream } = parseOpenAIMessage(req.body); 96 | 97 | const chatbot = new GradioChatBot({ 98 | url: model, 99 | historySize: 20, 100 | }); 101 | chatbot.history = history; 102 | const isStream = stream || req.headers.accept?.includes('text/event-stream'); 103 | if (isStream) { 104 | res.set('Content-Type', 'text/event-stream; charset=utf-8'); 105 | } 106 | assert(prompt, 'messages can\'t be empty!'); 
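  // `lastLength` tracks how much of the accumulated reply has already been
  // flushed to the client, so each SSE write below sends only the new delta.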
107 | let lastLength = 0; 108 | const content = await chatbot.chat(prompt, { 109 | onMessage(msg) { 110 | if (isStream) { 111 | res.write(`data: ${JSON.stringify(responseOpenAIMessage(msg.slice(lastLength), prompt))}\n\n`); 112 | lastLength = msg.length 113 | } 114 | } 115 | }).catch(error => { 116 | console.log('Error:', error) 117 | return error; 118 | }); 119 | if (isStream) { 120 | res.write(`data: [DONE]\n\n`); 121 | } else { 122 | const response = responseOpenAIMessage(content, prompt, 'stop'); 123 | res.json(response); 124 | } 125 | }); 126 | 127 | app.post(/.*\/completions$/, async (req, res) => { 128 | const { prompt, model, history, stream } = parseOpenAIMessage(req.body); 129 | const chatbot = new GradioChatBot({ 130 | url: model, 131 | historySize: 20, 132 | }); 133 | chatbot.history = history; 134 | const isStream = stream || req.headers.accept?.includes('text/event-stream'); 135 | if (isStream) { 136 | res.set('Content-Type', 'text/event-stream; charset=utf-8'); 137 | } 138 | let lastLength = 0; 139 | assert(prompt, 'messages can\'t be empty!'); 140 | const content = await chatbot.chat(prompt, { 141 | onMessage(msg) { 142 | if (isStream) { 143 | res.write(`data: ${JSON.stringify(responseOpenAIMessage(msg.slice(lastLength), prompt))}\n\n`); 144 | lastLength = msg.length; 145 | } 146 | } 147 | }).catch(error => { 148 | console.log('Error:', error) 149 | return error; 150 | }); 151 | if (isStream) { 152 | res.end(`data: [DONE]\n\n`); 153 | } else { 154 | const response = responseOpenAIMessage(content, prompt, 'stop'); 155 | res.json(response); 156 | } 157 | }); 158 | 159 | app.get(['/', '/api/conversation'], async (req, res) => { 160 | const { text, model } = req.query || {}; 161 | if (!text) { 162 | return res.status(500).write('text can\'t be empty!'); 163 | } 164 | res.set('Cache-Control', 'no-cache'); 165 | res.set('Content-Type', 'text/event-stream; charset=utf-8'); 166 | let lastLength = 0; 167 | const chatbot = new GradioChatBot({ 168 | url: String(model || '0'), 169 | historySize: 20, 170 | }); 171 | const content = await chatbot.chat(String(text), { 172 | onMessage: (msg) => { 173 | res.write(msg.slice(lastLength)); 174 | lastLength = msg.length; 175 | } 176 | }).catch(error => { 177 | console.log('Error:', error) 178 | return error; 179 | }); 180 | res.end(content.slice(lastLength)); 181 | }); 182 | 183 | app.listen(Math.max(Math.min(65535, PORT), 80), '0.0.0.0'); 184 | console.log(`\nServer start successful, serve link: http://localhost:${PORT}/api/conversation?text=hello\n`); 185 | 186 | /** 187 | 188 | curl http://127.0.0.1:8000/api/completions \ 189 | -H "accept: text/event-stream" \ 190 | -H "Content-Type: application/json" \ 191 | -d '{ 192 | "model": "gpt-3.5", 193 | "messages": [{"role": "user", "content": "hello"}] 194 | }' 195 | */ -------------------------------------------------------------------------------- /README_CN.md: -------------------------------------------------------------------------------- 1 |
2 | 3 | # Gradio Chatbot 4 | 5 | > 一个可以将 [Huggingface Spaces](https://huggingface.co/spaces)、[魔搭创空间](https://www.modelscope.cn/studios) 及 Gradio ChatBot 自动转成免费 API 的 Npm 包。理论上支持所有带 chatbot 的空间,目前完美支持了 [GPT4Free,ChatGPT,Llama 2,Vicuna,MPT-30B,Falcon,ChatGLM,通义千问](#模型列表) 等众多模型空间。 6 | 7 | [![NPM](https://img.shields.io/npm/v/gradio-chatbot.svg)](https://www.npmjs.com/package/gradio-chatbot) 8 | [![Apache 2.0 License](https://img.shields.io/github/license/saltstack/salt)](https://github.com/weaigc/gradio-chatbot/blob/main/license) 9 | 10 |
11 | 12 | > 由于目前 Huggingface 上的 [ChatGPT](https://huggingface.co/spaces/yuntian-deng/ChatGPT) 空间压力过大,导致调用延时明显变长。如果你有自己的 ChatGPT 账号,推荐使用 [gpt-web](https://github.com/weaigc/gpt-web)。 13 | 14 | - [快速上手](#快速上手) 15 | - [NPM](#npm) 16 | - [Docker](#docker) 17 | - [安装](#安装) 18 | - [使用](#使用) 19 | - [CLI模式](#cli模式) 20 | - [API服务](#api服务) 21 | - [API函数](#api函数) 22 | - [OpenAI](#使用-openai-调用) 23 | - [API文档](#api文档) 24 | - [模型列表](#模型列表) 25 | - [兼容性](#兼容性) 26 | - [更新日志](#更新日志) 27 | - [鸣谢](#鸣谢) 28 | - [License](#license) 29 | 30 | ## 快速上手 31 | 32 | ### NPM 33 | 34 | * 体验 ChatGPT 35 | 36 | ```bash 37 | npx gradio-chatbot 38 | # or 39 | npm install -g gradio-chatbot 40 | chatbot 41 | ``` 42 | 43 | * 体验 Llama2 44 | ``` 45 | chatbot 2 46 | # 或者 47 | chatbot https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat 48 | ``` 49 | 50 | > 更多用法请输入 chatbot help 51 | 52 | 53 | ### Docker 54 | ``` 55 | docker build . -t gradio-server 56 | docker run --rm -it -p 8000:8000 gradio-server 57 | ``` 58 | 59 | [![asciicast](./media/demo.gif)](https://asciinema.org/a/0ki5smP795eyXdXGlx53UDmTB) 60 | 61 | 62 | ## 安装 63 | 64 | 你可以使用 npm 或者 yarn 来安装 gradio-chatbot,Node 版本需要 >= 18。 65 | 66 | ```bash 67 | npm install gradio-chatbot 68 | # or 69 | yarn add gradio-chatbot 70 | ``` 71 | 72 | ## 使用 73 | 目前支持三种模式。 74 | 75 | ### CLI模式 76 | 参考 [快速上手](#快速上手)。 77 | 78 | ### API服务 79 | 为了方便使用,提供了两种形式的接口。 80 | * 流式输出,直接访问 http://localhost:8000/api/conversation?model=0&text=hello 即可。 81 | * 非流式输出,调用方式同 ChatGPT API。以下为调用示例。 82 | 83 | ``` 84 | curl http://127.0.0.1:8000/api/conversation \ 85 | -H "Content-Type: application/json" \ 86 | -d '{ 87 | "model": "gpt-3.5-turbo", 88 | "messages": [{"role": "user", "content": "hello"}] 89 | }' 90 | ``` 91 | 92 | ### API函数 93 | ```ts 94 | import { GradioChatBot } from 'gradio-chatbot' 95 | 96 | const bot = new GradioChatBot(); 97 | 98 | async function start() { 99 | const message = await bot.chat('hello', { 100 | onMessage(partialMsg) { 101 | console.log('stream output:', partialMsg); 102 | } 103 | }); 104 | console.log('message', message); 105 | } 106 | 107 | start(); 108 | ``` 109 | 110 | 你也可以把你想要转换的空间地址输入进去,如 https://huggingface.co/spaces/h2oai/h2ogpt-chatbot 111 | ```ts 112 | import { GradioChatBot } from 'gradio-chatbot' 113 | 114 | const bot = new GradioChatBot({ 115 | url: 'https://huggingface.co/spaces/h2oai/h2ogpt-chatbot', 116 | fnIndex: 35, 117 | }); // 调用自定义 ChatBot 模型 118 | 119 | async function start() { 120 | console.log(await bot.chat('Hello')); 121 | } 122 | 123 | start(); 124 | ``` 125 | 126 | 除此之外,Npm 包里面已经内置了 14 个流行的 [Huggingface Spaces](https://huggingface.co/spaces)、[魔搭创空间](https://www.modelscope.cn/studios),你可以直接[传入模型序号使用](#模型列表)。 127 | ```ts 128 | import { GradioChatBot } from 'gradio-chatbot'; 129 | 130 | const bot = new GradioChatBot('1'); // 使用内置1号模型 131 | async function start() { 132 | console.log(await bot.chat('Tell me about ravens.')); 133 | } 134 | 135 | start(); 136 | ``` 137 | 138 | 更多示例请前往目录: [Examples](./examples/) 139 | 140 | > 注意:Huggingface 上的部分模型可能会收集你输入的信息,如果你对数据安全有要求,建议不要使用,使用自己搭建的模型是一个更好的选择。 141 | 142 | ### 使用 OpenAI 调用 143 | #### Python 144 | ``` 145 | import openai 146 | openai.api_key = "dummy" 147 | openai.api_base = "http://127.0.0.1:8080/v1" 148 | 149 | # create a chat completion 150 | chat_completion = openai.ChatCompletion.create(model="10", messages=[{"role": "user", "content": "Hello"}]) 151 | 152 | # print the completion 153 | print(chat_completion.choices[0].message.content) 154 | ``` 155 | > 更多使用说明参考 
https://github.com/openai/openai-python 156 | 157 | #### Node.js 158 | ``` 159 | import OpenAI from 'openai'; 160 | 161 | const openai = new OpenAI({ 162 | baseURL: 'http://127.0.0.1:8080/v1' 163 | }); 164 | 165 | async function main() { 166 | const stream = await openai.chat.completions.create({ 167 | model: '10', 168 | messages: [{ role: 'user', content: 'Hello' }], 169 | stream: true, 170 | }); 171 | for await (const part of stream) { 172 | process.stdout.write(part.choices[0]?.delta?.content || ''); 173 | } 174 | } 175 | 176 | main(); 177 | ``` 178 | > 更多使用说明参考 https://github.com/openai/openai-node 179 | 180 | ## API文档 181 | 182 | 参见 [API 文档](./API_CN.md) 183 | 184 | ## 模型列表 185 | 186 | 调用序号 | 类型 | 说明 | 模型 187 | -----|-----|------|------- 188 | 0 | Huggingface Spaces | ChatGPT | https://huggingface.co/spaces/yizhangliu/chatGPT 189 | 1 | Huggingface Spaces | GPT Free | https://huggingface.co/spaces/justest/gpt4free 190 | 2 | Huggingface Spaces | Llama2 Spaces | https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI 191 | 3 | Huggingface Spaces | MosaicML MPT-30B-Chat | https://huggingface.co/spaces/mosaicml/mpt-30b-chat 192 | 4 | Huggingface Spaces | Falcon Chat | https://huggingface.co/spaces/HuggingFaceH4/falcon-chat 193 | 5 | Huggingface Spaces | Star Chat | https://huggingface.co/spaces/HuggingFaceH4/starchat-playground 194 | 6 | Huggingface Spaces | ChatGLM2 | https://huggingface.co/spaces/mikeee/chatglm2-6b-4bit 195 | 7 | Huggingface Spaces | ChatGLM | https://huggingface.co/spaces/multimodalart/ChatGLM-6B 196 | 8 | Huggingface Spaces | Vicuna (此模型国内不可访问,请配置代理后使用) | https://chat.lmsys.org/ 197 | 9 | Huggingface Spaces | 通义千问 7B | https://huggingface.co/spaces/mikeee/qwen-7b-chat 198 | 10 | 魔搭 | 通义千问 | https://modelscope.cn/studios/qwen/Qwen-7B-Chat-Demo/summary 199 | 11 | 魔搭 | ChatGLM2 | https://modelscope.cn/studios/AI-ModelScope/ChatGLM6B-unofficial/summary 200 | 12 | 魔搭 | 姜子牙V1.1 | https://modelscope.cn/studios/Fengshenbang/Ziya_LLaMA_13B_v1_online/summary 201 | 13 | 魔搭 | 达摩院出品的角色对话机器人 | https://modelscope.cn/studios/damo/role_play_chat/summary 202 | 203 | > 国内访问推荐使用魔搭社区提供的模型,访问速度更快更稳定。 204 | > 更多好用模型欢迎在 [issue](https://github.com/weaigc/gradio-chatbot/issues) 区提交贡献。 205 | 206 | 207 | ## 兼容性 208 | 209 | - 此 Npm 包需要 `node >= 18`. 210 | 211 | ## 更新日志 212 | 查看 [CHANGELOG.md](./CHANGELOG.md) 213 | 214 | ## 鸣谢 215 | 216 | - Huge thanks to [@gradio/client](https://github.com/gradio-app/gradio/tree/main/client/js) 217 | - [OpenAI](https://openai.com) for creating [ChatGPT](https://openai.com/blog/chatgpt/) 🔥 218 | 219 | 220 | ## License 221 | 222 | Apache 2.0 © [LICENSE](https://github.com/weaigc/gradio-chatbot/blob/main/LICENSE). 223 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 
2 | 3 | # Gradio Chatbot 4 | 5 | > A tool that can automatically convert [Huggingface Spaces](https://huggingface.co/spaces), [Modelscope Studios](https://www.modelscope.cn/studios) and Gradio ChatBot into free APIs. It basically supports any space with a chatbot, and currently perfectly supports many model spaces such as [GPT4Free, ChatGPT, Llama 2, Vicuna, MPT-30B, Falcon, ChatGLM, Qwen and so on](#model-list). 6 | 7 | English | [中文](README_CN.md) 8 | 9 | [![NPM](https://img.shields.io/npm/v/gradio-chatbot.svg)](https://www.npmjs.com/package/gradio-chatbot) 10 | [![Apache 2.0 License](https://img.shields.io/github/license/saltstack/salt)](https://github.com/weaigc/gradio-chatbot/blob/main/license) 11 | 12 | Online Demo: https://weaigc.github.io/gradio-chatbot 13 |
14 | 15 | > Due to the current high demand on the [ChatGPT](https://huggingface.co/spaces/yuntian-deng/ChatGPT) space on Huggingface, there is a noticeable delay in response time. If you have your own ChatGPT account, it is recommended to use [gpt-web](https://github.com/weaigc/gpt-web). 16 | 17 | 18 | - [Quick Start](#quick-start) 19 | - [Installation](#installation) 20 | - [NPM](#npm) 21 | - [Docker](#docker) 22 | - [Usage](#usage) 23 | - [CLI Mode](#cli-mode) 24 | - [API Service](#api-service) 25 | - [API Function](#api-function) 26 | - [OpenAI](#using-openai) 27 | - [API Docs](#api-document) 28 | - [Model List](#model-list) 29 | - [Compatibility](#compatibility) 30 | - [ChangeLog](#change-log) 31 | - [Credits](#credits) 32 | - [License](#license) 33 | 34 | ## Quick Start 35 | 36 | ### NPM 37 | * Experience a free ChatGPT. 38 | 39 | ```bash 40 | npx gradio-chatbot 41 | # or 42 | npm install -g gradio-chatbot 43 | # call the default model 44 | chatbot 45 | ``` 46 | 47 | * Experience Llama2. 48 | ```bash 49 | chatbot 2 50 | # or 51 | chatbot https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat 52 | ``` 53 | 54 | > More usage just type `chatbot help` 55 | 56 | ### Docker 57 | ``` 58 | docker build . -t gradio-server 59 | docker run --rm -it -p 8000:8000 gradio-server 60 | ``` 61 | 62 | [![asciicast](./media/demo.gif)](https://asciinema.org/a/0ki5smP795eyXdXGlx53UDmTB) 63 | 64 | 65 | ## Installation 66 | 67 | You can use npm or yarn to install gradio-chatbot. Node version 18 or higher is required. 68 | 69 | ```bash 70 | npm install gradio-chatbot 71 | # or 72 | yarn add gradio-chatbot 73 | ``` 74 | 75 | ## Usage 76 | Currently supports three modes. 77 | 78 | ### CLI mode 79 | Refer to [Quick Start](#quick-start). 80 | 81 | ### API Service 82 | To make it easy to use, two forms of interfaces are provided: 83 | 84 | * Streaming output: simply visit http://localhost:8000/api/conversation?model=0&text=hello. 85 | * Non-streaming output: the calling method is the same as the ChatGPT API; see the curl example below. 86 | 87 | ### API Function 88 | ```ts 89 | import { GradioChatBot } from 'gradio-chatbot' 90 | 91 | const bot = new GradioChatBot(); 92 | 93 | async function start() { 94 | const message = await bot.chat('hello', { 95 | onMessage(partialMsg) { 96 | console.log('stream output:', partialMsg); 97 | } 98 | }); 99 | console.log('message', message); 100 | } 101 | 102 | start(); 103 | ``` 104 | 105 | You can also pass in the URL of the space you want to convert, such as https://huggingface.co/spaces/h2oai/h2ogpt-chatbot. 106 | ```ts 107 | import { GradioChatBot } from 'gradio-chatbot' 108 | 109 | const bot = new GradioChatBot({ 110 | url: 'https://huggingface.co/spaces/h2oai/h2ogpt-chatbot', 111 | fnIndex: 35, 112 | }); // invoke a custom ChatBot model 113 | 114 | async function start() { 115 | console.log(await bot.chat('Hello')); 116 | } 117 | start(); 118 | ``` 119 | In addition, the NPM package has built-in support for 14 popular spaces from [Hugging Face Spaces](https://huggingface.co/spaces) and [Modelscope Studios](https://www.modelscope.cn/studios). You can directly use the model index to access them. Please refer to the [Model List](#model-list) for more details. 120 | ```ts 121 | import { GradioChatBot } from 'gradio-chatbot'; 122 | 123 | const bot = new GradioChatBot('1'); 124 | async function start() { 125 | console.log(await bot.chat('Tell me about ravens.')); 126 | } 127 | 128 | start(); 129 | ``` 130 | 131 | For more examples, please visit the directory: [Examples](./examples/). 
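An example of a non-streaming call to the API server, as referenced in the [API Service](#api-service) section (this mirrors the curl example in README_CN.md and assumes the server is running locally on its default port 8000):

```
curl http://127.0.0.1:8000/api/conversation \
  -H "Content-Type: application/json" \
  -d '{
    "model": "gpt-3.5-turbo",
    "messages": [{"role": "user", "content": "hello"}]
  }'
```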
132 | 133 | > Note: Some models on Hugging Face may collect the information you input. If you have data security concerns, it is recommended not to use them; self-hosted models are a better choice. 134 | 135 | ### Using OpenAI 136 | #### Python 137 | ``` 138 | import openai 139 | openai.api_key = "dummy" 140 | openai.api_base = "http://127.0.0.1:8080/v1" 141 | 142 | # create a chat completion 143 | chat_completion = openai.ChatCompletion.create(model="10", messages=[{"role": "user", "content": "Hello"}]) 144 | 145 | # print the completion 146 | print(chat_completion.choices[0].message.content) 147 | ``` 148 | > For more usage instructions, please refer to https://github.com/openai/openai-python 149 | 150 | #### Node.js 151 | ``` 152 | import OpenAI from 'openai'; 153 | 154 | const openai = new OpenAI({ 155 | baseURL: 'http://127.0.0.1:8080/v1' 156 | }); 157 | 158 | async function main() { 159 | const stream = await openai.chat.completions.create({ 160 | model: '10', 161 | messages: [{ role: 'user', content: 'Hello' }], 162 | stream: true, 163 | }); 164 | for await (const part of stream) { 165 | process.stdout.write(part.choices[0]?.delta?.content || ''); 166 | } 167 | } 168 | 169 | main(); 170 | ``` 171 | > For more usage instructions, please refer to https://github.com/openai/openai-node 172 | 173 | ## API Document 174 | 175 | See [API Document](./API.md) 176 | 177 | ## Model List 178 | 179 | Index | Type | Description | Model 180 | -----|-----|------|------- 181 | 0 | Huggingface Spaces | ChatGPT | https://huggingface.co/spaces/yizhangliu/chatGPT 182 | 1 | Huggingface Spaces | GPT Free | https://huggingface.co/spaces/justest/gpt4free 183 | 2 | Huggingface Spaces | Llama2 Spaces | https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI 184 | 3 | Huggingface Spaces | MosaicML MPT-30B-Chat | https://huggingface.co/spaces/mosaicml/mpt-30b-chat 185 | 4 | Huggingface Spaces | Falcon Chat | https://huggingface.co/spaces/HuggingFaceH4/falcon-chat 186 | 5 | Huggingface Spaces | Star Chat | https://huggingface.co/spaces/HuggingFaceH4/starchat-playground 187 | 6 | Huggingface Spaces | ChatGLM2 | https://huggingface.co/spaces/mikeee/chatglm2-6b-4bit 188 | 7 | Huggingface Spaces | ChatGLM | https://huggingface.co/spaces/multimodalart/ChatGLM-6B 189 | 8 | Huggingface Spaces | Vicuna | https://chat.lmsys.org/ 190 | 9 | Huggingface Spaces | Qwen 7B Chat | https://huggingface.co/spaces/mikeee/qwen-7b-chat 191 | 10 | ModelScope | Qwen 7B Chat | https://modelscope.cn/studios/qwen/Qwen-7B-Chat-Demo/summary 192 | 11 | ModelScope | ChatGLM2 | https://modelscope.cn/studios/AI-ModelScope/ChatGLM6B-unofficial/summary 193 | 12 | ModelScope | Jiang Ziya V1.1 | https://modelscope.cn/studios/Fengshenbang/Ziya_LLaMA_13B_v1_online/summary 194 | 13 | ModelScope | Character Dialogue Chatbot developed by Alibaba DAMO Academy | https://modelscope.cn/studios/damo/role_play_chat/summary 195 | 196 | > Contributions of more useful models are welcome in the [issues](https://github.com/weaigc/gradio-chatbot/issues) section. 197 | 198 | 199 | ## Change Log 200 | See [CHANGELOG.md](./CHANGELOG.md) 201 | 202 | ## Compatibility 203 | 204 | - This package supports `node >= 18`. 205 | 206 | ## Credits 207 | 208 | - Huge thanks to [@gradio/client](https://github.com/gradio-app/gradio/tree/main/client/js) 209 | - [OpenAI](https://openai.com) for creating [ChatGPT](https://openai.com/blog/chatgpt/) 🔥 210 | 211 | 212 | ## License 213 | 214 | Apache 2.0 © [LICENSE](https://github.com/weaigc/gradio-chatbot/blob/main/LICENSE). 
215 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [2023] [weaigc] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | /* Visit https://aka.ms/tsconfig to read more about this file */ 4 | 5 | /* Projects */ 6 | // "incremental": true, /* Save .tsbuildinfo files to allow for incremental compilation of projects. */ 7 | // "composite": true, /* Enable constraints that allow a TypeScript project to be used with project references. */ 8 | // "tsBuildInfoFile": "./.tsbuildinfo", /* Specify the path to .tsbuildinfo incremental compilation file. */ 9 | // "disableSourceOfProjectReferenceRedirect": true, /* Disable preferring source files instead of declaration files when referencing composite projects. */ 10 | // "disableSolutionSearching": true, /* Opt a project out of multi-project reference checking when editing. */ 11 | // "disableReferencedProjectLoad": true, /* Reduce the number of projects loaded automatically by TypeScript. */ 12 | 13 | /* Language and Environment */ 14 | "target": "esnext", /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */ 15 | // "lib": [], /* Specify a set of bundled library declaration files that describe the target runtime environment. */ 16 | // "jsx": "preserve", /* Specify what JSX code is generated. */ 17 | // "experimentalDecorators": true, /* Enable experimental support for legacy experimental decorators. */ 18 | // "emitDecoratorMetadata": true, /* Emit design-type metadata for decorated declarations in source files. */ 19 | // "jsxFactory": "", /* Specify the JSX factory function used when targeting React JSX emit, e.g. 'React.createElement' or 'h'. */ 20 | // "jsxFragmentFactory": "", /* Specify the JSX Fragment reference used for fragments when targeting React JSX emit e.g. 'React.Fragment' or 'Fragment'. */ 21 | // "jsxImportSource": "", /* Specify module specifier used to import the JSX factory functions when using 'jsx: react-jsx*'. */ 22 | // "reactNamespace": "", /* Specify the object invoked for 'createElement'. This only applies when targeting 'react' JSX emit. */ 23 | // "noLib": true, /* Disable including any library files, including the default lib.d.ts. */ 24 | // "useDefineForClassFields": true, /* Emit ECMAScript-standard-compliant class fields. 
*/ 25 | // "moduleDetection": "auto", /* Control what method is used to detect module-format JS files. */ 26 | 27 | /* Modules */ 28 | "module": "commonjs", /* Specify what module code is generated. */ 29 | "rootDir": "./src", /* Specify the root folder within your source files. */ 30 | // "moduleResolution": "node10", /* Specify how TypeScript looks up a file from a given module specifier. */ 31 | // "baseUrl": "./", /* Specify the base directory to resolve non-relative module names. */ 32 | // "paths": {}, /* Specify a set of entries that re-map imports to additional lookup locations. */ 33 | // "rootDirs": ["./src"], /* Allow multiple folders to be treated as one when resolving modules. */ 34 | // "typeRoots": [], /* Specify multiple folders that act like './node_modules/@types'. */ 35 | // "types": [], /* Specify type package names to be included without being referenced in a source file. */ 36 | // "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */ 37 | // "moduleSuffixes": [], /* List of file name suffixes to search when resolving a module. */ 38 | // "allowImportingTsExtensions": true, /* Allow imports to include TypeScript file extensions. Requires '--moduleResolution bundler' and either '--noEmit' or '--emitDeclarationOnly' to be set. */ 39 | // "resolvePackageJsonExports": true, /* Use the package.json 'exports' field when resolving package imports. */ 40 | // "resolvePackageJsonImports": true, /* Use the package.json 'imports' field when resolving imports. */ 41 | // "customConditions": [], /* Conditions to set in addition to the resolver-specific defaults when resolving imports. */ 42 | "resolveJsonModule": true, /* Enable importing .json files. */ 43 | // "allowArbitraryExtensions": true, /* Enable importing files with any extension, provided a declaration file is present. */ 44 | // "noResolve": true, /* Disallow 'import's, 'require's or ''s from expanding the number of files TypeScript should add to a project. */ 45 | 46 | /* JavaScript Support */ 47 | // "allowJs": true, /* Allow JavaScript files to be a part of your program. Use the 'checkJS' option to get errors from these files. */ 48 | // "checkJs": true, /* Enable error reporting in type-checked JavaScript files. */ 49 | // "maxNodeModuleJsDepth": 1, /* Specify the maximum folder depth used for checking JavaScript files from 'node_modules'. Only applicable with 'allowJs'. */ 50 | 51 | /* Emit */ 52 | "declaration": true, /* Generate .d.ts files from TypeScript and JavaScript files in your project. */ 53 | // "declarationMap": true, /* Create sourcemaps for d.ts files. */ 54 | // "emitDeclarationOnly": true, /* Only output d.ts files and not JavaScript files. */ 55 | // "sourceMap": true, /* Create source map files for emitted JavaScript files. */ 56 | // "inlineSourceMap": true, /* Include sourcemap files inside the emitted JavaScript. */ 57 | // "outFile": "./", /* Specify a file that bundles all outputs into one JavaScript file. If 'declaration' is true, also designates a file that bundles all .d.ts output. */ 58 | "outDir": "./dist", /* Specify an output folder for all emitted files. */ 59 | // "removeComments": true, /* Disable emitting comments. */ 60 | // "noEmit": true, /* Disable emitting files from a compilation. */ 61 | // "importHelpers": true, /* Allow importing helper functions from tslib once per project, instead of including them per-file. */ 62 | // "importsNotUsedAsValues": "remove", /* Specify emit/checking behavior for imports that are only used for types. 
*/ 63 | // "downlevelIteration": true, /* Emit more compliant, but verbose and less performant JavaScript for iteration. */ 64 | // "sourceRoot": "", /* Specify the root path for debuggers to find the reference source code. */ 65 | // "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */ 66 | // "inlineSources": true, /* Include source code in the sourcemaps inside the emitted JavaScript. */ 67 | // "emitBOM": true, /* Emit a UTF-8 Byte Order Mark (BOM) in the beginning of output files. */ 68 | // "newLine": "crlf", /* Set the newline character for emitting files. */ 69 | // "stripInternal": true, /* Disable emitting declarations that have '@internal' in their JSDoc comments. */ 70 | // "noEmitHelpers": true, /* Disable generating custom helper functions like '__extends' in compiled output. */ 71 | // "noEmitOnError": true, /* Disable emitting files if any type checking errors are reported. */ 72 | // "preserveConstEnums": true, /* Disable erasing 'const enum' declarations in generated code. */ 73 | // "declarationDir": "./", /* Specify the output directory for generated declaration files. */ 74 | // "preserveValueImports": true, /* Preserve unused imported values in the JavaScript output that would otherwise be removed. */ 75 | 76 | /* Interop Constraints */ 77 | // "isolatedModules": true, /* Ensure that each file can be safely transpiled without relying on other imports. */ 78 | // "verbatimModuleSyntax": true, /* Do not transform or elide any imports or exports not marked as type-only, ensuring they are written in the output file's format based on the 'module' setting. */ 79 | // "allowSyntheticDefaultImports": true, /* Allow 'import x from y' when a module doesn't have a default export. */ 80 | "esModuleInterop": true, /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables 'allowSyntheticDefaultImports' for type compatibility. */ 81 | // "preserveSymlinks": true, /* Disable resolving symlinks to their realpath. This correlates to the same flag in node. */ 82 | "forceConsistentCasingInFileNames": true, /* Ensure that casing is correct in imports. */ 83 | 84 | /* Type Checking */ 85 | "strict": false, /* Enable all strict type-checking options. */ 86 | // "noImplicitAny": true, /* Enable error reporting for expressions and declarations with an implied 'any' type. */ 87 | // "strictNullChecks": true, /* When type checking, take into account 'null' and 'undefined'. */ 88 | // "strictFunctionTypes": true, /* When assigning functions, check to ensure parameters and the return values are subtype-compatible. */ 89 | // "strictBindCallApply": true, /* Check that the arguments for 'bind', 'call', and 'apply' methods match the original function. */ 90 | // "strictPropertyInitialization": true, /* Check for class properties that are declared but not set in the constructor. */ 91 | // "noImplicitThis": true, /* Enable error reporting when 'this' is given the type 'any'. */ 92 | // "useUnknownInCatchVariables": true, /* Default catch clause variables as 'unknown' instead of 'any'. */ 93 | // "alwaysStrict": true, /* Ensure 'use strict' is always emitted. */ 94 | // "noUnusedLocals": true, /* Enable error reporting when local variables aren't read. */ 95 | // "noUnusedParameters": true, /* Raise an error when a function parameter isn't read. */ 96 | // "exactOptionalPropertyTypes": true, /* Interpret optional property types as written, rather than adding 'undefined'. 
*/ 97 | // "noImplicitReturns": true, /* Enable error reporting for codepaths that do not explicitly return in a function. */ 98 | // "noFallthroughCasesInSwitch": true, /* Enable error reporting for fallthrough cases in switch statements. */ 99 | // "noUncheckedIndexedAccess": true, /* Add 'undefined' to a type when accessed using an index. */ 100 | // "noImplicitOverride": true, /* Ensure overriding members in derived classes are marked with an override modifier. */ 101 | // "noPropertyAccessFromIndexSignature": true, /* Enforces using indexed accessors for keys declared using an indexed type. */ 102 | // "allowUnusedLabels": true, /* Disable error reporting for unused labels. */ 103 | // "allowUnreachableCode": true, /* Disable error reporting for unreachable code. */ 104 | 105 | /* Completeness */ 106 | // "skipDefaultLibCheck": true, /* Skip type checking .d.ts files that are included with TypeScript. */ 107 | "skipLibCheck": true /* Skip type checking all .d.ts files. */ 108 | }, 109 | "exclude": ["examples", "tests"], 110 | } 111 | -------------------------------------------------------------------------------- /src/chat.ts: -------------------------------------------------------------------------------- 1 | import type { MergeExclusive } from 'type-fest'; 2 | import TurndownService from 'weaigc-turndown'; 3 | import Debug from 'debug'; 4 | import assert from 'assert'; 5 | import { client } from './client'; 6 | import spaces from './spaces.json'; 7 | export { spaces }; 8 | 9 | const debug = Debug('gradio-chatbot'); 10 | 11 | export const generateHash = () => { 12 | return Math.random().toString(36).substring(2); 13 | }; 14 | 15 | const turndownService = new TurndownService({ 16 | rules: { 17 | emphasis: { 18 | filter: ['br'], 19 | replacement: () => { 20 | return '\n'; 21 | } 22 | } 23 | } 24 | }); 25 | 26 | type GradioAutoOptions = MergeExclusive<{ 27 | url?: string; 28 | }, { 29 | endpoint?: string; 30 | }> & { 31 | historySize?: number; 32 | fnIndex?: number; 33 | args?: unknown[]; 34 | inputIndex?: number; 35 | historyIndex?: number; 36 | parseHtml?: boolean; 37 | session_hash?: string; 38 | hf_token?: string; 39 | }; 40 | 41 | type ChatOptions = { 42 | onMessage?: (msg: string) => void; 43 | onError?: (error: string) => void; 44 | } 45 | 46 | function resolveEndpoint(url: string) { 47 | debug('url', url); 48 | const uri = new URL(url); 49 | debug('resolve', uri.hostname); 50 | if (uri.hostname.endsWith('modelscope.cn')) { 51 | assert(/^\/studios\/([^/]+)\/([^/]+)/.test(uri.pathname), 'not a valid modelscope studio url'); 52 | const scope = RegExp.$1; 53 | const name = RegExp.$2; 54 | return `https://${uri.hostname}/api/v1/studio/${scope}/${name}/gradio`; 55 | } else if (/^https:\/\/huggingface\.co\/spaces\/([^/]+)\/([^/]+)/.test(url)) { 56 | return `${RegExp.$1}-${RegExp.$2}.hf.space`.toLowerCase().replace(/_/g, '-'); 57 | } else { 58 | return uri.host; 59 | } 60 | } 61 | 62 | const traverseContent = (data: any): any => { 63 | if (!Array.isArray(data)) { 64 | return data?.value || data; 65 | } 66 | return traverseContent(data.at(-1)); 67 | } 68 | 69 | const findValidSubmitByType = (components: any[], dependencies: any[], type: string) => { 70 | const id = components.find(com => com.type === 'button' && com.props.value === 'Submit')?.id; 71 | let index = dependencies.findIndex(dep => dep.targets?.includes?.(id)); 72 | return index === -1 ? 
dependencies.findIndex( 73 | (dep = {}) => 74 | dep.inputs?.length 75 | && dep.outputs?.length 76 | && dep.backend_fn 77 | && dep.trigger === type 78 | ) : index; 79 | } 80 | 81 | const findValidSubmitByButton = (components: any[], dependencies: any[]) => { 82 | const id = components.find(com => com.type === 'button')?.id; 83 | if (!id) return -1; 84 | return dependencies.findIndex(dep => dep.inputs?.length > 0 && dep.targets?.includes?.(id)); 85 | } 86 | 87 | export class GradioChatBot { 88 | private options: GradioAutoOptions; 89 | history: [string, string][] = []; 90 | historyComponentId: number; 91 | session_hash: string; 92 | private instance_map: any; 93 | constructor(opts: string | GradioAutoOptions = '0') { 94 | if (['string', 'number'].includes(typeof opts)) { 95 | this.options = { url: String(opts) }; 96 | } else { 97 | // @ts-ignore 98 | this.options = opts; 99 | } 100 | assert(this.options.endpoint || this.options.url, 'endpoint and url must specify one of them'); 101 | if (!isNaN(this.options.url as any)) { 102 | const index = parseInt(this.options.url!, 10); 103 | assert(index < spaces.length, `The model index range is [0 - ${spaces.length - 1}].`) 104 | let config: string | GradioAutoOptions = spaces[index]; 105 | if (typeof config === 'string') { 106 | config = { url: config }; 107 | } 108 | Object.assign(this.options, config); 109 | } 110 | 111 | if (!this.options.endpoint) { 112 | this.options.endpoint = resolveEndpoint(this.options.url!) 113 | debug('endpoint', this.options.endpoint) 114 | } 115 | this.session_hash = this.options.session_hash || generateHash(); 116 | if (!this.options.historySize) { 117 | this.options.historySize = 10; 118 | } 119 | 120 | this.options.parseHtml = this.options.parseHtml !== false; 121 | } 122 | 123 | private parseInputs = (fnIndex: number, config: any, skip_text = false) => { 124 | const { components, dependencies } = config; 125 | 126 | const submitFn = dependencies[fnIndex]; 127 | const inputs = submitFn?.inputs.map(id => this.instance_map[id].props.value); 128 | 129 | debug('fnIndex', fnIndex); 130 | let textInputIndex = skip_text ? 0 : submitFn?.inputs.indexOf(submitFn?.targets?.[0]); 131 | if (textInputIndex < 0) { 132 | textInputIndex = submitFn?.inputs.findIndex((id: number) => 133 | components?.find((com: any) => 134 | id === com.id 135 | && (com.type === 'textbox' || com.example_input) 136 | )); 137 | } 138 | const historyComponentId = this.historyComponentId || components?.find((com: any) => com.type === 'chatbot')?.id 139 | 140 | assert(textInputIndex > -1, 'Cannot find the input box'); 141 | 142 | debug('inputIndex', textInputIndex); 143 | return [inputs, textInputIndex, submitFn?.inputs.indexOf(historyComponentId)]; 144 | } 145 | 146 | private html2Markdown(text: string) { 147 | text = this.options.parseHtml ? 
turndownService.turndown(text || '') : text; 148 | return text?.replace?.(/�/g, '').trim().replace(/▌$/, ''); 149 | } 150 | 151 | async reset() { 152 | this.history = []; 153 | this.instance_map = null; 154 | this.session_hash = generateHash(); 155 | } 156 | 157 | async chat(input: string, options?: ChatOptions): Promise { 158 | assert(input, 'input can\'t be empty!'); 159 | return new Promise(async (resolve, reject) => { 160 | try { 161 | let { endpoint, fnIndex, args = [], hf_token } = this.options; 162 | 163 | const app = await client(endpoint!, { session_hash: this.session_hash, hf_token: hf_token as any, normalise_files: true }); 164 | 165 | const { components, dependencies } = app.config; 166 | let instance_map = this.instance_map; 167 | if (!instance_map) { 168 | instance_map = components.reduce((acc, next) => { 169 | acc[next.id] = next; 170 | return acc; 171 | }, {} as { [id: number]: any }); 172 | this.instance_map = instance_map; 173 | } 174 | 175 | fnIndex = fnIndex ?? findValidSubmitByType(components, dependencies, 'submit'); 176 | if (fnIndex < 0) { 177 | fnIndex = Math.max(findValidSubmitByButton(components, dependencies), findValidSubmitByType(components, dependencies, 'click')); 178 | } 179 | if (fnIndex < 0) { 180 | fnIndex = Math.max(findValidSubmitByType(components, dependencies, 'chatbot'), 0); 181 | } 182 | assert(fnIndex !== -1, 'Failed to parse this space, you may need to specify the fnIndex manually!'); 183 | 184 | let [inps, inpIndex, hisIndex] = this.parseInputs(fnIndex, app.config); 185 | 186 | if (!args?.length) { 187 | args = inps; 188 | } 189 | let { inputIndex = inpIndex, historyIndex = hisIndex } = this.options; 190 | 191 | if (inputIndex > -1) { 192 | args[inputIndex] = input; 193 | } 194 | 195 | if (historyIndex > -1) { 196 | args[historyIndex] = this.history 197 | } 198 | 199 | debug('args', fnIndex, JSON.stringify(args)); 200 | 201 | const fn_status = []; 202 | let _error_id = -1; 203 | let messages: { fn_index: number, type: string, message: string, id: number }[] = []; 204 | const MESSAGE_QUOTE_RE = /^'([^]+)'$/; 205 | let submit_map: Map> = new Map(); 206 | 207 | const handle_update = (data: any, fn_index: number) => { 208 | const outputs = dependencies[fn_index].outputs; 209 | data?.forEach((value: any, i: number) => { 210 | const output = instance_map[outputs[i]]; 211 | output.props.value_is_output = true; 212 | if ( 213 | typeof value === "object" && 214 | value !== null && 215 | value.__type__ === "update" 216 | ) { 217 | for (const [update_key, update_value] of Object.entries(value)) { 218 | if (update_key === "__type__") { 219 | continue; 220 | } else { 221 | output.props[update_key] = update_value; 222 | } 223 | } 224 | } else { 225 | output.props.value = value; 226 | if (process.env.DEBUG) { 227 | debug('value', output.type, JSON.stringify(value)); 228 | } 229 | if (output.type === 'chatbot' && value) { 230 | this.history = value.slice(-this.options.historySize); 231 | this.historyComponentId = output.id 232 | output.props.value = this.history; 233 | const message = value?.at(-1)?.at(-1); 234 | options?.onMessage?.(this.html2Markdown(message)); 235 | } 236 | } 237 | }); 238 | } 239 | 240 | const trigger_api_call = async ( 241 | dep_index: number, 242 | data = null, 243 | event_data: unknown = null 244 | ) => { 245 | let dep = dependencies[dep_index]; 246 | const current_status = fn_status[dep_index]; 247 | 248 | messages = messages.filter(({ fn_index }) => fn_index !== dep_index); 249 | if (dep.cancels) { 250 | await Promise.all( 251 | 
dep.cancels.map(async (fn_index) => { 252 | const submission = submit_map.get(fn_index); 253 | submission?.cancel(); 254 | return submission; 255 | }) 256 | ); 257 | } 258 | 259 | if (current_status === "pending" || current_status === "generating") { 260 | return; 261 | } 262 | 263 | let payload = { 264 | fn_index: dep_index, 265 | data: data || dep.inputs.map((id) => instance_map[id].props.value), 266 | event_data: dep.collects_event_data ? event_data : null 267 | }; 268 | const make_prediction = () => { 269 | const submission = app.submit(payload.fn_index, payload.data as unknown[], payload.event_data) 270 | .on("data", ({ data, fn_index }) => { 271 | handle_update(data, fn_index); 272 | }) 273 | .on("status", ({ fn_index, ...status }) => { 274 | fn_status[fn_index] = status.stage; 275 | debug('status', status.stage); 276 | if (status.stage === "complete") { 277 | let end = true; 278 | dependencies.map(async (dep, i) => { 279 | if (dep.trigger_after === fn_index) { 280 | end = false; 281 | trigger_api_call(i); 282 | } 283 | }); 284 | 285 | submission.destroy(); 286 | if (end) { 287 | const message = this.history?.at(-1)?.at(-1); 288 | resolve(this.html2Markdown(message)); 289 | } 290 | } 291 | 292 | if (status.stage === "error") { 293 | if (status.message) { 294 | const _message = status.message.replace( 295 | MESSAGE_QUOTE_RE, 296 | (_, b) => b 297 | ); 298 | messages = [ 299 | { 300 | type: "error", 301 | message: _message, 302 | id: ++_error_id, 303 | fn_index 304 | }, 305 | ...messages 306 | ]; 307 | } 308 | 309 | dependencies.map(async (dep, i) => { 310 | if ( 311 | dep.trigger_after === fn_index && 312 | !dep.trigger_only_on_success 313 | ) { 314 | trigger_api_call(i); 315 | } 316 | }); 317 | options?.onError?.(status.message || 'error'); 318 | reject(status.message || 'error'); 319 | submission.destroy(); 320 | } 321 | }); 322 | 323 | submit_map.set(dep_index, submission); 324 | } 325 | if (dep.frontend_fn) { 326 | dep 327 | .frontend_fn( 328 | payload.data.concat( 329 | dep.outputs.map((id) => instance_map[id].props.value) 330 | ) 331 | ) 332 | .then((v: []) => { 333 | if (dep.backend_fn) { 334 | payload.data = v; 335 | make_prediction(); 336 | } else { 337 | handle_update(v, dep_index); 338 | } 339 | }); 340 | } else { 341 | if (dep.backend_fn) { 342 | make_prediction(); 343 | } 344 | } 345 | } 346 | 347 | trigger_api_call(fnIndex, args); 348 | } catch (e) { 349 | reject(e); 350 | } 351 | }); 352 | }; 353 | } 354 | -------------------------------------------------------------------------------- /src/client.ts: -------------------------------------------------------------------------------- 1 | import semiver from "semiver"; 2 | import type { Config } from "./types"; 3 | 4 | import { 5 | process_endpoint, 6 | RE_SPACE_NAME, 7 | map_names_to_ids, 8 | discussions_enabled, 9 | get_space_hardware, 10 | set_space_hardware, 11 | set_space_timeout, 12 | hardware_types 13 | } from "./utils"; 14 | 15 | import type { 16 | EventType, 17 | EventListener, 18 | ListenerMap, 19 | Event, 20 | Payload, 21 | PostResponse, 22 | UploadResponse, 23 | Status, 24 | SpaceStatus, 25 | SpaceStatusCallback, 26 | FileData 27 | } from "./types"; 28 | 29 | import { fetch, WebSocket, Blob } from "ifw"; 30 | 31 | declare global { 32 | interface Window { 33 | __gradio_mode__: "app" | "website"; 34 | launchGradio: Function; 35 | launchGradioFromSpaces: Function; 36 | gradio_config: Config; 37 | scoped_css_attach: (link: HTMLLinkElement) => void; 38 | __is_colab__: boolean; 39 | } 40 | } 41 | 42 | type event = ( 
43 | eventType: K, 44 | listener: EventListener 45 | ) => SubmitReturn; 46 | type predict = ( 47 | endpoint: string | number, 48 | data?: unknown[], 49 | event_data?: unknown 50 | ) => Promise; 51 | 52 | type client_return = { 53 | predict: predict; 54 | config: Config; 55 | submit: ( 56 | endpoint: string | number, 57 | data?: unknown[], 58 | event_data?: unknown 59 | ) => SubmitReturn; 60 | view_api: (c?: Config) => Promise>; 61 | }; 62 | 63 | type SubmitReturn = { 64 | on: event; 65 | off: event; 66 | cancel: () => Promise; 67 | destroy: () => void; 68 | }; 69 | 70 | const QUEUE_FULL_MSG = "This application is too busy. Keep trying!"; 71 | const BROKEN_CONNECTION_MSG = "Connection errored out."; 72 | 73 | export async function post_data( 74 | url: string, 75 | body: unknown, 76 | token?: `hf_${string}` 77 | ): Promise<[PostResponse, number]> { 78 | const headers: { 79 | Authorization?: string; 80 | "Content-Type": "application/json"; 81 | } = { "Content-Type": "application/json" }; 82 | if (token) { 83 | headers.Authorization = `Bearer ${token}`; 84 | } 85 | try { 86 | var response = await fetch(url, { 87 | method: "POST", 88 | body: JSON.stringify(body), 89 | headers 90 | }); 91 | } catch (e) { 92 | return [{ error: BROKEN_CONNECTION_MSG }, 500]; 93 | } 94 | const output = await response.json() as PostResponse; 95 | return [output, response.status]; 96 | } 97 | 98 | export async function upload_files( 99 | root: string, 100 | files: Array, 101 | token?: `hf_${string}` 102 | ): Promise { 103 | const headers: { 104 | Authorization?: string; 105 | } = {}; 106 | if (token) { 107 | headers.Authorization = `Bearer ${token}`; 108 | } 109 | 110 | const formData = new FormData(); 111 | files.forEach((file) => { 112 | formData.append("files", file); 113 | }); 114 | try { 115 | var response = await fetch(`${root}/upload`, { 116 | method: "POST", 117 | // @ts-ignore 118 | body: formData, 119 | headers 120 | }); 121 | } catch (e) { 122 | return { error: BROKEN_CONNECTION_MSG }; 123 | } 124 | // @ts-ignore 125 | const output: UploadResponse["files"] = await response.json(); 126 | return { files: output }; 127 | } 128 | 129 | export async function duplicate( 130 | app_reference: string, 131 | options: { 132 | hf_token: `hf_${string}`; 133 | private?: boolean; 134 | status_callback: SpaceStatusCallback; 135 | hardware?: typeof hardware_types[number]; 136 | timeout?: number; 137 | } 138 | ) { 139 | const { hf_token, private: _private, hardware, timeout } = options; 140 | 141 | if (hardware && !hardware_types.includes(hardware)) { 142 | throw new Error( 143 | `Invalid hardware type provided. 
Valid types are: ${hardware_types 144 | .map((v) => `"${v}"`) 145 | .join(",")}.` 146 | ); 147 | } 148 | const headers = { 149 | Authorization: `Bearer ${hf_token}` 150 | }; 151 | 152 | const user = ( 153 | await ( 154 | await fetch(`https://huggingface.co/api/whoami-v2`, { 155 | headers 156 | }) 157 | ).json() as any 158 | ).name; 159 | 160 | const space_name = app_reference.split("/")[1]; 161 | const body: { 162 | repository: string; 163 | private?: boolean; 164 | } = { 165 | repository: `${user}/${space_name}` 166 | }; 167 | 168 | if (_private) { 169 | body.private = true; 170 | } 171 | 172 | try { 173 | const response = await fetch( 174 | `https://huggingface.co/api/spaces/${app_reference}/duplicate`, 175 | { 176 | method: "POST", 177 | headers: { "Content-Type": "application/json", ...headers }, 178 | body: JSON.stringify(body) 179 | } 180 | ); 181 | 182 | if (response.status === 409) { 183 | return client(`${user}/${space_name}`, options); 184 | } else { 185 | const duplicated_space = await response.json() as any; 186 | 187 | let original_hardware; 188 | 189 | if (!hardware) { 190 | original_hardware = await get_space_hardware(app_reference, hf_token); 191 | } 192 | 193 | const requested_hardware = hardware || original_hardware || "cpu-basic"; 194 | await set_space_hardware( 195 | `${user}/${space_name}`, 196 | requested_hardware, 197 | hf_token 198 | ); 199 | 200 | await set_space_timeout( 201 | `${user}/${space_name}`, 202 | timeout || 300, 203 | hf_token 204 | ); 205 | return client(duplicated_space.url, options); 206 | } 207 | } catch (e: any) { 208 | throw new Error(e); 209 | } 210 | } 211 | 212 | export async function client( 213 | app_reference: string, 214 | options: { 215 | hf_token?: `hf_${string}`; 216 | status_callback?: SpaceStatusCallback; 217 | normalise_files?: boolean; 218 | session_hash?: string; 219 | } = { 220 | normalise_files: true, 221 | session_hash: Math.random().toString(36).substring(2), 222 | } 223 | ): Promise { 224 | return new Promise(async (res) => { 225 | const { status_callback, hf_token, normalise_files, session_hash } = options; 226 | const return_obj = { 227 | predict, 228 | submit, 229 | view_api 230 | // duplicate 231 | }; 232 | const transform_files = normalise_files ?? 
true; 233 | 234 | const { ws_protocol, http_protocol, host, space_id } = 235 | await process_endpoint(app_reference, hf_token); 236 | // const session_hash = Math.random().toString(36).substring(2); 237 | const last_status: Record = {}; 238 | let config: Config; 239 | let api_map: Record = {}; 240 | 241 | let jwt: false | string = false; 242 | 243 | if (hf_token && space_id) { 244 | jwt = await get_jwt(space_id, hf_token); 245 | } 246 | 247 | async function config_success(_config: Config) { 248 | config = _config; 249 | api_map = map_names_to_ids(_config?.dependencies || []); 250 | try { 251 | api = await view_api(config); 252 | } catch (e: any) { 253 | console.error(`Could not get api details: ${e.message}`); 254 | } 255 | 256 | return { 257 | config, 258 | ...return_obj 259 | }; 260 | } 261 | let api: ApiInfo; 262 | async function handle_space_sucess(status: SpaceStatus) { 263 | if (status_callback) status_callback(status); 264 | if (status.status === "running") 265 | try { 266 | config = await resolve_config(`${http_protocol}//${host}`, hf_token); 267 | 268 | const _config: any = await config_success(config); 269 | res(_config); 270 | } catch (e) { 271 | if (status_callback) { 272 | status_callback({ 273 | status: "error", 274 | message: "Could not load this space.", 275 | load_status: "error", 276 | detail: "NOT_FOUND" 277 | }); 278 | } 279 | } 280 | } 281 | 282 | try { 283 | config = await resolve_config(`${http_protocol}//${host}`, hf_token); 284 | const _config: any = await config_success(config); 285 | res(_config); 286 | } catch (e) { 287 | console.log('e', e); 288 | if (space_id) { 289 | check_space_status( 290 | space_id, 291 | RE_SPACE_NAME.test(space_id) ? "space_name" : "subdomain", 292 | handle_space_sucess 293 | ); 294 | } else { 295 | if (status_callback) 296 | status_callback({ 297 | status: "error", 298 | message: "Could not load this space.", 299 | load_status: "error", 300 | detail: "NOT_FOUND" 301 | }); 302 | } 303 | } 304 | 305 | /** 306 | * Run a prediction. 307 | * @param endpoint - The prediction endpoint to use. 308 | * @param status_callback - A function that is called with the current status of the prediction immediately and every time it updates. 309 | * @return Returns the data for the prediction or an error message. 
310 | */ 311 | function predict(endpoint: string, data: unknown[], event_data?: unknown) { 312 | let data_returned = false; 313 | let status_complete = false; 314 | return new Promise((res, rej) => { 315 | const app = submit(endpoint, data, event_data); 316 | 317 | app 318 | .on("data", (d) => { 319 | data_returned = true; 320 | if (status_complete) { 321 | app.destroy(); 322 | } 323 | res(d); 324 | }) 325 | .on("status", (status) => { 326 | if (status.stage === "error") rej(status); 327 | if (status.stage === "complete" && data_returned) { 328 | app.destroy(); 329 | } 330 | if (status.stage === "complete") { 331 | status_complete = true; 332 | } 333 | }); 334 | }); 335 | } 336 | 337 | function submit( 338 | endpoint: string | number, 339 | data: unknown[], 340 | event_data?: unknown 341 | ): SubmitReturn { 342 | let fn_index: number; 343 | let api_info: EndpointInfo; 344 | 345 | if (typeof endpoint === "number") { 346 | fn_index = endpoint; 347 | api_info = api?.unnamed_endpoints[fn_index]; 348 | } else { 349 | const trimmed_endpoint = endpoint.replace(/^\//, ""); 350 | 351 | fn_index = api_map[trimmed_endpoint]; 352 | api_info = api?.named_endpoints[endpoint.trim()]; 353 | } 354 | 355 | if (typeof fn_index !== "number") { 356 | throw new Error( 357 | "There is no endpoint matching that name of fn_index matching that number." 358 | ); 359 | } 360 | 361 | const _endpoint = typeof endpoint === "number" ? "/predict" : endpoint; 362 | let payload: Payload; 363 | let complete: false | Record = false; 364 | const listener_map: ListenerMap = {}; 365 | let websocket: WebSocket; 366 | 367 | handle_blob( 368 | `${http_protocol}//${host + config.path}`, 369 | data, 370 | api_info, 371 | hf_token 372 | ).then((_payload) => { 373 | payload = { data: _payload || [], event_data, fn_index }; 374 | if (skip_queue(fn_index, config)) { 375 | fire_event({ 376 | type: "status", 377 | endpoint: _endpoint, 378 | stage: "pending", 379 | queue: false, 380 | fn_index, 381 | time: new Date() 382 | }); 383 | 384 | post_data( 385 | `${http_protocol}//${host + config.path}/run${ 386 | _endpoint.startsWith("/") ? _endpoint : `/${_endpoint}` 387 | }`, 388 | { 389 | ...payload, 390 | session_hash 391 | }, 392 | hf_token 393 | ) 394 | .then(([output, status_code]) => { 395 | const data = transform_files 396 | ? 
transform_output( 397 | output.data, 398 | api_info, 399 | config.root, 400 | config.root_url 401 | ) 402 | : output.data; 403 | if (status_code == 200) { 404 | fire_event({ 405 | type: "data", 406 | endpoint: _endpoint, 407 | fn_index, 408 | data: data, 409 | time: new Date() 410 | }); 411 | 412 | fire_event({ 413 | type: "status", 414 | endpoint: _endpoint, 415 | fn_index, 416 | stage: "complete", 417 | eta: output.average_duration, 418 | queue: false, 419 | time: new Date() 420 | }); 421 | } else { 422 | fire_event({ 423 | type: "status", 424 | stage: "error", 425 | endpoint: _endpoint, 426 | fn_index, 427 | message: output.error, 428 | queue: false, 429 | time: new Date() 430 | }); 431 | } 432 | }) 433 | .catch((e) => { 434 | fire_event({ 435 | type: "status", 436 | stage: "error", 437 | message: e.message, 438 | endpoint: _endpoint, 439 | fn_index, 440 | queue: false, 441 | time: new Date() 442 | }); 443 | }); 444 | } else { 445 | fire_event({ 446 | type: "status", 447 | stage: "pending", 448 | queue: true, 449 | endpoint: _endpoint, 450 | fn_index, 451 | time: new Date() 452 | }); 453 | 454 | let url = new URL(`${ws_protocol}://${host}${config.path} 455 | /queue/join`); 456 | 457 | if (jwt) { 458 | url.searchParams.set("__sign", jwt); 459 | } 460 | // @ts-ignore 461 | websocket = new WebSocket(url); 462 | 463 | websocket.onclose = (evt) => { 464 | if (!evt.wasClean) { 465 | fire_event({ 466 | type: "status", 467 | stage: "error", 468 | message: BROKEN_CONNECTION_MSG, 469 | queue: true, 470 | endpoint: _endpoint, 471 | fn_index, 472 | time: new Date() 473 | }); 474 | } 475 | }; 476 | 477 | websocket.onmessage = function (event: any) { 478 | const _data = JSON.parse(event.data as any); 479 | // console.log(event.data); 480 | const { type, status, data } = handle_message( 481 | _data, 482 | last_status[fn_index] 483 | ); 484 | 485 | if (type === "update" && status && !complete) { 486 | // call 'status' listeners 487 | fire_event({ 488 | type: "status", 489 | endpoint: _endpoint, 490 | fn_index, 491 | time: new Date(), 492 | ...status 493 | }); 494 | if (status.stage === "error") { 495 | websocket.close(); 496 | } 497 | } else if (type === "hash") { 498 | websocket.send(JSON.stringify({ fn_index, session_hash })); 499 | return; 500 | } else if (type === "data") { 501 | websocket.send(JSON.stringify({ ...payload, session_hash })); 502 | } else if (type === "complete") { 503 | complete = status as any; 504 | } else if (type === "generating") { 505 | fire_event({ 506 | type: "status", 507 | time: new Date(), 508 | ...status, 509 | stage: status?.stage!, 510 | queue: true, 511 | endpoint: _endpoint, 512 | fn_index 513 | }); 514 | } 515 | if (data) { 516 | fire_event({ 517 | type: "data", 518 | time: new Date(), 519 | data: transform_files 520 | ? 
transform_output( 521 | data.data, 522 | api_info, 523 | config.root, 524 | config.root_url 525 | ) 526 | : data.data, 527 | endpoint: _endpoint, 528 | fn_index 529 | }); 530 | 531 | if (complete) { 532 | fire_event({ 533 | type: "status", 534 | time: new Date(), 535 | ...complete, 536 | stage: status?.stage!, 537 | queue: true, 538 | endpoint: _endpoint, 539 | fn_index 540 | }); 541 | websocket.close(); 542 | } 543 | } 544 | }; 545 | 546 | // different ws contract for gradio versions older than 3.6.0 547 | //@ts-ignore 548 | if (semiver(config.version || "2.0.0", "3.6") < 0) { 549 | websocket.addEventListener("open", () => 550 | websocket.send(JSON.stringify({ hash: session_hash })) 551 | ); 552 | } 553 | } 554 | }); 555 | 556 | function fire_event(event: Event) { 557 | const narrowed_listener_map: ListenerMap = listener_map; 558 | const listeners = narrowed_listener_map[event.type] || []; 559 | listeners?.forEach((l) => l(event)); 560 | } 561 | 562 | function on( 563 | eventType: K, 564 | listener: EventListener 565 | ) { 566 | const narrowed_listener_map: ListenerMap = listener_map; 567 | const listeners = narrowed_listener_map[eventType] || []; 568 | narrowed_listener_map[eventType] = listeners; 569 | listeners?.push(listener); 570 | 571 | return { on, off, cancel, destroy }; 572 | } 573 | 574 | function off( 575 | eventType: K, 576 | listener: EventListener 577 | ) { 578 | const narrowed_listener_map: ListenerMap = listener_map; 579 | let listeners = narrowed_listener_map[eventType] || []; 580 | listeners = listeners?.filter((l) => l !== listener); 581 | narrowed_listener_map[eventType] = listeners; 582 | 583 | return { on, off, cancel, destroy }; 584 | } 585 | 586 | async function cancel() { 587 | const _status: Status = { 588 | stage: "complete", 589 | queue: false, 590 | time: new Date() 591 | }; 592 | complete = _status; 593 | fire_event({ 594 | ..._status, 595 | type: "status", 596 | endpoint: _endpoint, 597 | fn_index: fn_index 598 | }); 599 | 600 | if (websocket && websocket.readyState === 0) { 601 | websocket.addEventListener("open", () => { 602 | websocket.close(); 603 | }); 604 | } else { 605 | websocket.close(); 606 | } 607 | 608 | try { 609 | await fetch(`${http_protocol}//${host + config.path}/reset`, { 610 | headers: { "Content-Type": "application/json" }, 611 | method: "POST", 612 | body: JSON.stringify({ fn_index, session_hash }) 613 | }); 614 | } catch (e) { 615 | console.warn( 616 | "The `/reset` endpoint could not be called. Subsequent endpoint results may be unreliable." 
617 | ); 618 | } 619 | } 620 | 621 | function destroy() { 622 | for (const event_type in listener_map) { 623 | (listener_map[event_type as "data" | "status"] || []).forEach((fn) => { 624 | off(event_type as "data" | "status", fn); 625 | }); 626 | } 627 | } 628 | 629 | return { 630 | on, 631 | off, 632 | cancel, 633 | destroy 634 | }; 635 | } 636 | 637 | async function view_api(config: Config): Promise> { 638 | if (api) return api; 639 | 640 | const headers: { 641 | Authorization?: string; 642 | "Content-Type": "application/json"; 643 | } = { "Content-Type": "application/json" }; 644 | if (hf_token) { 645 | headers.Authorization = `Bearer ${hf_token}`; 646 | } 647 | let response: Response; 648 | if (semiver(config.version || "2.0.0", "3.30") < 0) { 649 | // @ts-ignore 650 | response = await fetch( 651 | "https://gradio-space-api-fetcher-v2.hf.space/api", 652 | { 653 | method: "POST", 654 | body: JSON.stringify({ 655 | serialize: false, 656 | config: JSON.stringify(config) 657 | }), 658 | headers 659 | } 660 | ); 661 | } else { 662 | // @ts-ignore 663 | response = await fetch(`${config.root}/info`, { 664 | headers 665 | }); 666 | } 667 | 668 | if (!response.ok) { 669 | throw new Error(BROKEN_CONNECTION_MSG); 670 | } 671 | 672 | let api_info = (await response.json()) as 673 | | ApiInfo 674 | | { api: ApiInfo }; 675 | if ("api" in api_info) { 676 | api_info = api_info.api; 677 | } 678 | 679 | if ( 680 | api_info.named_endpoints["/predict"] && 681 | !api_info.unnamed_endpoints["0"] 682 | ) { 683 | api_info.unnamed_endpoints[0] = api_info.named_endpoints["/predict"]; 684 | } 685 | 686 | const x = transform_api_info(api_info, config, api_map); 687 | return x; 688 | } 689 | }); 690 | } 691 | 692 | function transform_output( 693 | data: any[], 694 | api_info: any, 695 | root_url: string, 696 | remote_url?: string 697 | ): unknown[] { 698 | return data.map((d, i) => { 699 | if (api_info?.returns?.[i]?.component === "File") { 700 | return normalise_file(d, root_url, remote_url); 701 | } else if (api_info?.returns?.[i]?.component === "Gallery") { 702 | return d.map((img: any) => { 703 | return Array.isArray(img) 704 | ? 
[normalise_file(img[0], root_url, remote_url), img[1]] 705 | : [normalise_file(img, root_url, remote_url), null]; 706 | }); 707 | } else if (d && typeof d === "object" && d.is_file) { 708 | return normalise_file(d, root_url, remote_url); 709 | } else { 710 | return d; 711 | } 712 | }); 713 | } 714 | 715 | function normalise_file( 716 | file: Array, 717 | root: string, 718 | root_url: string | null 719 | ): Array; 720 | function normalise_file( 721 | file: FileData | string, 722 | root: string, 723 | root_url: string | null 724 | ): FileData; 725 | function normalise_file( 726 | file: null, 727 | root: string, 728 | root_url: string | undefined 729 | ): null; 730 | function normalise_file( 731 | file: any, 732 | root: any, 733 | root_url: any, 734 | ): Array | FileData | null { 735 | if (file == null) return null; 736 | if (typeof file === "string") { 737 | return { 738 | name: "file_data", 739 | data: file 740 | }; 741 | } else if (Array.isArray(file)) { 742 | const normalized_file: Array = []; 743 | 744 | for (const x of file) { 745 | if (x === null) { 746 | normalized_file.push(null); 747 | } else { 748 | normalized_file.push(normalise_file(x, root, root_url)); 749 | } 750 | } 751 | 752 | return normalized_file as Array; 753 | } else if (file.is_file) { 754 | if (!root_url) { 755 | file.data = root + "/file=" + file.name; 756 | } else { 757 | file.data = "/proxy=" + root_url + "/file=" + file.name; 758 | } 759 | } 760 | return file; 761 | } 762 | 763 | interface ApiData { 764 | label: string; 765 | type: { 766 | type: any; 767 | description: string; 768 | }; 769 | component: string; 770 | example_input?: any; 771 | } 772 | 773 | interface JsApiData { 774 | label: string; 775 | type: string; 776 | component: string; 777 | example_input: any; 778 | } 779 | 780 | interface EndpointInfo { 781 | parameters: T[]; 782 | returns: T[]; 783 | } 784 | interface ApiInfo { 785 | named_endpoints: { 786 | [key: string]: EndpointInfo; 787 | }; 788 | unnamed_endpoints: { 789 | [key: string]: EndpointInfo; 790 | }; 791 | } 792 | 793 | function get_type( 794 | type: { [key: string]: any }, 795 | component: string, 796 | serializer: string, 797 | signature_type: "return" | "parameter" 798 | ) { 799 | switch (type.type) { 800 | case "string": 801 | return "string"; 802 | case "boolean": 803 | return "boolean"; 804 | case "number": 805 | return "number"; 806 | } 807 | 808 | if ( 809 | serializer === "JSONSerializable" || 810 | serializer === "StringSerializable" 811 | ) { 812 | return "any"; 813 | } else if (serializer === "ListStringSerializable") { 814 | return "string[]"; 815 | } else if (component === "Image") { 816 | return signature_type === "parameter" ? "Blob | File | Buffer" : "string"; 817 | } else if (serializer === "FileSerializable") { 818 | if (type?.type === "array") { 819 | return signature_type === "parameter" 820 | ? "(Blob | File | Buffer)[]" 821 | : `{ name: string; data: string; size?: number; is_file?: boolean; orig_name?: string}[]`; 822 | } else { 823 | return signature_type === "parameter" 824 | ? "Blob | File | Buffer" 825 | : `{ name: string; data: string; size?: number; is_file?: boolean; orig_name?: string}`; 826 | } 827 | } else if (serializer === "GallerySerializable") { 828 | return signature_type === "parameter" 829 | ? 
"[(Blob | File | Buffer), (string | null)][]" 830 | : `[{ name: string; data: string; size?: number; is_file?: boolean; orig_name?: string}, (string | null))][]`; 831 | } 832 | } 833 | 834 | function get_description( 835 | type: { type: any; description: string }, 836 | serializer: string 837 | ) { 838 | if (serializer === "GallerySerializable") { 839 | return "array of [file, label] tuples"; 840 | } else if (serializer === "ListStringSerializable") { 841 | return "array of strings"; 842 | } else if (serializer === "FileSerializable") { 843 | return "array of files or single file"; 844 | } else { 845 | return type.description; 846 | } 847 | } 848 | 849 | function transform_api_info( 850 | api_info: ApiInfo, 851 | config: Config, 852 | api_map: Record 853 | ): ApiInfo { 854 | const new_data = { 855 | named_endpoints: {}, 856 | unnamed_endpoints: {} 857 | }; 858 | for (const key in api_info) { 859 | // @ts-ignore 860 | const cat = api_info[key]; 861 | 862 | for (const endpoint in cat) { 863 | const dep_index = config.dependencies[endpoint as any] 864 | ? endpoint 865 | : api_map[endpoint.replace("/", "")]; 866 | const info = cat[endpoint]; 867 | // @ts-ignore 868 | new_data[key][endpoint] = {}; 869 | // @ts-ignore 870 | new_data[key][endpoint].parameters = {}; 871 | // @ts-ignore 872 | new_data[key][endpoint].returns = {}; 873 | // @ts-ignore 874 | new_data[key][endpoint].type = config.dependencies[dep_index].types; 875 | // @ts-ignore 876 | new_data[key][endpoint].parameters = info.parameters.map( 877 | // @ts-ignore 878 | ({ label, component, type, serializer }) => ({ 879 | label, 880 | component, 881 | type: get_type(type, component, serializer, "parameter"), 882 | description: get_description(type, serializer) 883 | }) 884 | ); 885 | // @ts-ignore 886 | new_data[key][endpoint].returns = info.returns.map( 887 | // @ts-ignore 888 | ({ label, component, type, serializer }) => ({ 889 | label, 890 | component, 891 | type: get_type(type, component, serializer, "return"), 892 | description: get_description(type, serializer) 893 | }) 894 | ); 895 | } 896 | } 897 | 898 | return new_data; 899 | } 900 | 901 | async function get_jwt( 902 | space: string, 903 | token: `hf_${string}` 904 | ): Promise { 905 | try { 906 | const r = await fetch(`https://huggingface.co/api/spaces/${space}/jwt`, { 907 | headers: { 908 | Authorization: `Bearer ${token}` 909 | } 910 | }); 911 | 912 | // @ts-ignore 913 | const jwt = (await r.json()).token; 914 | 915 | return jwt || false; 916 | } catch (e) { 917 | console.error(e); 918 | return false; 919 | } 920 | } 921 | 922 | export async function handle_blob( 923 | endpoint: string, 924 | data: unknown[], 925 | // @ts-ignore 926 | api_info, 927 | token?: `hf_${string}` 928 | ): Promise { 929 | const blob_refs = await walk_and_store_blobs( 930 | data, 931 | undefined, 932 | [], 933 | true, 934 | api_info 935 | ); 936 | 937 | return Promise.all( 938 | // @ts-ignore 939 | blob_refs.map(async ({ path, blob, data, type }) => { 940 | if (blob) { 941 | // @ts-ignore 942 | const file_url = (await upload_files(endpoint, [blob], token)).files[0]; 943 | return { path, file_url, type }; 944 | } else { 945 | return { path, base64: data, type }; 946 | } 947 | }) 948 | ).then((r) => { 949 | r.forEach(({ path, file_url, base64, type }) => { 950 | if (base64) { 951 | update_object(data, base64, path); 952 | } else if (type === "Gallery") { 953 | update_object(data, file_url, path); 954 | } else if (file_url) { 955 | const o = { 956 | is_file: true, 957 | name: `${file_url}`, 958 | data: 
export async function handle_blob(
  endpoint: string,
  data: unknown[],
  // @ts-ignore
  api_info,
  token?: `hf_${string}`
): Promise<unknown[]> {
  const blob_refs = await walk_and_store_blobs(
    data,
    undefined,
    [],
    true,
    api_info
  );

  return Promise.all(
    // @ts-ignore
    blob_refs.map(async ({ path, blob, data, type }) => {
      if (blob) {
        // @ts-ignore
        const file_url = (await upload_files(endpoint, [blob], token)).files[0];
        return { path, file_url, type };
      } else {
        return { path, base64: data, type };
      }
    })
  ).then((r) => {
    r.forEach(({ path, file_url, base64, type }) => {
      if (base64) {
        update_object(data, base64, path);
      } else if (type === "Gallery") {
        update_object(data, file_url, path);
      } else if (file_url) {
        const o = {
          is_file: true,
          name: `${file_url}`,
          data: null
          // orig_name: "file.csv"
        };
        update_object(data, o, path);
      }
    });

    return data;
  });
}

// Sets the (possibly nested) property of `object` addressed by the key path
// in `stack` to `newValue`.
// @ts-ignore
function update_object(object, newValue, stack) {
  while (stack.length > 1) {
    object = object[stack.shift()];
  }

  object[stack.shift()] = newValue;
}

// Recursively walks `param` and returns a flat list of
// { path, blob?, data?, type } records for every Blob/File/Buffer found, so
// handle_blob() can upload or inline them and patch the original by path.
// @ts-ignore
export async function walk_and_store_blobs(
  // @ts-ignore
  param,
  type = undefined,
  path = [],
  root = false,
  api_info = undefined
) {
  if (Array.isArray(param)) {
    let blob_refs: any = [];

    await Promise.all(
      param.map(async (v, i) => {
        let new_path = path.slice();
        // @ts-ignore
        new_path.push(i);

        const array_refs = await walk_and_store_blobs(
          param[i],
          // @ts-ignore
          root ? api_info?.parameters[i]?.component || undefined : type,
          new_path,
          false,
          api_info
        );

        blob_refs = blob_refs.concat(array_refs);
      })
    );

    return blob_refs;
  } else if (globalThis.Buffer && param instanceof globalThis.Buffer) {
    const is_image = type === "Image";
    return [
      {
        path: path,
        blob: is_image ? false : new Blob([param]),
        data: is_image ? `${param.toString("base64")}` : false,
        type
      }
    ];
  } else if (
    param instanceof Blob ||
    (typeof window !== "undefined" && param instanceof File)
  ) {
    if (type === "Image") {
      let data;

      if (typeof window !== "undefined") {
        // browser: encode as a data URI
        data = await image_to_data_uri(param as any);
      } else {
        // node: encode the raw bytes as base64
        const buffer = await param.arrayBuffer();
        data = Buffer.from(buffer).toString("base64");
      }

      return [{ path, data, type }];
    } else {
      return [{ path: path, blob: param, type }];
    }
  } else if (typeof param === "object") {
    // @ts-ignore
    let blob_refs = [];
    for (let key in param) {
      if (param.hasOwnProperty(key)) {
        let new_path = path.slice();
        // @ts-ignore
        new_path.push(key);

        // @ts-ignore
        blob_refs = blob_refs.concat(
          await walk_and_store_blobs(
            param[key],
            undefined,
            new_path,
            false,
            api_info
          )
        );
      }
    }
    return blob_refs;
  } else {
    return [];
  }
}

function image_to_data_uri(blob: Blob) {
  return new Promise((resolve, _) => {
    const reader = new FileReader();
    reader.onloadend = () => resolve(reader.result);
    reader.readAsDataURL(blob as any);
  });
}

// Queueing is always used in this fork; the upstream per-dependency check is
// kept below for reference.
function skip_queue(id: number, config: Config) {
  return false;
  // return (
  //   !(config?.dependencies?.[id]?.queue === null
  //     ? config.enable_queue
  //     : config?.dependencies?.[id]?.queue) || false
  // );
}
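// resolve_config() determines where the Gradio app's config comes from: an
// embedded `window.gradio_config` when running inside the app's own page,
// otherwise a fetch of `${endpoint}/config`. A usage sketch — the space URL
// is illustrative:
//
//   const config = await resolve_config("https://example-space.hf.space");
//   console.log(config.root, config.version);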
async function resolve_config(
  endpoint?: string,
  token?: `hf_${string}`
): Promise<Config> {
  const headers: { Authorization?: string } = {};
  if (token) {
    headers.Authorization = `Bearer ${token}`;
  }
  if (
    typeof window !== "undefined" &&
    window.gradio_config &&
    location.origin !== "http://localhost:9876"
  ) {
    const path = window.gradio_config.root;
    const config = window.gradio_config;
    config.root = endpoint + config.root;
    return { ...config, path: path };
  } else if (endpoint) {
    let response = await fetch(`${endpoint}/config`, { headers });

    if (response.status === 200) {
      const config = await response.json() as any;
      config.path = config.path ?? "";
      config.root = endpoint;
      return config;
    } else {
      throw new Error("Could not get config.");
    }
  }

  throw new Error("No config or app endpoint found");
}
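// check_space_status() polls the Hugging Face API for a space's runtime stage
// and reports progress through the callback until the space is running (or
// fails); sleeping and building stages are re-polled every second. A usage
// sketch — the space name is illustrative:
//
//   check_space_status("user/my-space", "space_name", (status) => {
//     console.log(status.status, status.message);
//   });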
Waking it up...", 1156 | detail: stage 1157 | }); 1158 | 1159 | setTimeout(() => { 1160 | check_space_status(id, type, status_callback); 1161 | }, 1000); 1162 | break; 1163 | // poll for status 1164 | case "RUNNING": 1165 | case "RUNNING_BUILDING": 1166 | status_callback({ 1167 | status: "running", 1168 | load_status: "complete", 1169 | message: "", 1170 | detail: stage 1171 | }); 1172 | // load_config(source); 1173 | // launch 1174 | break; 1175 | case "BUILDING": 1176 | status_callback({ 1177 | status: "building", 1178 | load_status: "pending", 1179 | message: "Space is building...", 1180 | detail: stage 1181 | }); 1182 | 1183 | setTimeout(() => { 1184 | check_space_status(id, type, status_callback); 1185 | }, 1000); 1186 | break; 1187 | default: 1188 | status_callback({ 1189 | status: "space_error", 1190 | load_status: "error", 1191 | message: "This space is experiencing an issue.", 1192 | detail: stage, 1193 | discussions_enabled: await discussions_enabled(space_name) 1194 | }); 1195 | break; 1196 | } 1197 | } 1198 | 1199 | function handle_message( 1200 | data: any, 1201 | last_status: Status["stage"] 1202 | ): { 1203 | type: "hash" | "data" | "update" | "complete" | "generating" | "none"; 1204 | data?: any; 1205 | status?: Status; 1206 | } { 1207 | const queue = true; 1208 | switch (data?.msg) { 1209 | case "send_data": 1210 | return { type: "data" }; 1211 | case "send_hash": 1212 | return { type: "hash" }; 1213 | case "queue_full": 1214 | return { 1215 | type: "update", 1216 | status: { 1217 | queue, 1218 | message: QUEUE_FULL_MSG, 1219 | stage: "error", 1220 | code: data.code, 1221 | success: data.success 1222 | } 1223 | }; 1224 | case "estimation": 1225 | return { 1226 | type: "update", 1227 | status: { 1228 | queue, 1229 | stage: last_status || "pending", 1230 | code: data.code, 1231 | size: data.queue_size, 1232 | position: data.rank, 1233 | eta: data.rank_eta, 1234 | success: data.success 1235 | } 1236 | }; 1237 | case "progress": 1238 | return { 1239 | type: "update", 1240 | status: { 1241 | queue, 1242 | stage: "pending", 1243 | code: data.code, 1244 | progress_data: data.progress_data, 1245 | success: data.success 1246 | } 1247 | }; 1248 | case "process_generating": 1249 | return { 1250 | type: "generating", 1251 | status: { 1252 | queue, 1253 | message: !data.success ? data.output.error : null, 1254 | stage: data.success ? "generating" : "error", 1255 | code: data.code, 1256 | progress_data: data.progress_data, 1257 | eta: data.average_duration 1258 | }, 1259 | data: data.success ? data.output : null 1260 | }; 1261 | case "process_completed": 1262 | if ("error" in data.output) { 1263 | return { 1264 | type: "update", 1265 | status: { 1266 | queue, 1267 | message: data.output.error as string, 1268 | stage: "error", 1269 | code: data.code, 1270 | success: data.success 1271 | } 1272 | }; 1273 | } else { 1274 | return { 1275 | type: "complete", 1276 | status: { 1277 | queue, 1278 | message: !data.success ? data.output.error : undefined, 1279 | stage: data.success ? "complete" : "error", 1280 | code: data.code, 1281 | progress_data: data.progress_data, 1282 | eta: data.output.average_duration 1283 | }, 1284 | data: data.success ? 
function handle_message(
  data: any,
  last_status: Status["stage"]
): {
  type: "hash" | "data" | "update" | "complete" | "generating" | "none";
  data?: any;
  status?: Status;
} {
  const queue = true;
  switch (data?.msg) {
    case "send_data":
      return { type: "data" };
    case "send_hash":
      return { type: "hash" };
    case "queue_full":
      return {
        type: "update",
        status: {
          queue,
          message: QUEUE_FULL_MSG,
          stage: "error",
          code: data.code,
          success: data.success
        }
      };
    case "estimation":
      return {
        type: "update",
        status: {
          queue,
          stage: last_status || "pending",
          code: data.code,
          size: data.queue_size,
          position: data.rank,
          eta: data.rank_eta,
          success: data.success
        }
      };
    case "progress":
      return {
        type: "update",
        status: {
          queue,
          stage: "pending",
          code: data.code,
          progress_data: data.progress_data,
          success: data.success
        }
      };
    case "process_generating":
      return {
        type: "generating",
        status: {
          queue,
          message: !data.success ? data.output.error : null,
          stage: data.success ? "generating" : "error",
          code: data.code,
          progress_data: data.progress_data,
          eta: data.average_duration
        },
        data: data.success ? data.output : null
      };
    case "process_completed":
      if ("error" in data.output) {
        return {
          type: "update",
          status: {
            queue,
            message: data.output.error as string,
            stage: "error",
            code: data.code,
            success: data.success
          }
        };
      } else {
        return {
          type: "complete",
          status: {
            queue,
            message: !data.success ? data.output.error : undefined,
            stage: data.success ? "complete" : "error",
            code: data.code,
            progress_data: data.progress_data,
            eta: data.output.average_duration
          },
          data: data.success ? data.output : null
        };
      }

    case "process_starts":
      return {
        type: "update",
        status: {
          queue,
          stage: "pending",
          code: data.code,
          size: data.rank,
          position: 0,
          success: data.success
        }
      };
  }

  return { type: "none", status: { stage: "error", queue } };
}
--------------------------------------------------------------------------------