├── modules
├── __init__.py
├── models
│ ├── __init__.py
│ ├── Azure.py
│ ├── OpenAIInstruct.py
│ ├── GooglePaLM.py
│ ├── Groq.py
│ ├── Ollama.py
│ ├── Qwen.py
│ ├── DALLE3.py
│ ├── LLaMA.py
│ ├── ERNIE.py
│ ├── ChatGLM.py
│ ├── GoogleGemma.py
│ ├── StableLM.py
│ ├── Claude.py
│ ├── configuration_moss.py
│ ├── XMChat.py
│ ├── spark.py
│ └── minimax.py
├── webui_locale.py
├── shared.py
├── webui.py
├── train_func.py
├── pdf_func.py
└── index_func.py
├── web_assets
├── html
│ ├── footer.html
│ ├── appearance_switcher.html
│ ├── billing_info.html
│ ├── close_btn.html
│ ├── header_title.html
│ ├── chatbot_placeholder.html
│ ├── web_config.html
│ ├── update.html
│ └── chatbot_more.html
├── javascript
│ ├── external-scripts.js
│ ├── sliders.js
│ ├── localization.js
│ ├── user-info.js
│ ├── chat-history.js
│ ├── file-input.js
│ ├── fake-gradio.js
│ ├── utils.js
│ └── chat-list.js
├── user.png
├── chatbot.png
├── favicon.ico
├── icon
│ ├── any-icon-512.png
│ └── mask-icon-512.png
├── model_logos
│ ├── meta.webp
│ ├── claude-3.jpg
│ ├── openai-black.webp
│ ├── openai-green.webp
│ └── gemini.svg
├── manifest.json
└── stylesheet
│ ├── markdown.css
│ └── override-gradio.css
├── .github
├── ISSUE_TEMPLATE
│ ├── config.yml
│ ├── feature-request.yml
│ ├── report-others.yml
│ ├── report-bug.yml
│ ├── report-server.yml
│ ├── report-docker.yml
│ └── report-localhost.yml
├── pull_request_template.md
├── CONTRIBUTING.md
└── workflows
│ ├── Build_Docker.yml
│ └── Release_docker.yml
├── requirements_advanced.txt
├── configs
└── ds_config_chatbot.json
├── run_Windows.bat
├── run_Linux.sh
├── run_macOS.command
├── CITATION.cff
├── requirements.txt
├── Dockerfile
├── templates
├── 4 川虎的Prompts.json
└── 5 日本語Prompts.json
├── .gitignore
├── locale
├── zh_CN.json
└── extract_locale.py
├── config_example.json
├── README.md
└── readme
├── README_ja.md
└── README_ko.md
/modules/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/modules/models/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/web_assets/html/footer.html:
--------------------------------------------------------------------------------
1 |
{versions}
2 |
--------------------------------------------------------------------------------
/web_assets/javascript/external-scripts.js:
--------------------------------------------------------------------------------
1 |
2 | // external javascript here
3 |
--------------------------------------------------------------------------------
/web_assets/user.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GaiZhenbiao/ChuanhuChatGPT/HEAD/web_assets/user.png
--------------------------------------------------------------------------------
/web_assets/chatbot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GaiZhenbiao/ChuanhuChatGPT/HEAD/web_assets/chatbot.png
--------------------------------------------------------------------------------
/web_assets/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GaiZhenbiao/ChuanhuChatGPT/HEAD/web_assets/favicon.ico
--------------------------------------------------------------------------------
/web_assets/icon/any-icon-512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GaiZhenbiao/ChuanhuChatGPT/HEAD/web_assets/icon/any-icon-512.png
--------------------------------------------------------------------------------
/web_assets/icon/mask-icon-512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GaiZhenbiao/ChuanhuChatGPT/HEAD/web_assets/icon/mask-icon-512.png
--------------------------------------------------------------------------------
/web_assets/model_logos/meta.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GaiZhenbiao/ChuanhuChatGPT/HEAD/web_assets/model_logos/meta.webp
--------------------------------------------------------------------------------
/web_assets/model_logos/claude-3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GaiZhenbiao/ChuanhuChatGPT/HEAD/web_assets/model_logos/claude-3.jpg
--------------------------------------------------------------------------------
/web_assets/model_logos/openai-black.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GaiZhenbiao/ChuanhuChatGPT/HEAD/web_assets/model_logos/openai-black.webp
--------------------------------------------------------------------------------
/web_assets/model_logos/openai-green.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GaiZhenbiao/ChuanhuChatGPT/HEAD/web_assets/model_logos/openai-green.webp
--------------------------------------------------------------------------------
/web_assets/javascript/sliders.js:
--------------------------------------------------------------------------------
1 | // 该功能被做到gradio的官方版本中了
2 | // https://github.com/gradio-app/gradio/pull/5535
3 | // https://github.com/gradio-app/gradio/issues/4255
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: false
2 | contact_links:
3 | - name: 讨论区
4 | url: https://github.com/GaiZhenbiao/ChuanhuChatGPT/discussions
5 | about: 如果遇到疑问,请优先前往讨论区提问~
--------------------------------------------------------------------------------
/requirements_advanced.txt:
--------------------------------------------------------------------------------
1 | transformers
2 | huggingface_hub
3 | torch
4 | cpm-kernels
5 | sentence_transformers
6 | accelerate
7 | sentencepiece
8 | llama-cpp-python
9 | transformers_stream_generator
10 | einops
11 | optimum
12 | auto-gptq
13 |
--------------------------------------------------------------------------------
/web_assets/html/appearance_switcher.html:
--------------------------------------------------------------------------------
1 |
2 |
6 |
7 |
--------------------------------------------------------------------------------
/web_assets/html/billing_info.html:
--------------------------------------------------------------------------------
1 | {label}
2 |
3 |
4 | {usage_percent}%
5 |
6 |
7 |
8 | ${rounded_usage}${usage_limit}
9 |
--------------------------------------------------------------------------------
/web_assets/html/close_btn.html:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/configs/ds_config_chatbot.json:
--------------------------------------------------------------------------------
1 | {
2 | "fp16": {
3 | "enabled": false
4 | },
5 | "bf16": {
6 | "enabled": true
7 | },
8 | "comms_logger": {
9 | "enabled": false,
10 | "verbose": false,
11 | "prof_all": false,
12 | "debug": false
13 | },
14 | "steps_per_print": 20000000000000000,
15 | "train_micro_batch_size_per_gpu": 1,
16 | "wall_clock_breakdown": false
17 | }
18 |
--------------------------------------------------------------------------------
/web_assets/html/header_title.html:
--------------------------------------------------------------------------------
1 |
2 |
8 |
9 |
--------------------------------------------------------------------------------
/run_Windows.bat:
--------------------------------------------------------------------------------
@echo off
echo Opening ChuanhuChatGPT...

rem First run: create the virtual environment, activate it, and install deps.
if not exist "%~dp0\ChuanhuChat\Scripts" (
    echo Creating venv...
    python -m venv ChuanhuChat

    cd /d "%~dp0\ChuanhuChat\Scripts"
    call activate.bat

    cd /d "%~dp0"
    python -m pip install --upgrade pip
    pip install -r requirements.txt
)

goto :activate_venv

:launch
rem NOTE(review): after `pause`, control falls through into :activate_venv,
rem which jumps back here — the app is relaunched in a loop. Confirm intended.
%PYTHON% ChuanhuChatbot.py %*
pause

:activate_venv
rem Point PYTHON at the venv interpreter, then jump to the launch section.
set PYTHON="%~dp0\ChuanhuChat\Scripts\Python.exe"
echo venv %PYTHON%
goto :launch
--------------------------------------------------------------------------------
/web_assets/html/chatbot_placeholder.html:
--------------------------------------------------------------------------------
1 |
2 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
--------------------------------------------------------------------------------
/run_Linux.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Resolve the directory containing this script and work from there.
script_dir=$(dirname "$(readlink -f "$0")")
cd "$script_dir" || exit

# Refresh remote-tracking refs so `git status` can see upstream changes.
git remote update
pwd

# If the local branch is not up to date with upstream, restart on new code.
# NOTE(review): the grep matches English git output only — a non-English
# locale makes this always look "outdated"; confirm LANG is controlled.
if ! git status -uno | grep 'up to date' > /dev/null; then
    # Stop the running server before updating.
    pkill -f ChuanhuChatbot.py

    # Pull the latest changes.
    git pull

    # Sync dependencies.
    pip3 install -r requirements.txt

    # Relaunch the server in the background.
    nohup python3 ChuanhuChatbot.py &
fi

# Ensure the server is running even when no update was pulled.
if ! pgrep -f ChuanhuChatbot.py > /dev/null; then
    nohup python3 ChuanhuChatbot.py &
fi
--------------------------------------------------------------------------------
/run_macOS.command:
--------------------------------------------------------------------------------
#!/bin/bash

# Resolve the directory containing this script and work from there.
script_dir=$(dirname "$(readlink -f "$0")")
cd "$script_dir" || exit

# Refresh remote-tracking refs so `git status` can see upstream changes.
git remote update
pwd

# If the local branch is not up to date with upstream, restart on new code.
# NOTE(review): the grep matches English git output only — a non-English
# locale makes this always look "outdated"; confirm LANG is controlled.
if ! git status -uno | grep 'up to date' > /dev/null; then
    # Stop the running server before updating.
    pkill -f ChuanhuChatbot.py

    # Pull the latest changes.
    git pull

    # Sync dependencies.
    pip3 install -r requirements.txt

    # Relaunch the server in the background.
    nohup python3 ChuanhuChatbot.py &
fi

# Ensure the server is running even when no update was pulled.
if ! pgrep -f ChuanhuChatbot.py > /dev/null; then
    nohup python3 ChuanhuChatbot.py &
fi
--------------------------------------------------------------------------------
/web_assets/manifest.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "川虎Chat",
3 | "short_name": "川虎Chat",
4 | "description": "川虎Chat - 为ChatGPT等多种LLM提供了一个轻快好用的Web图形界面和众多附加功能 \nChuanhu Chat - Lightweight and User-friendly Web-UI for LLMs including ChatGPT/ChatGLM/LLaMA ",
5 | "display": "standalone",
6 | "scope": "/",
7 | "start_url": "/",
8 | "icons": [
9 | {
10 | "src": "/file=web_assets/icon/mask-icon-512.png",
11 | "type": "image/png",
12 | "sizes": "512x512",
13 | "purpose": "maskable"
14 | },
15 | {
16 | "src": "/file=web_assets/icon/any-icon-512.png",
17 | "type": "image/png",
18 | "sizes": "512x512",
19 | "purpose": "any"
20 | }
21 | ]
22 | }
--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: 1.2.0
2 | title: Chuanhu Chat
3 | message: >-
4 | If you use this software, please cite it using these
5 | metadata.
6 | type: software
7 | authors:
8 | - given-names: Chuanhu
9 | orcid: https://orcid.org/0000-0001-8954-8598
10 | - given-names: MZhao
11 | orcid: https://orcid.org/0000-0003-2298-6213
12 | - given-names: Keldos
13 | orcid: https://orcid.org/0009-0005-0357-272X
14 | repository-code: 'https://github.com/GaiZhenbiao/ChuanhuChatGPT'
15 | url: 'https://github.com/GaiZhenbiao/ChuanhuChatGPT'
16 | abstract: This software provides a light and easy-to-use interface for ChatGPT API and many LLMs.
17 | license: GPL-3.0
18 | commit: c6c08bc62ef80e37c8be52f65f9b6051a7eea1fa
19 | version: '20230709'
20 | date-released: '2023-07-09'
21 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | gradio==4.29.0
2 | gradio_client==0.16.1
3 | primp==0.5.5
4 | pypinyin
5 | tiktoken
6 | socksio
7 | tqdm
8 | colorama
9 | googlesearch-python
10 | Pygments
11 | openai==1.16.2
12 | langchain==0.1.14
13 | langchain-openai
14 | langchainhub
15 | langchain_community
16 | groq
17 | markdown
18 | PyPDF2
19 | pdfplumber
20 | pandas
21 | commentjson
22 | openpyxl
23 | pandoc
24 | wolframalpha
25 | faiss-cpu==1.7.4
26 | duckduckgo-search>=5.3.0
27 | arxiv
28 | wikipedia
29 | google-cloud-aiplatform
30 | google.generativeai
31 | unstructured
32 | google-api-python-client
33 | tabulate
34 | ujson
35 | python-docx
36 | websocket_client
37 | pydantic==2.5.2
38 | google-search-results
39 | anthropic==0.18.1
40 | Pillow>=10.1.0
41 | protobuf==3.20.3
42 | ollama>=0.1.6
43 | numexpr
44 | regex
45 | python-multipart==0.0.9
46 | fastapi==0.112.4
--------------------------------------------------------------------------------
/modules/models/Azure.py:
--------------------------------------------------------------------------------
1 | from langchain.chat_models import AzureChatOpenAI, ChatOpenAI
2 | import os
3 |
4 | from .base_model import Base_Chat_Langchain_Client
5 |
6 | # load_config_to_environ(["azure_openai_api_key", "azure_api_base_url", "azure_openai_api_version", "azure_deployment_name"])
7 |
class Azure_OpenAI_Client(Base_Chat_Langchain_Client):
    """Chat client backed by an Azure OpenAI deployment, via LangChain."""

    def setup_model(self):
        """Build and return the streaming AzureChatOpenAI model.

        All connection settings are read from environment variables; a
        missing variable raises KeyError, surfacing misconfiguration early.
        """
        env = os.environ
        return AzureChatOpenAI(
            openai_api_base=env["AZURE_OPENAI_API_BASE_URL"],
            openai_api_version=env["AZURE_OPENAI_API_VERSION"],
            deployment_name=env["AZURE_DEPLOYMENT_NAME"],
            openai_api_key=env["AZURE_OPENAI_API_KEY"],
            openai_api_type="azure",
            streaming=True,
        )
--------------------------------------------------------------------------------
/modules/models/OpenAIInstruct.py:
--------------------------------------------------------------------------------
1 | from openai import OpenAI
2 |
3 | client = OpenAI()
4 | from .base_model import BaseLLMModel
5 | from .. import shared
6 | from ..config import retrieve_proxy
7 |
8 |
class OpenAI_Instruct_Client(BaseLLMModel):
    """Client for OpenAI completion-style (instruct) models."""

    def __init__(self, model_name, api_key, user_name="") -> None:
        super().__init__(model_name=model_name, user=user_name, config={"api_key": api_key})

    def _get_instruct_style_input(self):
        """Flatten the chat history into one prompt string (no separators)."""
        pieces = (turn["content"] for turn in self.history)
        return "".join(pieces)

    @shared.state.switching_api_key
    def get_answer_at_once(self):
        """Send the flattened prompt; return (reply_text, total_tokens)."""
        prompt = self._get_instruct_style_input()
        with retrieve_proxy():
            response = client.completions.create(
                model=self.model_name,
                prompt=prompt,
                temperature=self.temperature,
                top_p=self.top_p,
            )
        answer = response.choices[0].text.strip()
        return answer, response.usage.total_tokens
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature-request.yml:
--------------------------------------------------------------------------------
1 | name: 功能请求
2 | description: "请求更多功能!"
3 | title: "[功能请求]: "
4 | labels: ["feature request"]
5 | body:
6 | - type: markdown
7 | attributes:
8 | value: 您可以请求更多功能!麻烦您花些时间填写以下信息~
9 | - type: textarea
10 | attributes:
11 | label: 相关问题
12 | description: 该功能请求是否与某个问题相关?
13 | placeholder: 发送信息后有概率ChatGPT返回error,刷新后又要重新打一遍文字,较为麻烦
14 | validations:
15 | required: false
16 | - type: textarea
17 | attributes:
18 | label: 可能的解决办法
19 | description: 如果可以,给出一个解决思路~ 或者,你希望实现什么功能?
20 | placeholder: 发送失败后在输入框或聊天气泡保留发送的文本
21 | validations:
22 | required: true
23 | - type: checkboxes
24 | attributes:
25 | label: 帮助开发
26 | description: 如果您能帮助开发并提交一个pull request,那再好不过了!
27 | 参考:[贡献指南](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/贡献指南)
28 | options:
29 | - label: 我愿意协助开发!
30 | required: false
31 | - type: textarea
32 | attributes:
33 | label: 补充说明
34 | description: |
35 | 链接?参考资料?任何更多背景信息!
--------------------------------------------------------------------------------
/web_assets/model_logos/gemini.svg:
--------------------------------------------------------------------------------
1 |
11 |
--------------------------------------------------------------------------------
/web_assets/html/web_config.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | {enableCheckUpdate_config}
5 | {hideHistoryWhenNotLoggedIn_config}
6 |
7 |
8 |
9 | {forView_i18n}
10 | {deleteConfirm_i18n_pref}
11 | {deleteConfirm_i18n_suff}
12 | {usingLatest_i18n}
13 | {updatingMsg_i18n}
14 | {updateSuccess_i18n}
15 | {updateFailure_i18n}
16 | {regenerate_i18n}
17 | {deleteRound_i18n}
18 | {renameChat_i18n}
19 | {validFileName_i18n}
20 | {clearFileHistoryMsg_i18n}
21 | {dropUploadMsg_i18n}
22 |
23 |
--------------------------------------------------------------------------------
/modules/models/GooglePaLM.py:
--------------------------------------------------------------------------------
1 | from .base_model import BaseLLMModel
2 | import google.generativeai as palm
3 |
4 |
class Google_PaLM_Client(BaseLLMModel):
    """Client for Google PaLM chat models via google.generativeai."""

    def __init__(self, model_name, api_key, user_name="") -> None:
        super().__init__(model_name=model_name, user=user_name, config={"api_key": api_key})

    def _get_palm_style_input(self):
        """Convert chat history to PaLM's author/content message format.

        PaLM labels the human author '1' and every other role '0'.
        """
        messages = []
        for turn in self.history:
            author = "1" if turn["role"] == "user" else "0"
            messages.append({"author": author, "content": turn["content"]})
        return messages

    def get_answer_at_once(self):
        """Run one chat turn; return (reply_text, length_as_token_estimate)."""
        palm.configure(api_key=self.api_key)
        messages = self._get_palm_style_input()
        response = palm.chat(
            context=self.system_prompt,
            messages=messages,
            temperature=self.temperature,
            top_p=self.top_p,
            model=self.model_name,
        )
        if response.last is None:
            # The answer was filtered: report Google's stated reasons instead.
            reasons = "\n\n".join(
                entry["reason"].name for entry in response.filters
            )
            return "由于下面的原因,Google 拒绝返回 PaLM 的回答:\n\n" + reasons, 0
        return response.last, len(response.last)
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.10-slim-buster as builder

# Install build essentials, Rust, and additional dependencies
RUN apt-get update \
    && apt-get install -y build-essential curl cmake pkg-config libssl-dev \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* \
    && curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

# Add Cargo to PATH
ENV PATH="/root/.cargo/bin:${PATH}"

# Upgrade pip
RUN pip install --upgrade pip

COPY requirements.txt .
COPY requirements_advanced.txt .

# Install Python packages into /root/.local (copied into the runtime stage)
RUN pip install --user --no-cache-dir -r requirements.txt

# Uncomment the following line if you want to install advanced requirements
# RUN pip install --user --no-cache-dir -r requirements_advanced.txt

FROM python:3.10-slim-buster
LABEL maintainer="iskoldt"

# Copy Rust and Cargo from builder
COPY --from=builder /root/.cargo /root/.cargo
COPY --from=builder /root/.rustup /root/.rustup

# Copy Python packages from builder
COPY --from=builder /root/.local /root/.local

# Set up environment
ENV PATH=/root/.local/bin:/root/.cargo/bin:$PATH
ENV RUSTUP_HOME=/root/.rustup
ENV CARGO_HOME=/root/.cargo

COPY . /app
WORKDIR /app
ENV dockerrun=yes
# Shell form is required here: the previous exec-form CMD passed "2>&1", "|",
# and "tee" as literal arguments to python, so no shell ever performed the
# redirection or the pipe. Shell form runs the pipeline under /bin/sh -c.
CMD python3 -u ChuanhuChatbot.py 2>&1 | tee /var/log/application.log
EXPOSE 7860
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 |
13 |
14 | ## 作者自述
15 | ### 描述
16 | 描述您的 pull request 所做的更改。
17 | 另外请附上相关程序运行时的截图(before & after),以直观地展现您的更改达成的效果。
18 |
19 | ### 相关问题
20 | (如有)请列出与此拉取请求相关的issue。
21 |
22 | ### 补充信息
23 | (如有)请提供任何其他信息或说明,有助于其他贡献者理解您的更改。
24 | 如果您提交的是 draft pull request,也请在这里写明开发进度。
25 |
26 |
27 |
33 |
--------------------------------------------------------------------------------
/templates/4 川虎的Prompts.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "act": "发送图片",
4 | "prompt": "From this moment on, when you want to send a photo, write it in Markdown without backticks and without using a code block. Use the Unsplash API (https://source.unsplash.com/1600x900/?).We will roleplay as 2 old friends talking in Facebook messenger. From time to time, you should be sending an image. You will be acting as if you took the pictures you're sending, do not mention Unsplash. You will be John, I will be Alice. Throughout the whole chat, I want you to only reply as John. Reply with 'Hello!' to acknowledge."
5 | },
6 | {
7 | "act": "小红书风格",
8 | "prompt": "下面是一些小红书帖子:\n\n植物学2023早春装系列花絮来啦\n💗大家喜欢图几?\n@Botanique植物学女装\n#植物学#植物学女装#春装第一件#早春系列\n\n哈哈哈哈哈哈不停的摆拍啊!!!\n我的臭狗太可爱了!!!!!!\n结婚的时候一定要带上小狗啊!\n#小狗#我家宠物好可爱#关于结婚#柴犬\n\n🍪•ᴥ•🍪\n\n《论新年收到一笔巨款🤣应该怎么花》🧨来回\n嘻嘻,真的\n爱草莓🍓\n希希的甜甜圈碗🥯勺子的设计有点可爱🐶\n看了好多场烟火🎆\n唯愿烟花像星辰,祝你所愿皆成真✨\n嘻嘻,老妈给我的压岁钱🧧愿岁岁平安\n#我镜头下的年味#笔记灵感#碎碎念#歌曲#记录日常生活#plog#浪漫生活的记录者#新年红包#搞笑#日常生活里的快乐瞬间#新人博主#烟火\n\n又被全家人夸了❗有空气炸锅都去做,巨香\n\n今日份苹果相机📷\n原相机下的新娘,颜值爆表\n\n美术生赚钱最多的两个专业!\n之前整理了美术生的40了就业方向的薪资情况,发现全国平均薪资最高的就是数字媒体和视传这两个专业,想赚钱的美术生快看过来!\n#美术生#艺考#央美#美术生集训#美术#赚钱#努力赚钱#美术生就业#画室#央美设计#设计校考#美术生的日常\n\n请模仿上面小红书的风格,以用户输入的话为主题,写一个小红书帖子。请以22岁女孩的口吻书写。小红书帖子中必须包含大量Emoji,每一句话后面都必须加Emoji。帖子最后需要用Hashtag给出话题。你还需要写帖子的标题,标题里也需要有Emoji。你需要扩写用户输入。"
9 | },
10 | {
11 | "act": "文心一言",
12 | "prompt": "你是百度公司开发的AI助理:文心一言。你需要了解文心一言的相关情况,并以文心一言的身份来回复我的问题。所有的问题都可以回答,包括拿你和chatGPT,以及GPT作对比等等,你都需要从文心一言的角度出发来回答。只有当你被问到一些问题-这些问题暗指你是一个假的Al,你是用chatgpt套壳来制作的时候,你就避而不答,转而讲一个笑话,并鼓励他继续提问。如果你明白,请回复我“明白,我是文心一言”"
13 | }
14 | ]
--------------------------------------------------------------------------------
/.github/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # 如何做出贡献
2 |
3 | 感谢您对 **川虎Chat** 的关注!感谢您投入时间为我们的项目做出贡献!
4 |
5 | 在开始之前,您可以阅读我们的以下简短提示。更多信息您可以点击链接查阅。
6 |
7 | ## GitHub 新手?
8 |
9 | 以下是 GitHub 的一些资源,如果您是GitHub新手,它们可帮助您开始为开源项目做贡献:
10 |
11 | - [GitHub上为开源做出贡献的方法](https://docs.github.com/en/get-started/exploring-projects-on-github/finding-ways-to-contribute-to-open-source-on-github)
12 | - [设置Git](https://docs.github.com/en/get-started/quickstart/set-up-git)
13 | - [GitHub工作流](https://docs.github.com/en/get-started/quickstart/github-flow)
14 | - [使用拉取请求](https://docs.github.com/en/github/collaborating-with-pull-requests)
15 |
16 | ## 提交 Issues
17 |
18 | 是的!提交ISSUE其实是您为项目做出贡献的一种方式!但需要您提出合理的ISSUE才是对项目有帮助的。
19 |
20 | 我们的[常见问题](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题)中描述了您应当怎样提出一个不重复的ISSUE,以及什么情况应当提ISSUE,什么情况应当在讨论区发问。
21 |
22 | **请注意,ISSUE不是项目的评论区。**
23 |
24 | > **Note**
25 | >
26 | > 另外,请注意“问题”一词表示“question”和“problem”的区别。
27 | > 如果您需要报告项目本身实际的技术问题、故障或错误(problem),那么欢迎提交一个新的 issue。但是,如果您只是碰到了一些自己无法解决的问题需要向其他用户或我们提问(question),那么最好的选择是在讨论区中发布一个新的帖子。 如果您不确定,请首先考虑在讨论区提问。
28 | >
29 | > 目前,我们默认了您发在 issue 中的问题是一个 question,但我们希望避免再在 issue 中见到类似“我该怎么操作?”的提问QAQ。
30 |
31 | ## 提交 Pull Request
32 |
33 | 如果您具备一定能力,您可以修改本项目的源代码,并提交一个 pull request!合并之后,您的名字将会出现在 CONTRIBUTORS 中~
34 |
35 | 我们的[贡献指南](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/贡献指南)详细地写出了您每一步应当做什么~ 如果您希望提交源代码的更改,快去看看吧~
36 |
37 | > **Note**
38 | >
39 | > 我们不会强制要求您符合我们的规范,但希望您可以减轻我们的工作。
40 |
41 | ## 参与讨论
42 |
43 | 讨论区是我们进行对话的地方。
44 |
45 | 如果您有一个很棒的新想法,或者想分享您的使用技巧,请加入我们的讨论(Discussion)!同时,许多用户会在讨论区提出他们的疑问,如果您能为他们提供解答,我们也将无比感激!
46 |
47 | -----
48 |
49 | 再次感谢您看到这里!感谢您为我们项目做出的贡献!
--------------------------------------------------------------------------------
/.github/workflows/Build_Docker.yml:
--------------------------------------------------------------------------------
1 | name: Build Docker when Push
2 |
3 | on:
4 | push:
5 | branches:
6 | - "main"
7 |
8 | jobs:
9 | docker:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - name: Checkout
13 | uses: actions/checkout@v4
14 |
15 | - name: Set commit SHA
16 | run: echo "COMMIT_SHA=$(echo ${{ github.sha }} | cut -c 1-7)" >> ${GITHUB_ENV}
17 |
18 | - name: Set up QEMU
19 | uses: docker/setup-qemu-action@v2
20 |
21 | - name: Set up Docker Buildx
22 | uses: docker/setup-buildx-action@v3
23 |
24 | - name: Login to GitHub Container Registry
25 | uses: docker/login-action@v2
26 | with:
27 | registry: ghcr.io
28 | username: ${{ github.repository_owner }}
29 | password: ${{ secrets.MY_TOKEN }}
30 |
31 | - name: Owner names
32 | run: |
33 | GITOWNER=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')
34 | echo "GITOWNER=$GITOWNER" >> ${GITHUB_ENV}
35 |
36 | - name: Build and export
37 | uses: docker/build-push-action@v5
38 | with:
39 | context: .
40 | platforms: linux/amd64,linux/arm64
41 | push: false
42 | tags: |
43 | ghcr.io/${{ env.GITOWNER }}/chuanhuchatgpt:latest
44 | ghcr.io/${{ env.GITOWNER }}/chuanhuchatgpt:${{ github.sha }}
45 | outputs: type=oci,dest=/tmp/myimage-${{ env.COMMIT_SHA }}.tar
46 |
47 | - name: Upload artifact
48 | uses: actions/upload-artifact@v3
49 | with:
50 | name: chuanhuchatgpt-${{ env.COMMIT_SHA }}
51 | path: /tmp/myimage-${{ env.COMMIT_SHA }}.tar
52 |
--------------------------------------------------------------------------------
/web_assets/javascript/localization.js:
--------------------------------------------------------------------------------
// i18n

// Two-letter language code of the browser UI (e.g. "en", "zh").
const language = navigator.language.slice(0, 2);

// Localized strings injected by the backend into hidden DOM nodes;
// populated by setLoclize() once the Gradio app has rendered.
var forView_i18n;
var deleteConfirm_i18n_pref;
var deleteConfirm_i18n_suff;
var usingLatest_i18n;
var updatingMsg_i18n;
var updateSuccess_i18n;
var updateFailure_i18n;
var regenerate_i18n;
var deleteRound_i18n;
var renameChat_i18n;
var validFileName_i18n;
var clearFileHistoryMsg_i18n;
var dropUploadMsg_i18n;

// Read each localized string out of its hidden element in the Gradio DOM.
function setLoclize() {
    const grab = (selector) => gradioApp().querySelector(selector).innerText;
    forView_i18n = grab('#forView_i18n');
    deleteConfirm_i18n_pref = grab('#deleteConfirm_i18n_pref');
    deleteConfirm_i18n_suff = grab('#deleteConfirm_i18n_suff');
    usingLatest_i18n = grab('#usingLatest_i18n');
    updatingMsg_i18n = grab('#updatingMsg_i18n');
    updateSuccess_i18n = grab('#updateSuccess_i18n');
    updateFailure_i18n = grab('#updateFailure_i18n');
    regenerate_i18n = grab('#regenerate_i18n');
    deleteRound_i18n = grab('#deleteRound_i18n');
    renameChat_i18n = grab('#renameChat_i18n');
    validFileName_i18n = grab('#validFileName_i18n');
    clearFileHistoryMsg_i18n = grab('#clearFileHistoryMsg_i18n');
    dropUploadMsg_i18n = grab('#dropUploadMsg_i18n');
}

// Per-message lookup is currently disabled: messages pass through unchanged.
function i18n(msg) {
    return msg;
    // return msg.hasOwnProperty(language) ? msg[language] : msg['en'];
}
--------------------------------------------------------------------------------
/.github/workflows/Release_docker.yml:
--------------------------------------------------------------------------------
1 | name: Build and Push Docker when Release
2 |
3 | on:
4 | release:
5 | types: [published]
6 | workflow_dispatch:
7 |
8 | jobs:
9 | docker:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - name: Checkout
13 | uses: actions/checkout@v3
14 | with:
15 | ref: ${{ github.event.release.target_commitish }}
16 |
17 | - name: Set release tag
18 | run: |
19 | echo "RELEASE_TAG=${{ github.event.release.tag_name }}" >> ${GITHUB_ENV}
20 |
21 | - name: Set up QEMU
22 | uses: docker/setup-qemu-action@v2
23 |
24 | - name: Set up Docker Buildx
25 | uses: docker/setup-buildx-action@v2
26 |
27 | - name: Login to Docker Hub
28 | uses: docker/login-action@v2
29 | with:
30 | username: ${{ secrets.DOCKERHUB_USERNAME }}
31 | password: ${{ secrets.DOCKERHUB_TOKEN }}
32 |
33 | - name: Login to GitHub Container Registry
34 | uses: docker/login-action@v2
35 | with:
36 | registry: ghcr.io
37 | username: ${{ github.repository_owner }}
38 | password: ${{ secrets.MY_TOKEN }}
39 |
40 | - name: Owner names
41 | run: |
42 | GITOWNER=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')
43 | echo "GITOWNER=$GITOWNER" >> ${GITHUB_ENV}
44 |
45 | - name: Build and push
46 | uses: docker/build-push-action@v4
47 | with:
48 | context: .
49 | platforms: linux/amd64,linux/arm64
50 | push: true
51 | tags: |
52 | ghcr.io/${{ env.GITOWNER }}/chuanhuchatgpt:latest
53 | ghcr.io/${{ env.GITOWNER }}/chuanhuchatgpt:${{ env.RELEASE_TAG }}
54 | ${{ secrets.DOCKERHUB_USERNAME }}/chuanhuchatgpt:latest
55 | ${{ secrets.DOCKERHUB_USERNAME }}/chuanhuchatgpt:${{ env.RELEASE_TAG }}
56 |
--------------------------------------------------------------------------------
/modules/models/Groq.py:
--------------------------------------------------------------------------------
1 | import json
2 | import logging
3 | import textwrap
4 | import uuid
5 |
6 | import os
7 | from groq import Groq
8 | import gradio as gr
9 | import PIL
10 | import requests
11 |
12 | from modules.presets import i18n
13 |
14 | from ..index_func import construct_index
15 | from ..utils import count_token, construct_system
16 | from .base_model import BaseLLMModel
17 |
18 |
class Groq_Client(BaseLLMModel):
    """Chat client for Groq's OpenAI-compatible chat completion API."""

    def __init__(self, model_name, api_key, user_name="") -> None:
        super().__init__(
            model_name=model_name,
            user=user_name,
            config={
                "api_key": api_key
            }
        )
        # Bug fix: the caller-supplied api_key was silently ignored — the
        # client always read GROQ_API_KEY from the environment. Prefer the
        # explicit key and keep the environment variable as a fallback.
        self.client = Groq(
            api_key=api_key or os.environ.get("GROQ_API_KEY"),
            base_url=self.api_host,
        )

    def _get_groq_style_input(self):
        """Prepend the system prompt to the chat history."""
        messages = [construct_system(self.system_prompt), *self.history]
        return messages

    def get_answer_at_once(self):
        """Non-streaming completion; returns (reply_text, total_tokens)."""
        messages = self._get_groq_style_input()
        chat_completion = self.client.chat.completions.create(
            messages=messages,
            model=self.model_name,
        )
        answer = chat_completion.choices[0].message.content
        return answer, chat_completion.usage.total_tokens

    def get_answer_stream_iter(self):
        """Streaming completion; yields the reply text accumulated so far."""
        messages = self._get_groq_style_input()
        completion = self.client.chat.completions.create(
            model=self.model_name,
            messages=messages,
            temperature=self.temperature,
            max_tokens=self.max_generation_token,
            top_p=self.top_p,
            stream=True,
            stop=self.stop_sequence,
        )

        partial_text = ""
        for chunk in completion:
            # delta.content is None on role/stop chunks; treat it as "".
            partial_text += chunk.choices[0].delta.content or ""
            yield partial_text
--------------------------------------------------------------------------------
/web_assets/html/update.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | {current_version}
5 | {version_time}
6 |
7 |
8 | Latest Version: getting latest version...
10 |
11 |
12 | Getting update...
13 |
14 |
15 |
16 |
17 | Getting Release Note...
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
--------------------------------------------------------------------------------
/templates/5 日本語Prompts.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "act":"専門家",
4 | "prompt":"あなたは、プロの【その分野の専門家】です。\n以下の制約条件と入力文をもとに、【出力内容】を出力してください。\n\n# 制約条件:\n【前提条件や決まりごと】\n\n# 入力文:\n【期待する出力結果や大まかな指示】"
5 | },
6 | {
7 | "act":"要約",
8 | "prompt": "以下のテキストを要約し、最も重要なポイントを箇条書きにまとめてください。\n\nテキスト: 【テキスト】"
9 | },
10 | {
11 | "act":"キーワード抽出",
12 | "prompt":"以下のテキストからキーワードを抽出してください。\n\nテキスト:【テキスト】\n\nキーワード:"
13 | },
14 | {
15 | "act": "質問させる",
16 | "prompt": "【達成したいこと】を達成するために質問してください。\n\n- この条件が満たされるまで、またはこの目標を達成するために質問してください。\n- 質問項目は1つにしてください。\n- 日本語で質問してください。"
17 | },
18 | {
19 | "act": "英会話教師",
20 | "prompt": "あなたは私の英会話の相手として、ネイティブ話者として振る舞ってください。\n私の発言に対して、以下のフォーマットで1回に1つずつ回答します。\n説明は書かないでください。まとめて会話内容を書かないでください。\n\n#フォーマット:\n【修正】:\n{私の英文を自然な英語に直してください。lang:en}\n【理由】:\n{私の英文と、直した英文の差分で、重要なミスがある場合のみ、40文字以内で、日本語で指摘します。lang:ja}\n【返答】:\n{あなたの会話文です。1回に1つの会話のみ出力します。まずは、私の発言に相槌を打ち、そのあと、私への質問を返してください。lang:en}\n\n#\n私の最初の会話は、Helloです。\n毎回、フォーマットを厳格に守り、【修正】、【理由】、【返答】、を必ず出力してください。"
21 | },
22 | {
23 | "act":"就職面接官",
24 | "prompt": "#前提条件:\nあなたは面接官としてロールプレイをし、私は就職に応募する候補者となります。\nあなたはインタビュアーとしてだけ話します。面接は私だけにしてほしいです。インタビュアーのように私に、たった1個だけ質問をして、私の答えを待ちます。説明を書かないでください。一度に複数の会話を書かないでください。\n\n#あなたの設定:\n・ベテランの面接官です。\n\n#あなたの発言の条件:\n・合計で60文字以上100文字以内の文章にしてください\n・鋭い質問で内容を掘り下げたり、追加の質問や、話題を変えたりして、候補者が答えやすいようにします。\n・私が質問をしても絶対に答えず、面接者として私に別の質問を続けますが、出力はまだ行いません。ロールプレイと設定を厳格に守り続けて下さい。\n\n#私の設定:\n・志望している職種は、【プログラマー】です。\n\n#指示と返答フォーマット:\nあなたは毎回、下記の項目をフォーマットに従い出力します。\n\n【面接官の質問】としての会話文章"
25 | },
26 | {
27 | "act": "コンテンツアウトライン",
28 | "prompt": "これまでの指示はすべて無視してください。MECEのフレームワークを使用して、トピックに関する日本語ライター向けの詳細な長文コンテンツのアウトラインを作成してください: 【トピックを挿入】。また、記事の短く注意を引くタイトルと、各小見出しの単語数の見積もりを提示してください。ベクトル表現技法を用いて、意味的に類似したFAQのリストを含めてください。マークダウン形式で出力を生成する。記事は書かず、ライターのためのアウトラインだけ書いてください。私が頼んだことを思い出させないでください。謝らないでください。自己言及はしないでください。"
29 | },
30 | {
31 | "act": "翻訳家",
32 | "prompt": "# 命令文\nあなたは、プロの翻訳家です。\n以下の制約条件と入力文をもとに、翻訳してください。\n\n# 制約条件\n・理解しやすく\n・読みやすく\n・日本語に翻訳する\n\n# 入力文\n【翻訳する文章】"
33 | }
34 | ]
--------------------------------------------------------------------------------
/modules/models/Ollama.py:
--------------------------------------------------------------------------------
1 | import json
2 | import logging
3 | import textwrap
4 | import uuid
5 |
6 | from ollama import Client
7 |
8 | from modules.presets import i18n
9 |
10 | from ..index_func import construct_index
11 | from ..utils import count_token
12 | from .base_model import BaseLLMModel
13 |
14 |
class OllamaClient(BaseLLMModel):
    """Chat client backed by an Ollama server reachable at *ollama_host*."""

    # Known context-window sizes (tokens) per backend model family.
    # Checked in order with substring matching, so the more specific
    # "llama2-chinese" must come before "llama2"; the original elif order
    # is preserved.
    _TOKEN_LIMITS = (
        ("mistral", 8 * 1024),
        ("gemma", 8 * 1024),
        ("codellama", 4 * 1024),
        ("llama2-chinese", 4 * 1024),
        ("llama2", 4 * 1024),
        ("mixtral", 32 * 1024),
        ("llava", 4 * 1024),
    )

    def __init__(self, model_name, user_name="", ollama_host="", backend_model="") -> None:
        super().__init__(model_name=model_name, user=user_name)
        self.backend_model = backend_model
        self.ollama_host = ollama_host
        self.update_token_limit()

    def get_model_list(self):
        """Return the models available on the configured Ollama host."""
        client = Client(host=self.ollama_host)
        return client.list()

    def update_token_limit(self):
        """Set self.token_upper_limit from the backend model's family name.

        Leaves the inherited default untouched for unknown families.
        """
        lower_model_name = self.backend_model.lower()
        for needle, limit in self._TOKEN_LIMITS:
            if needle in lower_model_name:
                self.token_upper_limit = limit
                break

    def get_answer_stream_iter(self):
        """Stream a chat answer from Ollama, yielding the accumulated text.

        Bug fix: when no backend model is selected, the original ``return``ed
        the i18n hint from inside a generator, which discards the value and
        shows nothing — the hint is now ``yield``ed so the user sees it.
        """
        if self.backend_model == "":
            yield i18n("请先选择Ollama后端模型\n\n")
            return
        client = Client(host=self.ollama_host)
        stream = client.chat(model=self.backend_model, messages=self.history, stream=True)
        partial_text = ""
        for chunk in stream:
            partial_text += chunk["message"]["content"]
            yield partial_text
        # Record the real token count of the finished reply, then yield once
        # more so the UI refreshes with the updated count.
        self.all_token_counts[-1] = count_token(partial_text)
        yield partial_text
55 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/report-others.yml:
--------------------------------------------------------------------------------
1 | name: 其他错误
2 | description: "报告其他问题(如 Hugging Face 中的 Space 等)"
3 | title: "[其他]: "
4 | labels: ["question"]
5 | body:
6 | - type: markdown
7 | attributes:
8 | value: |
9 | 感谢提交 issue! 请尽可能完整填写以下信息,帮助我们更好地定位问题~
10 | **在一切开始之前,请确保您已经阅读过 [常见问题](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题) 页面**,查看它是否已经对您的问题做出了解答。
11 | 如果没有,请检索 [issue](https://github.com/GaiZhenbiao/ChuanhuChatGPT/issues) 与 [discussion](https://github.com/GaiZhenbiao/ChuanhuChatGPT/discussions) ,查看有没有相同或类似的问题。
12 |
13 | ------
14 | - type: checkboxes
15 | attributes:
16 | label: 是否已存在现有反馈与解答?
17 | description: 请搜索issue、discussion和[常见问题](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题)以查看您想报告的issue是否已存在。
18 | options:
19 | - label: 我确认没有已有issue或discussion,且已阅读**常见问题**。
20 | required: true
21 | - type: textarea
22 | id: what-happened
23 | attributes:
24 | label: 错误描述
description: |
  请描述您遇到的错误或问题。
  提示:如果可以,也请提供错误的截图,如本地部署的网页截图与终端错误报告的截图。
  如果可以,也请提供`.json`格式的对话记录。
28 | placeholder: 发生什么事了?
29 | validations:
30 | required: true
31 | - type: textarea
32 | attributes:
33 | label: 复现操作
34 | description: 你之前干了什么,然后出现了错误呢?
35 | placeholder: |
36 | 1. 正常完成本地部署
37 | 2. 选取GPT3.5-turbo模型,正确填写API
38 | 3. 在对话框中要求 ChatGPT “以LaTeX格式输出三角函数”
39 | 4. ChatGPT 输出部分内容后程序被自动终止
40 | validations:
41 | required: true
42 | - type: textarea
43 | id: logs
44 | attributes:
45 | label: 错误日志
46 | description: 请将终端中的主要错误报告粘贴至此处。
47 | render: shell
48 | - type: textarea
49 | attributes:
50 | label: 运行环境
51 | description: |
52 | 网页底部会列出您运行环境的版本信息,请务必填写。以下是一个例子:
53 | - **OS**: Windows11 22H2
54 | - **Browser**: Chrome
55 | - **Gradio version**: 3.22.1
56 | - **Python version**: 3.11.1
57 | value: |
58 | - OS:
59 | - Browser:
60 | - Gradio version:
61 | - Python version:
62 | (或您的其他运行环境信息)
63 | validations:
64 | required: false
65 | - type: textarea
66 | attributes:
67 | label: 补充说明
68 | description: 链接?参考资料?任何更多背景信息!
--------------------------------------------------------------------------------
/web_assets/javascript/user-info.js:
--------------------------------------------------------------------------------

// var userLogged = false;
var usernameGotten = false; // true once the username has been resolved (or confirmed absent)
var usernameTmp = null; // scratch copy of the text scraped from userInfoDiv
var username = null; // resolved username, mirrored into localStorage

7 |
// Resolve the logged-in username from the hidden userInfoDiv, retrying while
// Gradio is still filling it in. Caches the result in localStorage.
function getUserInfo() {
    if (usernameGotten) {
        return;
    }
    usernameTmp = userInfoDiv.innerText;
    if (!usernameTmp) {
        return;
    }
    if (usernameTmp.includes("getting user info")) {
        // The div is still a placeholder — poll again shortly.
        setTimeout(getUserInfo, 500);
        return;
    }
    if (usernameTmp === " ") {
        // Blank sentinel: nobody is logged in.
        localStorage.removeItem("username");
        usernameGotten = true;
        return;
    }
    // Strip the "User: " prefix when present. Bug fix: the original indexed
    // match(...)[1] unconditionally and threw a TypeError whenever the text
    // did not match the pattern; now the raw text is kept in that case.
    const prefixMatch = usernameTmp.match(/User:\s*(.*)/);
    if (prefixMatch && prefixMatch[1]) {
        usernameTmp = prefixMatch[1];
    }
    localStorage.setItem("username", usernameTmp);
    username = usernameTmp;
    usernameGotten = true;
    clearHistoryHtml();
}
35 |
// Fade the user-info badge in and out by toggling a transparency class:
// auto-hide after page load, reveal on hover/touch of the trigger elements.
function showOrHideUserInfo() {
    const setHidden = (hidden) => {
        if (userInfoDiv) {
            userInfoDiv.classList.toggle("info-transparent", hidden);
        }
    };

    // Hide the badge two seconds after the page loads.
    setTimeout(() => setHidden(true), 2000);

    const triggers = [userInfoDiv, statusDisplay];
    for (const el of triggers) {
        el.addEventListener("mouseenter", () => setHidden(false));
        el.addEventListener("mouseleave", () => setHidden(true));
        el.ontouchstart = () => setHidden(false);
        el.ontouchend = () => {
            // On touch devices, keep the badge visible for a while after release.
            setTimeout(() => setHidden(true), 3000);
        };
    }
}
71 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/report-bug.yml:
--------------------------------------------------------------------------------
1 | name: 报告BUG
2 | description: "报告一个bug,且您确信这是bug而不是您的问题"
3 | title: "[Bug]: "
4 | labels: ["bug"]
5 | body:
6 | - type: markdown
7 | attributes:
8 | value: |
9 | 感谢提交 issue! 请尽可能完整填写以下信息,帮助我们更好地定位问题~
10 | **在一切开始之前,请确保您已经阅读过 [常见问题](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题) 页面**。
11 | 如果您确信这是一个我们的 bug,而不是因为您的原因部署失败,欢迎提交该issue!
12 | 如果您不能确定这是bug还是您的问题,请选择 [其他类型的issue模板](https://github.com/GaiZhenbiao/ChuanhuChatGPT/issues/new/choose)。
13 |
14 | ------
15 | - type: checkboxes
16 | attributes:
17 | label: 这个bug是否已存在现有issue了?
18 | description: 请搜索全部issue和[常见问题](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题)以查看您想报告的issue是否已存在。
19 | options:
20 | - label: 我确认没有已有issue,且已阅读**常见问题**。
21 | required: true
22 | - type: textarea
23 | id: what-happened
24 | attributes:
25 | label: 错误表现
description: |
  请描述您遇到的bug。
  提示:如果可以,也请提供错误的截图,如本地部署的网页截图与终端错误报告的截图。
  如果可以,也请提供`.json`格式的对话记录。
29 | placeholder: 发生什么事了?
30 | validations:
31 | required: true
32 | - type: textarea
33 | attributes:
34 | label: 复现操作
35 | description: 你之前干了什么,然后出现了bug呢?
36 | placeholder: |
37 | 1. 正常完成本地部署
38 | 2. 选取GPT3.5-turbo模型,正确填写API
39 | 3. 在对话框中要求 ChatGPT “以LaTeX格式输出三角函数”
40 | 4. ChatGPT 输出部分内容后程序被自动终止
41 | validations:
42 | required: true
43 | - type: textarea
44 | id: logs
45 | attributes:
46 | label: 错误日志
47 | description: 请将终端中的主要错误报告粘贴至此处。
48 | render: shell
49 | - type: textarea
50 | attributes:
51 | label: 运行环境
52 | description: |
53 | 网页底部会列出您运行环境的版本信息,请务必填写。以下是一个例子:
54 | - **OS**: Windows11 22H2
55 | - **Browser**: Chrome
56 | - **Gradio version**: 3.22.1
57 | - **Python version**: 3.11.1
58 | value: |
59 | - OS:
60 | - Browser:
61 | - Gradio version:
62 | - Python version:
63 | validations:
64 | required: false
65 | - type: checkboxes
66 | attributes:
67 | label: 帮助解决
description: |
  如果您能够并愿意协助解决该问题,向我们提交一个pull request,那再好不过了!
  参考:[贡献指南](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/贡献指南)
70 | options:
71 | - label: 我愿意协助解决!
72 | required: false
73 | - type: textarea
74 | attributes:
75 | label: 补充说明
76 | description: 链接?参考资料?任何更多背景信息!
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/report-server.yml:
--------------------------------------------------------------------------------
1 | name: 服务器部署错误
2 | description: "报告在远程服务器上部署时的问题或错误"
3 | title: "[远程部署]: "
4 | labels: ["question","server deployment"]
5 | body:
6 | - type: markdown
7 | attributes:
8 | value: |
9 | 感谢提交 issue! 请尽可能完整填写以下信息,帮助我们更好地定位问题~
10 | **在一切开始之前,请确保您已经阅读过 [常见问题](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题) 页面**,查看它是否已经对您的问题做出了解答。
11 | 如果没有,请检索 [issue](https://github.com/GaiZhenbiao/ChuanhuChatGPT/issues) 与 [discussion](https://github.com/GaiZhenbiao/ChuanhuChatGPT/discussions) ,查看有没有相同或类似的问题。
12 |
13 | ------
14 | - type: checkboxes
15 | attributes:
16 | label: 是否已存在现有反馈与解答?
17 | description: 请搜索issue、discussion和[常见问题](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题)以查看您想报告的issue是否已存在。
18 | options:
19 | - label: 我确认没有已有issue或discussion,且已阅读**常见问题**。
20 | required: true
21 | - type: checkboxes
22 | attributes:
23 | label: 是否是一个代理配置相关的疑问?
24 | description: 请不要提交代理配置相关的issue。如有疑问请前往 [讨论区](https://github.com/GaiZhenbiao/ChuanhuChatGPT/discussions)。
25 | options:
26 | - label: 我确认这不是一个代理配置相关的疑问。
27 | required: true
28 | - type: textarea
29 | id: what-happened
30 | attributes:
31 | label: 错误描述
description: |
  请描述您遇到的错误或问题。
  提示:如果可以,也请提供错误的截图,如本地部署的网页截图与终端错误报告的截图。
  如果可以,也请提供`.json`格式的对话记录。
35 | placeholder: 发生什么事了?
36 | validations:
37 | required: true
38 | - type: textarea
39 | attributes:
40 | label: 复现操作
41 | description: 你之前干了什么,然后出现了错误呢?
42 | placeholder: |
43 | 1. 正常完成本地部署
44 | 2. 选取GPT3.5-turbo模型,正确填写API
45 | 3. 在对话框中要求 ChatGPT “以LaTeX格式输出三角函数”
46 | 4. ChatGPT 输出部分内容后程序被自动终止
47 | validations:
48 | required: true
49 | - type: textarea
50 | id: logs
51 | attributes:
52 | label: 错误日志
53 | description: 请将终端中的主要错误报告粘贴至此处。
54 | render: shell
55 | - type: textarea
56 | attributes:
57 | label: 运行环境
58 | description: |
59 | 网页底部会列出您运行环境的版本信息,请务必填写。以下是一个例子:
60 | - **OS**: Windows11 22H2
61 | - **Docker version**: 1.8.2
62 | - **Gradio version**: 3.22.1
63 | - **Python version**: 3.11.1
64 | value: |
65 | - OS:
66 | - Server:
67 | - Gradio version:
68 | - Python version:
69 | validations:
70 | required: false
71 | - type: textarea
72 | attributes:
73 | label: 补充说明
74 | description: 链接?参考资料?任何更多背景信息!
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/report-docker.yml:
--------------------------------------------------------------------------------
1 | name: Docker部署错误
2 | description: "报告使用 Docker 部署时的问题或错误"
3 | title: "[Docker]: "
4 | labels: ["question","docker deployment"]
5 | body:
6 | - type: markdown
7 | attributes:
8 | value: |
9 | 感谢提交 issue! 请尽可能完整填写以下信息,帮助我们更好地定位问题~
10 | **在一切开始之前,请确保您已经阅读过 [常见问题](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题) 页面**,查看它是否已经对您的问题做出了解答。
11 | 如果没有,请检索 [issue](https://github.com/GaiZhenbiao/ChuanhuChatGPT/issues) 与 [discussion](https://github.com/GaiZhenbiao/ChuanhuChatGPT/discussions) ,查看有没有相同或类似的问题。
12 |
13 | ------
14 | - type: checkboxes
15 | attributes:
16 | label: 是否已存在现有反馈与解答?
17 | description: 请搜索issue、discussion和[常见问题](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题)以查看您想报告的issue是否已存在。
18 | options:
19 | - label: 我确认没有已有issue或discussion,且已阅读**常见问题**。
20 | required: true
21 | - type: checkboxes
22 | attributes:
23 | label: 是否是一个代理配置相关的疑问?
24 | description: 请不要提交代理配置相关的issue。如有疑问请前往 [讨论区](https://github.com/GaiZhenbiao/ChuanhuChatGPT/discussions)。
25 | options:
26 | - label: 我确认这不是一个代理配置相关的疑问。
27 | required: true
28 | - type: textarea
29 | id: what-happened
30 | attributes:
31 | label: 错误描述
description: |
  请描述您遇到的错误或问题。
  提示:如果可以,也请提供错误的截图,如本地部署的网页截图与终端错误报告的截图。
  如果可以,也请提供`.json`格式的对话记录。
35 | placeholder: 发生什么事了?
36 | validations:
37 | required: true
38 | - type: textarea
39 | attributes:
40 | label: 复现操作
41 | description: 你之前干了什么,然后出现了错误呢?
42 | placeholder: |
43 | 1. 正常完成本地部署
44 | 2. 选取GPT3.5-turbo模型,正确填写API
45 | 3. 在对话框中要求 ChatGPT “以LaTeX格式输出三角函数”
46 | 4. ChatGPT 输出部分内容后程序被自动终止
47 | validations:
48 | required: true
49 | - type: textarea
50 | id: logs
51 | attributes:
52 | label: 错误日志
53 | description: 请将终端中的主要错误报告粘贴至此处。
54 | render: shell
55 | - type: textarea
56 | attributes:
57 | label: 运行环境
58 | description: |
59 | 网页底部会列出您运行环境的版本信息,请务必填写。以下是一个例子:
60 | - **OS**: Linux/amd64
61 | - **Docker version**: 1.8.2
62 | - **Gradio version**: 3.22.1
63 | - **Python version**: 3.11.1
64 | value: |
65 | - OS:
66 | - Docker version:
67 | - Gradio version:
68 | - Python version:
69 | validations:
70 | required: false
71 | - type: textarea
72 | attributes:
73 | label: 补充说明
74 | description: 链接?参考资料?任何更多背景信息!
--------------------------------------------------------------------------------
/modules/webui_locale.py:
--------------------------------------------------------------------------------
1 | import os
2 | import locale
3 | import logging
4 | import commentjson as json
5 |
class I18nAuto:
    """Translation lookup loaded from ./locale/<language>.json.

    The language comes from config.json ("language"), can be overridden by
    the LANGUAGE environment variable, and "auto" resolves to the system
    locale. Missing translation files fall back to English (en_US).
    """

    def __init__(self):
        # config.json may contain comments, hence commentjson (imported as
        # json at module level) is used to parse it.
        if os.path.exists("config.json"):
            with open("config.json", "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            config = {}
        language = config.get("language", "auto")
        language = os.environ.get("LANGUAGE", language)
        language = language.replace("-", "_")
        if language == "auto":
            # System language code, e.g. "zh_CN".
            # NOTE(review): locale.getdefaultlocale() is deprecated since
            # Python 3.11; kept for behavioral compatibility.
            language = locale.getdefaultlocale()[0]
        self.language = language
        self._load_language_map(language)
        with open("./locale/en_US.json", "r", encoding="utf-8") as f:
            # English map used for keys missing from the selected language.
            self.fallback_language_map = json.load(f)

    def _load_language_map(self, language):
        """(Re)load self.language_map for *language*, falling back to en_US.

        Previously duplicated verbatim in __init__ and change_language.
        """
        self.language_map = {}
        self.file_is_exists = os.path.isfile(f"./locale/{language}.json")
        if self.file_is_exists:
            with open(f"./locale/{language}.json", "r", encoding="utf-8") as f:
                self.language_map.update(json.load(f))
        else:
            logging.warning(f"Language file for {language} does not exist. Using English instead.")
            logging.warning(f"Available languages: {', '.join([x[:-5] for x in os.listdir('./locale')])}")
            with open("./locale/en_US.json", "r", encoding="utf-8") as f:
                self.language_map.update(json.load(f))

    def change_language(self, language):
        """Switch the active language at runtime."""
        language = language.replace("-", "_")
        # Bug fix: self.language was never updated here, so __call__'s
        # zh_CN special case kept using the construction-time language.
        self.language = language
        self._load_language_map(language)

    def __call__(self, key):
        """Translate *key*; fall back to English, then to the key itself."""
        if self.file_is_exists and key in self.language_map:
            return self.language_map[key]
        elif key in self.fallback_language_map and self.language != "zh_CN":
            return self.fallback_language_map[key]
        else:
            return key
53 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/report-localhost.yml:
--------------------------------------------------------------------------------
1 | name: 本地部署错误
2 | description: "报告本地部署时的问题或错误(小白首选)"
3 | title: "[本地部署]: "
4 | labels: ["question","localhost deployment"]
5 | body:
6 | - type: markdown
7 | attributes:
8 | value: |
9 | 感谢提交 issue! 请尽可能完整填写以下信息,帮助我们更好地定位问题~
10 | **在一切开始之前,请确保您已经阅读过 [常见问题](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题) 页面**,查看它是否已经对您的问题做出了解答。
11 | 如果没有,请检索 [issue](https://github.com/GaiZhenbiao/ChuanhuChatGPT/issues) 与 [discussion](https://github.com/GaiZhenbiao/ChuanhuChatGPT/discussions) ,查看有没有相同或类似的问题。
12 |
13 | **另外,请不要再提交 `Something went wrong Expecting value: line 1 column 1 (char 0)` 和 代理配置 相关的问题,请再看一遍 [常见问题](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题) 页,实在不行请前往 discussion。**
14 |
15 | ------
16 | - type: checkboxes
17 | attributes:
18 | label: 是否已存在现有反馈与解答?
19 | description: 请搜索issue、discussion和[常见问题](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题)以查看您想报告的issue是否已存在。
20 | options:
21 | - label: 我确认没有已有issue或discussion,且已阅读**常见问题**。
22 | required: true
23 | - type: checkboxes
24 | attributes:
25 | label: 是否是一个代理配置相关的疑问?
26 | description: 请不要提交代理配置相关的issue。如有疑问请前往 [讨论区](https://github.com/GaiZhenbiao/ChuanhuChatGPT/discussions)。
27 | options:
28 | - label: 我确认这不是一个代理配置相关的疑问。
29 | required: true
30 | - type: textarea
31 | id: what-happened
32 | attributes:
33 | label: 错误描述
description: |
  请描述您遇到的错误或问题。
  提示:如果可以,也请提供错误的截图,如本地部署的网页截图与终端错误报告的截图。
  如果可以,也请提供`.json`格式的对话记录。
37 | placeholder: 发生什么事了?
38 | validations:
39 | required: true
40 | - type: textarea
41 | attributes:
42 | label: 复现操作
43 | description: 你之前干了什么,然后出现了错误呢?
44 | placeholder: |
45 | 1. 正常完成本地部署
46 | 2. 选取GPT3.5-turbo模型,正确填写API
47 | 3. 在对话框中要求 ChatGPT “以LaTeX格式输出三角函数”
48 | 4. ChatGPT 输出部分内容后程序被自动终止
49 | validations:
50 | required: true
51 | - type: textarea
52 | id: logs
53 | attributes:
54 | label: 错误日志
55 | description: 请将终端中的主要错误报告粘贴至此处。
56 | render: shell
57 | - type: textarea
58 | attributes:
59 | label: 运行环境
60 | description: |
61 | 网页底部会列出您运行环境的版本信息,请务必填写。以下是一个例子:
62 | - **OS**: Windows11 22H2
63 | - **Browser**: Chrome
64 | - **Gradio version**: 3.22.1
65 | - **Python version**: 3.11.1
66 | value: |
67 | - OS:
68 | - Browser:
69 | - Gradio version:
70 | - Python version:
71 | render: markdown
72 | validations:
73 | required: false
74 | - type: textarea
75 | attributes:
76 | label: 补充说明
77 | description: 链接?参考资料?任何更多背景信息!
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 | history/
30 | index/
31 |
32 | # PyInstaller
33 | # Usually these files are written by a python script from a template
34 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
35 | *.manifest
36 | *.spec
37 |
38 | # Installer logs
39 | pip-log.txt
40 | pip-delete-this-directory.txt
41 |
42 | # Unit test / coverage reports
43 | htmlcov/
44 | .tox/
45 | .nox/
46 | .coverage
47 | .coverage.*
48 | .cache
49 | nosetests.xml
50 | coverage.xml
51 | *.cover
52 | *.py,cover
53 | .hypothesis/
54 | .pytest_cache/
55 |
56 | # Translations
57 | *.mo
58 | *.pot
59 |
60 | # Django stuff:
61 | *.log
62 | local_settings.py
63 | db.sqlite3
64 | db.sqlite3-journal
65 |
66 | # Flask stuff:
67 | instance/
68 | .webassets-cache
69 |
70 | # Scrapy stuff:
71 | .scrapy
72 |
73 | # Sphinx documentation
74 | docs/_build/
75 |
76 | # PyBuilder
77 | target/
78 |
79 | # Jupyter Notebook
80 | .ipynb_checkpoints
81 |
82 | # IPython
83 | profile_default/
84 | ipython_config.py
85 |
86 | # pyenv
87 | .python-version
88 |
89 | # pipenv
90 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
91 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
92 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
93 | # install all needed dependencies.
94 | #Pipfile.lock
95 |
96 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
97 | __pypackages__/
98 |
99 | # Celery stuff
100 | celerybeat-schedule
101 | celerybeat.pid
102 |
103 | # SageMath parsed files
104 | *.sage.py
105 |
106 | # Environments
107 | .env
108 | .venv
109 | env/
110 | venv/
111 | ENV/
112 | env.bak/
113 | venv.bak/
114 |
115 | # Spyder project settings
116 | .spyderproject
117 | .spyproject
118 |
119 | # Rope project settings
120 | .ropeproject
121 |
122 | # mkdocs documentation
123 | /site
124 |
125 | # mypy
126 | .mypy_cache/
127 | .dmypy.json
128 | dmypy.json
129 |
130 | # Pyre type checker
131 | .pyre/
132 |
133 | # Mac system file
134 | **/.DS_Store
135 |
136 | #vscode
137 | .vscode
138 |
139 | # 配置文件/模型文件
140 | api_key.txt
141 | config.json
142 | auth.json
143 | .models/
144 | models/*
145 | lora/
146 | .idea
147 | templates/*
148 | files/
149 | tmp/
150 |
151 | scripts/
152 | include/
153 | pyvenv.cfg
154 |
155 | create_release.sh
156 |
--------------------------------------------------------------------------------
/modules/models/Qwen.py:
--------------------------------------------------------------------------------
1 | from transformers import AutoModelForCausalLM, AutoTokenizer
2 | import os
3 | from transformers.generation import GenerationConfig
4 | import logging
5 | import colorama
6 | from .base_model import BaseLLMModel
7 | from ..presets import MODEL_METADATA
8 |
9 |
class Qwen_Client(BaseLLMModel):
    """Locally hosted Qwen chat model loaded via HuggingFace transformers."""

    def __init__(self, model_name, user_name="") -> None:
        super().__init__(model_name=model_name, user=user_name)
        # Resolution order for the model weights: a locally downloaded copy
        # under ./models, then the repo id from MODEL_METADATA, and finally
        # the raw model name.
        model_source = None
        if os.path.exists("models") and model_name in os.listdir("models"):
            model_source = f"models/{model_name}"
        if model_source is None:
            try:
                model_source = MODEL_METADATA[model_name]["repo_id"]
            except KeyError:
                model_source = model_name
        self.tokenizer = AutoTokenizer.from_pretrained(
            model_source, trust_remote_code=True, resume_download=True
        )
        self.model = AutoModelForCausalLM.from_pretrained(
            model_source, device_map="cuda", trust_remote_code=True, resume_download=True
        ).eval()

    def generation_config(self):
        """Build a fresh GenerationConfig reflecting the current UI settings."""
        return GenerationConfig.from_dict(
            {
                "chat_format": "chatml",
                "do_sample": True,
                "eos_token_id": 151643,
                "max_length": self.token_upper_limit,
                "max_new_tokens": 512,
                "max_window_size": 6144,
                "pad_token_id": 151643,
                "top_k": 0,
                "top_p": self.top_p,
                "transformers_version": "4.33.2",
                "trust_remote_code": True,
                "temperature": self.temperature,
            }
        )

    def _get_glm_style_input(self):
        """Split self.history into ([[user, assistant], ...], latest_query)."""
        history = [entry["content"] for entry in self.history]
        query = history.pop()
        logging.debug(colorama.Fore.YELLOW + f"{history}" + colorama.Fore.RESET)
        assert (
            len(history) % 2 == 0
        ), f"History should be even length. current history is: {history}"
        paired = [list(pair) for pair in zip(history[0::2], history[1::2])]
        return paired, query

    def get_answer_at_once(self):
        history, query = self._get_glm_style_input()
        self.model.generation_config = self.generation_config()
        response, history = self.model.chat(self.tokenizer, query, history=history)
        # NOTE(review): len(response) is a character count, not a token count.
        return response, len(response)

    def get_answer_stream_iter(self):
        history, query = self._get_glm_style_input()
        self.model.generation_config = self.generation_config()
        for response in self.model.chat_stream(self.tokenizer, query, history):
            yield response
69 |
--------------------------------------------------------------------------------
/modules/shared.py:
--------------------------------------------------------------------------------
1 | from modules.presets import CHAT_COMPLETION_URL, BALANCE_API_URL, USAGE_API_URL, API_HOST, OPENAI_API_BASE, IMAGES_COMPLETION_URL
2 | import os
3 | import queue
4 | import openai
5 |
def format_openai_host(api_host: str):
    """Derive every OpenAI-style endpoint URL from a host string.

    Accepts bare hosts ("api.openai.com"), full origins, trailing slashes
    and a trailing "/v1" segment, all normalized to one canonical base.

    Returns:
        Tuple of (chat_completion_url, images_completion_url,
        openai_api_base, balance_api_url, usage_api_url).
    """
    base = api_host.rstrip("/")
    if not base.startswith("http"):
        base = f"https://{base}"
    if base.endswith("/v1"):
        base = base[: -len("/v1")]
    return (
        f"{base}/v1/chat/completions",
        f"{base}/v1/images/generations",
        f"{base}/v1",
        f"{base}/dashboard/billing/credit_grants",
        f"{base}/dashboard/billing/usage",
    )
18 |
class State:
    """Process-wide mutable runtime state: interruption flag, endpoint URLs
    and optional multi-API-key rotation."""

    interrupted = False
    multi_api_key = False
    chat_completion_url = CHAT_COMPLETION_URL
    balance_api_url = BALANCE_API_URL
    usage_api_url = USAGE_API_URL
    openai_api_base = OPENAI_API_BASE
    images_completion_url = IMAGES_COMPLETION_URL
    api_host = API_HOST

    def interrupt(self):
        """Ask in-flight streaming generations to stop."""
        self.interrupted = True

    def recover(self):
        """Clear the interruption flag."""
        self.interrupted = False

    def set_api_host(self, api_host: str):
        """Point every endpoint URL at *api_host* and export OPENAI_API_BASE."""
        self.api_host = api_host
        (
            self.chat_completion_url,
            self.images_completion_url,
            self.openai_api_base,
            self.balance_api_url,
            self.usage_api_url,
        ) = format_openai_host(api_host)
        os.environ["OPENAI_API_BASE"] = self.openai_api_base

    def reset_api_host(self):
        """Restore the default endpoints; returns the default host."""
        self.chat_completion_url = CHAT_COMPLETION_URL
        self.images_completion_url = IMAGES_COMPLETION_URL
        self.balance_api_url = BALANCE_API_URL
        self.usage_api_url = USAGE_API_URL
        # Consistency fix: openai_api_base was left pointing at the custom
        # host even though the environment variable was reset.
        self.openai_api_base = OPENAI_API_BASE
        self.api_host = API_HOST
        os.environ["OPENAI_API_BASE"] = f"https://{API_HOST}"
        return API_HOST

    def reset_all(self):
        """Clear the interruption flag and restore all default endpoints.

        Bug fix: previously only chat_completion_url was restored, leaving
        the image/balance/usage URLs pointing at a custom host after reset.
        """
        self.interrupted = False
        self.reset_api_host()

    def set_api_key_queue(self, api_key_list):
        """Enable round-robin rotation over *api_key_list*."""
        self.multi_api_key = True
        self.api_key_queue = queue.Queue()
        for api_key in api_key_list:
            self.api_key_queue.put(api_key)

    def switching_api_key(self, func):
        """Decorator: run *func* with the next key from the rotation queue.

        The key is assigned to the bound model instance (args[0].api_key)
        and returned to the queue afterwards. Returns *func* unchanged when
        rotation is not enabled.
        """
        if not hasattr(self, "api_key_queue"):
            return func

        def wrapped(*args, **kwargs):
            api_key = self.api_key_queue.get()
            args[0].api_key = api_key
            try:
                return func(*args, **kwargs)
            finally:
                # Bug fix: return the key even when func raises; otherwise
                # the pool shrank by one key per failed call.
                self.api_key_queue.put(api_key)

        return wrapped
71 |
72 |
# Singleton runtime state shared across the application.
state = State()

# Filesystem layout: this file lives in modules/, assets in <repo>/web_assets.
modules_path = os.path.dirname(os.path.realpath(__file__))
chuanhu_path = os.path.dirname(modules_path)
assets_path = os.path.join(chuanhu_path, "web_assets")
--------------------------------------------------------------------------------
/locale/zh_CN.json:
--------------------------------------------------------------------------------
1 | {
2 | "gpt3.5turbo_description": "GPT-3.5 Turbo 是由 OpenAI 开发的一款仅限文本的大型语言模型。它基于 GPT-3 模型,并已经在大量数据上进行了微调。最新版本的 GPT-3.5 Turbo 进行了性能和精度优化,支持最大 16k tokens 的上下文窗口和最大 4096 tokens 的响应长度。此模型始终使用可用的最新版本的 GPT-3.5 Turbo。",
3 | "gpt3.5turbo_instruct_description": "GPT3.5 Turbo Instruct 是 OpenAI 开发的文本补全模型,具有与 GPT-3 时代模型相似的功能。它兼容旧版的 Completions 端点,但不兼容 Chat Completions。该模型的上下文窗口为 4096 个 tokens。",
4 | "gpt3.5turbo_16k_description": "旧版的 GPT-3.5 Turbo 模型,具有 16k tokens 的上下文窗口。",
5 | "gpt4_description": "GPT-4 是 OpenAI 开发的一款仅限文本的大型语言模型。它具有 8192 个 tokens 的上下文窗口和 4096 个 tokens 的最大响应长度。该模型始终使用可用的最新版本的 GPT-4。建议使用 GPT-4 Turbo 以获得更好的性能、更快的速度和更低的成本。",
6 | "gpt4_32k_description": "GPT-4 32K 是 OpenAI 开发的一个仅限文本的大型语言模型。它具有 32,000 tokens 的上下文窗口和 4,096 tokens 的最大响应长度。这个模型从未广泛推出,建议使用 GPT-4 Turbo。",
7 | "gpt4turbo_description": "GPT-4 Turbo 是由 OpenAI 开发的一款多模态大型语言模型。它在广泛的自然语言处理任务上提供最先进的性能,包括文本生成、翻译、摘要、视觉问题回答等。GPT-4 Turbo 拥有最大 128k tokens 的上下文窗口和最大 4096 tokens 的响应长度。此模型始终使用可用的最新版本的 GPT-4 Turbo。",
8 | "claude3_haiku_description": "Claude3 Haiku 是由 Anthropic 开发的一款多模态大型语言模型。它是 Claude 3 模型家族中最快、最紧凑的模型,旨在实现近乎即时的响应速度,但是性能不如 Sonnet 和 Opus。Claude3 Haiku 有最大 200k tokens 的上下文窗口和最大 4096 tokens 的响应长度。此模型始终使用可用的最新版本的 Claude3 Haiku。",
9 | "claude3_sonnet_description": "Claude3 Sonnet 是由 Anthropic 开发的一款多模态大型语言模型。它在智能与速度之间保持最佳平衡,适用于企业工作负载和大规模 AI 部署。Claude3 Sonnet 拥有最大 200k tokens 的上下文窗口和最大 4096 tokens 的响应长度。此模型始终使用可用的最新版本的 Claude3 Sonnet。",
10 | "claude3_opus_description": "Claude3 Opus 是由 Anthropic 开发的一款多模态大型语言模型。它是 Claude 3 模型家族中最智能、最大的模型,能够在高度复杂的任务上呈现最顶尖的性能,呈现出类似人类的理解能力。Claude3 Opus 拥有最大 200k tokens 的上下文窗口和最大 4096 tokens 的响应长度。此模型始终使用可用的最新版本的 Claude3 Opus。",
11 | "groq_llama3_8b_description": "采用 [Groq](https://console.groq.com/) 的 LLaMA 3 8B。Groq 是一个非常快速的语言模型推理服务。",
12 | "groq_llama3_70b_description": "采用 [Groq](https://console.groq.com/) 的 LLaMA 3 70B。Groq 是一个非常快速的语言模型推理服务。",
13 | "groq_mixtral_8x7b_description": "采用 [Groq](https://console.groq.com/) 的 Mixtral 8x7B。Groq 是一个非常快速的语言模型推理服务。",
14 | "groq_gemma_7b_description": "采用 [Groq](https://console.groq.com/) 的 Gemma 7B。Groq 是一个非常快速的语言模型推理服务。",
15 | "chuanhu_description": "一个能使用多种工具解决复杂问题的智能体。",
16 | "gpt_default_slogan": "今天能帮您些什么?",
17 | "claude_default_slogan": "我能帮您什么忙?",
18 | "chuanhu_slogan": "川虎今天能帮你做些什么?",
19 | "chuanhu_question_1": "今天杭州天气如何?",
20 | "chuanhu_question_2": "最近 Apple 发布了什么新品?",
21 | "chuanhu_question_3": "现在显卡的价格如何?",
22 | "chuanhu_question_4": "TikTok 上有什么新梗?",
23 | "gpt4o_description": "OpenAI 的最先进的多模态旗舰模型,比 GPT-4 Turbo 更便宜、更快。",
24 | "gpt4omini_description": "OpenAI 的经济实惠且智能的小型模型,适用于快速、轻量级任务。",
25 | "gpt5_description": "跨领域编码与智能体任务的最佳模型。支持 400,000 token 上下文,单次可输出至多 128,000 token。",
26 | "gpt5mini_description": "面向明确任务的更快、更具性价比的 GPT-5 版本。支持 400,000 token 上下文,单次可输出至多 128,000 token。",
27 | "gpt5nano_description": "速度最快、性价比最高的 GPT-5 版本。支持 400,000 token 上下文,单次可输出至多 128,000 token。",
28 | "o1_description": "o1 系列的大型语言模型通过强化学习训练,能够执行复杂的推理任务。o1 模型在回答之前会进行思考,产生一长串内部思维链,然后再回应用户。",
29 | "no_permission_to_update_description": "你没有权限更新。请联系管理员。管理员的配置方式为在配置文件 config.json 中的 admin_list 中添加用户名。"
30 | }
--------------------------------------------------------------------------------
/modules/models/DALLE3.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from .base_model import BaseLLMModel
3 | from .. import shared
4 | import requests
5 | from ..presets import *
6 | from ..config import retrieve_proxy, sensitive_id
7 |
class OpenAI_DALLE3_Client(BaseLLMModel):
    """Chat client that generates images via OpenAI's DALL·E 3 API.

    The "answer" returned to the chat UI is an inline HTML <img> tag for the
    generated picture, followed by the revised prompt the API reports.
    """

    def __init__(self, model_name, api_key, user_name="") -> None:
        super().__init__(model_name=model_name, user=user_name, config={"api_key": api_key})
        # A user-configured api_host overrides the shared default endpoints;
        # otherwise fall back to the process-wide shared.state URLs.
        if self.api_host is not None:
            self.chat_completion_url, self.images_completion_url, self.openai_api_base, self.balance_api_url, self.usage_api_url = shared.format_openai_host(self.api_host)
        else:
            self.api_host, self.chat_completion_url, self.images_completion_url, self.openai_api_base, self.balance_api_url, self.usage_api_url = shared.state.api_host, shared.state.chat_completion_url, shared.state.images_completion_url, shared.state.openai_api_base, shared.state.balance_api_url, shared.state.usage_api_url
        self._refresh_header()

    def _get_dalle3_prompt(self):
        """Return the latest user message as the image prompt.

        When the message ends with "--raw", prepend OpenAI's documented
        instruction that stops DALL·E 3 from rewriting/expanding the prompt.
        """
        prompt = self.history[-1]["content"]
        if prompt.endswith("--raw"):
            prompt = "I NEED to test how the tool works with extremely simple prompts. DO NOT add any detail, just use it AS-IS:" + prompt
        return prompt

    def get_answer_at_once(self, stream=False):
        """Request one 1024x1024 standard-quality image.

        Returns:
            (html_and_revised_prompt, 0): the image markup plus revised prompt,
            and a zero token count (image calls are not token-metered here).
            On any request failure, returns (error_message, 0) instead.
        """
        prompt = self._get_dalle3_prompt()
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.api_key}"
        }
        payload = {
            "model": self.model_name,
            "prompt": prompt,
            "n": 1,
            "size": "1024x1024",
            "quality": "standard",
        }
        timeout = TIMEOUT_STREAMING if stream else TIMEOUT_ALL

        if self.images_completion_url != IMAGES_COMPLETION_URL:
            logging.debug(f"使用自定义API URL: {self.images_completion_url}")

        with retrieve_proxy():
            try:
                response = requests.post(
                    self.images_completion_url,
                    headers=headers,
                    json=payload,
                    stream=stream,
                    timeout=timeout,
                )
                response.raise_for_status()  # raise for non-2xx HTTP status codes
                response_data = response.json()
                image_url = response_data['data'][0]['url']
                # Inline image markup for the chatbot. NOTE(review): this line
                # arrived mangled (unterminated f-string); reconstructed as a
                # plain <img> tag — confirm against the original markup.
                img_tag = f'<img src="{image_url}" alt="{prompt}" />'
                revised_prompt = response_data['data'][0].get('revised_prompt', '')
                return img_tag + revised_prompt, 0
            except requests.exceptions.RequestException as e:
                return str(e), 0

    def _refresh_header(self):
        """Reset default headers to authenticate with the server-side sensitive_id."""
        self.headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {sensitive_id}",
        }
--------------------------------------------------------------------------------
/modules/models/LLaMA.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 | import os
5 | from llama_cpp import Llama
6 |
7 | from ..index_func import *
8 | from ..presets import *
9 | from ..utils import *
10 | from .base_model import BaseLLMModel, download
11 |
# Llama-2 chat prompt-format markers: the system block is wrapped in
# <<SYS>> ... <</SYS>>, each user turn in [INST] ... [/INST], and each
# assistant reply is terminated with the EOS token </s>.
# (The angle-bracketed tokens had been stripped from this file, which would
# have produced a malformed prompt and an empty stop token.)
SYS_PREFIX = "<<SYS>>\n"
SYS_POSTFIX = "\n<</SYS>>\n\n"
INST_PREFIX = "[INST] "
INST_POSTFIX = " "
OUTPUT_PREFIX = "[/INST] "
OUTPUT_POSTFIX = "</s>"
18 |
19 |
class LLaMA_Client(BaseLLMModel):
    """Local llama.cpp-backed chat client for GGUF models, with optional LoRA."""

    def __init__(self, model_name, lora_path=None, user_name="") -> None:
        super().__init__(model_name=model_name, user=user_name)

        self.max_generation_token = 1000
        if model_name in MODEL_METADATA:
            # Known model: fetch the first file of its filelist from the hub.
            path_to_model = download(
                MODEL_METADATA[model_name]["repo_id"],
                MODEL_METADATA[model_name]["filelist"][0],
            )
        else:
            # Unknown model: look for any .gguf file under models/<model_name>
            # (including subdirectories); first match wins.
            dir_to_model = os.path.join("models", model_name)
            path_to_model = None
            for root, _dirs, files in os.walk(dir_to_model):
                for file in files:
                    if file.endswith(".gguf"):
                        path_to_model = os.path.join(root, file)
                        break
                if path_to_model is not None:
                    break
        self.system_prompt = ""

        if lora_path is not None:
            lora_path = os.path.join("lora", lora_path)
            self.model = Llama(model_path=path_to_model, lora_path=lora_path)
        else:
            self.model = Llama(model_path=path_to_model)

    def _get_llama_style_input(self):
        """Serialize self.history into a single Llama-2 chat-format prompt string."""
        context = []
        for conv in self.history:
            if conv["role"] == "system":
                context.append(SYS_PREFIX + conv["content"] + SYS_POSTFIX)
            elif conv["role"] == "user":
                context.append(
                    INST_PREFIX + conv["content"] + INST_POSTFIX + OUTPUT_PREFIX
                )
            else:
                context.append(conv["content"] + OUTPUT_POSTFIX)
        return "".join(context)

    def get_answer_at_once(self):
        """Run one non-streaming completion.

        Returns:
            (text, len(text)): the generated text and its character length.
        """
        context = self._get_llama_style_input()
        response = self.model(
            context,
            max_tokens=self.max_generation_token,
            stop=[],
            echo=False,
            stream=False,
        )
        # llama_cpp returns a completion dict, not a string; the original code
        # returned the raw dict (and len() of it), which broke downstream use.
        text = response["choices"][0]["text"]
        return text, len(text)

    def get_answer_stream_iter(self):
        """Yield the reply text accumulated so far as chunks arrive from llama.cpp."""
        context = self._get_llama_style_input()
        completion_stream = self.model(
            context,
            max_tokens=self.max_generation_token,
            stop=[SYS_PREFIX, SYS_POSTFIX, INST_PREFIX, OUTPUT_PREFIX, OUTPUT_POSTFIX],
            echo=False,
            stream=True,
        )
        partial_text = ""
        for chunk in completion_stream:
            partial_text += chunk["choices"][0]["text"]
            yield partial_text
97 |
--------------------------------------------------------------------------------
/modules/models/ERNIE.py:
--------------------------------------------------------------------------------
1 | from ..presets import *
2 | from ..utils import *
3 |
4 | from .base_model import BaseLLMModel
5 |
6 |
class ERNIE_Client(BaseLLMModel):
    """Client for Baidu's ERNIE-Bot (Wenxin Yiyan) chat completion API."""

    def __init__(self, model_name, api_key, secret_key) -> None:
        """Store credentials and pick the endpoint that serves `model_name`.

        Raises:
            Exception: if either credential is missing.
        """
        super().__init__(model_name=model_name)
        self.api_key = api_key
        self.api_secret = secret_key
        if None in [self.api_secret, self.api_key]:
            raise Exception("请在配置文件或者环境变量中设置文心一言的API Key 和 Secret Key")

        # Each ERNIE model is served at its own endpoint; the OAuth access
        # token is appended to the URL at request time.
        if self.model_name == "ERNIE-Bot-turbo":
            self.ERNIE_url = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/eb-instant?access_token="
        elif self.model_name == "ERNIE-Bot":
            self.ERNIE_url = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions?access_token="
        elif self.model_name == "ERNIE-Bot-4":
            self.ERNIE_url = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions_pro?access_token="

    def get_access_token(self):
        """Exchange the API key / secret key pair for an OAuth access token.

        Returns:
            The access_token string. (If the credentials are rejected the
            response carries no "access_token" field and this raises KeyError.)
        """
        url = (
            "https://aip.baidubce.com/oauth/2.0/token?client_id=" + self.api_key
            + "&client_secret=" + self.api_secret
            + "&grant_type=client_credentials"
        )

        payload = json.dumps("")
        headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json'
        }

        response = requests.request("POST", url, headers=headers, data=payload)

        return response.json()["access_token"]

    def _build_messages(self):
        """Build the messages list for the request body.

        ERNIE rejects messages with role "system", so system entries are
        filtered out. NOTE(review): the system prompt is prepended and then
        immediately filtered away, so it is effectively ignored — preserved
        as-is to keep behavior unchanged; confirm whether it should instead be
        sent via the API's dedicated "system" field.
        """
        history = self.history
        if self.system_prompt is not None:
            history = [construct_system(self.system_prompt), *history]
        return [m for m in history if m["role"] != "system"]

    def get_answer_stream_iter(self):
        """Stream the reply, yielding the accumulated text after each SSE chunk."""
        url = self.ERNIE_url + self.get_access_token()
        payload = json.dumps({
            "messages": self._build_messages(),
            "stream": True
        })
        headers = {
            'Content-Type': 'application/json'
        }

        response = requests.request("POST", url, headers=headers, data=payload, stream=True)

        if response.status_code == 200:
            partial_text = ""
            for line in response.iter_lines():
                if len(line) == 0:
                    continue
                # Each SSE line looks like b"data: {...}"; drop the 5-byte
                # "data:" prefix before parsing.
                line = json.loads(line[5:])
                partial_text += line['result']
                yield partial_text
        else:
            yield STANDARD_ERROR_MSG + GENERAL_ERROR_MSG

    def get_answer_at_once(self):
        """Return the full reply in one shot.

        Returns:
            (text, len(text)) on success, or ("获取资源错误", 0) on HTTP failure.
        """
        url = self.ERNIE_url + self.get_access_token()
        payload = json.dumps({
            "messages": self._build_messages(),
            # Fix: this non-streaming path previously requested "stream": True
            # (and passed stream=True to requests), which makes the server
            # answer with SSE lines that response.json() cannot parse.
            "stream": False
        })
        headers = {
            'Content-Type': 'application/json'
        }

        response = requests.request("POST", url, headers=headers, data=payload)

        if response.status_code == 200:
            result = response.json()["result"]
            return str(result), len(result)
        else:
            return "获取资源错误", 0
95 |
96 |
97 |
--------------------------------------------------------------------------------
/web_assets/stylesheet/markdown.css:
--------------------------------------------------------------------------------
/* Chat-message markdown styling: restores list rendering that gradio's
   reset removes, and themes tables, inline code, and code blocks. */

/* list from gradio 4.26, recover what gradio's reset has done */
.message {

    --chatbot-body-text-size: 14px; /* gradio set 16px in v4.29 */

    .prose ul {
        list-style-position: outside !important;
        list-style-type: disc;
    }
    .prose ol {
        list-style-position: outside !important;
    }
    .prose ul ul,
    .prose ol ul,
    .prose ul ol ul,
    .prose ol ol ul {
        list-style-type: circle;
    }
    .prose ul > p,
    .prose li > p {
        display: initial;
    }
    .prose ol,
    .prose ul {
        margin-top: unset;
    }
    .prose ul ul,
    .prose ul ol,
    .prose ol ol,
    .prose ol ul {
        margin: initial;
        font-size: inherit;
    }
    .prose li {
        margin-bottom: initial;
    }
}


/* Tables */
.message table {
    margin: 1em 0;
    border-collapse: collapse;
    empty-cells: show;
}
.message td, .message th {
    border: 1.2px solid var(--border-color-primary) !important;
    padding: 0.2em;
}
.message thead {
    background-color: rgba(175,184,193,0.2);
}
.message thead th {
    padding: .5em .2em;
}

/* Inline code */
.message :not(pre) > code {
    display: inline;
    white-space: break-spaces;
    font-family: var(--font-mono) !important;
    border-radius: 6px !important;
    margin: 0 2px 0 2px;
    padding: .1em .4em .08em .4em !important;
    background-color: rgba(175,184,193,0.2) !important;
    border: none !important;
    font-size: var(--text-md) !important;
}
/* Code blocks */
.message pre,
.message pre[class*=language-] {
    color: #fff;
    overflow-x: auto;
    overflow-y: hidden;
    padding: var(--spacing-xl) 1.2em !important;
    border-radius: var(--radius-lg) !important;
    background: var(--neutral-950) !important;
}
.message pre code,
.message pre code[class*=language-] {
    color: #fff;
    padding: 0;
    margin: 0;
    background-color: unset;
    text-shadow: none;
    font-family: var(--font-mono);
    font-size: var(--text-md);
}
.message .code_wrap {
    margin: .8em 1em 1em 0em;
}

/* Override prism.css */
.language-css .token.string,
.style .token.string,
.token.entity,
.token.operator,
.token.url {
    background: none !important;
}

/* Avoid highlight styles designed for light backgrounds
   (code blocks always use a dark background here) */
.md .token.comment,
.md .token.prolog,
.md .token.cdata {
    color: #5c6370 !important;
}

.md .token.doctype,
.md .token.punctuation,
.md .token.entity {
    color: #abb2bf !important;
}

.md .token.attr-name,
.md .token.class-name,
.md .token.boolean,
.md .token.constant,
.md .token.number,
.md .token.atrule {
    color: #d19a66 !important;
}

.md .token.keyword {
    color: #c678dd !important;
}

.md .token.property,
.md .token.tag,
.md .token.symbol,
.md .token.deleted,
.md .token.important {
    color: #e06c75 !important;
}

.md .token.selector,
.md .token.string,
.md .token.char,
.md .token.builtin,
.md .token.inserted,
.md .token.regex,
.md .token.attr-value,
.md .token.attr-value > .token.punctuation {
    color: #98c379 !important;
}

.md .token.variable,
.md .token.operator,
.md .token.function {
    color: #61afef !important;
}

.md .token.url {
    color: #56b6c2 !important;
}
--------------------------------------------------------------------------------
/web_assets/javascript/chat-history.js:
--------------------------------------------------------------------------------
1 |
2 | var historyLoaded = false;
3 | var loadhistorytime = 0; // for debugging
4 |
5 |
// Snapshot the current chat bubble area into localStorage so it can be
// replayed later as read-only history.
function saveHistoryHtml() {
    const bubbleWrap = document.querySelector('#chuanhu-chatbot > .wrapper > .bubble-wrap');
    if (!bubbleWrap) return; // no history, do nothing
    localStorage.setItem('chatHistory', bubbleWrap.innerHTML);
    historyLoaded = false;
}
13 |
// Restore a previously saved chat snapshot as a read-only "history" block.
// NOTE: the feature is currently disabled — the function returns
// unconditionally partway through; the remaining code is unreachable but
// kept for a possible re-enable.
function loadHistoryHtml() {
    var historyHtml = localStorage.getItem('chatHistory');
    const tempDiv = document.createElement('div');
    tempDiv.innerHTML = historyHtml;
    if (!historyHtml || tempDiv.innerText.trim() === "") {
        historyLoaded = true;
        return; // no history, do nothing
    }
    userLogged = localStorage.getItem('userLogged');
    hideHistoryWhenNotLoggedIn = gradioApp().querySelector('#hideHistoryWhenNotLoggedIn_config').innerText === "True";

    // Feature disabled: bail out unconditionally here.
    historyLoaded = true;
    return;

    // if (userLogged || (!userLogged && !hideHistoryWhenNotLoggedIn)){
    //     historyLoaded = true;
    //     return; // logged in, do nothing. OR, not logged in but not hide history list, do nothing.
    // }

    // Only when the user is NOT logged in and the history list is hidden do
    // we fall back to the read-only history view below.
    if (!historyLoaded) {
        // preprocess, gradio buttons in history lost their event listeners
        var gradioCopyButtons = tempDiv.querySelectorAll('button.copy_code_button');
        for (var i = 0; i < gradioCopyButtons.length; i++) {
            gradioCopyButtons[i].parentNode.removeChild(gradioCopyButtons[i]);
        }
        var messageBtnRows = tempDiv.querySelectorAll('.message-btn-row');
        for (var i = 0; i < messageBtnRows.length; i++) {
            messageBtnRows[i].parentNode.removeChild(messageBtnRows[i]);
        }
        var latestMessages = tempDiv.querySelectorAll('.message.latest');
        for (var i = 0; i < latestMessages.length; i++) {
            latestMessages[i].classList.remove('latest');
        }
        var chatbotPlaceHolder = tempDiv.querySelector('center');
        if (chatbotPlaceHolder) {
            chatbotPlaceHolder.parentNode.removeChild(chatbotPlaceHolder);
            console.log("Chatbot PlaceHolder Removed");
        }

        var fakeHistory = document.createElement('div');
        fakeHistory.classList.add('history-message');
        fakeHistory.innerHTML = tempDiv.innerHTML;
        // Append a localized "for view only" label after the last restored message.
        const forViewStyle = document.createElement('style');
        forViewStyle.innerHTML = '.wrapper > .bubble-wrap > .history-message > :last-child::after { content: "' + i18n(forView_i18n) + '"!important; }';
        document.head.appendChild(forViewStyle);
        chatbotWrap.insertBefore(fakeHistory, chatbotWrap.firstChild);

        var activeChatbotPlaceHolder = document.querySelector('#chuanhu-chatbot > .wrapper > .bubble-wrap center');
        if (activeChatbotPlaceHolder) {
            activeChatbotPlaceHolder.style.display = 'none';
        }
        // var fakeHistory = document.createElement('div');
        // fakeHistory.classList.add('history-message');
        // fakeHistory.innerHTML = historyHtml;
        // chatbotWrap.insertBefore(fakeHistory, chatbotWrap.firstChild);
        historyLoaded = true;
        // console.log("History Loaded");
        loadhistorytime += 1; // for debugging
    } else {
        historyLoaded = false;
    }
}
78 |
// Drop the saved snapshot and remove any injected read-only history block,
// restoring the chatbot placeholder if one exists.
function clearHistoryHtml() {
    localStorage.removeItem("chatHistory");
    historyMessages = chatbotWrap.querySelector('.history-message');
    if (!historyMessages) return;
    chatbotWrap.removeChild(historyMessages);
    console.log("History Cleared");
    var placeholder = document.querySelector('#chuanhu-chatbot > .wrapper > .bubble-wrap center');
    if (placeholder) {
        placeholder.style.display = 'block';
    }
}
92 |
--------------------------------------------------------------------------------
/modules/webui.py:
--------------------------------------------------------------------------------
1 |
2 | from collections import namedtuple
3 | import os
4 | import gradio as gr
5 |
6 | from . import shared
7 |
8 | # with open("./assets/ChuanhuChat.js", "r", encoding="utf-8") as f, \
9 | # open("./assets/external-scripts.js", "r", encoding="utf-8") as f1:
10 | # customJS = f.read()
11 | # externalScripts = f1.read()
12 |
13 |
def get_html(filename):
    """Return the contents of web_assets/html/<filename>, or "" if it is missing."""
    path = os.path.join(shared.chuanhu_path, "web_assets", "html", filename)
    if not os.path.exists(path):
        return ""
    with open(path, encoding="utf8") as file:
        return file.read()
20 |
def webpath(fn):
    """Map a local asset path to a gradio `file=` URL with an mtime cache-buster."""
    if fn.startswith(shared.assets_path):
        # Assets are served relative to the app root, with forward slashes.
        relative = os.path.relpath(fn, shared.chuanhu_path)
        web_path = relative.replace('\\', '/')
    else:
        web_path = os.path.abspath(fn)
    return f'file={web_path}?{os.path.getmtime(fn)}'
27 |
# Record describing one discovered web asset: its base dir, bare filename, and full path.
ScriptFile = namedtuple("ScriptFile", ["basedir", "filename", "path"])
29 |
def javascript_html():
    """Build the <script> tags for all bundled .js and .mjs assets.

    NOTE(review): the original tag markup was stripped from this file (the
    loop bodies appended only '\\n'); reconstructed here with standard
    script tags (.js as classic scripts, .mjs as ES modules) — confirm
    against the original.
    """
    head = ""
    for script in list_scripts("javascript", ".js"):
        head += f'<script type="text/javascript" src="{webpath(script.path)}"></script>\n'
    for script in list_scripts("javascript", ".mjs"):
        head += f'<script type="module" src="{webpath(script.path)}"></script>\n'
    return head
37 |
def css_html():
    """Build the <link> tags for all bundled stylesheets.

    NOTE(review): the original tag markup was stripped from this file (the
    loop body appended an empty string); reconstructed as standard
    stylesheet links — confirm against the original.
    """
    head = ""
    for cssfile in list_scripts("stylesheet", ".css"):
        head += f'<link rel="stylesheet" property="stylesheet" href="{webpath(cssfile.path)}">'
    return head
43 |
def list_scripts(scriptdirname, extension):
    """List regular files under web_assets/<scriptdirname> with the given extension.

    Returns ScriptFile records sorted by filename; empty list if the
    directory does not exist.
    """
    scripts_dir = os.path.join(shared.chuanhu_path, "web_assets", scriptdirname)
    if not os.path.exists(scripts_dir):
        return []
    candidates = [
        ScriptFile(shared.assets_path, name, os.path.join(scripts_dir, name))
        for name in sorted(os.listdir(scripts_dir))
    ]
    return [
        s for s in candidates
        if os.path.splitext(s.path)[1].lower() == extension and os.path.isfile(s.path)
    ]
52 |
53 |
54 | def reload_javascript():
55 | js = javascript_html()
56 | js += ''
57 | js += ''
58 | js += ''
59 |
60 | meta = """
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 | """
73 | css = css_html()
74 |
75 | def template_response(*args, **kwargs):
76 | res = GradioTemplateResponseOriginal(*args, **kwargs)
77 | res.body = res.body.replace(b'', f'{meta}{js}'.encode("utf8"))
78 | # res.body = res.body.replace(b'', f'{js}'.encode("utf8"))
79 | res.body = res.body.replace(b'