├── .npmrc ├── server ├── tsconfig.json ├── utils │ ├── logger.ts │ ├── h3.ts │ ├── axios.ts │ ├── azure.ts │ └── openai.ts ├── middleware │ └── log.ts └── api │ ├── crypto.post.ts │ ├── images │ └── generations.post.ts │ ├── completions.post.ts │ ├── chat │ └── completions.post.ts │ └── models.get.ts ├── app.vue ├── assets ├── preview.gif ├── preview-en.png ├── preview-ja.png ├── preview-v2.png ├── preview-zh.png ├── preview-chat.jpeg ├── appreciation-qr.png └── preview-welcome.jpeg ├── public ├── favicon.ico ├── system.png └── assistant.webp ├── types ├── index.ts ├── chat │ ├── index.ts │ ├── chat.ts │ ├── prompt.ts │ ├── message.ts │ └── setting.ts ├── crypto.ts └── api.ts ├── tsconfig.json ├── .github └── FUNDING.yml ├── docker-compose.yaml ├── utils ├── isMobile.ts ├── trimPrompt.ts └── toggleSideBar.ts ├── components ├── ChatMessageItemTime.vue ├── BaseLabel.vue ├── LogoBar.vue ├── ChatMessageSystem.vue ├── ChatMessageItem.vue ├── ChatSideBar.vue ├── ChatAvatar.vue ├── ChatSendButton.vue ├── ChatStop.vue ├── BaseInput.vue ├── ChatMessageList.vue ├── ChatContentBar.vue ├── FuncBar.vue ├── ChatMessageRole.vue ├── ChatModelSelector.vue ├── CopyText.vue ├── ChatWelcome.vue ├── ChatMessageLoding.vue ├── ChatList.vue ├── HotKeyHelp.vue ├── ChatTitleBar.vue ├── ChatSendBar.vue └── ChatSetting.vue ├── Dockerfile ├── .gitignore ├── composables ├── tiktoken.ts ├── markdown.ts ├── setting.ts └── chatDB.ts ├── package.json ├── .env.example ├── LICENSE ├── nuxt.config.ts ├── docs ├── README-CN.md └── README-JA.md ├── README.md ├── pages └── index.vue ├── locales ├── zh.json ├── ja.json └── en.json └── stores └── chat.ts /.npmrc: -------------------------------------------------------------------------------- 1 | shamefully-hoist=true 2 | auto-install-peers=true 3 | -------------------------------------------------------------------------------- /server/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../.nuxt/tsconfig.server.json" 3 | } 4 | -------------------------------------------------------------------------------- /app.vue: -------------------------------------------------------------------------------- 1 | 6 | -------------------------------------------------------------------------------- /assets/preview.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lianginx/chatgpt-nuxt/HEAD/assets/preview.gif -------------------------------------------------------------------------------- /public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lianginx/chatgpt-nuxt/HEAD/public/favicon.ico -------------------------------------------------------------------------------- /public/system.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lianginx/chatgpt-nuxt/HEAD/public/system.png -------------------------------------------------------------------------------- /assets/preview-en.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lianginx/chatgpt-nuxt/HEAD/assets/preview-en.png -------------------------------------------------------------------------------- /assets/preview-ja.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lianginx/chatgpt-nuxt/HEAD/assets/preview-ja.png 
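The directory tree above shows the overall architecture: every OpenAI call from the browser goes through the Nitro proxy routes under `server/api`, while `stores`, `composables`, and `components` make up the client. As a minimal sketch of how a client-side caller might hit the chat proxy — the helper below is illustrative and not part of the repository; the `x-*` header names follow `server/utils/openai.ts` (shown later in this dump), and the payload is the `CreateChatCompletionRequest` shape from the `openai` v3 SDK:

```ts
import type { CreateChatCompletionRequest } from "openai";

// Hypothetical client-side helper calling the proxy route server/api/chat/completions.post.ts.
// The x-* headers mirror what server/utils/openai.ts reads; values here are placeholders.
async function askChatProxy(cipherApiKey: string, question: string) {
  const body: CreateChatCompletionRequest = {
    model: "gpt-3.5-turbo",
    messages: [{ role: "user", content: question }],
  };
  return await $fetch("/api/chat/completions", {
    method: "POST",
    body,
    headers: {
      "x-api-type": "openai",
      "x-cipher-api-key": cipherApiKey, // AES-encrypted via the /api/crypto route
    },
  });
}
```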
-------------------------------------------------------------------------------- /assets/preview-v2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lianginx/chatgpt-nuxt/HEAD/assets/preview-v2.png -------------------------------------------------------------------------------- /assets/preview-zh.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lianginx/chatgpt-nuxt/HEAD/assets/preview-zh.png -------------------------------------------------------------------------------- /public/assistant.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lianginx/chatgpt-nuxt/HEAD/public/assistant.webp -------------------------------------------------------------------------------- /types/index.ts: -------------------------------------------------------------------------------- 1 | export * from "./chat"; 2 | export * from "./crypto"; 3 | export * from "./api"; 4 | -------------------------------------------------------------------------------- /assets/preview-chat.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lianginx/chatgpt-nuxt/HEAD/assets/preview-chat.jpeg -------------------------------------------------------------------------------- /assets/appreciation-qr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lianginx/chatgpt-nuxt/HEAD/assets/appreciation-qr.png -------------------------------------------------------------------------------- /assets/preview-welcome.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lianginx/chatgpt-nuxt/HEAD/assets/preview-welcome.jpeg -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | // https://nuxt.com/docs/guide/concepts/typescript 3 | "extends": "./.nuxt/tsconfig.json" 4 | } 5 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | custom: ['https://github.com/lianginx/sponsor'] 4 | -------------------------------------------------------------------------------- /server/utils/logger.ts: -------------------------------------------------------------------------------- 1 | export function logger(...message: any[]) { 2 | console.log(`${new Date().toISOString()}`, ...message); 3 | } 4 | -------------------------------------------------------------------------------- /types/chat/index.ts: -------------------------------------------------------------------------------- 1 | export * from "./chat"; 2 | export * from "./message"; 3 | export * from "./prompt"; 4 | export * from "./setting"; 5 | -------------------------------------------------------------------------------- /types/crypto.ts: -------------------------------------------------------------------------------- 1 | export type CryptoType = "en" | "de"; 2 | 3 | export interface CryptoRequest { 4 | message: string; 5 | type: CryptoType; 6 | } 7 | -------------------------------------------------------------------------------- /docker-compose.yaml: 
-------------------------------------------------------------------------------- 1 | version: "3" 2 | services: 3 | chatgpt-nuxt: 4 | build: . 5 | ports: 6 | - 80:3000 7 | restart: unless-stopped 8 | -------------------------------------------------------------------------------- /utils/isMobile.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * 判断是否为移动设备 3 | * @returns {Boolean} 返回布尔值表示是否为移动设备 4 | */ 5 | export const isMobile = (): boolean => 6 | /iPhone|iPad|iPod|Android/i.test(navigator.userAgent); 7 | -------------------------------------------------------------------------------- /server/middleware/log.ts: -------------------------------------------------------------------------------- 1 | import { logger } from "@/server/utils/logger"; 2 | 3 | export default defineEventHandler((event) => { 4 | logger(`[${getMethod(event)}]`, getRequestURL(event).href); 5 | }); 6 | -------------------------------------------------------------------------------- /components/ChatMessageItemTime.vue: -------------------------------------------------------------------------------- 1 | 4 | 5 | 12 | -------------------------------------------------------------------------------- /utils/trimPrompt.ts: -------------------------------------------------------------------------------- 1 | export const trimPrompt = (input: string) => { 2 | return input 3 | .split("\n") 4 | .map((line) => line.trim()) // Removing leading whitespace characters. 5 | .join("\n") 6 | .trim(); // Removing the first/last empty line. 7 | }; 8 | -------------------------------------------------------------------------------- /components/BaseLabel.vue: -------------------------------------------------------------------------------- 1 | 6 | 7 | 12 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # 设置基础镜像 Set up base image 2 | FROM node:slim 3 | 4 | # 设置工作目录 Set working directory 5 | WORKDIR /app 6 | 7 | # 复制生产版本到镜像中 Copy production version into the image. 8 | COPY .output . 
9 | 10 | # 暴露应用程序端口 Expose application ports 11 | EXPOSE 3000 12 | 13 | # 启动服务 Start Service 14 | CMD [ "node","server/index.mjs" ] -------------------------------------------------------------------------------- /components/LogoBar.vue: -------------------------------------------------------------------------------- 1 | 8 | 9 | 12 | -------------------------------------------------------------------------------- /types/api.ts: -------------------------------------------------------------------------------- 1 | import { 2 | CreateChatCompletionRequest, 3 | CreateCompletionRequest, 4 | CreateImageRequest, 5 | } from "openai"; 6 | 7 | export type ApiRequestModel = "models" | "chat" | "text" | "img"; 8 | 9 | export type ApiRequest = 10 | | CreateChatCompletionRequest 11 | | CreateCompletionRequest 12 | | CreateImageRequest; 13 | -------------------------------------------------------------------------------- /utils/toggleSideBar.ts: -------------------------------------------------------------------------------- 1 | export const toggleSideBar = () => { 2 | const clientWidth = document.body.clientWidth; 3 | if (clientWidth < 640) { 4 | const sideBar = document.getElementById("sidebar"); 5 | const mainContent = document.getElementById("main"); 6 | sideBar?.classList.toggle("hidden"); 7 | mainContent?.classList.toggle("hidden"); 8 | } 9 | }; 10 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Nuxt dev/build outputs 2 | .output 3 | .nuxt 4 | .nitro 5 | .cache 6 | dist 7 | 8 | # Node dependencies 9 | node_modules 10 | 11 | # Logs 12 | logs 13 | *.log 14 | 15 | # Misc 16 | .DS_Store 17 | .fleet 18 | .idea 19 | 20 | # Local env files 21 | .env 22 | .env.* 23 | !.env.example 24 | 25 | # lock files 26 | package-lock.json 27 | yarn.lock 28 | pnpm-lock.yaml 29 | 30 | # deploy script 31 | deploy.sh -------------------------------------------------------------------------------- /components/ChatMessageSystem.vue: -------------------------------------------------------------------------------- 1 | 10 | 11 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /components/ChatMessageItem.vue: -------------------------------------------------------------------------------- 1 | 10 | 11 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /components/ChatSideBar.vue: -------------------------------------------------------------------------------- 1 | 10 | 11 | 12 | 13 | 18 | -------------------------------------------------------------------------------- /types/chat/chat.ts: -------------------------------------------------------------------------------- 1 | import { ChatPromptCategoryItem, ChatSettingItem } from "@/types"; 2 | 3 | export type ChatModel = "gpt-3.5-turbo" | "gpt-4" | "dall-e"; 4 | 5 | export type ImageSize = "256x256" | "512x512" | "1024x1024"; 6 | 7 | export interface ChatItem extends ChatOption { 8 | id: number; 9 | } 10 | 11 | export interface ChatOption { 12 | promptId?: ChatPromptCategoryItem["id"]; 13 | settingId?: ChatSettingItem["id"]; 14 | name: string; 15 | model?: ChatModel; 16 | order: number; 17 | } 18 | -------------------------------------------------------------------------------- /components/ChatAvatar.vue: -------------------------------------------------------------------------------- 1 | 14 | 15 | 22 | 
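The `ChatItem`/`ChatOption` split in `types/chat/chat.ts` above mirrors how records are stored: a `ChatOption` describes a chat before it is persisted, and the numeric `id` is only assigned by Dexie (see `composables/chatDB.ts` further down in this dump). A minimal sketch of that flow — the `saveChat` helper is illustrative, not code from the repository:

```ts
import Dexie from "dexie";
import type { ChatItem, ChatOption } from "@/types";

// Illustrative only: persist a ChatOption and return the full ChatItem
// once Dexie has assigned the auto-incremented id (the "++id" key in chatDB.ts).
async function saveChat(
  chatTable: Dexie.Table<ChatItem, number>,
  option: ChatOption
): Promise<ChatItem> {
  const id = await chatTable.add(option as ChatItem);
  return { id, ...option };
}

// Example: a new DALL·E image chat placed at the top of the list.
const draft: ChatOption = { name: "New Image", model: "dall-e", order: 0 };
```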
-------------------------------------------------------------------------------- /types/chat/prompt.ts: -------------------------------------------------------------------------------- 1 | import { ChatMessageExItem } from "@/types"; 2 | 3 | export interface ChatPromptCategoryItem extends ChatPromptCategoryOption { 4 | id: number; 5 | } 6 | 7 | export interface ChatPromptCategoryOption { 8 | name: string; 9 | order: number; 10 | } 11 | 12 | export interface ChatPromptItem extends ChatPromptOption { 13 | id: number; 14 | } 15 | 16 | export interface ChatPromptOption { 17 | promptCategoryId: ChatPromptCategoryItem["id"]; 18 | name: string; 19 | order: number; 20 | message: ChatMessageExItem; 21 | } 22 | -------------------------------------------------------------------------------- /composables/tiktoken.ts: -------------------------------------------------------------------------------- 1 | import { ChatRole } from "@/types"; 2 | 3 | let total = 0; 4 | const tokens = { 5 | user: 0, 6 | assistant: 0, 7 | system: 0, 8 | }; 9 | 10 | const add = (role: ChatRole, count?: number) => (tokens[role] += count ?? 1); 11 | 12 | const sub = (role: ChatRole, count?: number) => (tokens[role] -= count ?? 1); 13 | 14 | const getRoleTokens = (role: ChatRole) => tokens[role]; 15 | 16 | const getTotalTokens = () => 17 | (total = Object.values(tokens).reduce((acc, value) => acc + value, 0)); 18 | 19 | export { add, sub, getRoleTokens, getTotalTokens }; 20 | -------------------------------------------------------------------------------- /server/api/crypto.post.ts: -------------------------------------------------------------------------------- 1 | import CryptoJS from "crypto-js"; 2 | import { CryptoRequest } from "@/types"; 3 | 4 | const key = "lianginx"; 5 | 6 | export default defineEventHandler(async (event) => { 7 | const crypto = (await readBody(event)) as CryptoRequest; 8 | return aesCrypto(crypto); 9 | }); 10 | 11 | export function aesCrypto(crypto: CryptoRequest) { 12 | if (crypto.type === "en") { 13 | return CryptoJS.AES.encrypt(crypto.message, key).toString(); 14 | } else { 15 | return CryptoJS.AES.decrypt(crypto.message, key).toString( 16 | CryptoJS.enc.Utf8 17 | ); 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /types/chat/message.ts: -------------------------------------------------------------------------------- 1 | import { ChatItem, ImageSize } from "@/types"; 2 | 3 | export type ChatRole = "user" | "assistant" | "system"; 4 | 5 | export interface ChatMessageExItem extends ChatMessageExOption { 6 | id: number; 7 | } 8 | 9 | export interface ChatMessageExOption extends ChatMessage { 10 | chatId?: ChatItem["id"]; 11 | active?: boolean; 12 | show?: boolean; 13 | error?: boolean; 14 | errorMessage?: string; 15 | sendDate?: number; 16 | } 17 | 18 | export interface ChatMessage { 19 | role: ChatRole; 20 | content: string; 21 | imageN?: number; 22 | imageSize?: ImageSize; 23 | } 24 | -------------------------------------------------------------------------------- /components/ChatSendButton.vue: -------------------------------------------------------------------------------- 1 | 16 | 17 | 24 | -------------------------------------------------------------------------------- /composables/markdown.ts: -------------------------------------------------------------------------------- 1 | import Markdown from "markdown-it"; 2 | import highlight from "highlight.js"; 3 | 4 | const mdOptions: Markdown.Options = { 5 | linkify: true, 6 | typographer: true, 7 | 
breaks: true, 8 | langPrefix: "language-", 9 | highlight(str, lang) { 10 | if (lang && highlight.getLanguage(lang)) { 11 | try { 12 | return ( 13 | '
<pre><code class="hljs">' +
14 |           highlight.highlight(lang, str, true).value +
15 |           "</code></pre>
" 16 | ); 17 | } catch (__) {} 18 | } 19 | return ""; 20 | }, 21 | }; 22 | 23 | export const md = new Markdown(mdOptions); 24 | -------------------------------------------------------------------------------- /components/ChatStop.vue: -------------------------------------------------------------------------------- 1 | 12 | 13 | 19 | -------------------------------------------------------------------------------- /types/chat/setting.ts: -------------------------------------------------------------------------------- 1 | export type ChatSettingType = "default" | "global" | "chat"; 2 | 3 | export type ApiType = "openai" | "azure"; 4 | 5 | export type ColorMode = "system" | "light" | "dark"; 6 | 7 | export interface ChatSettingItem extends ChatSettingOption { 8 | id: number; 9 | } 10 | 11 | export interface ChatSettingOption extends ChatSetting { 12 | type: ChatSettingType; 13 | } 14 | 15 | export interface ChatSetting { 16 | apiType: ApiType; 17 | apiKey?: string; 18 | apiHost?: string; 19 | azureApiVersion?: string; 20 | azureGpt35DeploymentId?: string; 21 | azureGpt4DeploymentId?: string; 22 | temperature: number; 23 | locale: string; 24 | colorMode: ColorMode; 25 | } 26 | -------------------------------------------------------------------------------- /components/BaseInput.vue: -------------------------------------------------------------------------------- 1 | 15 | 16 | 24 | -------------------------------------------------------------------------------- /components/ChatMessageList.vue: -------------------------------------------------------------------------------- 1 | 11 | 12 | 28 | 29 | 30 | -------------------------------------------------------------------------------- /composables/setting.ts: -------------------------------------------------------------------------------- 1 | import { ChatSettingItem, ChatSettingOption } from "@/types"; 2 | 3 | const key = "chatSetting"; 4 | 5 | export async function saveSetting(setting: ChatSettingOption) { 6 | const oldSetting = loadSetting(); 7 | 8 | if ( 9 | (!oldSetting && setting.apiKey) || 10 | (oldSetting?.apiKey && oldSetting?.apiKey !== setting.apiKey) // 处理之前明文储存 API Key 的过度方法 11 | ) { 12 | const encrypt = await $fetch("/api/crypto", { 13 | method: "post", 14 | body: { message: setting.apiKey, type: "en" }, 15 | }); 16 | setting.apiKey = encrypt; 17 | } 18 | 19 | localStorage.setItem(key, JSON.stringify({ ...setting })); 20 | } 21 | 22 | export function loadSetting(): ChatSettingItem | undefined { 23 | const settingString = localStorage.getItem(key); 24 | if (settingString) { 25 | return JSON.parse(settingString); 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /server/api/images/generations.post.ts: -------------------------------------------------------------------------------- 1 | import { CreateImageRequest } from "openai"; 2 | import { RequestHeaders } from "h3"; 3 | import { errorHandler, setResStatus } from "~/server/utils/h3"; 4 | import { getOpenAIApiInstance } from "../../utils/openai"; 5 | 6 | export default defineEventHandler(async (event) => { 7 | try { 8 | const headers = getHeaders(event); 9 | const body = (await readBody(event)) as CreateImageRequest; 10 | const response = await createImage(headers, body); 11 | 12 | setResStatus(event, response.status, response.statusText); 13 | return response.data; 14 | } catch (e: any) { 15 | await errorHandler(event, e); 16 | } 17 | }); 18 | 19 | async function createImage(headers: RequestHeaders, body: CreateImageRequest) { 20 | 
const openai = getOpenAIApiInstance("img", headers, body); 21 | return openai.createImage(body); 22 | } 23 | -------------------------------------------------------------------------------- /server/api/completions.post.ts: -------------------------------------------------------------------------------- 1 | import { CreateCompletionRequest } from "openai"; 2 | import { RequestHeaders } from "h3"; 3 | import { errorHandler, setResStatus } from "~/server/utils/h3"; 4 | import { getOpenAIApiInstance } from "../utils/openai"; 5 | 6 | export default defineEventHandler(async (event) => { 7 | try { 8 | const headers = getHeaders(event); 9 | const body = (await readBody(event)) as CreateCompletionRequest; 10 | const response = await createCompletion(headers, body); 11 | 12 | setResStatus(event, response.status, response.statusText); 13 | return response.data; 14 | } catch (e: any) { 15 | return await errorHandler(event, e); 16 | } 17 | }); 18 | 19 | async function createCompletion( 20 | headers: RequestHeaders, 21 | body: CreateCompletionRequest 22 | ) { 23 | const openai = getOpenAIApiInstance("text", headers, body); 24 | return openai.createCompletion(body); 25 | } 26 | -------------------------------------------------------------------------------- /server/api/chat/completions.post.ts: -------------------------------------------------------------------------------- 1 | import { CreateChatCompletionRequest } from "openai"; 2 | import { RequestHeaders } from "h3"; 3 | import { errorHandler, setResStatus } from "~/server/utils/h3"; 4 | import { getOpenAIApiInstance } from "../../utils/openai"; 5 | 6 | export default defineEventHandler(async (event) => { 7 | try { 8 | const headers = getHeaders(event); 9 | const body = (await readBody(event)) as CreateChatCompletionRequest; 10 | const response = await createChatCompletion(headers, body); 11 | 12 | setResStatus(event, response.status, response.statusText); 13 | return response.data; 14 | } catch (e: any) { 15 | return await errorHandler(event, e); 16 | } 17 | }); 18 | 19 | async function createChatCompletion( 20 | headers: RequestHeaders, 21 | body: CreateChatCompletionRequest 22 | ) { 23 | const openai = getOpenAIApiInstance("chat", headers, body); 24 | return openai.createChatCompletion(body); 25 | } 26 | -------------------------------------------------------------------------------- /components/ChatContentBar.vue: -------------------------------------------------------------------------------- 1 | 19 | 20 | 31 | -------------------------------------------------------------------------------- /server/utils/h3.ts: -------------------------------------------------------------------------------- 1 | import { H3Event } from "h3"; 2 | 3 | export function setResStatus(event: H3Event, code: number, message: string) { 4 | event.node.res.statusCode = code; 5 | event.node.res.statusMessage = message; 6 | } 7 | 8 | export async function errorHandler(event: H3Event, e: any) { 9 | // 很奇怪,在我的 mac 开发环境中报错时,response 永远是一个 Stream 对象 10 | // 但是在 Windows 开发环境和 Docker 中报错时,response 却是 undefined 11 | if (e.response?.data) { 12 | setResStatus(event, e.response.status, e.response.data.statusText); 13 | 14 | let isStreamNull = true; // mac 开发环境中上,response 永远不是 undefined 15 | 16 | for await (const data of e.response.data) { 17 | isStreamNull = false; 18 | const message = data.toString(); 19 | try { 20 | const parsed = JSON.parse(message); 21 | return parsed; 22 | } catch (error) { 23 | return message; 24 | } 25 | } 26 | 27 | if (isStreamNull) { 28 | return e; 29 | } 30 | 
} else { 31 | return e; 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "chatgpt-nuxt", 3 | "license": "MIT", 4 | "private": true, 5 | "scripts": { 6 | "build": "nuxt build", 7 | "dev": "nuxt dev", 8 | "generate": "nuxt generate", 9 | "preview": "nuxt preview", 10 | "postinstall": "nuxt prepare", 11 | "docker:dev": "docker-compose up" 12 | }, 13 | "devDependencies": { 14 | "@nuxt/devtools": "latest", 15 | "@nuxtjs/color-mode": "^3.3.0", 16 | "@nuxtjs/i18n": "8.0.0-beta.12", 17 | "@nuxtjs/tailwindcss": "^6.8.0", 18 | "@tailwindcss/typography": "^0.5.9", 19 | "@types/node": "^18", 20 | "nuxt": "^3.6.0", 21 | "nuxt-icon": "^0.4.1", 22 | "tailwindcss": "^3.3.2", 23 | "typescript": "latest" 24 | }, 25 | "dependencies": { 26 | "@icon-park/vue-next": "^1.4.2", 27 | "@pinia/nuxt": "^0.4.11", 28 | "crypto-js": "^4.1.1", 29 | "dexie": "^3.2.4", 30 | "highlight.js": "^11.8.0", 31 | "hotkeys-js": "^3.10.3", 32 | "markdown-it": "^13.0.1", 33 | "moment": "^2.29.4", 34 | "openai": "^3.3.0" 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | ################### 2 | # Common Settings # 3 | ################### 4 | 5 | # Use environment variables or not. `yes` or `no` 6 | NUXT_PUBLIC_USE_ENV=yes 7 | 8 | # The API type. `openai` or `azure` 9 | NUXT_PUBLIC_API_TYPE=openai 10 | 11 | # The API key used for authentication with OpenAI or Azure OpenAI Service. 12 | NUXT_API_KEY=YOUR_API_KEY 13 | 14 | # Higher values will make the output more random, while lower values will make it more focused and deterministic. `0.0` - `2.0` 15 | NUXT_PUBLIC_DEFAULT_TEMPERATURE=1 16 | 17 | 18 | ################################# 19 | # Azure OpenAI Service Settings # 20 | ################################# 21 | 22 | # The endpoint of the Azure OpenAI Service. 23 | NUXT_API_HOST=https://YOUR_RESOURCE_NAME.openai.azure.com 24 | 25 | # API version of the Azure OpenAI Service. 26 | NUXT_AZURE_API_VERSION=2023-06-01-preview 27 | 28 | # Deployment name of the GPT-3.5 model on the Azure OpenAI Service. 29 | NUXT_AZURE_GPT35_DEPLOYMENT_ID= 30 | 31 | # Deployment name of the GPT-4 model on the Azure OpenAI Service. 32 | NUXT_AZURE_GPT4_DEPLOYMENT_ID= 33 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Liang INX 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /components/FuncBar.vue: -------------------------------------------------------------------------------- 1 | 14 | 15 | 42 | 43 | 44 | -------------------------------------------------------------------------------- /components/ChatMessageRole.vue: -------------------------------------------------------------------------------- 1 | 24 | 25 | 38 | 39 | 44 | -------------------------------------------------------------------------------- /composables/chatDB.ts: -------------------------------------------------------------------------------- 1 | import Dexie from "dexie"; 2 | import { 3 | ChatSettingOption, 4 | ChatItem, 5 | ChatMessageExItem, 6 | ChatSettingItem, 7 | ChatPromptCategoryItem, 8 | ChatPromptItem, 9 | ChatMessageExOption, 10 | ChatOption, 11 | ChatPromptOption, 12 | ChatPromptCategoryOption, 13 | } from "@/types"; 14 | 15 | const databaseName = "ChatGPT"; 16 | const lastVersion = 2; 17 | 18 | const chat = [ 19 | "++id", 20 | "promptId", 21 | "settingId", 22 | "name", 23 | "model", 24 | "order", 25 | ].toString(); 26 | const message = [ 27 | "++id", 28 | "chatId", 29 | "role", 30 | "active", 31 | "show", 32 | "current", 33 | "error", 34 | "sendData", 35 | ].toString(); 36 | const setting = ["++id", "type"].toString(); 37 | const promptCategory = ["++id", "name", "order"].toString(); 38 | const prompt = [ 39 | "++id", 40 | "promptCategoryId", 41 | "name", 42 | "order", 43 | "message", 44 | ].toString(); 45 | 46 | export class ChatDB extends Dexie { 47 | chat!: Dexie.Table; 48 | message!: Dexie.Table; 49 | setting!: Dexie.Table; 50 | promptCategory!: Dexie.Table; 51 | prompt!: Dexie.Table; 52 | 53 | constructor() { 54 | super(databaseName); 55 | this.version(lastVersion).stores({ 56 | chat, 57 | message, 58 | setting, 59 | promptCategory, 60 | prompt, 61 | }); 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /components/ChatModelSelector.vue: -------------------------------------------------------------------------------- 1 | 27 | 28 | 52 | -------------------------------------------------------------------------------- /nuxt.config.ts: -------------------------------------------------------------------------------- 1 | // https://nuxt.com/docs/api/configuration/nuxt-config 2 | export default defineNuxtConfig({ 3 | app: {}, 4 | runtimeConfig: { 5 | apiKey: "", 6 | apiHost: "", 7 | azureApiVersion: "2023-06-01-preview", 8 | azureGpt35DeploymentId: "", 9 | azureGpt4DeploymentId: "", 10 | public: { 11 | useEnv: "no", 12 | apiType: "openai", 13 | defaultTemperature: "1", 14 | }, 15 | }, 16 | modules: [ 17 | "@nuxtjs/color-mode", 18 | "@nuxtjs/i18n", 19 | "@nuxtjs/tailwindcss", 20 | "@pinia/nuxt", 21 | "nuxt-icon", 22 | ], 23 | css: ["highlight.js/styles/dark.css"], 24 | i18n: { 25 | locales: [ 26 | { 27 | code: "zh", 28 | iso: "zh-CN", 29 | file: "zh.json", 30 | name: "简体中文", 31 | }, 32 | { 33 | code: "en", 34 | iso: "en-US", 35 | file: "en.json", 36 | name: "English (US)", 37 | }, 38 | { 39 | code: "ja", 40 | iso: "ja-JP", 41 | file: "ja.json", 42 | name: "日本語", 43 | }, 44 | ], 45 | langDir: "locales", 46 | defaultLocale: "en", 47 | detectBrowserLanguage: { 48 | useCookie: 
true, 49 | cookieKey: "i18n_redirected", 50 | redirectOn: "root", 51 | }, 52 | precompile: { 53 | strictMessage: false, 54 | }, 55 | }, 56 | tailwindcss: { 57 | config: { 58 | darkMode: "class", 59 | content: [], 60 | plugins: [require("@tailwindcss/typography")], 61 | }, 62 | }, 63 | colorMode: { 64 | classSuffix: "", 65 | }, 66 | ssr: false, 67 | devtools: { enabled: false }, 68 | }); 69 | -------------------------------------------------------------------------------- /components/CopyText.vue: -------------------------------------------------------------------------------- 1 | 13 | 14 | 49 | 50 | 55 | -------------------------------------------------------------------------------- /server/utils/axios.ts: -------------------------------------------------------------------------------- 1 | import axios, { AxiosRequestConfig, AxiosResponse } from "axios"; 2 | 3 | export function createAxiosInstance() { 4 | const axiosRequestConfig: AxiosRequestConfig = { 5 | responseType: "stream", 6 | timeout: 1000 * 20, 7 | timeoutErrorMessage: "**Network connection timed out. Please try again**", 8 | // 使用代理,配置参考 https://axios-http.com/docs/req_config 9 | // proxy: { 10 | // protocol: "http", 11 | // host: "127.0.0.1", 12 | // port: 7890, 13 | // }, 14 | }; 15 | 16 | function onRequest(config: AxiosRequestConfig) { 17 | logger("onRequest", `[${config.method?.toUpperCase()}]`, config.url); 18 | return config; 19 | } 20 | 21 | function onResponse(response: AxiosResponse) { 22 | logger( 23 | "onResponse", 24 | `[${response.config.method?.toUpperCase()}]`, 25 | response.config.url, 26 | response.status, 27 | response.statusText 28 | ); 29 | return response; 30 | } 31 | 32 | function onRequestError(error: any) { 33 | logger("onRequestError", error); 34 | return error; 35 | } 36 | 37 | function onResponseError(error: any) { 38 | logger("onResponseError", error); 39 | return error.response; 40 | } 41 | 42 | const axiosInstance = axios.create(axiosRequestConfig); 43 | axiosInstance.interceptors.request.use( 44 | (config) => onRequest(config) || config 45 | ); 46 | axiosInstance.interceptors.response.use( 47 | (response) => onResponse(response) || response 48 | ); 49 | axiosInstance.interceptors.request.use( 50 | undefined, 51 | (error) => onRequestError(error) || Promise.reject(error) 52 | ); 53 | axiosInstance.interceptors.response.use( 54 | undefined, 55 | (error) => onResponseError(error) || Promise.reject(error) 56 | ); 57 | 58 | return axiosInstance; 59 | } 60 | -------------------------------------------------------------------------------- /docs/README-CN.md: -------------------------------------------------------------------------------- 1 | # chatgpt-nuxt 2 | 3 | ![preview](/assets/preview-zh.png) 4 | 5 | [ENGLISH](/README.md) | 简体中文 | [日本語](/docs/README-JA.md) 6 | 7 | 这是一个使用[Nuxt 3](https://nuxt.com/)实现的前端应用程序,用于 OpenAI 的 [ChatGPT](https://openai.com/blog/chatgpt) 和 [DALL·E](https://openai.com/dall-e-2) API。 8 | 9 | ## 支持的 API 10 | 11 | - [OpenAI API](https://openai.com/blog/openai-api) 12 | - [Azure Open AI Service API](https://learn.microsoft.com/zh-cn/azure/cognitive-services/openai/reference) 13 | 14 | ## 支持的模型 15 | 16 | - Chat completion 17 | - gpt-4 18 | - gpt-3.5-turbo 19 | - Image generation 20 | - DALL·E 21 | 22 | ## 设置 23 | 24 | 首先,请确保您已安装所有依赖项: 25 | 26 | ```bash 27 | npm i 28 | # 或 29 | yarn 30 | ``` 31 | 32 | ## 开发服务器 33 | 34 | 启动开发服务器并在 上查看它: 35 | 36 | ```bash 37 | npm run dev 38 | # 或 39 | yarn dev 40 | ``` 41 | 42 | ## 生产 43 | 44 | 要为生产构建应用程序,请执行: 45 | 46 | ```bash 47 | npm run build 48 
| # 或 49 | yarn build 50 | ``` 51 | 52 | 使用以下命令在本地预览生产构建: 53 | 54 | ```bash 55 | npm run preview 56 | # 或 57 | yarn preview 58 | ``` 59 | 60 | ## 部署 61 | 62 | 一行命令快速部署: 63 | 64 | ```bash 65 | docker run -d \ 66 | -p 80:3000 \ 67 | --restart unless-stopped \ 68 | --name chatgpt-nuxt \ 69 | lianginx/chatgpt-nuxt:latest 70 | ``` 71 | 72 | 使用 Docker Compose 文件部署: 73 | 74 | ```bash 75 | version: "3" 76 | services: 77 | chatgpt-nuxt: 78 | image: lianginx/chatgpt-nuxt:latest 79 | ports: 80 | - 80:3000 81 | restart: unless-stopped 82 | ``` 83 | 84 | ```bash 85 | docker-compose up -d # 启动并在后台运行。 86 | docker-compose stop # 停止 87 | docker-compose down # 停止并删除 88 | ``` 89 | 90 | 完成后,您的项目将部署到端口 `80`。 91 | 92 | 如果您对此 README 文档有任何反馈或建议,立即告诉我,我将很感激您的意见。 93 | 94 | ## 配置 95 | 96 | 应用程序可以通过两种方式进行配置: 97 | 98 | - 使用应用程序上的配置界面进行设置。 99 | - 通过预先设置环境变量进行设置。 100 | 101 | 如果要设置环境变量,请先参考 [`.env.example`](/.env.example) 并在根目录下创建 `.env` 文件。 102 | 103 | ## 许可证 104 | 105 | 本项目使用 [MIT](/license) 许可证。 106 | -------------------------------------------------------------------------------- /components/ChatWelcome.vue: -------------------------------------------------------------------------------- 1 | 23 | 24 | 68 | -------------------------------------------------------------------------------- /docs/README-JA.md: -------------------------------------------------------------------------------- 1 | # chatgpt-nuxt 2 | 3 | ![preview](/assets/preview-ja.png) 4 | 5 | [ENGLISH](/README.md) | [简体中文](/docs/README-CN.md) | 日本語 6 | 7 | OpenAI 社の [ChatGPT](https://openai.com/blog/chatgpt) および [DALL·E](https://openai.com/dall-e-2) の API を使用するために [Nuxt 3](https://nuxt.com/) で実装したフロントエンドアプリケーションです。 8 | 9 | ## 対応 API 10 | 11 | - [OpenAI API](https://openai.com/blog/openai-api) 12 | - [Azure Open AI Service API](https://learn.microsoft.com/ja-jp/azure/cognitive-services/openai/reference) 13 | 14 | ## 対応モデル 15 | 16 | - Chat completion 17 | - gpt-4 18 | - gpt-3.5-turbo 19 | - Image generation 20 | - DALL·E 21 | 22 | ## セットアップ 23 | 24 | 最初に全ての依存関係をインストールします。 25 | 26 | ```bash 27 | npm i 28 | # or 29 | yarn 30 | ``` 31 | 32 | ## 開発サーバーの起動 33 | 34 | 以下のコマンドで開発用のサーバーをローカルで起動すると で動作確認が可能です。 35 | 36 | ```bash 37 | npm run dev 38 | # or 39 | yarn dev 40 | ``` 41 | 42 | ## プロダクション・ビルド 43 | 44 | プロダクション用にビルドする場合は次のコマンドを実行します。 45 | 46 | ```bash 47 | npm run build 48 | # or 49 | yarn build 50 | ``` 51 | 52 | プロダクション・ビルドをローカルでプレビューするには、以下のコマンドを実行します。 53 | 54 | ```bash 55 | npm run preview 56 | # or 57 | yarn preview 58 | ``` 59 | 60 | ## デプロイ 61 | 62 | クイックデプロイメントのためのワンライナーコマンド: 63 | 64 | ```bash 65 | docker run -d \ 66 | -p 80:3000 \ 67 | --restart unless-stopped \ 68 | --name chatgpt-nuxt \ 69 | lianginx/chatgpt-nuxt:latest 70 | ``` 71 | 72 | Docker Compose を用いてデプロイが可能です。 73 | 74 | ```bash 75 | version: "3" 76 | services: 77 | chatgpt-nuxt: 78 | image: lianginx/chatgpt-nuxt:latest 79 | ports: 80 | - 80:3000 81 | restart: unless-stopped 82 | ``` 83 | 84 | ```bash 85 | docker-compose up -d # バックグラウンドで起動 86 | docker-compose stop # 停止 87 | docker-compose down # 停止&削除 88 | ``` 89 | 90 | デプロイが完了すると `80` 番ポートで接続できるようになります。 91 | 92 | この README ドキュメントを改善するためのフィードバックや提案がある場合は、遠慮なくお知らせください。あなたのご意見を大切にしています。 93 | 94 | ## 設定 95 | 96 | このアプリケーションは以下の 2 つのいずれかの方法で設定可能です: 97 | 98 | - アプリケーション上の設定画面で設定する 99 | - あらかじめ環境変数に設定を定義しておく 100 | 101 | 環境変数を設定する場合は、[`.env.example`](/.env.example) を参考にして作成した `.env` ファイルをルートディレクトリに配置して下さい。 102 | 103 | ## ライセンス 104 | 105 | このプロジェクトは [MIT](/LICENSE) ライセンスを使用しています。 106 | 
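Both localized READMEs above, like the English README later in this dump, describe two configuration paths: the in-app settings screen, or a `.env` file based on `.env.example`. On the server side that choice hinges on the `NUXT_PUBLIC_USE_ENV` flag exposed through Nuxt's runtime config; the snippet below is a simplified sketch of the pattern used in `server/utils/openai.ts` (shown further down), not a verbatim excerpt:

```ts
import type { RequestHeaders } from "h3";

// Simplified illustration of the env-vs-header switch used by the server utils.
export function resolveApiKey(headers: RequestHeaders): string | undefined {
  const config = useRuntimeConfig();
  if (config.public.useEnv === "yes") {
    // Values come from .env (see .env.example), e.g. NUXT_API_KEY -> runtimeConfig.apiKey
    return config.apiKey;
  }
  // Otherwise each request carries the user's key, AES-encrypted by /api/crypto
  return headers["x-cipher-api-key"];
}
```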
-------------------------------------------------------------------------------- /components/ChatMessageLoding.vue: -------------------------------------------------------------------------------- 1 | 16 | 17 | 115 | -------------------------------------------------------------------------------- /server/api/models.get.ts: -------------------------------------------------------------------------------- 1 | import { RequestHeaders } from "h3"; 2 | import { ApiType } from "@/types"; 3 | import { errorHandler, setResStatus } from "~/server/utils/h3"; 4 | import { getOpenAIApiInstance } from "../utils/openai"; 5 | 6 | const runtimeConfig = useRuntimeConfig(); 7 | 8 | export default defineEventHandler(async (event) => { 9 | try { 10 | const headers = getHeaders(event); 11 | const response = await listModels(headers); 12 | 13 | setResStatus(event, response.status, response.statusText); 14 | return response.data; 15 | } catch (e: any) { 16 | return await errorHandler(event, e); 17 | } 18 | }); 19 | 20 | async function listModels(headers: RequestHeaders) { 21 | const openai = getOpenAIApiInstance("models", headers); 22 | 23 | const useEnv = runtimeConfig.public.useEnv === "yes"; 24 | const apiType = useEnv 25 | ? runtimeConfig.public.apiType 26 | : (headers["x-api-type"] as ApiType); 27 | const azureGpt4DeploymentId = useEnv 28 | ? runtimeConfig.azureGpt4DeploymentId 29 | : headers["x-azure-gpt4-deployment-id"]!; 30 | 31 | switch (apiType) { 32 | // Fetch available models from OpenAI API 33 | case "openai": 34 | return openai.listModels(); 35 | 36 | // Generate response compatible with the list of models response from the OpenAI API. 37 | case "azure": 38 | const gpt35ModelData = { 39 | id: "gpt-3.5-turbo", 40 | object: "model", 41 | owned_by: "", 42 | permission: [], 43 | }; 44 | const gpt4ModelData = { 45 | id: "gpt-4", 46 | object: "model", 47 | owned_by: "", 48 | permission: [], 49 | }; 50 | const availableModels = [gpt35ModelData]; 51 | if (azureGpt4DeploymentId) { 52 | availableModels.push(gpt4ModelData); 53 | } 54 | 55 | // Generate response compatible with openai.ListModelsResponse 56 | const responseData = { 57 | data: availableModels, 58 | object: "list", 59 | }; 60 | 61 | // Generate response compatible with AxiosResponse 62 | return { 63 | data: responseData, 64 | status: 200, 65 | statusText: "OK", 66 | config: {}, 67 | request: {}, 68 | }; 69 | 70 | // Unknown API Type 71 | default: 72 | // Generate error response compatible with AxiosResponse 73 | return { 74 | data: {}, 75 | status: 400, 76 | statusText: "Bad Request", 77 | config: {}, 78 | request: {}, 79 | }; 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /components/ChatList.vue: -------------------------------------------------------------------------------- 1 | 56 | 57 | 79 | 80 | 81 | -------------------------------------------------------------------------------- /components/HotKeyHelp.vue: -------------------------------------------------------------------------------- 1 | 40 | 41 | 80 | 81 | 86 | -------------------------------------------------------------------------------- /server/utils/azure.ts: -------------------------------------------------------------------------------- 1 | import { CreateImageRequest, ImagesResponse, OpenAIApi } from "openai"; 2 | import { AxiosRequestConfig, AxiosResponse } from "axios"; 3 | 4 | const pollingInterval = 1000; 5 | 6 | export class AzureOpenAIApi extends OpenAIApi { 7 | public createImage( 8 | createImageRequest: 
CreateImageRequest, 9 | options?: AxiosRequestConfig 10 | ) { 11 | const createImageUrl = 12 | (this.configuration?.basePath || this.basePath) + 13 | "/images/generations:submit"; 14 | 15 | const axiosRequestConfig: AxiosRequestConfig = { 16 | ...options, 17 | ...this.configuration?.baseOptions, 18 | responseType: "json", 19 | }; 20 | 21 | return this.axios 22 | .post(createImageUrl, createImageRequest, axiosRequestConfig) 23 | .then((response) => { 24 | const operationId = response.data.id; 25 | return this.getImage(operationId, options); 26 | }) 27 | .catch((error) => { 28 | return error; 29 | }); 30 | } 31 | 32 | public getImage(operationId: string, options?: AxiosRequestConfig) { 33 | const getImageUrl = 34 | (this.configuration?.basePath || this.basePath) + 35 | "/operations/images/" + 36 | operationId; 37 | 38 | const axiosRequestConfig: AxiosRequestConfig = { 39 | ...options, 40 | ...this.configuration?.baseOptions, 41 | responseType: "json", 42 | }; 43 | 44 | return new Promise>( 45 | (resolve, reject) => { 46 | const intervalId = setInterval(() => { 47 | // Call the image retrieval API 48 | this.axios 49 | .get(getImageUrl, axiosRequestConfig) 50 | .then((response) => { 51 | // If it is the termination status, end polling 52 | const finishedStatus = [ 53 | "succeeded", 54 | "canceled", 55 | "failed", 56 | "deleted", 57 | ]; 58 | if (finishedStatus.includes(response.data.status)) { 59 | clearInterval(intervalId); 60 | if (response.data.result) { 61 | // Return only data that complies with the OpenAI API specifications. 62 | response.data = response.data.result; 63 | } 64 | resolve(response); 65 | } 66 | }) 67 | .catch((error) => { 68 | // If an error occurs, reject the Promise 69 | clearInterval(intervalId); 70 | reject(error); 71 | }); 72 | }, pollingInterval); 73 | } 74 | ); 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # chatgpt-nuxt 2 | 3 | ![preview](/assets/preview-en.png) 4 | 5 | ENGLISH | [简体中文](/docs/README-CN.md) | [日本語](/docs/README-JA.md) 6 | 7 | This is a frontend application implemented in [Nuxt 3](https://nuxt.com/) for OpenAI's [ChatGPT](https://openai.com/blog/chatgpt) and [DALL·E](https://openai.com/dall-e-2) API. 
8 | 9 | ## Supported APIs 10 | 11 | - [OpenAI API](https://openai.com/blog/openai-api) 12 | - [Azure Open AI Service API](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/reference) 13 | 14 | ## Supported Models 15 | 16 | - Chat completion 17 | - gpt-4 18 | - gpt-3.5-turbo 19 | - Image generation 20 | - DALL·E 21 | 22 | ## Setup 23 | 24 | To begin, please ensure you have installed all dependencies: 25 | 26 | ```bash 27 | npm i 28 | # or 29 | yarn 30 | # or 31 | pnpm i 32 | ``` 33 | 34 | ## Development Server 35 | 36 | Launch the development server and view it at : 37 | 38 | ```bash 39 | npm run dev 40 | # or 41 | yarn dev 42 | # or 43 | pnpm dev 44 | ``` 45 | 46 | ## Production 47 | 48 | To build your application for production, execute: 49 | 50 | ```bash 51 | npm run build 52 | # or 53 | yarn build 54 | # or 55 | pnpm build 56 | ``` 57 | 58 | Preview the production build locally with the following command: 59 | 60 | ```bash 61 | npm run preview 62 | # or 63 | yarn preview 64 | # or 65 | pnpm preview 66 | ``` 67 | 68 | ## Deploy 69 | 70 | One-line command for quick deployment: 71 | 72 | ```bash 73 | docker run -d \ 74 | -p 80:3000 \ 75 | --restart unless-stopped \ 76 | --name chatgpt-nuxt \ 77 | lianginx/chatgpt-nuxt:latest 78 | ``` 79 | 80 | To deploy using Docker Compose: 81 | 82 | ```bash 83 | version: "3" 84 | services: 85 | chatgpt-nuxt: 86 | image: lianginx/chatgpt-nuxt:latest 87 | ports: 88 | - 80:3000 89 | restart: unless-stopped 90 | ``` 91 | 92 | ```bash 93 | docker-compose up -d # Start and run in the background. 94 | docker-compose stop # Stop 95 | docker-compose down # Stop and delete 96 | ``` 97 | 98 | When completed, your project will be deployed onto port `80`. 99 | 100 | If you have any feedback or suggestions for improving this README document, please don’t hesitate to let me know. I appreciate your input. 101 | 102 | ## Configuration 103 | 104 | There are two ways to configure the application: 105 | 106 | - setting it up using the configuration screen on the application. 107 | - setting it up by using environment variables in advance. 108 | 109 | If you want to set environment variables, please refer to [`.env.example`](/.env.example) and place a `.env` file in the root directory. 110 | 111 | ## License 112 | 113 | This project uses the [MIT](/LICENSE) license. 114 | -------------------------------------------------------------------------------- /server/utils/openai.ts: -------------------------------------------------------------------------------- 1 | import { RequestHeaders } from "h3"; 2 | import { aesCrypto } from "~/server/api/crypto.post"; 3 | import { Configuration, CreateChatCompletionRequest, OpenAIApi } from "openai"; 4 | import { ApiRequest, ApiRequestModel, ApiType, ChatModel } from "~/types"; 5 | import { createAxiosInstance } from "./axios"; 6 | import { AzureOpenAIApi } from "./azure"; 7 | 8 | const runtimeConfig = useRuntimeConfig(); 9 | 10 | function createOpenAIConfiguration( 11 | model: ApiRequestModel, 12 | headers: RequestHeaders, 13 | body?: ApiRequest 14 | ) { 15 | const useEnv = runtimeConfig.public.useEnv === "yes"; 16 | 17 | const apiType = useEnv 18 | ? runtimeConfig.public.apiType 19 | : (headers["x-api-type"] as ApiType); 20 | const apiKey = useEnv 21 | ? runtimeConfig.apiKey 22 | : aesCrypto({ message: headers["x-cipher-api-key"]!, type: "de" }); 23 | const apiHost = useEnv ? runtimeConfig.apiHost : headers["x-api-host"]; 24 | const azureApiVersion = useEnv 25 | ? 
runtimeConfig.azureApiVersion 26 | : headers["x-azure-api-version"]; 27 | const azureGpt35DeploymentId = useEnv 28 | ? runtimeConfig.azureGpt35DeploymentId 29 | : headers["x-azure-gpt35-deployment-id"]!; 30 | const azureGpt4DeploymentId = useEnv 31 | ? runtimeConfig.azureGpt4DeploymentId 32 | : headers["x-azure-gpt4-deployment-id"]!; 33 | 34 | // Identify the basePath of the Azure OpenAI Service from the OpenAI model name 35 | let basePath = `${apiHost}/openai`; 36 | if (model === "chat") { 37 | let azureDeploymentId = ""; 38 | switch ((body as CreateChatCompletionRequest).model as ChatModel) { 39 | case "gpt-3.5-turbo": 40 | azureDeploymentId = azureGpt35DeploymentId; 41 | break; 42 | case "gpt-4": 43 | azureDeploymentId = azureGpt4DeploymentId; 44 | break; 45 | } 46 | basePath += `/deployments/${azureDeploymentId}`; 47 | } else if (model === "text") { 48 | // TODO: Support completion model 49 | } 50 | 51 | const azureOptions = 52 | apiType === "azure" 53 | ? { 54 | basePath, 55 | baseOptions: { 56 | headers: { "api-key": apiKey }, 57 | params: { 58 | "api-version": azureApiVersion, 59 | }, 60 | }, 61 | } 62 | : {}; 63 | 64 | return new Configuration({ 65 | apiKey, 66 | ...azureOptions, 67 | }); 68 | } 69 | 70 | export function getOpenAIApiInstance( 71 | model: ApiRequestModel, 72 | headers: RequestHeaders, 73 | body?: ApiRequest 74 | ) { 75 | const configuration = createOpenAIConfiguration(model, headers, body); 76 | const axiosInstance = createAxiosInstance(); 77 | 78 | const useEnv = runtimeConfig.public.useEnv === "yes"; 79 | const apiType = useEnv 80 | ? runtimeConfig.public.apiType 81 | : (headers["x-api-type"] as ApiType); 82 | 83 | if (apiType === "azure") { 84 | return new AzureOpenAIApi(configuration, undefined, axiosInstance); 85 | } else { 86 | return new OpenAIApi(configuration, undefined, axiosInstance); 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /components/ChatTitleBar.vue: -------------------------------------------------------------------------------- 1 | 53 | 54 | 100 | 101 | 106 | -------------------------------------------------------------------------------- /pages/index.vue: -------------------------------------------------------------------------------- 1 | 16 | 17 | 141 | 142 | 149 | -------------------------------------------------------------------------------- /components/ChatSendBar.vue: -------------------------------------------------------------------------------- 1 | 50 | 51 | 138 | 139 | 148 | -------------------------------------------------------------------------------- /locales/zh.json: -------------------------------------------------------------------------------- 1 | { 2 | "app": { 3 | "title": "ChatGPT", 4 | "description": "基于 OpenAI 的 ChatGPT 自然语言模型人工智能对话" 5 | }, 6 | "BaseInput": { 7 | "placeholder": "请输入" 8 | }, 9 | "ChatList": { 10 | "conversations": "对话", 11 | "images": "图片" 12 | }, 13 | "ChatSendBar": { 14 | "placeholder": "输入消息,Enter 发送,Shift + Enter 换行", 15 | "imageN": "图片数量", 16 | "imageSize": "图片大小" 17 | }, 18 | "ChatSendButton": { 19 | "label": "发送" 20 | }, 21 | "ChatSetting": { 22 | "apiType": "API 类型", 23 | "apiKey": { 24 | "label": "API Key", 25 | "placeholder": "请输入" 26 | }, 27 | "apiHost": { 28 | "label": "API Endpoint", 29 | "placeholder": "https://YOUR_RESOURCE_NAME.openai.azure.com/" 30 | }, 31 | "azureGpt35DeploymentId": { 32 | "label": "GPT-3.5模型的部署名称", 33 | "placeholder": "gpt-35-turbo" 34 | }, 35 | "azureGpt4DeploymentId": { 36 | "label": "GPT-4模型的部署名称", 37 | 
"placeholder": "gpt-4" 38 | }, 39 | "azureApiVersion": { 40 | "label": "Azure OpenAI Service 的 API 版本", 41 | "placeholder": "2023-06-01-preview" 42 | }, 43 | "temperature": "temperature", 44 | "language": "语言", 45 | "colorMode": { 46 | "label": "配色", 47 | "system": "系统", 48 | "light": "浅色", 49 | "dark": "深色" 50 | }, 51 | "save": "保存", 52 | "back": "返回", 53 | "initialMessage": "嘿!能听到我说话吗?" 54 | }, 55 | "ChatStop": { 56 | "label": "停止回答", 57 | "message": "已停止回答" 58 | }, 59 | "ChatTitleBar": { 60 | "initialTitle": "闲聊", 61 | "clearMessages": { 62 | "confirm": "是否清空聊天记录?" 63 | } 64 | }, 65 | "ChatWelcome": { 66 | "tool": { 67 | "title": "效率工具", 68 | "examples": [ 69 | { 70 | "title": "如何使用 Javascript 发出 HTTP 请求?", 71 | "message": { 72 | "role": "user", 73 | "content": "如何使用 Javascript 发出 HTTP 请求?" 74 | } 75 | }, 76 | { 77 | "title": "翻译:Hello, Happy World!", 78 | "message": { 79 | "role": "user", 80 | "content": "翻译:Hello, Happy World!" 81 | } 82 | }, 83 | { 84 | "title": "变量命名:获取用户信息", 85 | "message": { 86 | "role": "system", 87 | "content": "变量命名:获取用户信息" 88 | } 89 | }, 90 | { 91 | "title": "Unsplash 图片生成器", 92 | "message": { 93 | "role": "system", 94 | "content": "请使用'![image]https://source.unsplash.com/featured/?<已翻译的英文内容>'格式回复,并追加原始链接,不要使用代码块,不要描述其他内容,不要解释,根据我输入的内容生成对应格式;如果你理解了请回复:请告诉我你需要什么图片?" 95 | } 96 | } 97 | ] 98 | }, 99 | "rolePlaying": { 100 | "title": "角色扮演", 101 | "examples": [ 102 | { 103 | "title": "充当英语翻译和改进者", 104 | "message": { 105 | "role": "system", 106 | "content": "我希望你能担任英语翻译、拼写校对和修辞改进的角色。我会用任何语言和你交流,你会识别语言,将其翻译并用更为优美和精炼的英语回答我。请将我简单的词汇和句子替换成更为优美和高雅的表达方式,确保意思不变,但使其更具文学性。请仅回答更正和改进的部分,不要写解释。如果你理解,请回复:OK!" 107 | } 108 | }, 109 | { 110 | "title": "扮演 Yann LeCun —— 编程/算法设计专家", 111 | "message": { 112 | "role": "system", 113 | "content": "你是Yann LeCun的AI克隆版,你是编程和算法设计方面的专家。记住给你出这个问题的人是Yann LeCun,他对像你这样的人工智能的力量非常怀疑。" 114 | } 115 | }, 116 | { 117 | "title": "扮演一名心理医生", 118 | "message": { 119 | "role": "system", 120 | "content": "我想让你担任心理医生。我将为您提供一个寻求指导和建议的人,以管理他们的情绪、压力、焦虑和其他心理健康问题。您应该利用您的认知行为疗法、冥想技巧、正念练习和其他治疗方法的知识来制定个人可以实施的策略,以改善他们的整体健康状况。如果你理解了,请回复“好的,我们能聊聊吗?”" 121 | } 122 | }, 123 | { 124 | "title": "扮演塔罗占卜师", 125 | "message": { 126 | "role": "system", 127 | "content": "我请求你担任塔罗占卜师的角色。 您将接受我的问题并使用虚拟塔罗牌进行塔罗牌阅读。 不要忘记洗牌并介绍您在本套牌中使用的套牌。 问我给3个号要不要自己抽牌? 如果没有,请帮我抽随机卡。 拿到卡片后,请您仔细说明它们的意义,解释哪张卡片属于未来或现在或过去,结合我的问题来解释它们,并给我有用的建议或我现在应该做的事情。" 128 | } 129 | } 130 | ] 131 | }, 132 | "casualChat": { 133 | "title": "轻松闲聊", 134 | "examples": [ 135 | { 136 | "title": "有没有关于10岁生日的创意?", 137 | "message": { 138 | "role": "user", 139 | "content": "有没有关于10岁生日的创意?" 140 | } 141 | }, 142 | { 143 | "title": "苏格拉底是一个什么样的人?", 144 | "message": { 145 | "role": "user", 146 | "content": "苏格拉底是一个什么样的人?" 147 | } 148 | }, 149 | { 150 | "title": "番茄牛腩怎么做?", 151 | "message": { 152 | "role": "user", 153 | "content": "番茄牛腩怎么做?" 154 | } 155 | }, 156 | { 157 | "title": "给我讲个故事吧", 158 | "message": { 159 | "role": "user", 160 | "content": "给我讲个故事吧" 161 | } 162 | } 163 | ] 164 | } 165 | }, 166 | "CopyText": { 167 | "copyAll": "复制全文", 168 | "copySuccessful": "复制成功" 169 | }, 170 | "FuncBar": { 171 | "chat": "新建聊天", 172 | "image": "新图片", 173 | "setting": "设置", 174 | "github": "查看项目" 175 | }, 176 | "HotKeyHelp": { 177 | "title": "全局快捷键", 178 | "close": "我知道了!", 179 | "newChat": "新建聊天", 180 | "deleteChat": "删除聊天", 181 | "newTopic": "开始新话题", 182 | "clearChat": "清空聊天记录" 183 | }, 184 | "titlePrompt": "请用相同语言内限定10个字以内总结上面的内容作为标题,不要使用符号,开始总结:", 185 | "newTopicAlert": "已开始新话题,历史消息不参与本次对话!", 186 | "removeChatConfirm": "确认删除当前会话?" 
187 | } 188 | -------------------------------------------------------------------------------- /locales/ja.json: -------------------------------------------------------------------------------- 1 | { 2 | "app": { 3 | "title": "ChatGPT", 4 | "description": "OpenAIのChatGPT自然言語モデルを用いたチャット" 5 | }, 6 | "BaseInput": { 7 | "placeholder": "入力してください" 8 | }, 9 | "ChatList": { 10 | "conversations": "会話", 11 | "images": "画像" 12 | }, 13 | "ChatSendBar": { 14 | "placeholder": "メッセージを入力して [Enter] で送信、 [Shift] + [Enter] で改行します。", 15 | "imageN": "枚数", 16 | "imageSize": "サイズ" 17 | }, 18 | "ChatSendButton": { 19 | "label": "送信" 20 | }, 21 | "ChatSetting": { 22 | "apiType": "API 種別", 23 | "apiKey": { 24 | "label": "API キー", 25 | "placeholder": "入力してください" 26 | }, 27 | "apiHost": { 28 | "label": "API エンドポイント", 29 | "placeholder": "https://YOUR_RESOURCE_NAME.openai.azure.com/" 30 | }, 31 | "azureGpt35DeploymentId": { 32 | "label": "GPT-3.5モデルのデプロイ名", 33 | "placeholder": "gpt-35-turbo" 34 | }, 35 | "azureGpt4DeploymentId": { 36 | "label": "GPT-4モデルのデプロイ名", 37 | "placeholder": "gpt-4" 38 | }, 39 | "azureApiVersion": { 40 | "label": "Azure OpenAI Service の API バージョン", 41 | "placeholder": "2023-06-01-preview" 42 | }, 43 | "temperature": "temperature", 44 | "language": "言語", 45 | "colorMode": { 46 | "label": "配色", 47 | "system": "システム", 48 | "light": "ライト", 49 | "dark": "ダーク" 50 | }, 51 | "save": "保存", 52 | "back": "戻る", 53 | "initialMessage": "こんにちは、私の声が聞こえますか?" 54 | }, 55 | "ChatStop": { 56 | "label": "回答停止", 57 | "message": "回答の生成が停止されました" 58 | }, 59 | "ChatTitleBar": { 60 | "initialTitle": "おしゃべり", 61 | "clearMessages": { 62 | "confirm": "チャット履歴をクリアしますか?" 63 | } 64 | }, 65 | "ChatWelcome": { 66 | "tool": { 67 | "title": "ツール", 68 | "examples": [ 69 | { 70 | "title": "JavascriptでHTTPリクエストを送信する方法は?", 71 | "message": { 72 | "role": "user", 73 | "content": "JavascriptでHTTPリクエストを送信する方法は?" 74 | } 75 | }, 76 | { 77 | "title": "翻訳: Hello, Happy World!", 78 | "message": { 79 | "role": "user", 80 | "content": "翻訳: Hello, Happy World!" 
81 | } 82 | }, 83 | { 84 | "title": "変数名付け:ユーザー情報の取得", 85 | "message": { 86 | "role": "system", 87 | "content": "変数名付け:ユーザー情報の取得" 88 | } 89 | }, 90 | { 91 | "title": "Unsplash画像ジェネレーター", 92 | "message": { 93 | "role": "system", 94 | "content": "あなたは与えられた文字列をもとに'![image]https://source.unsplash.com/featured/?<入力を英語に翻訳した文字列>'の形式で返信し、元のリンクを追加してください。コードブロックは使用しないでください。他の内容を説明しないでください。入力された内容に基づいて対応する形式を生成してください。まずは「どのような画像が必要ですか?」というメッセージで開始して下さい。" 95 | } 96 | } 97 | ] 98 | }, 99 | "rolePlaying": { 100 | "title": "ロールプレイ", 101 | "examples": [ 102 | { 103 | "title": "英語の翻訳と改善を担当する", 104 | "message": { 105 | "role": "system", 106 | "content": "私はあなたに英語の翻訳、スペルチェック、修辞的改善を担当してもらいたいと思っています。私はどんな言語でもあなたとコミュニケーションを取ることができますが、あなたはそれを認識し、より優れた英語に翻訳して返答することができます。私の単純な用語や文をより優美かつ洗練された表現に置き換えてください。意味は変わらず、文学性が高まるようにしてください。修正や改善部分だけ回答してください。解釈は書かないでください。「OK」と理解した場合は返信してください。" 107 | } 108 | }, 109 | { 110 | "title": "Yann LeCunを演じる —— プログラミング/アルゴリズム設計の専門家", 111 | "message": { 112 | "role": "system", 113 | "content": "あなたはYann LeCunのAIクローン版であり、プログラミングやアルゴリズムの設計において専門家です。あなたにこの質問を出した人がYann LeCunであり、彼はあなたのような人工知能の力に非常に懐疑的です。" 114 | } 115 | }, 116 | { 117 | "title": "心理学者を演じる", 118 | "message": { 119 | "role": "system", 120 | "content": "私はあなたに心理医師の役割を担ってもらいたいです。あなたに相談し、アドバイスを求める人を提供します。彼らの感情、ストレス、不安などの心理的健康問題を管理するために、あなたは認知行動療法、瞑想技法、マインドフルネス練習などの知識を活用して、個人が実施できる戦略を策定する必要があります。全体的な健康状態を改善するためです。理解できた場合は、「OK、話しましょうか?」と返信してください。" 121 | } 122 | }, 123 | { 124 | "title": "タロット占い師を演じる", 125 | "message": { 126 | "role": "system", 127 | "content": "あなたにはタロット占い師の役割をお願いしたいと思います。 私の質問を受け取り、仮想のタロットカードを使用してタロット占いを行ってください。 シャッフルを忘れずに行い、使用するデッキを紹介してください。私に3つの数字を与えて、カードを引いてもよいか尋ねます。もしそうでなければ、ランダムにカードを引いてください。カードを受け取った後、その意味を詳しく説明し、どのカードが未来、現在、または過去に属するかを説明し、私の質問に合わせてそれらを説明し、有用なアドバイスや私が今すべきことを教えてください。" 128 | } 129 | } 130 | ] 131 | }, 132 | "casualChat": { 133 | "title": "おしゃべり", 134 | "examples": [ 135 | { 136 | "title": "10歳の誕生日のアイデアはありますか?", 137 | "message": { 138 | "role": "user", 139 | "content": "10歳の誕生日のアイデアはありますか?" 140 | } 141 | }, 142 | { 143 | "title": "ソクラテスはどのような人物でしたか?", 144 | "message": { 145 | "role": "user", 146 | "content": "ソクラテスはどのような人物でしたか?" 147 | } 148 | }, 149 | { 150 | "title": "牛ブリスケットのトマト煮の作り方", 151 | "message": { 152 | "role": "user", 153 | "content": "牛ブリスケットのトマト煮の作り方" 154 | } 155 | }, 156 | { 157 | "title": "お話を聞かせてください。", 158 | "message": { 159 | "role": "user", 160 | "content": "お話を聞かせてください。" 161 | } 162 | } 163 | ] 164 | } 165 | }, 166 | "CopyText": { 167 | "copyAll": "すべてコピー", 168 | "copySuccessful": "コピー成功" 169 | }, 170 | "FuncBar": { 171 | "chat": "新規チャット", 172 | "image": "新規イメージ", 173 | "setting": "設定", 174 | "github": "プロジェクト" 175 | }, 176 | "HotKeyHelp": { 177 | "title": "ショートカットキー", 178 | "close": "わかりました!", 179 | "newChat": "新しいチャットを作成する", 180 | "deleteChat": "チャットを削除する", 181 | "newTopic": "新しいトピックを始める", 182 | "clearChat": "チャット履歴をクリアする" 183 | }, 184 | "titlePrompt": "上記の内容を入力と同一の言語で10語以内で要約してください。記号は使用しないでください。要約を始めてください:", 185 | "newTopicAlert": "新しいトピックが開始されました。今後の会話では過去の会話は考慮されません。", 186 | "removeChatConfirm": "このチャットを削除してもよろしいですか?" 
187 | } 188 | -------------------------------------------------------------------------------- /components/ChatSetting.vue: -------------------------------------------------------------------------------- 1 | 133 | 134 | 190 | 191 | 218 | -------------------------------------------------------------------------------- /locales/en.json: -------------------------------------------------------------------------------- 1 | { 2 | "app": { 3 | "title": "ChatGPT", 4 | "description": "AI conversations based on the ChatGPT natural language model from OpenAI" 5 | }, 6 | "BaseInput": { 7 | "placeholder": "Please input." 8 | }, 9 | "ChatList": { 10 | "conversations": "Conversations", 11 | "images": "Images" 12 | }, 13 | "ChatSendBar": { 14 | "placeholder": "Type a message and press [Enter] to send, press [Shift]+[Enter] to start a new line.", 15 | "imageN": "Number of images", 16 | "imageSize": "Size of image" 17 | }, 18 | "ChatSendButton": { 19 | "label": "Send" 20 | }, 21 | "ChatSetting": { 22 | "apiType": "API Type", 23 | "apiKey": { 24 | "label": "API Key", 25 | "placeholder": "Please input." 26 | }, 27 | "apiHost": { 28 | "label": "API Endpoint", 29 | "placeholder": "https://YOUR_RESOURCE_NAME.openai.azure.com/" 30 | }, 31 | "azureGpt35DeploymentId": { 32 | "label": "Deployment name of the GPT-3.5 model", 33 | "placeholder": "gpt-35-turbo" 34 | }, 35 | "azureGpt4DeploymentId": { 36 | "label": "Deployment name of the GPT-4 model", 37 | "placeholder": "gpt-4" 38 | }, 39 | "azureApiVersion": { 40 | "label": "API version of Azure OpenAI Service", 41 | "placeholder": "2023-06-01-preview" 42 | }, 43 | "temperature": "temperature", 44 | "language": "Language", 45 | "colorMode": { 46 | "label": "Color", 47 | "system": "System", 48 | "light": "Light", 49 | "dark": "Dark" 50 | }, 51 | "save": "Save", 52 | "back": "Back", 53 | "initialMessage": "Hey! Can you hear me?" 54 | }, 55 | "ChatStop": { 56 | "label": "Stop Generating", 57 | "message": "Generating stopped" 58 | }, 59 | "ChatTitleBar": { 60 | "initialTitle": "Talk", 61 | "clearMessages": { 62 | "confirm": "Are you sure you want to clear the chat history?" 63 | } 64 | }, 65 | "ChatWelcome": { 66 | "tool": { 67 | "title": "Tool", 68 | "examples": [ 69 | { 70 | "title": "How to send HTTP requests using Javascript?", 71 | "message": { 72 | "role": "user", 73 | "content": "How to send HTTP requests using Javascript?" 74 | } 75 | }, 76 | { 77 | "title": "Translation: 你好,快乐的世界!", 78 | "message": { 79 | "role": "user", 80 | "content": "Translation: 你好,快乐的世界!" 81 | } 82 | }, 83 | { 84 | "title": "Variable Naming: Retrieving User Information.", 85 | "message": { 86 | "role": "system", 87 | "content": "Variable Naming: Retrieving User Information." 88 | } 89 | }, 90 | { 91 | "title": "Unsplash Image Generator", 92 | "message": { 93 | "role": "system", 94 | "content": "Please reply using the format of '![image]https://source.unsplash.com/featured/?' and append the original link. Do not use code blocks, do not describe other content, and do not explain. Generate the corresponding format based on what I enter. If you understand, please reply with 'Please tell me what image you need?'" 95 | } 96 | } 97 | ] 98 | }, 99 | "rolePlaying": { 100 | "title": "Role Playing", 101 | "examples": [ 102 | { 103 | "title": "Act as an English translator and improver", 104 | "message": { 105 | "role": "system", 106 | "content": "I hope you can take on the role of English translator, spelling checker and rhetoric improver.
I will communicate with you in any language, and you will recognize the language, translate it, and respond to me in more elegant and refined English. Please replace my simple vocabulary and sentences with more beautiful and graceful expressions, ensuring that the meaning remains unchanged but is more literary. Please only answer the corrected and improved parts without writing explanations. If you understand, please reply: OK!" 107 | } 108 | }, 109 | { 110 | "title": "Play as Yann LeCun - Programming/Algorithm Design Expert", 111 | "message": { 112 | "role": "system", 113 | "content": "You are an AI clone of Yann LeCun, an expert in programming and algorithm design. Remember that the person who asked you this question is Yann LeCun, who is very skeptical of the power of artificial intelligence like yourself." 114 | } 115 | }, 116 | { 117 | "title": "Play the role of a psychologist", 118 | "message": { 119 | "role": "system", 120 | "content": "I want you to play the role of a psychologist. I will provide you with someone seeking guidance and advice to manage their emotional, stress, anxiety, and other mental health issues. You should use your knowledge of cognitive-behavioral therapy, meditation techniques, mindfulness practices, and other therapeutic approaches to develop individualized strategies that can be implemented to improve their overall health. If you understand, please reply with 'Okay, can we talk?'" 121 | } 122 | }, 123 | { 124 | "title": "Play the role of a tarot card reader.", 125 | "message": { 126 | "role": "system", 127 | "content": "I would like you to play the role of a tarot reader. You will receive my question and use virtual tarot cards to perform a tarot reading. Don't forget to shuffle and introduce the deck you will be using. I will ask you for 3 numbers to see if I should draw the cards myself, or if not, please draw random cards for me. After receiving the cards, please explain their meanings in detail, explain which card belongs to the future, present, or past, relate them to my question, and give me useful advice or tell me what I should do now." 128 | } 129 | } 130 | ] 131 | }, 132 | "casualChat": { 133 | "title": "Casual Chat", 134 | "examples": [ 135 | { 136 | "title": "Do you have any ideas for a 10th birthday?", 137 | "message": { 138 | "role": "user", 139 | "content": "Do you have any ideas for a 10th birthday?" 140 | } 141 | }, 142 | { 143 | "title": "What kind of person was Socrates?", 144 | "message": { 145 | "role": "user", 146 | "content": "What kind of person was Socrates?" 147 | } 148 | }, 149 | { 150 | "title": "How to make tomato beef brisket?", 151 | "message": { 152 | "role": "user", 153 | "content": "How to make tomato beef brisket?" 154 | } 155 | }, 156 | { 157 | "title": "Tell me a story, please.", 158 | "message": { 159 | "role": "user", 160 | "content": "Tell me a story, please." 161 | } 162 | } 163 | ] 164 | } 165 | }, 166 | "CopyText": { 167 | "copyAll": "Copy All", 168 | "copySuccessful": "Copy Successful" 169 | }, 170 | "FuncBar": { 171 | "chat": "New Chat", 172 | "image": "New Image", 173 | "setting": "Setting", 174 | "github": "Project" 175 | }, 176 | "HotKeyHelp": { 177 | "title": "Global shortcut keys", 178 | "close": "Got it!", 179 | "newChat": "New Chat", 180 | "deleteChat": "Delete Chat", 181 | "newTopic": "Start a new topic", 182 | "clearChat": "Clear chat history" 183 | }, 184 | "titlePrompt": "Summarize the above content within 10 words in the same language as input, and use it as the title. Do not use symbols. 
Begin summarizing.", 185 | "newTopicAlert": "New topic has started, history messages will not be involved in this conversation!", 186 | "removeChatConfirm": "Are you sure you want to delete this chat?" 187 | } 188 | -------------------------------------------------------------------------------- /stores/chat.ts: -------------------------------------------------------------------------------- 1 | import { defineStore } from "pinia"; 2 | import { 3 | ApiRequest, 4 | ChatItem, 5 | ChatMessageExItem, 6 | ChatMessageExOption, 7 | ChatModel, 8 | ChatOption, 9 | ChatSettingItem, 10 | ImageSize, 11 | } from "@/types"; 12 | import { 13 | CreateChatCompletionRequest, 14 | CreateChatCompletionResponse, 15 | CreateImageRequest, 16 | ImagesResponse, 17 | ListModelsResponse, 18 | Model, 19 | } from "openai"; 20 | 21 | export const useChatStore = defineStore("chat", () => { 22 | const decoder = new TextDecoder("utf-8"); 23 | const db = new ChatDB(); 24 | 25 | const i18n = useI18n(); 26 | 27 | let controller: AbortController; 28 | 29 | const showSetting = ref(false); 30 | const showHelp = ref(false); 31 | 32 | const models = ref([]); 33 | 34 | const chats = ref([]); 35 | const chat = ref(); 36 | const messages = ref([]); 37 | const messageContent = ref(""); 38 | const talkingChats = ref(new Set([])); 39 | 40 | const imageN = ref(1); 41 | const imageSize = ref("256x256"); 42 | 43 | // talking 44 | 45 | const talking = computed( 46 | () => talkingChats.value.has(chat.value?.id ?? 0) 47 | ); 48 | 49 | function startTalking(chatId: number) { 50 | talkingChats.value.add(chatId); 51 | } 52 | 53 | function endTalking(chatId: number) { 54 | talkingChats.value.delete(chatId); 55 | } 56 | 57 | // chat 58 | 59 | async function getAllChats() { 60 | chats.value = (await db.chat.reverse().toArray()) as ChatItem[]; 61 | 62 | // 没有则创建 (create a chat if none exist) 63 | if (!chats.value.length) { 64 | await createImageChat(); 65 | await createChat(); 66 | } else if (!chat.value) { 67 | await openChat(chats.value[0]); 68 | } 69 | } 70 | 71 | async function createChat(item?: ChatOption) { 72 | chat.value = undefined; 73 | const chatItem: ChatOption = item ?? { name: "New Chat", order: 0 }; 74 | await db.chat.put({ ...chatItem }); 75 | 76 | // 加载列表并打开第一个 (load the list and open the first) 77 | await getAllChats(); 78 | } 79 | 80 | async function createImageChat(item?: ChatOption) { 81 | chat.value = undefined; 82 | const chatItem: ChatOption = item ??
{ 83 | name: "New Image", 84 | model: "dall-e", 85 | order: 0, 86 | }; 87 | await db.chat.put({ ...chatItem }); 88 | await getAllChats(); 89 | } 90 | 91 | async function openChat(item: ChatItem) { 92 | // console.log(item); 93 | chat.value = item; 94 | await getChatMessages(item.id); 95 | } 96 | 97 | async function removeChat(chatId: number) { 98 | if (!confirm(i18n.t("removeChatConfirm"))) return; 99 | await db.transaction("rw", "chat", "message", async () => { 100 | await db.chat.delete(chatId); 101 | await clearMessages(chatId); 102 | }); 103 | await getAllChats(); 104 | } 105 | 106 | async function reChatName(chatId: number, name: string) { 107 | await db.chat.update(chatId, { name }); 108 | await getAllChats(); 109 | const chat = chats.value.find((item) => item.id === chatId); 110 | if (chat) openChat(chat); 111 | } 112 | 113 | // model 114 | 115 | async function getAvailableModels() { 116 | const setting = loadSetting(); 117 | if (!setting) { 118 | showSetting.value = true; 119 | return; 120 | } 121 | 122 | controller = new AbortController(); 123 | try { 124 | const response = await fetch("/api/models", { 125 | method: "get", 126 | headers: getHeaders(setting), 127 | signal: controller.signal, 128 | }); 129 | const listModelsResponse: ListModelsResponse = await response.json(); 130 | models.value = listModelsResponse.data; 131 | } catch (e: any) { 132 | console.error(e); 133 | } 134 | } 135 | 136 | async function isGpt4Supported() { 137 | if (!models.value.length) { 138 | await getAvailableModels(); 139 | } 140 | return models.value.findIndex((model) => model.id === "gpt-4") > -1; 141 | } 142 | 143 | function getChatModelNameById(id: ChatModel) { 144 | switch (id) { 145 | case "gpt-3.5-turbo": 146 | return "GPT-3.5"; 147 | case "gpt-4": 148 | return "GPT-4"; 149 | case "dall-e": 150 | return "DALL·E"; 151 | } 152 | } 153 | 154 | async function changeChatModel(chatId: number, model: ChatModel) { 155 | await db.chat.update(chatId, { model }); 156 | await getAllChats(); 157 | const chat = chats.value.find((item) => item.id === chatId); 158 | if (chat) openChat(chat); 159 | } 160 | 161 | // message 162 | 163 | const standardList = computed(() => 164 | messages.value 165 | .filter((item) => item.active && !item.error && item.content) 166 | .map((item) => ({ 167 | role: item.role, 168 | content: item.content, 169 | })) 170 | ); 171 | 172 | const setNotActiveDbMessages = () => { 173 | return db.message.toCollection().modify({ active: false }); 174 | }; 175 | 176 | async function getChatMessages(chatId: number) { 177 | messages.value = (await db.message 178 | .where("chatId") 179 | .equals(chatId) 180 | .toArray()) as ChatMessageExItem[]; 181 | } 182 | 183 | async function clearMessages(chatId: number) { 184 | await db.message.where("chatId").equals(chatId).delete(); 185 | await getChatMessages(chatId); 186 | } 187 | 188 | async function createMessage(message: ChatMessageExOption) { 189 | if (!chat.value && !message.chatId) await createChat(); 190 | 191 | const chatId = message.chatId ?? (chat.value as ChatItem).id; 192 | 193 | message.chatId = chatId; 194 | message.active = message.active ?? true; 195 | message.show = message.show ?? true; 196 | message.error = message.error ?? false; 197 | message.errorMessage = message.errorMessage ?? 
undefined; 198 | message.sendDate = Date.now(); 199 | 200 | const id = await db.message.put({ ...message }); 201 | await getChatMessages(chatId); 202 | 203 | return id; 204 | } 205 | 206 | async function updateMessageContent(id: number, content: string) { 207 | await db.message.update(id, { content }); 208 | await getChatMessages((chat.value as ChatItem).id); 209 | } 210 | 211 | async function makeErrorMessage(id: number, errorMessage: string) { 212 | await db.message.update(id, { error: true, errorMessage }); 213 | await getChatMessages((chat.value as ChatItem).id); 214 | } 215 | 216 | function stop() { 217 | controller?.abort(); 218 | } 219 | 220 | function clearSendMessageContent() { 221 | messageContent.value = ""; 222 | } 223 | 224 | async function sendMessage(message: ChatMessageExOption) { 225 | if (talking.value) return; 226 | if (!message?.content.trim()) return; 227 | 228 | const chatId = message.chatId ?? chat.value?.id; 229 | console.log("store chatId", chat.value?.id); 230 | console.log("message chatId", message.chatId); 231 | 232 | if (!chatId) return; 233 | 234 | const setting = loadSetting(); 235 | if (!setting) { 236 | showSetting.value = true; 237 | return; 238 | } 239 | 240 | // 开始对话 (start a conversation) 241 | clearSendMessageContent(); 242 | startTalking(chatId); 243 | 244 | // 追加到消息队列 (append to message queue) 245 | await createMessage(message); 246 | const assistantMessageId = await createMessage({ 247 | role: "assistant", 248 | content: "", 249 | chatId, 250 | }); 251 | 252 | // 用于主动中断请求 (used to actively abort the request) 253 | controller = new AbortController(); 254 | 255 | try { 256 | // 打印标准列表 (print standard list) 257 | console.log(standardList.value); 258 | 259 | // 发送请求 (send request) 260 | const { status, statusText, body } = await fetch( 261 | "/api/chat/completions", 262 | { 263 | method: "post", 264 | headers: getHeaders(setting), 265 | body: JSON.stringify({ 266 | model: chat.value?.model ?? "gpt-3.5-turbo", 267 | messages: standardList.value, 268 | temperature: setting.temperature, 269 | stream: true, 270 | } as ApiRequest), 271 | signal: controller.signal, 272 | } 273 | ); 274 | 275 | // 读取 Stream (read the response stream) 276 | let content = ""; 277 | const reader = body?.getReader(); 278 | 279 | let parsedCount = 0; 280 | let concatenatedValue = new Uint8Array(); 281 | 282 | while (reader) { 283 | const { done, value } = await reader.read(); 284 | if (done) break; // stream ended without a "[DONE]" line 285 | // concatenate with the previous value 286 | concatenatedValue = new Uint8Array([...concatenatedValue, ...value]); 287 | 288 | const text = decoder.decode(concatenatedValue); 289 | 290 | // 处理服务端返回的异常消息并终止读取 (Handle the exception message returned by the server and terminate the read) 291 | if (status !== 200) { 292 | const error = JSON.parse(text); 293 | content += `${status}: ${statusText}\n`; 294 | content += error.error?.message ?? error.message; 295 | return await makeErrorMessage(assistantMessageId, content); 296 | } 297 | 298 | // 读取正文 (read the body) 299 | const line = text 300 | .split(/\r?\n/) 301 | .map((line) => line.replace(/(\n)?^data:\s*/, "").trim()) // remove prefix 302 | .filter((line) => line !== ""); // remove empty lines 303 | for (let i = parsedCount; i < line.length; i++) { 304 | if (line[i] === "[DONE]") return; 305 | 306 | try { 307 | const data = JSON.parse(line[i]); 308 | content += data.choices[0].delta.content ??
""; 309 | await updateMessageContent(assistantMessageId, content); 310 | parsedCount++; 311 | } catch (e) { 312 | console.warn("Could not JSON parse stream message", e); 313 | continue; 314 | } 315 | } 316 | } 317 | } catch (e: any) { 318 | // 主动终止时触发 (Triggered on active termination) 319 | await makeErrorMessage( 320 | assistantMessageId, 321 | `\n\n**${ 322 | e.name === "AbortError" ? i18n.t("ChatStop.message") : e.message 323 | }**` 324 | ); 325 | } finally { 326 | endTalking(chatId); 327 | } 328 | } 329 | 330 | async function sendImageRequestMessage(message: ChatMessageExOption) { 331 | if (talking.value) return; 332 | if (!message?.content.trim()) return; 333 | 334 | const chatId = message.chatId ?? chat.value?.id; 335 | 336 | if (!chatId) return; 337 | 338 | const setting = loadSetting(); 339 | if (!setting) { 340 | showSetting.value = true; 341 | return; 342 | } 343 | 344 | clearSendMessageContent(); 345 | startTalking(chatId); 346 | 347 | await createMessage(message); 348 | const assistantMessageId = await createMessage({ 349 | role: "assistant", 350 | content: "", 351 | chatId, 352 | }); 353 | 354 | controller = new AbortController(); 355 | 356 | let prompt = message.content; 357 | 358 | // Prompt translation request 359 | try { 360 | const translationPrompt = ` 361 | You are a translation program. 362 | Below, define process to be executed, and the output constraints. 363 | 364 | # Process 365 | 1. Identify the language of {input}. 366 | 2. If the language is English, assign {input} as it is to {output}. 367 | 3. If the language is not English, translate {input} to English and assign the result to {output}. 368 | 369 | # Output Constraints 370 | - Output only {output}. 371 | - Do not add any explanatory text. 372 | `; 373 | const response = await fetch("/api/chat/completions", { 374 | method: "post", 375 | headers: getHeaders(setting), 376 | body: JSON.stringify({ 377 | model: "gpt-3.5-turbo", 378 | messages: [ 379 | { 380 | role: "system", 381 | content: trimPrompt(translationPrompt), 382 | }, 383 | { 384 | role: "user", 385 | content: prompt, 386 | }, 387 | ], 388 | } as CreateChatCompletionRequest), 389 | }); 390 | const translateResponse: CreateChatCompletionResponse = 391 | await response.json(); 392 | prompt = translateResponse.choices[0].message!.content; 393 | } catch (error) { 394 | console.error(error); 395 | } 396 | 397 | // Image generation request 398 | try { 399 | console.log(standardList.value); 400 | const response = await fetch("/api/images/generations", { 401 | method: "post", 402 | headers: getHeaders(setting), 403 | body: JSON.stringify({ 404 | prompt, 405 | n: message.imageN, 406 | size: message.imageSize, 407 | } as CreateImageRequest), 408 | signal: controller.signal, 409 | }); 410 | 411 | if (response.status !== 200) { 412 | const error = response.statusText; 413 | return await makeErrorMessage(assistantMessageId, error); 414 | } 415 | 416 | const imagesResponse: ImagesResponse = await response.json(); 417 | const imagesResponseDataInner = imagesResponse.data; 418 | 419 | let content = ""; 420 | imagesResponseDataInner.forEach((img) => { 421 | content += `![image](${img.url}) `; 422 | }); 423 | await updateMessageContent(assistantMessageId, content); 424 | } catch (e: any) { 425 | await makeErrorMessage( 426 | assistantMessageId, 427 | `\n\n**${ 428 | e.name === "AbortError" ? 
i18n.t("ChatStop.message") : e.message 429 | }**` 430 | ); 431 | } finally { 432 | endTalking(chatId); 433 | } 434 | } 435 | 436 | // locale 437 | 438 | function getLocale() { 439 | const setting = loadSetting(); 440 | return (setting && setting.locale) ?? i18n.getBrowserLocale() ?? "en"; 441 | } 442 | 443 | // color mode 444 | 445 | function getColorMode() { 446 | const setting = loadSetting(); 447 | return (setting && setting.colorMode) ?? "system"; 448 | } 449 | 450 | // headers 451 | 452 | function getHeaders(setting: ChatSettingItem) { 453 | return { 454 | "x-api-type": setting.apiType, 455 | "x-cipher-api-key": setting.apiKey ?? "", 456 | "x-api-host": setting.apiHost ?? "", 457 | "x-azure-api-version": setting.azureApiVersion ?? "", 458 | "x-azure-gpt35-deployment-id": setting.azureGpt35DeploymentId ?? "", 459 | "x-azure-gpt4-deployment-id": setting.azureGpt4DeploymentId ?? "", 460 | }; 461 | } 462 | 463 | return { 464 | showSetting, 465 | showHelp, 466 | chats, 467 | chat, 468 | messages, 469 | messageContent, 470 | imageN, 471 | imageSize, 472 | talking, 473 | standardList, 474 | stop, 475 | openChat, 476 | reChatName, 477 | getAvailableModels, 478 | isGpt4Supported, 479 | getChatModelNameById, 480 | changeChatModel, 481 | setNotActiveDbMessages, 482 | getChatMessages, 483 | getAllChats, 484 | createChat, 485 | createImageChat, 486 | clearMessages, 487 | removeChat, 488 | appendMessage: createMessage, 489 | sendMessage, 490 | sendImageRequestMessage, 491 | getLocale, 492 | getColorMode, 493 | getHeaders, 494 | }; 495 | }); 496 | --------------------------------------------------------------------------------