├── .dockerignore ├── .env.example ├── .eslintignore ├── .eslintrc.js ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report_when_use.yml │ ├── bus_report_when_deploying.yml │ ├── config.yml │ ├── feature_request.yml │ └── typo.yml ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── build-docker.yml │ ├── lint.yml │ ├── release.yml │ └── sync.yml ├── .gitignore ├── .npmrc ├── .vscode ├── extensions.json ├── launch.json └── settings.json ├── Dockerfile ├── LICENSE ├── README.md ├── README.zh-CN.md ├── README_JA-JP.md ├── SECURITY.md ├── astro.config.mjs ├── docker-compose.yml ├── netlify.toml ├── package-lock.json ├── package.json ├── plugins └── disableBlocks.ts ├── public ├── apple-touch-icon.png ├── logo.svg ├── pwa-192.png ├── pwa-512.png └── robots.txt ├── shims.d.ts ├── src ├── assets │ ├── emoji-picker.css │ ├── prism.css │ ├── transition.css │ └── zag-components.css ├── components │ ├── Main.astro │ ├── Markdown.tsx │ ├── ModalsLayer.tsx │ ├── Send.tsx │ ├── StreamableText.tsx │ ├── client-only │ │ └── BuildStores.tsx │ ├── conversations │ │ ├── ConversationEdit.tsx │ │ ├── ConversationEditModal.tsx │ │ ├── ConversationSidebar.tsx │ │ ├── ConversationSidebarAdd.tsx │ │ └── ConversationSidebarItem.tsx │ ├── header │ │ ├── ConversationHeaderInfo.tsx │ │ ├── ConversationMessageClearButton.tsx │ │ ├── ConversationMessageSettingButton.tsx │ │ ├── ConversationMessageShareButton.tsx │ │ └── Header.tsx │ ├── main │ │ ├── Continuous.tsx │ │ ├── Conversation.tsx │ │ ├── ConversationEmpty.tsx │ │ ├── Image.tsx │ │ ├── MessageItem.tsx │ │ ├── Single.tsx │ │ └── Welcome.tsx │ ├── settings │ │ ├── AppGeneralSettings.tsx │ │ ├── ProviderGlobalSettings.tsx │ │ ├── SettingsSidebar.tsx │ │ └── SettingsUIComponent.tsx │ └── ui │ │ ├── BotSelect.tsx │ │ ├── Button.tsx │ │ ├── ConfirmModal.tsx │ │ ├── EmojiPickerModal.tsx │ │ ├── Modal.tsx │ │ ├── SelectMessageModal.tsx │ │ ├── SettingsApiKey.tsx │ │ ├── SettingsInput.tsx │ │ ├── SettingsNotDefined.tsx │ │ ├── SettingsSelect.tsx │ │ ├── 
SettingsSlider.tsx │ │ ├── SettingsToggle.tsx │ │ ├── ShareModal.tsx │ │ ├── Sidebar.tsx │ │ ├── ThemeToggle.tsx │ │ └── base │ │ ├── Checkbox.tsx │ │ ├── DropdownMenu.tsx │ │ ├── Select.tsx │ │ ├── Slider.tsx │ │ ├── Tabs.tsx │ │ ├── Toggle.tsx │ │ ├── Tooltip.tsx │ │ └── index.ts ├── env.d.ts ├── hooks │ ├── index.ts │ ├── useClickOutside.ts │ ├── useCopy.ts │ ├── useDark.ts │ ├── useDepGet.ts │ ├── useDisableTransition.ts │ ├── useI18n.ts │ ├── useLargeScreen.ts │ └── useMobileScreen.ts ├── layouts │ └── Layout.astro ├── locale │ ├── index.ts │ └── lang │ │ ├── en.ts │ │ ├── fr.ts │ │ ├── index.ts │ │ ├── zh-cn.ts │ │ └── zh-hk.ts ├── logics │ ├── conversation.ts │ ├── helper.ts │ └── stream.ts ├── pages │ ├── api │ │ └── handle │ │ │ └── [provider].ts │ └── index.astro ├── providers │ ├── azure │ │ ├── api.ts │ │ ├── handler.ts │ │ ├── index.ts │ │ └── parser.ts │ ├── google │ │ ├── api.ts │ │ ├── handler.ts │ │ ├── index.ts │ │ └── parser.ts │ ├── openai │ │ ├── api.ts │ │ ├── handler.ts │ │ ├── index.ts │ │ └── parser.ts │ └── replicate │ │ ├── api.ts │ │ ├── handler.ts │ │ └── index.ts ├── stores │ ├── conversation.ts │ ├── messages.ts │ ├── provider.ts │ ├── settings.ts │ ├── storage │ │ ├── conversation.ts │ │ ├── db.ts │ │ ├── message.ts │ │ └── settings.ts │ ├── streams.ts │ ├── tests │ │ ├── conversation.mock.ts │ │ └── message.mock.ts │ └── ui.ts └── types │ ├── app.ts │ ├── conversation.ts │ ├── message.ts │ └── provider.ts ├── tsconfig.json ├── unocss.config.ts └── vercel.json /.dockerignore: -------------------------------------------------------------------------------- 1 | *.md 2 | Dockerfile 3 | docker-compose.yml 4 | LICENSE 5 | netlify.toml 6 | vercel.json 7 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | # Your API Key for OpenAI 2 | OPENAI_API_KEY= 3 | # Custom base url for OpenAI API. 
default: https://api.openai.com 4 | OPENAI_API_BASE_URL= 5 | # Inject analytics or other scripts before of the page 6 | HEAD_SCRIPTS= 7 | # Secret string for the project. Use for generating signatures for API calls 8 | SECRET_KEY= 9 | # Set password for site. If not set, site will be public 10 | SITE_PASSWORD= 11 | # ID of the model to use. https://platform.openai.com/docs/api-reference/models/list 12 | OPENAI_API_MODEL= 13 | -------------------------------------------------------------------------------- /.eslintignore: -------------------------------------------------------------------------------- 1 | dist 2 | public 3 | node_modules 4 | .netlify 5 | .vercel 6 | .github 7 | .changeset 8 | -------------------------------------------------------------------------------- /.eslintrc.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | extends: ['@evan-yang', 'plugin:astro/recommended'], 3 | rules: { 4 | 'no-console': 'off', 5 | 'react/display-name': 'off', 6 | 'react-hooks/rules-of-hooks': 'off', 7 | '@typescript-eslint/no-use-before-define': 'off', 8 | '@typescript-eslint/no-unused-vars': 'warn', 9 | 'react/jsx-key': 'off', 10 | 'import/namespace': 'off', 11 | 'react/jsx-closing-tag-location': 'off', 12 | }, 13 | overrides: [ 14 | { 15 | files: ['*.astro'], 16 | parser: 'astro-eslint-parser', 17 | parserOptions: { 18 | parser: '@typescript-eslint/parser', 19 | extraFileExtensions: ['.astro'], 20 | }, 21 | rules: { 22 | 'no-mixed-spaces-and-tabs': ['error', 'smart-tabs'], 23 | }, 24 | }, 25 | { 26 | // Define the configuration for ` 53 | -------------------------------------------------------------------------------- /src/locale/index.ts: -------------------------------------------------------------------------------- 1 | import * as Langs from './lang' 2 | import type { SelectOptionType } from '@/types/provider' 3 | 4 | export type LanguageType = keyof typeof Langs 5 | 6 | export interface TranslatePair { 7 
| [key: string]: string | string[] | TranslatePair 8 | } 9 | 10 | export interface language { 11 | name: string 12 | desc: string 13 | locales: TranslatePair 14 | } 15 | 16 | export const locales = Object.fromEntries(Object.entries(Langs).map(([key, value]) => [key, value.locales])) 17 | 18 | export const localesOptions: SelectOptionType[] = Object.entries(Langs).map(([key, value]) => ({ label: value.desc, value: key })) 19 | -------------------------------------------------------------------------------- /src/locale/lang/en.ts: -------------------------------------------------------------------------------- 1 | import type { language } from '..' 2 | 3 | export const en = { 4 | name: 'en', 5 | desc: 'English', 6 | locales: { 7 | settings: { 8 | title: 'Settings', 9 | save: 'Save', 10 | general: { 11 | title: 'General', 12 | requestWithBackend: 'Request With Backend', 13 | locale: 'Change system language', 14 | }, 15 | openai: { 16 | title: 'OpenAI', 17 | key: '', 18 | }, 19 | replicate: {}, 20 | }, 21 | conversations: { 22 | title: 'Conversations', 23 | add: 'New', 24 | recent: 'Recents', 25 | noRecent: 'No recents', 26 | untitled: 'Untitled', 27 | promopt: { 28 | system: 'System Info', 29 | desc: 'You are a helpful assistant, answer as concisely as possible...', 30 | }, 31 | emoji: 'Search an emoji ~', 32 | confirm: { 33 | title: 'Delete all messages in this chat', 34 | desc: 'This action cannot be unback.', 35 | message: 'Delete this record', 36 | btn: 'confirm', 37 | cancel: 'cancel', 38 | submit: 'submit', 39 | }, 40 | share: { 41 | title: 'Share Conversation', 42 | link: { 43 | title: 'Share with link', 44 | copy: 'Copy Link', 45 | create: 'Create Link', 46 | }, 47 | save: 'Save', 48 | copy: 'Copy Context', 49 | messages: { 50 | title: 'Select Message', 51 | selected: 'Selected Messages', 52 | selectAll: 'Select All', 53 | }, 54 | tabs: { 55 | context: 'Share Context', 56 | image: 'Share Image', 57 | }, 58 | image: { 59 | btn: 'Generate Image', 60 | open: 
'Open in New Tab', 61 | loading: 'Generating...', 62 | copy: 'Copy Image', 63 | }, 64 | }, 65 | }, 66 | docs: 'Docs', 67 | github: 'Github', 68 | scroll: 'Scroll to bottom', 69 | empty: 'No data', 70 | send: { 71 | placeholder: 'Enter Something...', 72 | button: 'Send', 73 | }, 74 | copyed: 'Copied', 75 | }, 76 | } as language 77 | -------------------------------------------------------------------------------- /src/locale/lang/fr.ts: -------------------------------------------------------------------------------- 1 | import type { language } from '..' 2 | 3 | export const fr = { 4 | name: 'fr', 5 | desc: 'Français', 6 | locales: { 7 | settings: { 8 | title: 'Paramètres', 9 | save: 'Enregistrer', 10 | general: { 11 | title: 'Général', 12 | requestWithBackend: 'Demande avec le backend', 13 | locale: 'Changer la langue du système', 14 | }, 15 | openai: { 16 | title: 'OpenAI', 17 | key: '', 18 | }, 19 | replicate: {}, 20 | }, 21 | conversations: { 22 | title: 'Conversations', 23 | add: 'Nouveau', 24 | recent: 'Récents', 25 | noRecent: 'No récents', 26 | untitled: 'Sans titre', 27 | promopt: { 28 | system: 'Info Système', 29 | desc: 'Vous êtes un assistant utile, répondez de manière aussi concise que possible ...', 30 | }, 31 | emoji: 'Rechercher un emoji ~', 32 | confirm: { 33 | title: 'Supprimer tous les messages de cette discussion', 34 | desc: 'Cette action ne peut pas être annulée.', 35 | message: 'Supprimer cet enregistrement', 36 | btn: 'confirmer', 37 | cancel: 'annuler', 38 | submit: 'soumettre', 39 | }, 40 | share: { 41 | title: 'Partager la conversation', 42 | link: { 43 | title: 'Partager avec un lien', 44 | copy: 'Copier le lien', 45 | create: 'Créer un lien', 46 | }, 47 | save: 'Enregistrer', 48 | copy: 'Copier le contexte', 49 | messages: { 50 | title: 'Sélectionner un message', 51 | selected: 'Messages sélectionnés', 52 | selectAll: 'Sélectionner tout', 53 | }, 54 | tabs: { 55 | context: 'Partager le contexte', 56 | image: 'Partager l\'image', 57 | }, 
58 | image: { 59 | btn: 'Générer une image', 60 | open: 'Ouvrir dans un nouvel onglet', 61 | loading: 'Générer...', 62 | copy: 'Copier l\'image', 63 | }, 64 | }, 65 | }, 66 | docs: 'Docs', 67 | github: 'Github', 68 | scroll: 'Défiler vers le bas', 69 | empty: 'Aucune donnée', 70 | send: { 71 | placeholder: 'Saisir quelque chose ...', 72 | button: 'Envoyer', 73 | }, 74 | copyed: 'Copié', 75 | }, 76 | } as language 77 | -------------------------------------------------------------------------------- /src/locale/lang/index.ts: -------------------------------------------------------------------------------- 1 | export * from './en' 2 | export * from './fr' 3 | export * from './zh-cn' 4 | export * from './zh-hk' 5 | -------------------------------------------------------------------------------- /src/locale/lang/zh-cn.ts: -------------------------------------------------------------------------------- 1 | import type { language } from '..' 2 | 3 | export const zhCN = { 4 | name: 'zhCN', 5 | desc: '简体中文', 6 | locales: { 7 | settings: { 8 | title: '设置', 9 | save: '保存', 10 | general: { 11 | title: '通用', 12 | requestWithBackend: '请求代理后端', 13 | locale: '切换语言', 14 | }, 15 | openai: { 16 | title: 'OpenAI', 17 | key: '', 18 | }, 19 | replicate: {}, 20 | }, 21 | conversations: { 22 | title: '对话列表', 23 | add: '新对话', 24 | recent: '最近对话', 25 | noRecent: '暂无最近对话', 26 | untitled: '未命名对话', 27 | promopt: { 28 | system: '系统信息', 29 | desc: '你是个乐于助人的助手,回答尽量简洁...', 30 | }, 31 | emoji: '搜索一个表情 ~', 32 | confirm: { 33 | title: '删除本会话的所有消息', 34 | desc: '这将删除本会话的所有消息,且不可恢复', 35 | message: '删除这条记录', 36 | btn: '确认', 37 | cancel: '取消', 38 | submit: '提交', 39 | }, 40 | share: { 41 | title: '分享对话', 42 | link: { 43 | title: '分享链接', 44 | copy: '复制链接', 45 | create: '创建链接', 46 | }, 47 | save: '保存', 48 | copy: '复制上下文', 49 | messages: { 50 | title: '选择消息', 51 | selected: '已选择的消息', 52 | selectAll: '全选', 53 | }, 54 | tabs: { 55 | context: '分享上下文', 56 | image: '分享图片', 57 | }, 58 | image: { 59 | btn: '生成图片', 
60 | open: '新窗口打开', 61 | loading: '生成中...', 62 | copy: '复制图片', 63 | }, 64 | }, 65 | }, 66 | docs: '文档', 67 | github: '源码', 68 | scroll: '滚动到底部', 69 | empty: '暂无数据', 70 | send: { 71 | placeholder: '输入内容...', 72 | button: '发送', 73 | }, 74 | copyed: '已拷贝!', 75 | }, 76 | } as language 77 | -------------------------------------------------------------------------------- /src/locale/lang/zh-hk.ts: -------------------------------------------------------------------------------- 1 | import type { language } from '..' 2 | 3 | export const zhHK = { 4 | name: 'zhHK', 5 | desc: '繁體中文', 6 | locales: { 7 | settings: { 8 | title: '設定', 9 | save: '儲存', 10 | general: { 11 | title: '通用', 12 | requestWithBackend: '請求代理後端', 13 | locale: '切換語言', 14 | }, 15 | openai: { 16 | title: 'OpenAI', 17 | key: '', 18 | }, 19 | replicate: {}, 20 | }, 21 | conversations: { 22 | title: '對話列表', 23 | add: '新增對話', 24 | recent: '最近對話', 25 | noRecent: '暫無最近對話', 26 | untitled: '未命名對話', 27 | promopt: { 28 | system: '系統訊息', 29 | desc: '你是個樂於助人的助手,回答盡量簡潔...', 30 | }, 31 | emoji: '搜尋表情符號 ~', 32 | confirm: { 33 | title: '刪除本會話的所有訊息', 34 | desc: '這將刪除本會話的所有訊息,且不可恢復', 35 | message: '刪除這條記錄', 36 | btn: '確認', 37 | cancel: '取消', 38 | submit: '提交', 39 | }, 40 | share: { 41 | title: '分享對話', 42 | link: { 43 | title: '分享連結', 44 | copy: '複製連結', 45 | create: '建立連結', 46 | }, 47 | save: '儲存', 48 | copy: '複製內容', 49 | messages: { 50 | title: '選擇訊息', 51 | selected: '已選擇的訊息', 52 | selectAll: '全選', 53 | }, 54 | tabs: { 55 | context: '分享內容', 56 | image: '分享圖片', 57 | }, 58 | image: { 59 | btn: '產生圖片', 60 | open: '在新視窗中開啟', 61 | loading: '產生中...', 62 | copy: '複製圖片', 63 | }, 64 | }, 65 | }, 66 | docs: '文件', 67 | github: '原始碼', 68 | scroll: '滾動到底部', 69 | empty: '暫無資料', 70 | send: { 71 | placeholder: '輸入內容...', 72 | button: '發送', 73 | }, 74 | copyed: '已複製!', 75 | }, 76 | } as language 77 | -------------------------------------------------------------------------------- /src/logics/helper.ts: 
-------------------------------------------------------------------------------- 1 | import { getSettingsByProviderId } from '@/stores/settings' 2 | import type { HandlerPayload } from '@/types/provider' 3 | 4 | export const generateRapidProviderPayload = (prompt: string, providerId: string) => { 5 | const payload = { 6 | conversationId: 'temp', 7 | conversationType: 'chat_single', 8 | botId: 'temp', 9 | globalSettings: getSettingsByProviderId(providerId), 10 | botSettings: {}, 11 | prompt, 12 | messages: [], 13 | } as HandlerPayload 14 | return payload 15 | } 16 | 17 | export const promptHelper = { 18 | summarizeText: (text: string) => { 19 | return [ 20 | 'Summarize a short and relevant title of input with no more than 5 words.', 21 | 'Rules:', 22 | '1. Must use the same language as input.', 23 | '2. Output the title directly, do not add any other content.', 24 | 'The input is:', 25 | text, 26 | ].join('\n') 27 | }, 28 | } 29 | -------------------------------------------------------------------------------- /src/logics/stream.ts: -------------------------------------------------------------------------------- 1 | import type { Setter } from 'solid-js' 2 | 3 | export const convertReadableStreamToAccessor = async(stream: ReadableStream, setter: Setter) => { 4 | let text = '' 5 | try { 6 | const reader = stream.getReader() 7 | const decoder = new TextDecoder('utf-8') 8 | let done = false 9 | while (!done) { 10 | const { value, done: readerDone } = await reader.read() 11 | if (value) { 12 | const char = decoder.decode(value, { stream: true }) 13 | if (char) { 14 | text += char 15 | setter(text) 16 | } 17 | } 18 | done = readerDone 19 | } 20 | return text 21 | } catch (error) { 22 | return text 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /src/pages/api/handle/[provider].ts: -------------------------------------------------------------------------------- 1 | import { callProviderHandler } from 
'@/logics/conversation' 2 | import type { APIRoute } from 'astro' 3 | import type { HandlerPayload } from '@/types/provider' 4 | import type { ErrorMessage } from '@/types/message' 5 | 6 | export const post: APIRoute = async({ params, request }) => { 7 | const providerId = params.provider as string 8 | const body = await request.json() as HandlerPayload 9 | 10 | try { 11 | if (!providerId) throw new Error('Provider ID is required') 12 | const providerResponse = await callProviderHandler(providerId, body) 13 | const isStream = providerResponse instanceof ReadableStream 14 | return new Response(providerResponse, { 15 | headers: { 16 | 'Content-Type': isStream ? 'text/html; charset=utf-8' : 'text/plain; charset=utf-8', 17 | }, 18 | }) 19 | } catch (e) { 20 | const error = e as Error 21 | const cause = error?.cause as ErrorMessage 22 | console.error(e) 23 | return new Response(JSON.stringify({ 24 | error: cause, 25 | }), { 26 | status: 500, 27 | }) 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /src/pages/index.astro: -------------------------------------------------------------------------------- 1 | --- 2 | import Layout from '@/layouts/Layout.astro' 3 | import Main from '@/components/Main.astro' 4 | import ConversationSidebar from '@/components/conversations/ConversationSidebar' 5 | import Settings from '@/components/settings/SettingsSidebar' 6 | import ModalsLayer from '@/components/ModalsLayer' 7 | import Sidebar from '@/components/ui/Sidebar' 8 | import BuildStores from '@/components/client-only/BuildStores' 9 | --- 10 | 11 | 12 |
13 | 16 |
17 | 20 |
21 | 22 | 23 |
24 | -------------------------------------------------------------------------------- /src/providers/azure/api.ts: -------------------------------------------------------------------------------- 1 | export interface AzureFetchPayload { 2 | apiKey: string 3 | baseUrl: string 4 | body: Record 5 | model?: string 6 | signal?: AbortSignal 7 | } 8 | 9 | export const fetchChatCompletion = async(payload: AzureFetchPayload) => { 10 | const { baseUrl, apiKey, body, model, signal } = payload || {} 11 | const initOptions = { 12 | headers: { 'Content-Type': 'application/json', 'api-key': apiKey }, 13 | method: 'POST', 14 | body: JSON.stringify({ ...body }), 15 | signal, 16 | } 17 | return fetch(`${baseUrl}/openai/deployments/${model}/chat/completions?api-version=2024-02-15-preview`, initOptions) 18 | } 19 | 20 | export const fetchImageGeneration = async(payload: AzureFetchPayload) => { 21 | const { baseUrl, apiKey, body } = payload || {} 22 | const initOptions = { 23 | headers: { 'Content-Type': 'application/json', 'api-key': apiKey }, 24 | method: 'POST', 25 | body: JSON.stringify(body), 26 | } 27 | return fetch(`${baseUrl}.openai.azure.com/openai/images/generations:submit?api-version=2024-02-15-preview`, initOptions) 28 | } 29 | -------------------------------------------------------------------------------- /src/providers/azure/handler.ts: -------------------------------------------------------------------------------- 1 | import { fetchChatCompletion, fetchImageGeneration } from './api' 2 | import { parseStream } from './parser' 3 | import type { Message } from '@/types/message' 4 | import type { HandlerPayload, Provider } from '@/types/provider' 5 | 6 | export const handlePrompt: Provider['handlePrompt'] = async(payload, signal?: AbortSignal) => { 7 | if (payload.botId === 'chat_continuous') 8 | return handleChatCompletion(payload, signal) 9 | if (payload.botId === 'chat_single') 10 | return handleChatCompletion(payload, signal) 11 | if (payload.botId === 
'image_generation')
13 |     return handleImageGeneration(payload)
14 | }
15 | 
16 | // One-shot, non-streaming completion used for utility prompts
17 | // (e.g. conversation title summarization).
18 | export const handleRapidPrompt: Provider['handleRapidPrompt'] = async(prompt, globalSettings) => {
19 |   const rapidPromptPayload = {
20 |     conversationId: 'temp',
21 |     conversationType: 'chat_single',
22 |     botId: 'temp',
23 |     globalSettings: {
24 |       ...globalSettings,
25 |       temperature: 0.4,
26 |       maxTokens: 2048,
27 |       top_p: 1,
28 |       stream: false,
29 |     },
30 |     botSettings: {},
31 |     prompt,
32 |     messages: [{ role: 'user', content: prompt }],
33 |   } as HandlerPayload
34 |   const result = await handleChatCompletion(rapidPromptPayload)
35 |   if (typeof result === 'string') return result
36 |   return ''
37 | }
38 | 
39 | const handleChatCompletion = async(payload: HandlerPayload, signal?: AbortSignal) => {
40 |   // An array to store the chat messages
41 |   const messages: Message[] = []
42 | 
43 |   let maxTokens = payload.globalSettings.maxTokens as number
44 |   let messageHistorySize = payload.globalSettings.messageHistorySize as number
45 | 
46 |   // Walk the history newest-first, keeping as many messages as fit in the
47 |   // token budget (content length used as a rough token estimate).
48 |   while (messageHistorySize > 0) {
49 |     messageHistorySize--
50 |     // Get the last message from the payload
51 |     const m = payload.messages.pop()
52 |     if (m === undefined)
53 |       break
54 | 
55 |     if (maxTokens - m.content.length < 0)
56 |       break
57 | 
58 |     maxTokens -= m.content.length
59 |     messages.unshift(m)
60 |   }
61 | 
62 |   const response = await fetchChatCompletion({
63 |     apiKey: payload.globalSettings.apiKey as string,
64 |     baseUrl: (payload.globalSettings.baseUrl as string).trim().replace(/\/$/, ''),
65 |     body: {
66 |       messages,
67 |       max_tokens: maxTokens,
68 |       temperature: payload.globalSettings.temperature as number,
69 |       // FIX: this provider registers the setting under the key 'top_p'
70 |       // (see src/providers/azure/index.ts) and handleRapidPrompt above also
71 |       // sets 'top_p'; the old read of 'topP' was always undefined.
72 |       top_p: payload.globalSettings.top_p as number,
73 |       stream: payload.globalSettings.stream as boolean ?? 
true, 67 | }, 68 | model: payload.globalSettings.model as string, 69 | signal, 70 | }) 71 | if (!response.ok) { 72 | const responseJson = await response.json() 73 | console.log('responseJson', responseJson) 74 | const errMessage = responseJson.error?.message || response.statusText || 'Unknown error' 75 | throw new Error(errMessage, { cause: responseJson.error }) 76 | } 77 | const isStream = response.headers.get('content-type')?.includes('text/event-stream') 78 | if (isStream) { 79 | return parseStream(response) 80 | } else { 81 | const resJson = await response.json() 82 | return resJson.choices[0].message.content as string 83 | } 84 | } 85 | 86 | const handleImageGeneration = async(payload: HandlerPayload) => { 87 | const prompt = payload.prompt 88 | const response = await fetchImageGeneration({ 89 | apiKey: payload.globalSettings.apiKey as string, 90 | baseUrl: (payload.globalSettings.baseUrl as string).trim().replace(/\/$/, ''), 91 | body: { prompt, n: 1, size: '512x512' }, 92 | }) 93 | if (!response.ok) { 94 | const responseJson = await response.json() 95 | const errMessage = responseJson.error?.message || response.statusText || 'Unknown error' 96 | throw new Error(errMessage) 97 | } 98 | const resJson = await response.json() 99 | return resJson.data[0].url 100 | } 101 | -------------------------------------------------------------------------------- /src/providers/azure/index.ts: -------------------------------------------------------------------------------- 1 | import { 2 | handlePrompt, 3 | handleRapidPrompt, 4 | } from './handler' 5 | import type { Provider } from '@/types/provider' 6 | 7 | const providerOpenAI = () => { 8 | const provider: Provider = { 9 | id: 'provider-azure', 10 | icon: 'i-simple-icons:microsoftazure', // @unocss-include 11 | name: 'Azure OpenAI', 12 | globalSettings: [ 13 | { 14 | key: 'apiKey', 15 | name: 'API Key', 16 | type: 'api-key', 17 | }, 18 | { 19 | key: 'baseUrl', 20 | name: 'Endpoint', 21 | description: 'OpenAI Endpoint', 22 
| type: 'input', 23 | }, 24 | { 25 | key: 'model', 26 | name: 'Azure deployment name', 27 | description: 'Custom model name for Azure OpenAI.', 28 | type: 'input', 29 | }, 30 | { 31 | key: 'maxTokens', 32 | name: 'Max Tokens', 33 | description: 'The maximum number of tokens to generate in the completion.', 34 | type: 'slider', 35 | min: 0, 36 | max: 32768, 37 | default: 2048, 38 | step: 1, 39 | }, 40 | { 41 | key: 'messageHistorySize', 42 | name: 'Max History Message Size', 43 | description: 'The number of retained historical messages will be truncated if the length of the message exceeds the MaxToken parameter.', 44 | type: 'slider', 45 | min: 1, 46 | max: 24, 47 | default: 5, 48 | step: 1, 49 | }, 50 | { 51 | key: 'temperature', 52 | name: 'Temperature', 53 | type: 'slider', 54 | description: 'What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.', 55 | min: 0, 56 | max: 2, 57 | default: 0.7, 58 | step: 0.01, 59 | }, 60 | { 61 | key: 'top_p', 62 | name: 'Top P', 63 | description: 'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered.', 64 | type: 'slider', 65 | min: 0, 66 | max: 1, 67 | default: 1, 68 | step: 0.01, 69 | }, 70 | ], 71 | bots: [ 72 | { 73 | id: 'chat_continuous', 74 | type: 'chat_continuous', 75 | name: 'Continuous Chat', 76 | settings: [], 77 | }, 78 | { 79 | id: 'chat_single', 80 | type: 'chat_single', 81 | name: 'Single Chat', 82 | settings: [], 83 | }, 84 | { 85 | id: 'image_generation', 86 | type: 'image_generation', 87 | name: 'DALL·E', 88 | settings: [], 89 | }, 90 | ], 91 | handlePrompt, 92 | handleRapidPrompt, 93 | } 94 | return provider 95 | } 96 | 97 | export default providerOpenAI 98 | -------------------------------------------------------------------------------- /src/providers/azure/parser.ts: -------------------------------------------------------------------------------- 1 | import { createParser } from 'eventsource-parser' 2 | import type { ParsedEvent, ReconnectInterval } from 'eventsource-parser' 3 | 4 | export const parseStream = (rawResponse: Response) => { 5 | const encoder = new TextEncoder() 6 | const decoder = new TextDecoder() 7 | const rb = rawResponse.body as ReadableStream 8 | 9 | return new ReadableStream({ 10 | async start(controller) { 11 | const streamParser = (event: ParsedEvent | ReconnectInterval) => { 12 | if (event.type === 'event') { 13 | const data = event.data 14 | if (data === '[DONE]') { 15 | controller.close() 16 | return 17 | } 18 | try { 19 | const json = JSON.parse(data) 20 | const text = (json.choices?.[0]?.delta?.content) || '' 21 | const queue = encoder.encode(text) 22 | controller.enqueue(queue) 23 | } catch (e) { 24 | controller.error(e) 25 | } 26 | } 27 | } 28 | const reader = rb.getReader() 29 | const parser = createParser(streamParser) 30 | let done = false 31 | while (!done) { 32 | const { done: isDone, value } = await reader.read() 33 | if (isDone) { 34 | done = true 35 | controller.close() 36 | return 37 | } 38 | 
parser.feed(decoder.decode(value, { stream: true })) 39 | } 40 | }, 41 | }) 42 | } 43 | -------------------------------------------------------------------------------- /src/providers/google/api.ts: -------------------------------------------------------------------------------- 1 | export interface GoogleFetchPayload { 2 | apiKey: string 3 | stream: boolean 4 | body: Record 5 | model?: string 6 | signal?: AbortSignal 7 | } 8 | 9 | export const fetchChatCompletion = async(payload: GoogleFetchPayload) => { 10 | const { apiKey, body, model, stream } = payload || {} 11 | const initOptions = { 12 | headers: { 'Content-Type': 'application/json' }, 13 | method: 'POST', 14 | body: JSON.stringify({ ...body }), 15 | signal: payload.signal, 16 | } 17 | return fetch(`https://generativelanguage.googleapis.com/v1beta/models/${model}:streamGenerateContent?${stream ? 'alt=sse&' : ''}key=${apiKey}`, initOptions) 18 | } 19 | -------------------------------------------------------------------------------- /src/providers/google/handler.ts: -------------------------------------------------------------------------------- 1 | import { fetchChatCompletion } from './api' 2 | import { parseMessageList, parseStream } from './parser' 3 | import type { Message } from '@/types/message' 4 | import type { HandlerPayload, Provider } from '@/types/provider' 5 | 6 | export const handlePrompt: Provider['handlePrompt'] = async(payload, signal?: AbortSignal) => { 7 | if (payload.botId === 'chat_continuous') 8 | return handleChatCompletion(payload, signal) 9 | if (payload.botId === 'chat_single') 10 | return handleChatCompletion(payload, signal) 11 | } 12 | 13 | export const handleRapidPrompt: Provider['handleRapidPrompt'] = async(prompt, globalSettings) => { 14 | const rapidPromptPayload = { 15 | conversationId: 'temp', 16 | conversationType: 'chat_single', 17 | botId: 'temp', 18 | globalSettings: { 19 | ...globalSettings, 20 | model: 'gemini-pro', 21 | temperature: 0.4, 22 | maxTokens: 10240, 23 | 
maxOutputTokens: 1024, 24 | topP: 0.8, 25 | topK: 1, 26 | }, 27 | botSettings: {}, 28 | prompt, 29 | messages: { contents: [{ role: 'user', parts: [{ text: prompt }] }] }, 30 | } as unknown as HandlerPayload 31 | const result = await handleChatCompletion(rapidPromptPayload) 32 | if (typeof result === 'string') 33 | return result 34 | return '' 35 | } 36 | 37 | export const handleChatCompletion = async(payload: HandlerPayload, signal?: AbortSignal) => { 38 | // An array to store the chat messages 39 | const messages: Message[] = [] 40 | 41 | let maxTokens = payload.globalSettings.maxTokens as number 42 | let messageHistorySize = payload.globalSettings.messageHistorySize as number 43 | 44 | // Iterate through the message history 45 | while (messageHistorySize > 0) { 46 | messageHistorySize-- 47 | // Get the last message from the payload 48 | const m = payload.messages.pop() 49 | if (m === undefined) 50 | break 51 | 52 | if (maxTokens - m.content.length < 0) 53 | break 54 | 55 | maxTokens -= m.content.length 56 | messages.unshift(m) 57 | } 58 | 59 | const stream = payload.globalSettings.stream as boolean ?? 
true 60 | const response = await fetchChatCompletion({ 61 | apiKey: payload.globalSettings.apiKey as string, 62 | stream, 63 | body: { 64 | contents: parseMessageList(messages), 65 | generationConfig: { 66 | temperature: payload.globalSettings.temperature as number, 67 | maxOutputTokens: payload.globalSettings.maxOutputTokens as number, 68 | topP: payload.globalSettings.topP as number, 69 | topK: payload.globalSettings.topK as number, 70 | } 71 | }, 72 | signal, 73 | model: payload.globalSettings.model as string, 74 | }) 75 | 76 | if (response.ok) { 77 | if (stream) 78 | return parseStream(response) 79 | const json = await response.json() 80 | // console.log('json', json) 81 | const output = json.candidates[0].content.parts[0].text || json 82 | return output as string 83 | } 84 | 85 | const text = await response.text() 86 | throw new Error(`Failed to fetch chat completion: ${text}`) 87 | } 88 | -------------------------------------------------------------------------------- /src/providers/google/index.ts: -------------------------------------------------------------------------------- 1 | import { 2 | handlePrompt, 3 | handleRapidPrompt, 4 | } from './handler' 5 | import type { Provider } from '@/types/provider' 6 | 7 | const providerGoogle = () => { 8 | const provider: Provider = { 9 | id: 'provider-google', 10 | icon: 'i-simple-icons-google', // @unocss-include 11 | name: 'Google', 12 | globalSettings: [ 13 | { 14 | key: 'apiKey', 15 | name: 'API Key', 16 | type: 'api-key', 17 | }, 18 | { 19 | key: 'model', 20 | name: 'Google model', 21 | description: 'Custom model for Google API.', 22 | type: 'select', 23 | options: [ 24 | { value: 'gemini-pro', label: 'gemini-pro' }, 25 | ], 26 | default: 'gemini-pro', 27 | }, 28 | { 29 | key: 'maxTokens', 30 | name: 'Max Tokens', 31 | description: 'The maximum number of tokens to generate in the completion.', 32 | type: 'slider', 33 | min: 0, 34 | max: 32768, 35 | default: 10240, 36 | step: 1, 37 | }, 38 | { 39 | key: 
'maxOutputTokens', 40 | name: 'Max Output Tokens', 41 | description: 'Specifies the maximum number of tokens that can be generated in the response. A token is approximately four characters. 100 tokens correspond to roughly 60-80 words.', 42 | type: 'slider', 43 | min: 0, 44 | max: 4096, 45 | default: 1024, 46 | step: 1, 47 | }, 48 | { 49 | key: 'temperature', 50 | name: 'Temperature', 51 | description: 'The temperature controls the degree of randomness in token selection. Lower temperatures are good for prompts that require a more deterministic or less open-ended response.', 52 | type: 'slider', 53 | min: 0, 54 | max: 1, 55 | default: 0.4, 56 | step: 0.01, 57 | }, 58 | { 59 | key: 'topP', 60 | name: 'Top P', 61 | description: 'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.', 62 | type: 'slider', 63 | min: 0, 64 | max: 1, 65 | default: 0.95, 66 | step: 0.01, 67 | }, 68 | { 69 | key: 'topK', 70 | name: 'Top K', 71 | description: 'Top K sampling chooses from the K most likely tokens.', 72 | type: 'slider', 73 | min: 0, 74 | max: 32768, 75 | default: 1, 76 | step: 1, 77 | }, 78 | { 79 | key: 'messageHistorySize', 80 | name: 'Max History Message Size', 81 | description: 'The number of retained historical messages will be truncated if the length of the message exceeds the MaxToken parameter.', 82 | type: 'slider', 83 | min: 1, 84 | max: 24, 85 | default: 5, 86 | step: 1, 87 | }, 88 | ], 89 | bots: [ 90 | { 91 | id: 'chat_continuous', 92 | type: 'chat_continuous', 93 | name: 'Continuous Chat', 94 | settings: [], 95 | }, 96 | { 97 | id: 'chat_single', 98 | type: 'chat_single', 99 | name: 'Single Chat', 100 | settings: [], 101 | }, 102 | 103 | ], 104 | handlePrompt, 105 | handleRapidPrompt, 106 | } 107 | return provider 108 | } 109 | 110 | export default providerGoogle 111 | 
-------------------------------------------------------------------------------- /src/providers/google/parser.ts: -------------------------------------------------------------------------------- 1 | import { createParser } from 'eventsource-parser' 2 | import type { Message } from '@/types/message' 3 | import type { ParsedEvent, ReconnectInterval } from 'eventsource-parser' 4 | 5 | export const parseMessageList = (rawList: Message[]) => { 6 | interface GoogleGeminiMessage { 7 | role: 'user' | 'model' 8 | // TODO: Add support for image input 9 | parts: [ 10 | { text: string }, 11 | ] 12 | } 13 | 14 | if (rawList.length === 0) 15 | return [] 16 | 17 | const parsedList: GoogleGeminiMessage[] = [] 18 | // if first message is system message, insert an empty message after it 19 | if (rawList[0].role === 'system') { 20 | parsedList.push({ role: 'user', parts: [{ text: rawList[0].content }] }) 21 | parsedList.push({ role: 'model', parts: [{ text: 'OK.' }] }) 22 | } 23 | // covert other messages 24 | const roleDict = { 25 | user: 'user', 26 | assistant: 'model', 27 | } as const 28 | for (const message of rawList) { 29 | if (message.role === 'system') 30 | continue 31 | parsedList.push({ 32 | role: roleDict[message.role], 33 | parts: [{ text: message.content }], 34 | }) 35 | } 36 | return parsedList 37 | } 38 | 39 | export const parseStream = (rawResponse: Response) => { 40 | const encoder = new TextEncoder() 41 | const decoder = new TextDecoder() 42 | const rb = rawResponse.body as ReadableStream 43 | 44 | return new ReadableStream({ 45 | async start(controller) { 46 | const streamParser = (event: ParsedEvent | ReconnectInterval) => { 47 | if (event.type === 'event') { 48 | const data = event.data 49 | try { 50 | const json = JSON.parse(data) 51 | const text = json.candidates[0].content.parts[0].text || '' 52 | const queue = encoder.encode(text) 53 | controller.enqueue(queue) 54 | } catch (e) { 55 | controller.error(e) 56 | } 57 | } 58 | } 59 | const reader = rb.getReader() 
60 | const parser = createParser(streamParser) 61 | let done = false 62 | while (!done) { 63 | const { done: isDone, value } = await reader.read() 64 | if (isDone) { 65 | done = true 66 | controller.close() 67 | return 68 | } 69 | parser.feed(decoder.decode(value, { stream: true })) 70 | } 71 | }, 72 | }) 73 | } 74 | -------------------------------------------------------------------------------- /src/providers/openai/api.ts: -------------------------------------------------------------------------------- 1 | export interface OpenAIFetchPayload { 2 | apiKey: string 3 | baseUrl: string 4 | body: Record 5 | signal?: AbortSignal 6 | } 7 | 8 | export const fetchChatCompletion = async(payload: OpenAIFetchPayload) => { 9 | const initOptions = { 10 | headers: { 11 | 'Content-Type': 'application/json', 12 | 'Authorization': `Bearer ${payload.apiKey}`, 13 | }, 14 | method: 'POST', 15 | body: JSON.stringify(payload.body), 16 | signal: payload.signal, 17 | } 18 | return fetch(`${payload.baseUrl}/v1/chat/completions`, initOptions) 19 | } 20 | 21 | export const fetchImageGeneration = async(payload: OpenAIFetchPayload) => { 22 | const initOptions = { 23 | headers: { 24 | 'Content-Type': 'application/json', 25 | 'Authorization': `Bearer ${payload.apiKey}`, 26 | }, 27 | method: 'POST', 28 | body: JSON.stringify(payload.body), 29 | signal: payload.signal, 30 | } 31 | return fetch(`${payload.baseUrl}/v1/images/generations`, initOptions) 32 | } 33 | -------------------------------------------------------------------------------- /src/providers/openai/handler.ts: -------------------------------------------------------------------------------- 1 | import { fetchChatCompletion, fetchImageGeneration } from './api' 2 | import { parseStream } from './parser' 3 | import type { Message } from '@/types/message' 4 | import type { HandlerPayload, Provider } from '@/types/provider' 5 | 6 | export const handlePrompt: Provider['handlePrompt'] = async(payload, signal?: AbortSignal) => { 7 | if 
(payload.botId === 'chat_continuous') 8 | return handleChatCompletion(payload, signal) 9 | if (payload.botId === 'chat_single') 10 | return handleChatCompletion(payload, signal) 11 | if (payload.botId === 'image_generation') 12 | return handleImageGeneration(payload) 13 | } 14 | 15 | export const handleRapidPrompt: Provider['handleRapidPrompt'] = async(prompt, globalSettings) => { 16 | const rapidPromptPayload = { 17 | conversationId: 'temp', 18 | conversationType: 'chat_single', 19 | botId: 'temp', 20 | globalSettings: { 21 | ...globalSettings, 22 | model: 'gpt-3.5-turbo', 23 | temperature: 0.4, 24 | maxTokens: 2048, 25 | top_p: 1, 26 | stream: false, 27 | }, 28 | botSettings: {}, 29 | prompt, 30 | messages: [{ role: 'user', content: prompt }], 31 | } as HandlerPayload 32 | const result = await handleChatCompletion(rapidPromptPayload) 33 | if (typeof result === 'string') 34 | return result 35 | return '' 36 | } 37 | 38 | const handleChatCompletion = async(payload: HandlerPayload, signal?: AbortSignal) => { 39 | // An array to store the chat messages 40 | const messages: Message[] = [] 41 | 42 | let maxTokens = payload.globalSettings.maxTokens as number 43 | let messageHistorySize = payload.globalSettings.messageHistorySize as number 44 | 45 | // Iterate through the message history 46 | while (messageHistorySize > 0) { 47 | messageHistorySize-- 48 | // Get the last message from the payload 49 | const m = payload.messages.pop() 50 | if (m === undefined) 51 | break 52 | 53 | if (maxTokens - m.content.length < 0) 54 | break 55 | 56 | maxTokens -= m.content.length 57 | messages.unshift(m) 58 | } 59 | 60 | const response = await fetchChatCompletion({ 61 | apiKey: payload.globalSettings.apiKey as string, 62 | baseUrl: (payload.globalSettings.baseUrl as string).trim().replace(/\/$/, ''), 63 | body: { 64 | messages, 65 | max_tokens: maxTokens, 66 | model: payload.globalSettings.model as string, 67 | temperature: payload.globalSettings.temperature as number, 68 | top_p: 
payload.globalSettings.topP as number, 69 | stream: payload.globalSettings.stream as boolean ?? true, 70 | }, 71 | signal, 72 | }) 73 | if (!response.ok) { 74 | const responseJson = await response.json() 75 | console.log('responseJson', responseJson) 76 | const errMessage = responseJson.error?.message || response.statusText || 'Unknown error' 77 | throw new Error(errMessage, { cause: responseJson.error }) 78 | } 79 | const isStream = response.headers.get('content-type')?.includes('text/event-stream') 80 | if (isStream) { 81 | return parseStream(response) 82 | } else { 83 | const resJson = await response.json() 84 | return resJson.choices[0].message.content as string 85 | } 86 | } 87 | 88 | const handleImageGeneration = async(payload: HandlerPayload) => { 89 | const prompt = payload.prompt 90 | const response = await fetchImageGeneration({ 91 | apiKey: payload.globalSettings.apiKey as string, 92 | baseUrl: (payload.globalSettings.baseUrl as string).trim().replace(/\/$/, ''), 93 | body: { 94 | prompt, 95 | n: 1, 96 | size: '512x512', 97 | response_format: 'url', // TODO: support 'b64_json' 98 | }, 99 | }) 100 | if (!response.ok) { 101 | const responseJson = await response.json() 102 | const errMessage = responseJson.error?.message || response.statusText || 'Unknown error' 103 | throw new Error(errMessage) 104 | } 105 | const resJson = await response.json() 106 | return resJson.data[0].url 107 | } 108 | -------------------------------------------------------------------------------- /src/providers/openai/index.ts: -------------------------------------------------------------------------------- 1 | import { 2 | handlePrompt, 3 | handleRapidPrompt, 4 | } from './handler' 5 | import type { Provider } from '@/types/provider' 6 | 7 | const providerOpenAI = () => { 8 | const provider: Provider = { 9 | id: 'provider-openai', 10 | icon: 'i-simple-icons-openai', // @unocss-include 11 | name: 'OpenAI', 12 | globalSettings: [ 13 | { 14 | key: 'apiKey', 15 | name: 'API Key', 16 | 
type: 'api-key', 17 | }, 18 | { 19 | key: 'baseUrl', 20 | name: 'Base URL', 21 | description: 'Custom base url for OpenAI API.', 22 | type: 'input', 23 | default: 'https://api.openai.com', 24 | }, 25 | { 26 | key: 'model', 27 | name: 'OpenAI model', 28 | description: 'Custom gpt model for OpenAI API.', 29 | type: 'select', 30 | options: [ 31 | { value: 'gpt-4o', label: 'gpt-4o' }, 32 | { value: 'gpt-4o-2024-08-06', label: 'gpt-4o-2024-08-06' }, 33 | { value: 'gpt-4.1', label: 'gpt-4.1' }, 34 | { value: 'gpt-4-turbo', label: 'gpt-4-turbo' }, 35 | { value: 'gpt-4-turbo-2024-04-09', label: 'gpt-4-turbo-2024-04-09' }, 36 | { value: 'gpt-4-0125-preview', label: 'gpt-4-0125-preview' }, 37 | { value: 'gpt-4-1106-preview', label: 'gpt-4-1106-preview' }, 38 | { value: 'gpt-4', label: 'gpt-4' }, 39 | { value: 'gpt-4-0314', label: 'gpt-4-0314' }, 40 | { value: 'gpt-4-0613', label: 'gpt-4-0613' }, 41 | { value: 'gpt-3.5-turbo', label: 'gpt-3.5-turbo' }, 42 | { value: 'gpt-3.5-turbo-0125', label: 'gpt-3.5-turbo-0125' }, 43 | { value: 'gpt-3.5-turbo-1106', label: 'gpt-3.5-turbo-1106' }, 44 | { value: 'gpt-4.1-mini', label: 'gpt-4.1-mini' }, 45 | { value: 'gpt-4.1-nano', label: 'gpt-4.1-nano' }, 46 | { value: 'gpt-4o-mini', label: 'gpt-4o-mini' }, 47 | ], 48 | default: 'gpt-4o', 49 | }, 50 | { 51 | key: 'maxTokens', 52 | name: 'Max Tokens', 53 | description: 'The maximum number of tokens to generate in the completion.', 54 | type: 'slider', 55 | min: 0, 56 | max: 32768, 57 | default: 2048, 58 | step: 1, 59 | }, 60 | { 61 | key: 'messageHistorySize', 62 | name: 'Max History Message Size', 63 | description: 'The number of retained historical messages will be truncated if the length of the message exceeds the MaxToken parameter.', 64 | type: 'slider', 65 | min: 1, 66 | max: 24, 67 | default: 5, 68 | step: 1, 69 | }, 70 | { 71 | key: 'temperature', 72 | name: 'Temperature', 73 | type: 'slider', 74 | description: 'What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.', 75 | min: 0, 76 | max: 2, 77 | default: 0.7, 78 | step: 0.01, 79 | }, 80 | { 81 | key: 'top_p', 82 | name: 'Top P', 83 | description: 'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.', 84 | type: 'slider', 85 | min: 0, 86 | max: 1, 87 | default: 1, 88 | step: 0.01, 89 | }, 90 | ], 91 | bots: [ 92 | { 93 | id: 'chat_continuous', 94 | type: 'chat_continuous', 95 | name: 'Continuous Chat', 96 | settings: [], 97 | }, 98 | { 99 | id: 'chat_single', 100 | type: 'chat_single', 101 | name: 'Single Chat', 102 | settings: [], 103 | }, 104 | { 105 | id: 'image_generation', 106 | type: 'image_generation', 107 | name: 'DALL·E', 108 | settings: [], 109 | }, 110 | ], 111 | handlePrompt, 112 | handleRapidPrompt, 113 | } 114 | return provider 115 | } 116 | 117 | export default providerOpenAI 118 | -------------------------------------------------------------------------------- /src/providers/openai/parser.ts: -------------------------------------------------------------------------------- 1 | import { createParser } from 'eventsource-parser' 2 | import type { ParsedEvent, ReconnectInterval } from 'eventsource-parser' 3 | 4 | export const parseStream = (rawResponse: Response) => { 5 | const encoder = new TextEncoder() 6 | const decoder = new TextDecoder() 7 | const rb = rawResponse.body as ReadableStream 8 | 9 | return new ReadableStream({ 10 | async start(controller) { 11 | const streamParser = (event: ParsedEvent | ReconnectInterval) => { 12 | if (event.type === 'event') { 13 | const data = event.data 14 | if (data === '[DONE]') { 15 | controller.close() 16 | return 17 | } 18 | try { 19 | const json = JSON.parse(data) 20 | const text = json.choices[0].delta?.content 
|| '' 21 | const queue = encoder.encode(text) 22 | controller.enqueue(queue) 23 | } catch (e) { 24 | controller.error(e) 25 | } 26 | } 27 | } 28 | const reader = rb.getReader() 29 | const parser = createParser(streamParser) 30 | let done = false 31 | while (!done) { 32 | const { done: isDone, value } = await reader.read() 33 | if (isDone) { 34 | done = true 35 | controller.close() 36 | return 37 | } 38 | parser.feed(decoder.decode(value, { stream: true })) 39 | } 40 | }, 41 | }) 42 | } 43 | -------------------------------------------------------------------------------- /src/providers/replicate/api.ts: -------------------------------------------------------------------------------- 1 | interface FetchPayload { 2 | method?: 'POST' | 'GET' 3 | token: string 4 | predictionId?: string 5 | body?: Record 6 | } 7 | 8 | export const fetchImageGeneration = async(payload: FetchPayload) => { 9 | const initOptions = { 10 | headers: { 11 | 'Content-Type': 'application/json', 12 | 'Authorization': `Token ${payload.token}`, 13 | }, 14 | method: payload.method || 'GET', 15 | body: payload.method === 'POST' ? 
JSON.stringify(payload.body || {}) : undefined, 16 | } 17 | let fetchUrl = 'https://api.replicate.com/v1/predictions' 18 | if (payload.predictionId) 19 | fetchUrl += `/${payload.predictionId}` 20 | return fetch(fetchUrl, initOptions) 21 | } 22 | -------------------------------------------------------------------------------- /src/providers/replicate/handler.ts: -------------------------------------------------------------------------------- 1 | import { fetchImageGeneration } from './api' 2 | import type { HandlerPayload, Provider } from '@/types/provider' 3 | 4 | export const handlePrompt: Provider['handlePrompt'] = async(payload, signal?: AbortSignal) => { 5 | if (payload.botId === 'stable-diffusion') 6 | return handleReplicateGenerate('ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4', payload) 7 | if (payload.botId === 'stable-diffusion-v1') 8 | return handleReplicateGenerate('b3d14e1cd1f9470bbb0bb68cac48e5f483e5be309551992cc33dc30654a82bb7', payload) 9 | if (payload.botId === 'waifu-diffusion') 10 | return handleReplicateGenerate('25d2f75ecda0c0bed34c806b7b70319a53a1bccad3ade1a7496524f013f48983', payload) 11 | if (payload.botId === 'sdxl') 12 | return handleReplicateGenerate('2a865c9a94c9992b6689365b75db2d678d5022505ed3f63a5f53929a31a46947', payload) 13 | } 14 | 15 | const handleReplicateGenerate = async(modelVersion: string, payload: HandlerPayload) => { 16 | const prompt = payload.prompt 17 | const response = await fetchImageGeneration({ 18 | token: payload.globalSettings.token as string, 19 | method: 'POST', 20 | body: { 21 | version: modelVersion, 22 | input: { 23 | prompt, 24 | }, 25 | }, 26 | }) 27 | if (!response.ok) { 28 | const responseJson = await response.json() 29 | const errMessage = responseJson.detail || response.statusText || 'Unknown error' 30 | throw new Error(errMessage, { 31 | cause: { 32 | code: response.status, 33 | message: errMessage, 34 | }, 35 | }) 36 | } 37 | const resJson = await response.json() 38 | 39 | return 
waitImageWithPrediction(resJson, payload.globalSettings.token as string) 40 | } 41 | 42 | interface Prediction { 43 | id: string 44 | input: { 45 | prompt: string 46 | } 47 | output: string[] | null 48 | status: 'starting' | 'succeeded' | 'failed' 49 | } 50 | 51 | const sleep = (ms: number) => new Promise(resolve => setTimeout(resolve, ms)) 52 | 53 | const waitImageWithPrediction = async(prediction: Prediction, token: string) => { 54 | let currentPrediction = prediction 55 | while (currentPrediction.status !== 'succeeded' && currentPrediction.status !== 'failed') { 56 | await sleep(1000) 57 | const response = await fetchImageGeneration({ 58 | predictionId: currentPrediction.id, 59 | token, 60 | }) 61 | if (!response.ok) { 62 | const responseJson = await response.json() 63 | const errMessage = responseJson.error?.message || 'Unknown error' 64 | throw new Error(errMessage) 65 | } 66 | prediction = await response.json() 67 | currentPrediction = prediction 68 | // console.log('currentPrediction', prediction) 69 | } 70 | if (!currentPrediction.output || currentPrediction.output.length === 0) 71 | throw new Error('No output') 72 | return currentPrediction.output[0] 73 | } 74 | -------------------------------------------------------------------------------- /src/providers/replicate/index.ts: -------------------------------------------------------------------------------- 1 | import { handlePrompt } from './handler' 2 | import type { Provider } from '@/types/provider' 3 | 4 | const providerReplicate = () => { 5 | const provider: Provider = { 6 | id: 'provider-replicate', 7 | icon: 'i-carbon-replicate', // @unocss-include 8 | name: 'Replicate', 9 | globalSettings: [ 10 | { 11 | key: 'token', 12 | name: 'Replicate API token', 13 | type: 'api-key', 14 | }, 15 | ], 16 | bots: [ 17 | { 18 | id: 'stable-diffusion', 19 | type: 'image_generation', 20 | name: 'Stable Diffusion 2.1', 21 | settings: [], 22 | }, 23 | { 24 | id: 'stable-diffusion-v1', 25 | type: 'image_generation', 26 
| name: 'Stable Diffusion 1.5', 27 | settings: [], 28 | }, 29 | { 30 | id: 'waifu-diffusion', 31 | type: 'image_generation', 32 | name: 'Waifu Diffusion', 33 | settings: [], 34 | }, 35 | { 36 | id: 'sdxl', 37 | type: 'image_generation', 38 | name: 'sdxl', 39 | settings: [], 40 | }, 41 | ], 42 | supportCallMethod: 'backend', 43 | handlePrompt, 44 | } 45 | return provider 46 | } 47 | 48 | export default providerReplicate 49 | -------------------------------------------------------------------------------- /src/stores/conversation.ts: -------------------------------------------------------------------------------- 1 | import { action, atom, computed, map } from 'nanostores' 2 | import { botMetaList } from './provider' 3 | import { clearMessagesByConversationId } from './messages' 4 | import { conversationMapData } from './tests/conversation.mock' 5 | import { db } from './storage/conversation' 6 | import type { Conversation } from '@/types/conversation' 7 | 8 | export const conversationMap = map>({}) 9 | export const currentConversationId = atom('') 10 | export const currentConversation = computed(currentConversationId, (id) => { 11 | return id ? conversationMap.get()[id] as Conversation : null 12 | }) 13 | export const currentEditingConversationId = atom('') 14 | export const currentEditingConversation = computed(currentEditingConversationId, (id) => { 15 | return id ? 
conversationMap.get()[id] as Conversation : null 16 | }) 17 | export const conversationMapSortList = computed(conversationMap, (map) => { 18 | return Object.values(map).sort((a, b) => b.lastUseTime - a.lastUseTime) 19 | }) 20 | 21 | const migrateConversationStoreIfNeeded = () => { 22 | const rawData = conversationMap.get() 23 | Object.values(rawData).forEach((conversation) => { 24 | // @ts-expect-error migrate old data 25 | if (conversation.providerId && conversation.conversationType) { 26 | const typeDict = { 27 | single: 'chat_single', 28 | continuous: 'chat_continuous', 29 | image: 'image_generation', 30 | } 31 | const providerDict = { 32 | 'provider-stable-diffusion': 'provider-replicate', 33 | } 34 | const newConversationData = { 35 | id: conversation.id, 36 | // @ts-expect-error migrate old data 37 | bot: `${providerDict[conversation.providerId] || conversation.providerId}:${typeDict[conversation.conversationType] || 'chat_single'}`, 38 | name: conversation.name, 39 | icon: '', 40 | systemInfo: conversation.systemInfo, 41 | mockMessages: conversation.mockMessages, 42 | lastUseTime: conversation.lastUseTime, 43 | } 44 | conversationMap.setKey(conversation.id, newConversationData) 45 | db.setItem(conversation.id, newConversationData) 46 | } 47 | }) 48 | } 49 | 50 | export const rebuildConversationStore = async() => { 51 | const data = await db.exportData() || {} 52 | conversationMap.set(data) 53 | // conversationMap.set(conversationMapData) 54 | migrateConversationStoreIfNeeded() 55 | } 56 | 57 | export const addConversation = action(conversationMap, 'addConversation', (map, instance?: Partial) => { 58 | const instanceId = instance?.id || `id_${Date.now()}` 59 | const conversation: Conversation = { 60 | id: instanceId, 61 | bot: botMetaList[0]?.value, 62 | name: instance?.name || '', 63 | icon: instance?.icon || '', 64 | lastUseTime: Date.now(), 65 | } 66 | map.setKey(instanceId, conversation) 67 | db.setItem(instanceId, conversation) 68 | 
currentConversationId.set(instanceId) 69 | }) 70 | 71 | export const updateConversationById = action(conversationMap, 'updateConversationById', (map, id: string, payload: Partial) => { 72 | const conversation = { 73 | ...map.get()[id], 74 | ...payload, 75 | } 76 | map.setKey(id, conversation) 77 | db.updateItem(id, conversation) 78 | }) 79 | 80 | export const deleteConversationById = action(conversationMap, 'deleteConversationById', (map, id: string) => { 81 | map.set(Object.fromEntries(Object.entries(map.get()).filter(([key]) => key !== id))) 82 | db.deleteItem(id) 83 | clearMessagesByConversationId(id, true) 84 | }) 85 | -------------------------------------------------------------------------------- /src/stores/messages.ts: -------------------------------------------------------------------------------- 1 | import { action, atom, map } from 'nanostores' 2 | import { conversationMessagesMapData } from './tests/message.mock' 3 | import { db } from './storage/message' 4 | import { updateConversationById } from './conversation' 5 | import type { MessageInstance } from '@/types/message' 6 | 7 | export const conversationMessagesMap = map>({}) 8 | export const shareMessageIds = atom([]) 9 | 10 | export const rebuildMessagesStore = async() => { 11 | const data = await db.exportData() || {} 12 | conversationMessagesMap.set(data) 13 | // conversationMessagesMap.set(conversationMessagesMapData) 14 | } 15 | 16 | export const getMessagesByConversationId = (id: string) => { 17 | return conversationMessagesMap.get()[id] || [] 18 | } 19 | 20 | export const updateMessage = action( 21 | conversationMessagesMap, 22 | 'updateMessage', 23 | (map, conversationId: string, id: string, payload: Partial) => { 24 | const oldMessages = map.get()[conversationId] || [] 25 | const newMessages = oldMessages.map((message) => { 26 | if (message.id === id) { 27 | return { 28 | ...message, 29 | ...payload, 30 | } 31 | } 32 | return message 33 | }) 34 | map.setKey(conversationId, newMessages) 35 | 
db.setItem(conversationId, newMessages) 36 | }, 37 | ) 38 | 39 | export const pushMessageByConversationId = action( 40 | conversationMessagesMap, 41 | 'pushMessageByConversationId', 42 | (map, id: string, payload: MessageInstance) => { 43 | const oldMessages = map.get()[id] || [] 44 | if (oldMessages[oldMessages.length - 1]?.id === payload.id) return 45 | map.setKey(id, [...oldMessages, payload]) 46 | db.setItem(id, [...oldMessages, payload]) 47 | updateConversationById(id, { 48 | lastUseTime: Date.now(), 49 | }) 50 | }, 51 | ) 52 | 53 | export const clearMessagesByConversationId = action( 54 | conversationMessagesMap, 55 | 'clearMessagesByConversationId', 56 | (map, id: string, deleteChat?: boolean) => { 57 | map.setKey(id, []) 58 | db.deleteItem(id) 59 | !deleteChat && updateConversationById(id, { 60 | lastUseTime: Date.now(), 61 | }) 62 | }, 63 | ) 64 | 65 | export const deleteMessageByConversationId = action( 66 | conversationMessagesMap, 67 | 'deleteMessageByConversationId', 68 | (map, id: string, payload: MessageInstance) => { 69 | const oldMessages = map.get()[id] || [] 70 | map.setKey(id, [...oldMessages.filter(message => message.id !== payload.id)]) 71 | db.setItem(id, [...oldMessages.filter(message => message.id !== payload.id)]) 72 | updateConversationById(id, { 73 | lastUseTime: Date.now(), 74 | }) 75 | }, 76 | ) 77 | 78 | export const spliceMessageByConversationId = action( 79 | conversationMessagesMap, 80 | 'spliceMessagesByConversationId', 81 | (map, id: string, payload: MessageInstance) => { 82 | const oldMessages = map.get()[id] || [] 83 | const currentIndex = oldMessages.findIndex(message => message.id === payload.id) 84 | map.setKey(id, [...oldMessages.slice(0, currentIndex + 1)]) 85 | db.setItem(id, [...oldMessages.slice(0, currentIndex + 1)]) 86 | updateConversationById(id, { 87 | lastUseTime: Date.now(), 88 | }) 89 | }, 90 | ) 91 | 92 | export const spliceUpdateMessageByConversationId = action( 93 | conversationMessagesMap, 94 | 
'spliceMessagesByConversationId', 95 | (map, id: string, payload: MessageInstance) => { 96 | const oldMessages = map.get()[id] || [] 97 | const currentIndex = oldMessages.findIndex(message => message.id === payload.id) 98 | map.setKey(id, [...oldMessages.slice(0, currentIndex), payload]) 99 | db.setItem(id, [...oldMessages.slice(0, currentIndex), payload]) 100 | updateConversationById(id, { 101 | lastUseTime: Date.now(), 102 | }) 103 | }, 104 | ) 105 | -------------------------------------------------------------------------------- /src/stores/provider.ts: -------------------------------------------------------------------------------- 1 | import providerOpenAI from '@/providers/openai' 2 | import providerAzure from '@/providers/azure' 3 | import providerGoogle from '@/providers/google' 4 | import providerReplicate from '@/providers/replicate' 5 | import { allConversationTypes } from '@/types/conversation' 6 | import type { BotMeta } from '@/types/app' 7 | 8 | export const providerList = [ 9 | providerOpenAI(), 10 | providerAzure(), 11 | providerReplicate(), 12 | providerGoogle(), 13 | ] 14 | 15 | export const providerMetaList = providerList.map(provider => ({ 16 | id: provider.id, 17 | name: provider.name, 18 | icon: provider.icon, 19 | bots: provider.bots, 20 | })) 21 | 22 | export const platformSettingsUIList = providerList.map(provider => ({ 23 | id: provider.id, 24 | icon: provider.icon, 25 | name: provider.name, 26 | settingsUI: provider.globalSettings, 27 | })) 28 | 29 | const botMetaMap = providerMetaList.reduce((acc, provider) => { 30 | provider.bots.forEach((bot) => { 31 | if (allConversationTypes.includes(bot.type)) { 32 | acc[`${provider.id}:${bot.id}`] = { 33 | value: `${provider.id}:${bot.id}`, 34 | type: bot.type, 35 | label: bot.name, 36 | provider: { 37 | id: provider.id, 38 | name: provider.name, 39 | icon: provider.icon, 40 | }, 41 | settingsUI: bot.settings, 42 | } 43 | } 44 | }) 45 | return acc 46 | }, {} as Record) 47 | 48 | export const 
botMetaList = Object.values(botMetaMap) 49 | 50 | export const getProviderById = (id: string) => { 51 | return providerList.find(provider => provider.id === id) 52 | } 53 | 54 | export const getBotMetaById = (id: string) => { 55 | return botMetaMap[id] || null 56 | } 57 | -------------------------------------------------------------------------------- /src/stores/settings.ts: -------------------------------------------------------------------------------- 1 | import { action, atom, map } from 'nanostores' 2 | import { db } from './storage/settings' 3 | import { getProviderById, providerMetaList } from './provider' 4 | import type { SettingsPayload } from '@/types/provider' 5 | import type { GeneralSettings } from '@/types/app' 6 | 7 | export const providerSettingsMap = map>({}) 8 | export const globalAbortController = atom(null) 9 | 10 | export const rebuildSettingsStore = async() => { 11 | const exportData = await db.exportData() 12 | const defaultData = defaultSettingsStore() 13 | const data: Record = {} 14 | providerMetaList.forEach((provider) => { 15 | const modelSetting = getProviderById(provider.id)?.globalSettings?.find(obj => obj.key === 'model') 16 | if (modelSetting?.type === 'select') { 17 | const modelList = modelSetting.options 18 | const isExistModel = modelList.some(model => model.value === exportData?.[provider.id]?.model) 19 | 20 | if (!isExistModel && exportData?.[provider.id]?.model) 21 | exportData[provider.id].model = modelList?.[0]?.value 22 | } 23 | data[provider.id] = { 24 | ...defaultData[provider.id] || {}, 25 | ...exportData?.[provider.id] || {}, 26 | } 27 | }) 28 | data.general = exportData?.general || {} 29 | providerSettingsMap.set(data) 30 | } 31 | 32 | export const getSettingsByProviderId = (id: string) => { 33 | return providerSettingsMap.get()[id] || {} 34 | } 35 | 36 | export const setSettingsByProviderId = action( 37 | providerSettingsMap, 38 | 'setSettingsByProviderId', 39 | (map, id: string, payload: SettingsPayload) => { 40 | 
const mergedSettings = { 41 | ...defaultSettingsByProviderId(id), 42 | ...payload, 43 | } 44 | map.setKey(id, mergedSettings) 45 | db.setItem(id, mergedSettings) 46 | }, 47 | ) 48 | 49 | export const getGeneralSettings = () => { 50 | return (providerSettingsMap.get().general || {}) as unknown as GeneralSettings 51 | } 52 | 53 | export const updateGeneralSettings = action( 54 | providerSettingsMap, 55 | 'setSettingsByProviderId', 56 | (map, payload: Partial) => { 57 | const mergedSettings = { 58 | ...map.get().general || {}, 59 | ...payload, 60 | } 61 | map.setKey('general', mergedSettings) 62 | db.setItem('general', mergedSettings) 63 | }, 64 | ) 65 | 66 | export const defaultSettingsStore = () => { 67 | const defaultSettings: Record = {} 68 | providerMetaList.forEach((provider) => { 69 | defaultSettings[provider.id] = defaultSettingsByProviderId(provider.id) 70 | }) 71 | return defaultSettings 72 | } 73 | 74 | export const defaultSettingsByProviderId = (id: string) => { 75 | const provider = getProviderById(id) 76 | if (!provider || !provider.globalSettings) 77 | return {} 78 | const globalSettings = provider.globalSettings 79 | const defaultSettings: SettingsPayload = {} 80 | globalSettings.forEach((setting) => { 81 | if (setting.default) 82 | defaultSettings[setting.key] = setting.default 83 | }) 84 | return defaultSettings 85 | } 86 | -------------------------------------------------------------------------------- /src/stores/storage/conversation.ts: -------------------------------------------------------------------------------- 1 | import { del, entries, get, set, update } from 'idb-keyval' 2 | import { conversations } from './db' 3 | import type { Conversation } from '@/types/conversation' 4 | 5 | const setItem = async(key: string, item: Conversation) => { 6 | const store = conversations.get() 7 | if (store) 8 | await set(key, item, store) 9 | } 10 | 11 | const getItem = async(key: string) => { 12 | const store = conversations.get() 13 | if (store) 14 | 
return get(key, store) 15 | return null 16 | } 17 | 18 | const updateItem = async(key: string, item: Conversation) => { 19 | const store = conversations.get() 20 | if (store) 21 | await update(key, () => item, store) 22 | } 23 | 24 | const deleteItem = async(key: string) => { 25 | const store = conversations.get() 26 | if (store) 27 | await del(key, store) 28 | } 29 | 30 | const exportData = async() => { 31 | const store = conversations.get() 32 | if (store) { 33 | const entriesData = await entries(store) 34 | return Object.fromEntries(entriesData) as Record 35 | } 36 | return null 37 | } 38 | 39 | export const db = { 40 | setItem, 41 | getItem, 42 | updateItem, 43 | deleteItem, 44 | exportData, 45 | } 46 | -------------------------------------------------------------------------------- /src/stores/storage/db.ts: -------------------------------------------------------------------------------- 1 | import { atom } from 'nanostores' 2 | import { createStore } from 'idb-keyval' 3 | import { rebuildConversationStore } from '../conversation' 4 | import { rebuildMessagesStore } from '../messages' 5 | import { rebuildSettingsStore } from '../settings' 6 | import type { UseStore } from 'idb-keyval' 7 | 8 | export const conversations = atom(null) 9 | export const messages = atom(null) 10 | export const settings = atom(null) 11 | 12 | export const createStores = () => { 13 | conversations.set(createStore('conversations', 'keyval')) 14 | messages.set(createStore('messages', 'keyval')) 15 | settings.set(createStore('settings', 'keyval')) 16 | } 17 | 18 | export const rebuildStores = async() => { 19 | await rebuildConversationStore() 20 | await rebuildMessagesStore() 21 | await rebuildSettingsStore() 22 | } 23 | -------------------------------------------------------------------------------- /src/stores/storage/message.ts: -------------------------------------------------------------------------------- 1 | import { del, entries, get, set, update } from 'idb-keyval' 2 | import { 
messages } from './db' 3 | import type { MessageInstance } from '@/types/message' 4 | 5 | const setItem = async(key: string, item: MessageInstance[]) => { 6 | const store = messages.get() 7 | if (store) 8 | await set(key, item, store) 9 | } 10 | 11 | const getItem = async(key: string) => { 12 | const store = messages.get() 13 | if (store) 14 | return get(key, store) 15 | return null 16 | } 17 | 18 | const updateItem = async(key: string, item: MessageInstance[]) => { 19 | const store = messages.get() 20 | if (store) 21 | await update(key, () => item, store) 22 | } 23 | 24 | const deleteItem = async(key: string) => { 25 | const store = messages.get() 26 | if (store) 27 | await del(key, store) 28 | } 29 | 30 | const exportData = async() => { 31 | const store = messages.get() 32 | if (store) { 33 | const entriesData = await entries(store) 34 | return Object.fromEntries(entriesData) as Record 35 | } 36 | return null 37 | } 38 | 39 | export const db = { 40 | setItem, 41 | getItem, 42 | updateItem, 43 | deleteItem, 44 | exportData, 45 | } 46 | -------------------------------------------------------------------------------- /src/stores/storage/settings.ts: -------------------------------------------------------------------------------- 1 | import { del, entries, get, set, update } from 'idb-keyval' 2 | import { settings } from './db' 3 | import type { SettingsPayload } from '@/types/provider' 4 | 5 | const setItem = async(key: string, item: SettingsPayload) => { 6 | const store = settings.get() 7 | if (store) 8 | await set(key, item, store) 9 | } 10 | 11 | const getItem = async(key: string) => { 12 | const store = settings.get() 13 | if (store) 14 | return get(key, store) 15 | return null 16 | } 17 | 18 | const updateItem = async(key: string, item: SettingsPayload) => { 19 | const store = settings.get() 20 | if (store) 21 | await update(key, () => item, store) 22 | } 23 | 24 | const deleteItem = async(key: string) => { 25 | const store = settings.get() 26 | if (store) 27 | 
await del(key, store) 28 | } 29 | 30 | const exportData = async() => { 31 | const store = settings.get() 32 | if (store) { 33 | const entriesData = await entries(store) 34 | return Object.fromEntries(entriesData) as Record 35 | } 36 | return null 37 | } 38 | 39 | export const db = { 40 | setItem, 41 | getItem, 42 | updateItem, 43 | deleteItem, 44 | exportData, 45 | } 46 | -------------------------------------------------------------------------------- /src/stores/streams.ts: -------------------------------------------------------------------------------- 1 | import { action, map } from 'nanostores' 2 | import type { StreamInstance } from '@/types/message' 3 | 4 | export const streamsMap = map>({}) 5 | export const loadingStateMap = map>({}) 6 | 7 | export const getStreamByConversationId = (id: string) => { 8 | return streamsMap.get()[id] || null 9 | } 10 | 11 | export const setStreamByConversationId = action( 12 | streamsMap, 13 | 'setStreamByConversationId', 14 | (map, id: string, payload: StreamInstance) => { 15 | map.setKey(id, payload) 16 | }, 17 | ) 18 | 19 | export const deleteStreamById = action(streamsMap, 'deleteStreamById', (map, id: string) => { 20 | map.set(Object.fromEntries(Object.entries(map.get()).filter(([key]) => key !== id))) 21 | }) 22 | 23 | export const setLoadingStateByConversationId = action( 24 | loadingStateMap, 25 | 'setLoadingStateByConversationId', 26 | (map, id: string, loading: boolean) => { 27 | map.setKey(id, loading) 28 | }, 29 | ) 30 | -------------------------------------------------------------------------------- /src/stores/tests/conversation.mock.ts: -------------------------------------------------------------------------------- 1 | import type { Conversation } from '@/types/conversation' 2 | 3 | const testMarkdown: Conversation = { 4 | id: 'test_markdown', 5 | providerId: 'provider-openai', 6 | conversationType: 'continuous', 7 | name: 'Test Markdown', 8 | icon: '', 9 | } 10 | 11 | export const conversationMapData = { 12 | 
test_markdown: testMarkdown, 13 | } 14 | -------------------------------------------------------------------------------- /src/stores/tests/message.mock.ts: -------------------------------------------------------------------------------- 1 | import type { MessageInstance } from '@/types/message' 2 | 3 | const testMarkdown: MessageInstance[] = [ 4 | { role: 'user', id: '0', content: 'Headings' }, 5 | { role: 'assistant', id: '0', content: '# Heading level 1\n## Heading level 2\n### Heading level 3\n#### Heading level 4\n##### Heading level 5\n###### Heading level 6\ncontent' }, 6 | { role: 'user', id: '0', content: 'Paragraphs' }, 7 | { role: 'assistant', id: '0', content: 'I really like using Markdown.\n\nI think I\'ll use it to format all of my documents from now on.' }, 8 | { role: 'user', id: '0', content: 'Emphasis' }, 9 | { role: 'assistant', id: '0', content: 'This is *emphasized* text.\nThis is _emphasized_ text.\n\nThis is **strong** text.\nThis is __strong__ text.\n\nThis is ***emphasized and strong*** text.\nThis is ___emphasized and strong___ text.\n\nThis is ~~strikethrough~~ text.' }, 10 | { role: 'assistant', id: '0', content: 'This is **bold** and _italic_ text.\nThis is __bold__ and *italic* text.\n\nThis is ***bold and italic*** text.\nThis is ___bold and italic___ text.' }, 11 | { role: 'user', id: '0', content: 'Blockquotes' }, 12 | { role: 'assistant', id: '0', content: '> Dorothy followed her through many of the beautiful rooms in her castle.\n>\n> The Witch bade her clean the pots and kettles and sweep the floor and keep the fire fed with wood.' }, 13 | { role: 'assistant', id: '0', content: '> Dorothy followed her through many of the beautiful rooms in her castle.\n>\n>> The Witch bade her clean the pots and kettles and sweep the floor and keep the fire fed with wood.' 
}, 14 | { role: 'assistant', id: '0', content: '> #### The quarterly results look great!\n>\n> - Revenue was off the chart.\n> - Profits were higher than ever.\n>\n> *Everything* is going according to **plan**.' }, 15 | { role: 'user', id: '0', content: 'Lists' }, 16 | { role: 'assistant', id: '0', content: '1. First item\n2. Second item\n3. Third item\n - Indented item\n - Indented item\n4. Fourth item' }, 17 | { role: 'user', id: '0', content: 'Code' }, 18 | { role: 'assistant', id: '0', content: 'At the command prompt, type `nano`.' }, 19 | { role: 'assistant', id: '0', content: '```\nfunction test() {\n console.log("notice the blank line before this function?");\n}\n```' }, 20 | { role: 'user', id: '0', content: 'Links' }, 21 | { role: 'assistant', id: '0', content: 'This is [an example](http://example.com/ "Title") inline link.\n\n[This link](http://example.net/) has no title attribute.' }, 22 | { role: 'assistant', id: '0', content: 'I love supporting the **[Site](http://example.com)**.\nThis is the *[Site](http://example.com)*.\nSee the section on [`code`](#code).' 
}, 23 | { role: 'user', id: '0', content: 'Tables' }, 24 | { role: 'assistant', id: '0', content: 'First Header | Second Header\n------------ | -------------\nContent from cell 1 | Content from cell 2\nContent in the first column | Content in the second column' }, 25 | { role: 'assistant', id: '0', content: '| Syntax | Description |\n| ----------- | ----------- |\n| Header | Title |\n| Paragraph | Text |' }, 26 | { role: 'assistant', id: '0', content: '| Syntax | Description | Test Text |\n| :--- | :----: | ---: |\n| Header | Title | Here\'s this |\n| Paragraph | Text | And more |' }, 27 | { role: 'user', id: '0', content: 'GFM Features' }, 28 | { role: 'assistant', id: '0', content: '## Autolink literals\n\nwww.example.com, https://example.com, and contact@example.com.\n\n## Footnote\n\nA note[^1]\n\n[^1]: Big note.\n\n## Strikethrough\n\n~one~ or ~~two~~ tildes.\n\n## Table\n\n| a | b | c | d |\n| - | :- | -: | :-: |\n\n## Tag filter\n\n\n\n## Tasklist\n\n* [ ] to do\n* [x] done' }, 29 | ] 30 | 31 | export const conversationMessagesMapData = { 32 | test_markdown: testMarkdown, 33 | } 34 | -------------------------------------------------------------------------------- /src/stores/ui.ts: -------------------------------------------------------------------------------- 1 | import { atom } from 'nanostores' 2 | import type { ErrorMessage } from '@/types/message' 3 | 4 | export const showConversationSidebar = atom(false) 5 | export const showSettingsSidebar = atom(false) 6 | export const showConversationEditModal = atom(false) 7 | export const showEmojiPickerModal = atom(false) 8 | export const showConfirmModal = atom(false) 9 | export const showShareModal = atom(false) 10 | export const showSelectMessageModal = atom(false) 11 | 12 | export const isSendBoxFocus = atom(false) 13 | export const currentErrorMessage = atom<ErrorMessage | null>(null) 14 | export const emojiPickerCurrentPick = atom<string | undefined>() 15 | 16 | export const scrollController = () => { 17 | 
const elementList = () => Array.from(document.getElementsByClassName('scroll-list')) 18 | return { 19 | scrollToTop: () => elementList().forEach(element => element.scrollTo({ top: 0, behavior: 'smooth' })), 20 | scrollToBottom: () => elementList().forEach(element => element.scrollTo({ top: element.scrollHeight, behavior: 'smooth' })), 21 | instantToBottom: () => elementList().forEach(element => element.scrollTo({ top: element.scrollHeight })), 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /src/types/app.ts: -------------------------------------------------------------------------------- 1 | import type { ConversationType } from './conversation' 2 | import type { SettingsUI } from './provider' 3 | 4 | export interface GeneralSettings { 5 | /** Default request directly, can choose to request via proxy */ 6 | requestWithBackend: boolean 7 | locale: string 8 | } 9 | 10 | export interface BotMeta { 11 | value: string 12 | type: ConversationType 13 | label: string 14 | provider: { 15 | id: string 16 | name: string 17 | icon: string 18 | } 19 | settingsUI: SettingsUI[] 20 | } 21 | -------------------------------------------------------------------------------- /src/types/conversation.ts: -------------------------------------------------------------------------------- 1 | export const allConversationTypes = ['chat_single', 'chat_continuous', 'image_generation'] as const 2 | export type ConversationType = typeof allConversationTypes[number] 3 | 4 | export interface Conversation { 5 | id: string 6 | bot: string 7 | name: string 8 | icon: string 9 | systemInfo?: string 10 | mockMessages?: string 11 | lastUseTime: number 12 | } 13 | -------------------------------------------------------------------------------- /src/types/message.ts: -------------------------------------------------------------------------------- 1 | export interface Message { 2 | role: 'system' | 'user' | 'assistant' 3 | content: string 4 | } 5 | 6 | /** 
Used in app */ 7 | export interface MessageInstance extends Message { 8 | id: string 9 | stream?: boolean 10 | dateTime?: number 11 | isSelected?: boolean 12 | } 13 | 14 | export interface ErrorMessage { 15 | code: string 16 | message: string 17 | } 18 | 19 | export interface StreamInstance { 20 | messageId: string 21 | stream: ReadableStream 22 | } 23 | -------------------------------------------------------------------------------- /src/types/provider.ts: -------------------------------------------------------------------------------- 1 | import type { ConversationType } from './conversation' 2 | import type { Message } from './message' 3 | 4 | export interface Provider { 5 | id: string 6 | /** Icon of provider. Only support `@unocss/preset-icons` class name for now. */ 7 | icon: string 8 | /** Name of provider. */ 9 | name: string 10 | /** Global settings of the provider. */ 11 | globalSettings?: SettingsUI[] 12 | /** Bots list. Each bot provides a list of presets including conversation types, settings items, etc. */ 13 | bots: Bot[] 14 | /** Whether the Provider can accept frontend or backend calls, or both. */ 15 | supportCallMethod?: 'both' | 'frontend' | 'backend' 16 | // Handle a prompt in conversation 17 | handlePrompt: (payload: HandlerPayload, signal?: AbortSignal) => Promise<PromptResponse> 18 | /** Handle a temporary, rapidly prompt, used for interface display like conversation title's generation. 
*/ 19 | handleRapidPrompt?: (prompt: string, globalSettings: SettingsPayload) => Promise<string> 20 | } 21 | 22 | export interface Bot { 23 | id: string 24 | type: ConversationType 25 | name: string 26 | settings: SettingsUI[] 27 | } 28 | 29 | export type SettingsPayload = Record<string, string | number | boolean> 30 | 31 | export interface HandlerPayload { 32 | conversationId: string 33 | conversationType: ConversationType 34 | botId: string 35 | globalSettings: SettingsPayload 36 | botSettings: SettingsPayload 37 | prompt?: string 38 | messages: Message[] 39 | } 40 | 41 | export type PromptResponse = string | ReadableStream | null | undefined 42 | 43 | interface SettingsUIBase { 44 | key: string 45 | name: string 46 | description?: string 47 | default?: string | number | boolean 48 | } 49 | 50 | export interface SelectOptionType { 51 | value: any 52 | label: string 53 | icon?: string 54 | } 55 | 56 | export interface SettingsApiKey extends SettingsUIBase { 57 | type: 'api-key' 58 | } 59 | 60 | export interface SettingsUIInput extends SettingsUIBase { 61 | type: 'input' 62 | } 63 | 64 | export interface SettingsUISelect extends SettingsUIBase { 65 | type: 'select' 66 | options: SelectOptionType[] 67 | } 68 | 69 | export interface SettingsUISlider extends SettingsUIBase { 70 | type: 'slider' 71 | min: number 72 | max: number 73 | step: number 74 | } 75 | 76 | export interface SettingsUIToggle extends SettingsUIBase { 77 | type: 'toggle' 78 | } 79 | 80 | export type SettingsUI = SettingsApiKey | SettingsUIInput | SettingsUISelect | SettingsUISlider | SettingsUIToggle 81 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "astro/tsconfigs/strict", 3 | "compilerOptions": { 4 | "baseUrl": ".", 5 | "jsx": "preserve", 6 | "jsxImportSource": "solid-js", 7 | "types": ["vite-plugin-pwa/info"], 8 | "paths": { 9 | "@/*": 
["src/*"], 10 | }, 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /unocss.config.ts: -------------------------------------------------------------------------------- 1 | import { 2 | defineConfig, 3 | presetAttributify, 4 | presetIcons, 5 | presetTypography, 6 | presetUno, 7 | transformerDirectives, 8 | transformerVariantGroup, 9 | } from 'unocss' 10 | 11 | export default defineConfig({ 12 | presets: [ 13 | presetUno({ 14 | dark: 'class', 15 | }), 16 | presetAttributify(), 17 | presetIcons(), 18 | presetTypography({ 19 | cssExtend: { 20 | '*:first-child': { 21 | 'margin-top': 0, 22 | }, 23 | '*:last-child': { 24 | 'margin-bottom': 0, 25 | }, 26 | 'h1': { 27 | 'font-size': '1.25em', 28 | 'margin': '1rem 0', 29 | }, 30 | 'h2': { 31 | 'font-size': '1.16em', 32 | 'margin': '1rem 0', 33 | }, 34 | 'h3': { 35 | 'font-size': '1.1em', 36 | 'margin': '1rem 0', 37 | }, 38 | 'h4, h5, h6': { 39 | 'font-size': '1em', 40 | 'margin': '1rem 0', 41 | }, 42 | ':not(pre) > code': { 43 | 'font-weight': 400, 44 | 'padding': '0 0.2em', 45 | 'color': 'var(--prism-keyword)', 46 | }, 47 | 'pre': { 48 | 'background-color': 'var(--prism-background) !important', 49 | }, 50 | }, 51 | }), 52 | ], 53 | transformers: [transformerVariantGroup(), transformerDirectives()], 54 | shortcuts: [{ 55 | 'bg-base': 'bg-white dark:bg-[#101010]', 56 | 'bg-base-100': 'bg-light-200/50 dark:bg-[#181818]', 57 | 'bg-base-200': 'bg-light-400 dark:bg-[#202020]', 58 | 'bg-blur': 'bg-light-200/85 dark:bg-[#101010]/85 backdrop-blur-xl backdrop-saturate-150', 59 | 'bg-sidebar': 'bg-white dark:bg-[#101010]', 60 | 'bg-modal': 'bg-white dark:bg-[#181818]', 61 | 'bg-darker': 'bg-black/4 dark:bg-white/4', 62 | 'fg-base': 'text-dark dark:text-[#dadada]', 63 | 'border-base': 'border-light-700 dark:border-[#2a2a2a]', 64 | 'border-b-base': 'border-b-light-700 dark:border-b-[#2a2a2a]', 65 | 'border-base-100': 'border-light-900 dark:border-[#404040]', 66 | 'hv-base': 
'transition-colors cursor-pointer hover:bg-darker', 67 | 'hv-foreground': 'transition-opacity cursor-pointer op-70 hover:op-100', 68 | 'input-base': 'bg-transparent placeholder:op-50 dark:placeholder:op-20 focus:(ring-0 outline-none) resize-none', 69 | 'button': 'mt-4 px-3 py-2 text-xs border border-base rounded-lg hv-base hover:border-base-100', 70 | 'emerald-button': 'button bg-emerald-600 !hover:bg-emerald-700 text-white', 71 | 'emerald-light-button': 'button text-emerald-400 bg-emerald/12 !border-emerald-400 !hover-bg-emerald-600 !hover-border-emerald-600 !hover-text-light-700', 72 | 'max-w-base': 'max-w-3xl mx-auto', 73 | 'text-error': 'text-red-700 dark:text-red-400/80', 74 | 'border-error': 'border border-red-700 dark:border-red-400/80', 75 | 'text-info': 'text-gray-400 dark:text-gray-200', 76 | 'menu-icon': 'cursor-pointer text-base fg-base hover-text-emerald-600', 77 | 'fc': 'flex justify-center', 78 | 'fi': 'flex items-center', 79 | 'fcc': 'fc items-center', 80 | 'fb': 'flex justify-between', 81 | 'code-copy-btn': 'absolute z-3 op-90 w-8 h-8 p-1 top-12px right-12px bg-light-300 dark:bg-dark-300 hover-text-emerald-600 fcc border rounded-md b-transparent cursor-pointer', 82 | 'code-copy-tips': 'absolute z-1 op-0 px-2 py-1 -top-8 bg-dark-600 dark:bg-dark fcc box-border rounded-md text-xs c-white transition-opacity duration-300 whitespace-nowrap', 83 | }], 84 | preflights: [{ 85 | layer: 'base', 86 | getCSS: () => ` 87 | :root { 88 | --c-scroll: #d9d9d9; 89 | --c-scroll-hover: #bbbbbb; 90 | --c-shadow: #00000008; 91 | } 92 | 93 | html,body { 94 | height: 100%; 95 | } 96 | 97 | html.dark { 98 | --c-scroll: #333333; 99 | --c-scroll-hover: #555555; 100 | --c-shadow: #ffffff08; 101 | } 102 | 103 | ::-webkit-scrollbar { 104 | width: 4px; 105 | height: 4px; 106 | } 107 | 108 | ::-webkit-scrollbar-thumb { 109 | background-color: var(--c-scroll); 110 | } 111 | 112 | ::-webkit-scrollbar-thumb:hover { 113 | background-color: var(--c-scroll-hover); 114 | } 115 | 116 | 
::selection { 117 | background: rgba(0, 0, 0, 0.12); 118 | } 119 | 120 | .dark ::selection { 121 | background: rgba(255, 255, 255, 0.12); 122 | } 123 | 124 | button,select,input,option { 125 | outline: none; 126 | -webkit-appearance: none 127 | } 128 | img { display: initial; } 129 | .clipped { 130 | clip-path: inset(0 100% 0 0); 131 | } 132 | `, 133 | }], 134 | }) 135 | -------------------------------------------------------------------------------- /vercel.json: -------------------------------------------------------------------------------- 1 | { 2 | "buildCommand": "OUTPUT=vercel astro build", 3 | "github": { 4 | "silent": true 5 | } 6 | } --------------------------------------------------------------------------------