├── composables ├── useAtModel.ts ├── useMediaBreakpoints.ts ├── utils.ts ├── fetchWithAuth.ts ├── store.ts ├── useChatWorker.ts ├── helpers.ts ├── useTools.ts ├── useMenus.ts ├── useOpenAIModels.ts ├── useInstructionsCache.ts ├── useCreateChatSession.ts ├── useFeatures.ts └── useKatexClient.ts ├── .dockerignore ├── server ├── models │ ├── openai │ │ ├── tools │ │ │ ├── index.ts │ │ │ ├── custom.ts │ │ │ └── tests │ │ │ │ ├── custom.int.test.ts │ │ │ │ └── dalle.int.test.ts │ │ ├── tests │ │ │ ├── data │ │ │ │ ├── hotdog.jpg │ │ │ │ └── screenshot.jpg │ │ │ ├── prompts.int.test.ts │ │ │ ├── azure │ │ │ │ ├── chat_models.standard.test.ts │ │ │ │ ├── chat_models.standard.int.test.ts │ │ │ │ └── embeddings.int.test.ts │ │ │ ├── chat_models.standard.test.ts │ │ │ ├── chat_models_responses.standard.int.test.ts │ │ │ ├── chat_models_responses.standard.test.ts │ │ │ └── chat_models-vision.int.test.ts │ │ ├── index.ts │ │ └── utils │ │ │ ├── errors.ts │ │ │ ├── prompts.ts │ │ │ └── headers.ts │ └── genai │ │ ├── types.ts │ │ └── output_parsers.ts ├── tsconfig.json ├── api │ ├── auth │ │ ├── acl-status.get.ts │ │ ├── logout.post.ts │ │ ├── user.get.ts │ │ ├── google │ │ │ └── login.get.ts │ │ └── signup.post.ts │ ├── models │ │ ├── index.delete.ts │ │ └── pull │ │ │ └── index.post.ts │ ├── instruction │ │ ├── [id].delete.ts │ │ ├── index.get.ts │ │ ├── index.post.ts │ │ └── [id].put.ts │ ├── mcp-servers │ │ ├── index.get.ts │ │ ├── index.post.ts │ │ ├── [id].get.ts │ │ ├── [id].delete.ts │ │ ├── [id] │ │ │ └── toggle.post.ts │ │ └── [id].put.ts │ ├── knowledgebases │ │ ├── [id].get.ts │ │ ├── index.get.ts │ │ └── [id].delete.ts │ ├── audio │ │ └── session.post.ts │ ├── sessions │ │ └── [id] │ │ │ └── title.post.ts │ └── proxy.ts ├── utils │ ├── mcpFeature.ts │ ├── realtimeChat.ts │ ├── modelsManagement.ts │ ├── prisma.ts │ ├── proxyToken.ts │ ├── index.ts │ ├── vectorstores.ts │ ├── instructions.ts │ ├── knowledgeBase.ts │ ├── ollama.ts │ └── http.ts ├── middleware │ ├── 
proxyAuth.ts │ ├── auth.ts │ └── keys.ts ├── store │ └── redis.ts ├── types │ └── index.ts ├── coref │ └── index.ts └── retriever │ └── index.ts ├── config ├── nuxtjsI18n.ts ├── index.ts ├── i18n.ts └── models.ts ├── i18n.config.ts ├── public ├── favicon.ico ├── logo.svg └── worklets │ └── audio-processing.js ├── types ├── auth.d.ts ├── context.d.ts ├── markdown-it.d.ts ├── svg.d.ts ├── helper.d.ts ├── markdown-it-katex.d.ts ├── katex.d.ts ├── chat.d.ts └── multimodal-live-types.ts ├── .vscode ├── extensions.json └── settings.json ├── prisma └── migrations │ ├── 20250814165319_remove_instruction_name_unique_constraint │ └── migration.sql │ └── migration_lock.toml ├── utils ├── auth.ts ├── multimodal-live.ts ├── audio-worklets │ └── audio-processing.ts ├── artifactRenderers.ts └── settings.ts ├── plugins ├── analytics.client.ts ├── instructions.client.ts └── features.server.ts ├── app.config.ts ├── components ├── Heading.vue ├── IconSpinner.vue ├── IconMicrophone.vue ├── IconStop.vue ├── Source.vue ├── ArtifactButton.vue ├── MessageImages.vue ├── ChatConfigInfo.vue ├── settings │ ├── SettingsRealtimeChat.vue │ ├── SettingsCard.vue │ ├── CreateCustomServer.vue │ ├── LanguageSelectMenu.vue │ └── SettingsChatSettings.vue ├── Gemini.vue ├── MessageHeader.vue ├── MarkdownPreview.vue ├── TheLogo.vue ├── MessageToggleCollapseButton.vue ├── MobileMenu.vue ├── Sources.vue ├── ColorMode.vue ├── FileSelector.vue ├── ChatMessageActionMore.vue ├── FileButton.vue ├── MessageActionBar.vue ├── ModelsSelectMenu.vue ├── ComponentPreview.vue ├── ToolCallDisplay.vue ├── QuickChatButton.vue ├── Auth.vue ├── ChatSessionListActionMore.vue ├── ModelMentionText.vue └── MermaidRenderer.vue ├── pages ├── models │ └── index.vue ├── settings │ └── index.vue ├── logout │ └── index.vue ├── realtime │ └── index.vue ├── agents │ └── index.vue └── chat │ └── index.vue ├── tsconfig.json ├── middleware ├── defaultPage.global.ts ├── realtime-chat.ts ├── models-management.ts └── knowledge-base.ts ├── 
tailwind.config.ts ├── assets ├── svg │ ├── anthropic.svg │ ├── siliconcloud.svg │ ├── groq.svg │ ├── azure.svg │ ├── openai.svg │ ├── deepseek.svg │ └── gemini.svg ├── katex.scss └── index.scss ├── .editorconfig ├── global.d.ts ├── app.vue ├── Dockerfile ├── .github └── workflows │ ├── claude.yml │ └── docker-image.yaml ├── blogs ├── 2025-09-18-smart-quick-chat-dialog-positioning-zh.md ├── 20250819-chatollama-deepagents-integration_zh.md └── 20250825-langchain-upgrade-chat-fix_zh.md ├── docs └── guide │ └── README.md ├── docker-compose_arm.yaml ├── .env.example ├── docker-compose.yaml ├── LICENSE └── docker-compose_gpu.yaml /composables/useAtModel.ts: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | docker-compose.yaml 2 | .env* 3 | node_modules 4 | -------------------------------------------------------------------------------- /server/models/openai/tools/index.ts: -------------------------------------------------------------------------------- 1 | export * from "./dalle.js"; 2 | -------------------------------------------------------------------------------- /config/nuxtjsI18n.ts: -------------------------------------------------------------------------------- 1 | export default defineI18nConfig(() => ({ 2 | })) 3 | -------------------------------------------------------------------------------- /i18n.config.ts: -------------------------------------------------------------------------------- 1 | export default { 2 | fallbackLocale: 'en', 3 | } 4 | -------------------------------------------------------------------------------- /server/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../.nuxt/tsconfig.server.json" 3 | } 4 | 
-------------------------------------------------------------------------------- /public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sugarforever/chat-ollama/main/public/favicon.ico -------------------------------------------------------------------------------- /types/auth.d.ts: -------------------------------------------------------------------------------- 1 | declare module '#auth' { 2 | interface SessionData { 3 | role: 'superadmin' | 'admin' | 'user' 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | "recommendations": [ 3 | "lokalise.i18n-ally", 4 | "vue.volar", 5 | "usernamehw.errorlens" 6 | ] 7 | } -------------------------------------------------------------------------------- /prisma/migrations/20250814165319_remove_instruction_name_unique_constraint/migration.sql: -------------------------------------------------------------------------------- 1 | -- DropIndex 2 | DROP INDEX "Instruction_name_key"; 3 | -------------------------------------------------------------------------------- /server/models/openai/tests/data/hotdog.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sugarforever/chat-ollama/main/server/models/openai/tests/data/hotdog.jpg -------------------------------------------------------------------------------- /server/models/openai/tests/data/screenshot.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sugarforever/chat-ollama/main/server/models/openai/tests/data/screenshot.jpg -------------------------------------------------------------------------------- /utils/auth.ts: -------------------------------------------------------------------------------- 1 | const 
{ token } = useAuth() 2 | 3 | export const authHeaders = computed(() => { 4 | return { 5 | 'Authorization': token.value 6 | } 7 | }) 8 | -------------------------------------------------------------------------------- /prisma/migrations/migration_lock.toml: -------------------------------------------------------------------------------- 1 | # Please do not edit this file manually 2 | # It should be added in your version-control system (e.g., Git) 3 | provider = "postgresql" -------------------------------------------------------------------------------- /types/context.d.ts: -------------------------------------------------------------------------------- 1 | import type { ContextKeys } from '~/server/middleware/keys' 2 | 3 | declare module 'h3' { 4 | interface H3EventContext { 5 | keys: ContextKeys 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /config/index.ts: -------------------------------------------------------------------------------- 1 | export const APP_NAME = 'ChatOllama' 2 | 3 | export const DEFAULT_ATTACHED_MESSAGES_COUNT = 10 4 | 5 | export const MODEL_FAMILY_SEPARATOR = '/' 6 | 7 | export * from './models' 8 | -------------------------------------------------------------------------------- /plugins/analytics.client.ts: -------------------------------------------------------------------------------- 1 | import { inject } from '@vercel/analytics' 2 | 3 | export default defineNuxtPlugin(() => { 4 | if (!process.env.DISABLE_VERCEL_ANALYTICS) { 5 | inject() 6 | } 7 | }) -------------------------------------------------------------------------------- /server/api/auth/acl-status.get.ts: -------------------------------------------------------------------------------- 1 | import { isAclEnabled } from '~/server/utils/auth' 2 | 3 | export default defineEventHandler(() => { 4 | return { 5 | aclEnabled: isAclEnabled() 6 | } 7 | }) 8 | -------------------------------------------------------------------------------- 
/app.config.ts: -------------------------------------------------------------------------------- 1 | export default defineAppConfig({ 2 | ui: { 3 | notifications: { 4 | position: 'top-0 bottom-auto' 5 | }, 6 | primary: 'indigo', 7 | gray: 'zinc' 8 | } 9 | }) 10 | -------------------------------------------------------------------------------- /components/Heading.vue: -------------------------------------------------------------------------------- 1 | 6 | 9 | -------------------------------------------------------------------------------- /types/markdown-it.d.ts: -------------------------------------------------------------------------------- 1 | declare module 'markdown-it-abbr'; 2 | declare module 'markdown-it-footnote'; 3 | declare module 'markdown-it-sub'; 4 | declare module 'markdown-it-sup'; 5 | declare module 'markdown-it-task-lists'; 6 | -------------------------------------------------------------------------------- /server/utils/mcpFeature.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Server-side utility to check if MCP feature is enabled 3 | */ 4 | export function isMcpEnabled(): boolean { 5 | const config = useRuntimeConfig() 6 | return config.mcpEnabled 7 | } 8 | -------------------------------------------------------------------------------- /pages/models/index.vue: -------------------------------------------------------------------------------- 1 | 6 | 7 | 12 | -------------------------------------------------------------------------------- /server/utils/realtimeChat.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Server-side utility to check if realtime chat feature is enabled 3 | */ 4 | export function isRealtimeChatEnabled(): boolean { 5 | const config = useRuntimeConfig() 6 | return config.realtimeChatEnabled 7 | } 8 | -------------------------------------------------------------------------------- /server/utils/modelsManagement.ts: 
-------------------------------------------------------------------------------- 1 | /** 2 | * Server-side utility to check if models management feature is enabled 3 | */ 4 | export function isModelsManagementEnabled(): boolean { 5 | const config = useRuntimeConfig() 6 | return config.modelsManagementEnabled 7 | } 8 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | // https://nuxt.com/docs/guide/concepts/typescript 3 | "extends": "./.nuxt/tsconfig.json", 4 | "compilerOptions": { 5 | "moduleResolution": "node", 6 | "types": [ 7 | "vue" 8 | ], 9 | // ... other options 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /middleware/defaultPage.global.ts: -------------------------------------------------------------------------------- 1 | 2 | export default defineNuxtRouteMiddleware((to, from) => { 3 | const defaultPage = useCookie('default-page', { path: '/', default: () => DEFAULT_PAGE_LINK }) 4 | 5 | if (to.path === '/') { 6 | return navigateTo(defaultPage.value) 7 | } 8 | }) 9 | -------------------------------------------------------------------------------- /composables/useMediaBreakpoints.ts: -------------------------------------------------------------------------------- 1 | import { breakpointsTailwind, useBreakpoints } from '@vueuse/core' 2 | 3 | export function useMediaBreakpoints() { 4 | const breakpoints = useBreakpoints(breakpointsTailwind) 5 | 6 | const isMobile = computed(() => breakpoints.smaller('md').value) 7 | 8 | return { isMobile } 9 | } 10 | -------------------------------------------------------------------------------- /middleware/realtime-chat.ts: -------------------------------------------------------------------------------- 1 | export default defineNuxtRouteMiddleware((to) => { 2 | const features = useFeatures() 3 | 4 | // Check if realtime chat feature is 
enabled 5 | if (!features.realtimeChatEnabled) { 6 | // Redirect to home page if realtime chat feature is disabled 7 | return navigateTo('/welcome') 8 | } 9 | }) 10 | -------------------------------------------------------------------------------- /components/IconSpinner.vue: -------------------------------------------------------------------------------- 1 | 6 | -------------------------------------------------------------------------------- /middleware/models-management.ts: -------------------------------------------------------------------------------- 1 | export default defineNuxtRouteMiddleware((to) => { 2 | const features = useFeatures() 3 | 4 | // Check if models management feature is enabled 5 | if (!features.modelsManagementEnabled) { 6 | // Redirect to home page if models management feature is disabled 7 | return navigateTo('/welcome') 8 | } 9 | }) 10 | -------------------------------------------------------------------------------- /components/IconMicrophone.vue: -------------------------------------------------------------------------------- 1 | 6 | -------------------------------------------------------------------------------- /tailwind.config.ts: -------------------------------------------------------------------------------- 1 | import type { Config } from 'tailwindcss' 2 | 3 | export default >{ 4 | theme: { 5 | extend: { 6 | aspectRatio: { 7 | auto: 'auto', 8 | square: '1 / 1', 9 | video: '16 / 9' 10 | }, 11 | colors: { 12 | muted: 'rgba(140, 140, 140, 0.9)', 13 | } 14 | }, 15 | }, 16 | } 17 | -------------------------------------------------------------------------------- /assets/svg/anthropic.svg: -------------------------------------------------------------------------------- 1 | Anthropic -------------------------------------------------------------------------------- /types/svg.d.ts: -------------------------------------------------------------------------------- 1 | declare module '*.svg' { 2 | import type { DefineComponent } from 'vue' 3 | 
const component: DefineComponent 4 | export default component 5 | } 6 | 7 | declare module '*.svg?component' { 8 | import type { DefineComponent } from 'vue' 9 | const component: DefineComponent 10 | export default component 11 | } 12 | 13 | 14 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /server/api/auth/logout.post.ts: -------------------------------------------------------------------------------- 1 | import { eventHandler, deleteCookie } from 'h3' 2 | 3 | export default eventHandler((event) => { 4 | // Clear the auth-token cookie 5 | deleteCookie(event, 'auth-token', { 6 | httpOnly: true, 7 | secure: process.env.NODE_ENV === 'production', 8 | sameSite: 'lax' 9 | }) 10 | 11 | return { status: 'OK', message: 'Successfully logged out' } 12 | }) 13 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | charset = utf-8 5 | indent_style = space 6 | indent_size = 2 7 | end_of_line = lf 8 | insert_final_newline = true 9 | trim_trailing_whitespace = true 10 | 11 | [*.md] 12 | trim_trailing_whitespace = false 13 | 14 | [*.{yml,yaml}] 15 | indent_size = 2 16 | 17 | [*.{vue,js,ts}] 18 | indent_style = space 19 | indent_size = 2 20 | 21 | [docker-compose.yml] 22 | indent_size = 4 23 | -------------------------------------------------------------------------------- /components/IconStop.vue: -------------------------------------------------------------------------------- 1 | 7 | -------------------------------------------------------------------------------- /server/api/auth/user.get.ts: -------------------------------------------------------------------------------- 1 | import { createError, eventHandler, getRequestHeader, H3Event } from 'h3' 2 | import { requireAuth } from '../../utils/auth' 3 | 4 | export default eventHandler((event) => { 5 | const user = 
requireAuth(event) 6 | 7 | // Return user data in the format expected by @sidebase/nuxt-auth 8 | return { 9 | id: user.id, 10 | name: user.name, 11 | email: user.email, 12 | role: user.role 13 | } 14 | }) 15 | -------------------------------------------------------------------------------- /server/middleware/proxyAuth.ts: -------------------------------------------------------------------------------- 1 | import { proxyTokenValidate } from '~/server/utils/proxyToken' 2 | 3 | export default defineEventHandler(event => { 4 | const uri = new URL(event.path, 'http://localhost') 5 | 6 | if (/^\/api\/proxy\/?$/.test(uri.pathname)) { 7 | const query = getQuery<{ token: string }>(event) 8 | if (!proxyTokenValidate(query.token)) { 9 | setResponseStatus(event, 400) 10 | return 'Illegal request' 11 | } 12 | } 13 | }) 14 | -------------------------------------------------------------------------------- /composables/utils.ts: -------------------------------------------------------------------------------- 1 | export function noop() { 2 | // do nothing 3 | } 4 | 5 | export function urlGlob2Regexp(pattern: string) { 6 | const s = pattern.replace(/([.?+^$[\]\\(){}|\/-])/g, "\\$1").replace(/(? 
{ 4 | const options: RedisOptions = { 5 | host: process.env.REDIS_HOST || 'localhost', 6 | port: Number(process.env.REDIS_PORT) || 6379, 7 | username: process.env.REDIS_USERNAME || undefined, 8 | password: process.env.REDIS_PASSWORD || undefined 9 | }; 10 | console.log("Redis client options: ", options); 11 | return new Redis(options); 12 | } 13 | -------------------------------------------------------------------------------- /plugins/instructions.client.ts: -------------------------------------------------------------------------------- 1 | export default defineNuxtPlugin(() => { 2 | // Preload instructions when the app starts 3 | if (process.client) { 4 | // Use nextTick to ensure the app is fully initialized 5 | nextTick(async () => { 6 | try { 7 | const { preloadInstructions } = useInstructionsCache() 8 | await preloadInstructions() 9 | } catch (error) { 10 | console.error('Failed to preload instructions on app start:', error) 11 | } 12 | }) 13 | } 14 | }) -------------------------------------------------------------------------------- /server/middleware/auth.ts: -------------------------------------------------------------------------------- 1 | import { H3Event } from 'h3' 2 | import { parseAuthUser } from '../utils/auth' 3 | 4 | export default defineEventHandler((event) => { 5 | const uri = new URL(event.path, 'http://localhost') 6 | const user = parseAuthUser(event) 7 | event.context.user = user 8 | 9 | const pathname = uri.pathname.replace(/\/+$/, '') 10 | if (pathname.startsWith('/api') && pathname !== '/api/auth/user') { 11 | console.log(`URL: ${pathname} User: ${JSON.stringify(user)}`) 12 | } 13 | }) 14 | -------------------------------------------------------------------------------- /types/helper.d.ts: -------------------------------------------------------------------------------- 1 | export type TransformTypes = { 2 | [P in T]: P extends `${infer A}.${infer B}` 3 | ? B extends 'proxy' ? 
boolean : string 4 | : string 5 | } 6 | 7 | export type PickupPathKey< 8 | T extends Record, 9 | K extends (string | null) = null, 10 | M = keyof T 11 | > = M extends string 12 | ? (T[M] extends Record 13 | ? PickupPathKey 14 | : (K extends null ? M : `${K}.${M}`)) 15 | : K 16 | -------------------------------------------------------------------------------- /assets/svg/siliconcloud.svg: -------------------------------------------------------------------------------- 1 | SiliconCloud -------------------------------------------------------------------------------- /components/Source.vue: -------------------------------------------------------------------------------- 1 | 10 | 11 | 18 | -------------------------------------------------------------------------------- /global.d.ts: -------------------------------------------------------------------------------- 1 | import type { DefineLocaleMessage } from 'vue-i18n' 2 | import type enUs from './locales/en-US.json' 3 | 4 | declare module 'vue-i18n' { 5 | type En = typeof enUs 6 | 7 | export interface DefineLocaleMessage extends En { } 8 | } 9 | 10 | declare module '@nuxt/schema' { 11 | interface PublicRuntimeConfig { 12 | kb: { 13 | create: { 14 | role: string 15 | } 16 | }, 17 | modelProxyEnabled: boolean 18 | chatMaxAttachedMessages: number 19 | appName: string 20 | } 21 | } 22 | 23 | export { } 24 | -------------------------------------------------------------------------------- /assets/svg/groq.svg: -------------------------------------------------------------------------------- 1 | Groq -------------------------------------------------------------------------------- /components/ArtifactButton.vue: -------------------------------------------------------------------------------- 1 | 6 | 7 | -------------------------------------------------------------------------------- /server/models/openai/index.ts: -------------------------------------------------------------------------------- 1 | export { OpenAI as OpenAIClient, 
type ClientOptions, toFile } from "openai"; 2 | export * from "./chat_models.js"; 3 | export * from "./azure/chat_models.js"; 4 | export * from "./llms.js"; 5 | export * from "./azure/llms.js"; 6 | export * from "./azure/embeddings.js"; 7 | export * from "./embeddings.js"; 8 | export * from "./types.js"; 9 | export * from "./utils/openai.js"; 10 | export * from "./utils/azure.js"; 11 | export * from "./tools/index.js"; 12 | export { convertPromptToOpenAI } from "./utils/prompts.js"; 13 | export { customTool } from "./tools/custom.js"; 14 | -------------------------------------------------------------------------------- /app.vue: -------------------------------------------------------------------------------- 1 | 15 | 16 | 25 | -------------------------------------------------------------------------------- /components/MessageImages.vue: -------------------------------------------------------------------------------- 1 | 8 | 9 | -------------------------------------------------------------------------------- /server/api/models/index.delete.ts: -------------------------------------------------------------------------------- 1 | import { getOllama } from '@/server/utils/ollama' 2 | 3 | export default defineEventHandler(async (event) => { 4 | // Check if models management feature is enabled 5 | if (!isModelsManagementEnabled()) { 6 | setResponseStatus(event, 403, 'Models management feature is disabled') 7 | return { error: 'Models management feature is disabled' } 8 | } 9 | 10 | const { model } = await readBody(event) 11 | 12 | const ollama = await getOllama(event, true) 13 | if (!ollama) return 14 | 15 | const response = await ollama.delete({ model }) 16 | return response 17 | }) 18 | -------------------------------------------------------------------------------- /components/ChatConfigInfo.vue: -------------------------------------------------------------------------------- 1 | 8 | 9 | 20 | 
-------------------------------------------------------------------------------- /server/utils/prisma.ts: -------------------------------------------------------------------------------- 1 | import { PrismaClient } from '@prisma/client' 2 | 3 | const prismaClientSingleton = () => { 4 | return new PrismaClient({ 5 | log: process.env.NODE_ENV === 'development' ? ['query', 'error', 'warn'] : ['error'], 6 | }) 7 | } 8 | 9 | type PrismaClientSingleton = ReturnType 10 | 11 | const globalForPrisma = globalThis as unknown as { 12 | prisma: PrismaClientSingleton | undefined 13 | } 14 | 15 | const prisma = globalForPrisma.prisma ?? prismaClientSingleton() 16 | 17 | export default prisma 18 | 19 | if (process.env.NODE_ENV !== 'production') globalForPrisma.prisma = prisma 20 | -------------------------------------------------------------------------------- /pages/settings/index.vue: -------------------------------------------------------------------------------- 1 | 5 | 6 | 25 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ARG NODE_VERSION=20.13.1 2 | 3 | FROM node:${NODE_VERSION}-slim 4 | 5 | RUN apt-get update && apt-get install -y openssl iputils-ping net-tools python3 make g++ sqlite3 postgresql-client 6 | 7 | WORKDIR /app 8 | 9 | # DATABASE_URL environment variable takes precedence over .env file configuration 10 | ENV DATABASE_URL=file:/app/sqlite/chatollama.sqlite 11 | 12 | COPY pnpm-lock.yaml package.json ./ 13 | RUN npm install -g pnpm 14 | RUN pnpm i 15 | 16 | COPY . . 
17 | 18 | # Make scripts executable 19 | RUN chmod +x /app/scripts/*.sh 20 | 21 | RUN pnpm run prisma-generate 22 | 23 | RUN pnpm run build 24 | 25 | EXPOSE 3000 26 | 27 | CMD ["sh", "/app/scripts/startup.sh"] 28 | -------------------------------------------------------------------------------- /composables/fetchWithAuth.ts: -------------------------------------------------------------------------------- 1 | export const fetchWithAuth: typeof fetch = (request, opts?) => { 2 | const { token } = useAuth() 3 | return fetch(request, { 4 | ...opts, 5 | headers: { 6 | ...opts?.headers, 7 | Authorization: token.value!, 8 | } 9 | }) 10 | } 11 | 12 | function _fetchWithAuth(request: any, opts?: any) { 13 | const { token } = useAuth() 14 | return $fetch(request, { 15 | ...opts, 16 | headers: { 17 | ...opts?.headers, 18 | Authorization: token.value!, 19 | } 20 | }) 21 | } 22 | 23 | _fetchWithAuth.raw = $fetch.raw 24 | _fetchWithAuth.create = $fetch.create 25 | 26 | export const $fetchWithAuth = _fetchWithAuth as typeof $fetch 27 | -------------------------------------------------------------------------------- /middleware/knowledge-base.ts: -------------------------------------------------------------------------------- 1 | export default defineNuxtRouteMiddleware((to) => { 2 | const features = useFeatures() 3 | 4 | console.log(`[${process.server ? 'SERVER' : 'CLIENT'}] Knowledge base middleware - knowledgeBaseEnabled:`, features.knowledgeBaseEnabled) 5 | 6 | // Check if knowledge base feature is enabled 7 | if (!features.knowledgeBaseEnabled) { 8 | console.log(`[${process.server ? 'SERVER' : 'CLIENT'}] Redirecting to /welcome because knowledgeBaseEnabled is false`) 9 | // Redirect to home page if knowledge base feature is disabled 10 | return navigateTo('/welcome') 11 | } 12 | 13 | console.log(`[${process.server ? 
'SERVER' : 'CLIENT'}] Knowledge base access allowed`) 14 | }) 15 | -------------------------------------------------------------------------------- /server/utils/proxyToken.ts: -------------------------------------------------------------------------------- 1 | import { createHash } from 'node:crypto' 2 | 3 | const radomKey = Math.random().toString(36).slice(-4) + Date.now().toString(36).slice(-4) 4 | 5 | export function proxyTokenGenerate() { 6 | const key = Date.now().toString() 7 | const token = `${key}-${createHash('sha256').update(key + radomKey).digest('base64')}` 8 | return encodeURIComponent(token) 9 | } 10 | 11 | export function proxyTokenValidate(token: string) { 12 | if (!token) return false 13 | 14 | const [key, hash] = decodeURIComponent(token).split('-') 15 | 16 | if (!key || !hash) return false 17 | 18 | const expectedHash = createHash('sha256').update(key + radomKey).digest('base64') 19 | return expectedHash === hash 20 | } 21 | -------------------------------------------------------------------------------- /components/settings/SettingsRealtimeChat.vue: -------------------------------------------------------------------------------- 1 | 11 | 12 | 21 | -------------------------------------------------------------------------------- /components/Gemini.vue: -------------------------------------------------------------------------------- 1 | 3 | 4 | 25 | -------------------------------------------------------------------------------- /config/i18n.ts: -------------------------------------------------------------------------------- 1 | interface LanguageItem { 2 | // Used for saving, it should be able to automatically match the browser's default language in the future 3 | code: string, 4 | // Used to locate files 5 | file: string, 6 | // Display in language switching UI 7 | name: string, 8 | } 9 | export const LanguageList: LanguageItem[] = [ 10 | { code: "en-US", file: "en-US.json", name: "English" }, 11 | { code: "zh-CN", file: "zh-CN.json", name: 
"简体中文" }, 12 | ] 13 | export function findLanguageItemByLanguageName(Code: string): LanguageItem { 14 | for (const languageItem of LanguageList) { 15 | if (languageItem.code == Code) return languageItem 16 | } 17 | return { code: Code, file: "Language file not found", name: `This language '${Code}' is not supported` } 18 | } 19 | -------------------------------------------------------------------------------- /types/markdown-it-katex.d.ts: -------------------------------------------------------------------------------- 1 | import MarkdownIt from 'markdown-it' 2 | 3 | declare module 'markdown-it-katex' { 4 | interface KatexOptions { 5 | throwOnError?: boolean 6 | errorColor?: string 7 | macros?: Record 8 | colorIsTextColor?: boolean 9 | maxSize?: number 10 | maxExpand?: number 11 | displayMode?: boolean 12 | output?: 'html' | 'mathml' | 'htmlAndMathml' 13 | leqno?: boolean 14 | fleqn?: boolean 15 | trust?: boolean | ((context: { command: string, url: string, protocol: string }) => boolean) 16 | strict?: boolean | 'ignore' | 'warn' | 'error' 17 | } 18 | 19 | const plugin: MarkdownIt.PluginWithOptions 20 | export default plugin 21 | } 22 | -------------------------------------------------------------------------------- /server/utils/index.ts: -------------------------------------------------------------------------------- 1 | import type { H3Event, EventHandlerRequest } from 'h3' 2 | 3 | export const setEventStreamResponse = (event: H3Event) => { 4 | setResponseHeader(event, 'Content-Type', 'text/event-stream'); 5 | setResponseHeader(event, 'Cache-Control', 'no-cache'); 6 | setResponseHeader(event, 'Connection', 'keep-alive'); 7 | }; 8 | 9 | export async function FetchWithAuth(this: { username: string | null, password: string | null }, input: RequestInfo | URL, init?: RequestInit) { 10 | const headers = new Headers(init?.headers); 11 | const authorization = btoa(`${this?.username}:${this?.password}`); 12 | console.log(`Authorization: ${authorization}`); 13 | 
headers.set('Authorization', `Basic ${authorization}`); 14 | return fetch(input, { ...init, headers }); 15 | } 16 | -------------------------------------------------------------------------------- /server/models/openai/utils/errors.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable @typescript-eslint/no-explicit-any */ 2 | /* eslint-disable no-param-reassign */ 3 | 4 | // Duplicate of core 5 | // TODO: Remove once we stop supporting 0.2.x core versions 6 | export type LangChainErrorCodes = 7 | | "INVALID_PROMPT_INPUT" 8 | | "INVALID_TOOL_RESULTS" 9 | | "MESSAGE_COERCION_FAILURE" 10 | | "MODEL_AUTHENTICATION" 11 | | "MODEL_NOT_FOUND" 12 | | "MODEL_RATE_LIMIT" 13 | | "OUTPUT_PARSING_FAILURE"; 14 | 15 | export function addLangChainErrorFields( 16 | error: any, 17 | lc_error_code: LangChainErrorCodes 18 | ) { 19 | (error as any).lc_error_code = lc_error_code; 20 | error.message = `${error.message}\n\nTroubleshooting URL: https://js.langchain.com/docs/troubleshooting/errors/${lc_error_code}/\n`; 21 | return error; 22 | } 23 | -------------------------------------------------------------------------------- /server/api/instruction/[id].delete.ts: -------------------------------------------------------------------------------- 1 | import prisma from "@/server/utils/prisma" 2 | import { requireInstruction, requireInstructionOwner } from "@/server/utils/instructions" 3 | 4 | const deleteInstructions = async (id: string) => { 5 | try { 6 | return await prisma.instruction.delete({ 7 | where: { id: parseInt(id) }, 8 | }) 9 | } catch (error) { 10 | console.error("Error delete instructions: ", error) 11 | return null 12 | } 13 | } 14 | 15 | export default defineEventHandler(async (event) => { 16 | const id = event?.context?.params?.id 17 | if (!id) return 18 | 19 | // Check if instruction exists and user has permission 20 | const existingInstruction = await requireInstruction(id) 21 | requireInstructionOwner(event, 
existingInstruction) 22 | 23 | const result = await deleteInstructions(id) 24 | return result 25 | }) 26 | -------------------------------------------------------------------------------- /server/types/index.ts: -------------------------------------------------------------------------------- 1 | import { MultiPartData } from 'h3' 2 | 3 | export type PageParser = 'default' | 'jinaReader' 4 | 5 | export type KnowledgeBaseFormData = { 6 | name: string 7 | description: string 8 | embedding: string 9 | isPublic: boolean 10 | knowledgeBaseId: number | null 11 | uploadedFiles: MultiPartData[] 12 | urls: string[] 13 | pageParser: PageParser 14 | maxDepth: number 15 | excludeGlobs: string[], 16 | chunking: { 17 | parentChunkSize: number, 18 | parentChunkOverlap: number, 19 | childChunkSize: number, 20 | childChunkOverlap: number, 21 | parentK: number, 22 | childK: number, 23 | } 24 | } 25 | 26 | export type ChunkSettings = { 27 | parentChunkSize: number 28 | parentChunkOverlap: number 29 | childChunkSize: number 30 | childChunkOverlap: number 31 | parentK: number 32 | childK: number 33 | } 34 | -------------------------------------------------------------------------------- /server/models/openai/tests/prompts.int.test.ts: -------------------------------------------------------------------------------- 1 | import OpenAI from "openai"; 2 | import { ChatPromptTemplate } from "@langchain/core/prompts"; 3 | 4 | import { convertPromptToOpenAI } from "../utils/prompts.js"; 5 | 6 | test("Convert hub prompt to OpenAI payload and invoke", async () => { 7 | const prompt = ChatPromptTemplate.fromMessages([ 8 | ["system", "You are a world class comedian"], 9 | ["human", "Tell me a joke about {topic}"], 10 | ]); 11 | const formattedPrompt = await prompt.invoke({ 12 | topic: "cats", 13 | }); 14 | 15 | const { messages } = convertPromptToOpenAI(formattedPrompt); 16 | 17 | const openAIClient = new OpenAI(); 18 | 19 | const openAIResponse = await openAIClient.chat.completions.create({ 20 | 
model: "gpt-4o-mini", 21 | messages, 22 | }); 23 | 24 | expect(openAIResponse.choices.length).toBeGreaterThan(0); 25 | }); 26 | -------------------------------------------------------------------------------- /composables/store.ts: -------------------------------------------------------------------------------- 1 | import { useStorage } from '@vueuse/core' 2 | import { DEFAULT_ATTACHED_MESSAGES_COUNT, MODEL_FAMILY_SEPARATOR } from '~/config' 3 | 4 | export const chatDefaultSettings = useStorage('chat-default-settings', { 5 | models: [] as string[], 6 | attachedMessagesCount: DEFAULT_ATTACHED_MESSAGES_COUNT, 7 | enableToolUsage: false, 8 | }) 9 | 10 | // incompatible with old data format 11 | const model = (chatDefaultSettings.value as any).model 12 | if (model) { 13 | if (Array.isArray(model)) { 14 | const models = [model.concat().reverse().join(MODEL_FAMILY_SEPARATOR)] 15 | chatDefaultSettings.value = { 16 | models, 17 | attachedMessagesCount: chatDefaultSettings.value.attachedMessagesCount || DEFAULT_ATTACHED_MESSAGES_COUNT, 18 | enableToolUsage: false, 19 | } 20 | } else { 21 | chatDefaultSettings.value = null 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /components/settings/SettingsCard.vue: -------------------------------------------------------------------------------- 1 | 21 | 22 | 35 | -------------------------------------------------------------------------------- /components/MessageHeader.vue: -------------------------------------------------------------------------------- 1 | 10 | 11 | -------------------------------------------------------------------------------- /utils/multimodal-live.ts: -------------------------------------------------------------------------------- 1 | export async function blobToJSON(blob: Blob): Promise { 2 | const text = await blob.text() 3 | try { 4 | return JSON.parse(text) 5 | } catch (e) { 6 | console.error('Error parsing blob to JSON:', e) 7 | throw e 8 | } 9 | } 10 | 
11 | export function base64ToArrayBuffer(base64: string): ArrayBuffer { 12 | const binaryString = atob(base64) 13 | const bytes = new Uint8Array(binaryString.length) 14 | for (let i = 0; i < binaryString.length; i++) { 15 | bytes[i] = binaryString.charCodeAt(i) 16 | } 17 | return bytes.buffer 18 | } 19 | 20 | export function arrayBufferToBase64(buffer: ArrayBuffer): string { 21 | const bytes = new Uint8Array(buffer) 22 | let binary = '' 23 | for (let i = 0; i < bytes.byteLength; i++) { 24 | binary += String.fromCharCode(bytes[i]) 25 | } 26 | return btoa(binary) 27 | } 28 | -------------------------------------------------------------------------------- /components/MarkdownPreview.vue: -------------------------------------------------------------------------------- 1 | 13 | 14 | 30 | -------------------------------------------------------------------------------- /server/api/auth/google/login.get.ts: -------------------------------------------------------------------------------- 1 | import { google } from 'googleapis' 2 | import { createError } from 'h3' 3 | 4 | const oauth2Client = new google.auth.OAuth2( 5 | process.env.GOOGLE_CLIENT_ID, 6 | process.env.GOOGLE_CLIENT_SECRET, 7 | `${process.env.APP_BASE_URL || 'http://localhost:3000'}/api/auth/google/callback` 8 | ) 9 | 10 | export default defineEventHandler(async (event) => { 11 | if (!process.env.GOOGLE_CLIENT_ID || !process.env.GOOGLE_CLIENT_SECRET) { 12 | throw createError({ 13 | statusCode: 500, 14 | statusMessage: 'Google OAuth is not configured' 15 | }) 16 | } 17 | 18 | const authUrl = oauth2Client.generateAuthUrl({ 19 | access_type: 'offline', 20 | scope: [ 21 | 'https://www.googleapis.com/auth/userinfo.profile', 22 | 'https://www.googleapis.com/auth/userinfo.email' 23 | ], 24 | prompt: 'consent' 25 | }) 26 | 27 | return sendRedirect(event, authUrl) 28 | }) 29 | -------------------------------------------------------------------------------- /pages/logout/index.vue: 
-------------------------------------------------------------------------------- 1 | 22 | 33 | -------------------------------------------------------------------------------- /types/katex.d.ts: -------------------------------------------------------------------------------- 1 | declare module 'katex' { 2 | export interface KatexOptions { 3 | displayMode?: boolean 4 | output?: string 5 | leqno?: boolean 6 | fleqn?: boolean 7 | throwOnError?: boolean 8 | errorColor?: string 9 | macros?: any 10 | minRuleThickness?: number 11 | colorIsTextColor?: boolean 12 | maxSize?: number 13 | maxExpand?: number 14 | strict?: boolean | string 15 | trust?: boolean | ((context: { command: string; url: string; protocol: string }) => boolean) 16 | globalGroup?: boolean 17 | } 18 | 19 | export function renderToString( 20 | tex: string, 21 | options?: KatexOptions 22 | ): string 23 | 24 | export function renderToElement( 25 | element: HTMLElement, 26 | tex: string, 27 | options?: KatexOptions 28 | ): void 29 | 30 | export const version: string 31 | } 32 | -------------------------------------------------------------------------------- /components/TheLogo.vue: -------------------------------------------------------------------------------- 1 | 4 | 5 | 15 | -------------------------------------------------------------------------------- /server/api/mcp-servers/index.get.ts: -------------------------------------------------------------------------------- 1 | import { McpServiceSingleton } from '~/server/utils/mcp' 2 | import { requireAdminIfAclEnabled } from '~/server/utils/auth' 3 | 4 | export default defineEventHandler(async (event) => { 5 | // Check if MCP feature is enabled 6 | if (!isMcpEnabled()) { 7 | setResponseStatus(event, 403, 'MCP feature is disabled') 8 | return { error: 'MCP feature is disabled' } 9 | } 10 | 11 | // Require admin privileges for MCP server management (if ACL is enabled) 12 | requireAdminIfAclEnabled(event) 13 | 14 | try { 15 | const servers = await 
McpServiceSingleton.getAllServers() 16 | return { 17 | success: true, 18 | data: servers 19 | } 20 | } catch (error) { 21 | console.error('Failed to fetch MCP servers:', error) 22 | throw createError({ 23 | statusCode: 500, 24 | statusMessage: 'Failed to fetch MCP servers' 25 | }) 26 | } 27 | // No need to close singleton 28 | }) 29 | -------------------------------------------------------------------------------- /public/logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | -------------------------------------------------------------------------------- /types/chat.d.ts: -------------------------------------------------------------------------------- 1 | export interface ToolCall { 2 | id: string 3 | name: string 4 | args: any 5 | } 6 | 7 | export interface ToolResult { 8 | tool_call_id: string 9 | content: string 10 | } 11 | 12 | export type ChatContent = string | Array<{ type: string; text?: string; image_url?: { url: string } }> 13 | 14 | export interface ChatMessage { 15 | id?: number 16 | role: 'system' | 'assistant' | 'user' 17 | model: string, 18 | contentType: 'string' | 'array' | 'tool', 19 | content: ChatContent 20 | sanitizedContent?: ChatContent 21 | type?: 'loading' | 'canceled' | 'error' | 'tool' 22 | startTime: number 23 | endTime: number 24 | relevantDocs?: RelevantDocument[] 25 | toolResult: boolean 26 | toolCallId: string 27 | toolCalls: ToolCall[] 28 | toolResults: ToolResult[] 29 | // Agent-specific properties 30 | messageType?: string 31 | toolName?: string 32 | additionalKwargs?: any 33 | } 34 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "editor.formatOnSave": true, 3 | "javascript.format.semicolons": "remove", 4 | "javascript.preferences.quoteStyle": "single", 5 | 
"typescript.format.semicolons": "remove", 6 | "typescript.preferences.quoteStyle": "single", 7 | "html.format.wrapAttributes": "preserve-aligned", 8 | "vue.format.wrapAttributes": "preserve-aligned", 9 | "html.format.wrapLineLength": 0, 10 | "prettier.enable": false, 11 | "[typescript]": { 12 | "editor.defaultFormatter": "vscode.typescript-language-features" 13 | }, 14 | "[json]": { 15 | "editor.defaultFormatter": "vscode.json-language-features" 16 | }, 17 | "[vue]": { 18 | "editor.defaultFormatter": "Vue.volar" 19 | }, 20 | "cSpell.words": [ 21 | "composables", 22 | "dexie", 23 | "groq", 24 | "jina", 25 | "knowledgebase", 26 | "nuxt", 27 | "Slideover" 28 | ], 29 | "i18n-ally.localesPaths": [ 30 | "locales" 31 | ], 32 | "i18n-ally.keystyle": "nested", 33 | } -------------------------------------------------------------------------------- /composables/useChatWorker.ts: -------------------------------------------------------------------------------- 1 | import type { WorkerReceivedMessage, WorkerSendMessage } from './worker-chatRequest' 2 | 3 | type Handler = (data: WorkerSendMessage) => void 4 | 5 | let worker: Worker 6 | 7 | export function useChatWorker() { 8 | const handlers: Handler[] = [] 9 | 10 | if (!worker) { 11 | worker = new Worker(new URL('./worker-chatRequest', import.meta.url)) 12 | } 13 | 14 | worker.addEventListener('message', onMessage) 15 | 16 | if (getCurrentScope()) { 17 | onScopeDispose(() => { 18 | worker.removeEventListener('message', onMessage) 19 | }) 20 | } 21 | 22 | function onMessage(e: MessageEvent) { 23 | handlers.forEach(h => h(e.data)) 24 | } 25 | 26 | function sendMessage(data: WorkerReceivedMessage) { 27 | worker.postMessage(JSON.parse(JSON.stringify(data))) 28 | } 29 | 30 | function onReceivedMessage(handler: Handler) { 31 | handlers.push(handler) 32 | } 33 | 34 | return { sendMessage, onReceivedMessage } 35 | } 36 | -------------------------------------------------------------------------------- 
/components/MessageToggleCollapseButton.vue: -------------------------------------------------------------------------------- 1 | 6 | 7 | 14 | 15 | 44 | -------------------------------------------------------------------------------- /pages/realtime/index.vue: -------------------------------------------------------------------------------- 1 | 12 | 13 | 28 | -------------------------------------------------------------------------------- /composables/helpers.ts: -------------------------------------------------------------------------------- 1 | export function omit, K extends keyof O>(obj: O, keys: K[]) { 2 | return (Object.keys(obj) as K[]).reduce((acc, key) => { 3 | if (!keys.includes(key)) 4 | Object.assign(acc, { [key]: obj[key] }) 5 | return acc 6 | }, {} as Omit) 7 | } 8 | 9 | export function pick, K extends keyof O>(obj: O, keys: K[]) { 10 | return keys.reduce((acc, key) => { 11 | acc[key] = obj[key] 12 | return acc 13 | }, {} as Pick) 14 | } 15 | 16 | export function deepClone(obj: T): T { 17 | if (typeof obj !== 'object' || obj === null) { 18 | return obj // primitive value or null 19 | } 20 | 21 | if (Array.isArray(obj)) { 22 | return obj.map((item) => deepClone(item)) as T 23 | } 24 | 25 | const clone: { [key: string]: any } = {} 26 | 27 | for (const key in obj) { 28 | if (Object.prototype.hasOwnProperty.call(obj, key)) { 29 | clone[key] = deepClone(obj[key]) 30 | } 31 | } 32 | 33 | return clone as T 34 | } 35 | -------------------------------------------------------------------------------- /server/api/knowledgebases/[id].get.ts: -------------------------------------------------------------------------------- 1 | import { type KnowledgeBase } from "@prisma/client" 2 | import prisma from "@/server/utils/prisma" 3 | 4 | const listKnowledgeBase = async ( 5 | id?: string 6 | ): Promise => { 7 | try { 8 | let knowledgeBase = null 9 | 10 | if (id) { 11 | knowledgeBase = await prisma.knowledgeBase.findUnique({ 12 | where: { 13 | id: parseInt(id), 14 | }, 15 | 
}) 16 | } 17 | 18 | return knowledgeBase 19 | } catch (error) { 20 | console.error(`Error fetching knowledge base with id ${id}:`, error) 21 | return null 22 | } 23 | } 24 | 25 | export default defineEventHandler(async (event) => { 26 | // Check if knowledge base feature is enabled 27 | if (!isKnowledgeBaseEnabled()) { 28 | setResponseStatus(event, 403, 'Knowledge base feature is disabled') 29 | return { error: 'Knowledge base feature is disabled' } 30 | } 31 | 32 | const id = event?.context?.params?.id 33 | const knowledgeBase = await listKnowledgeBase(id) 34 | return { knowledgeBase } 35 | }) 36 | -------------------------------------------------------------------------------- /server/api/audio/session.post.ts: -------------------------------------------------------------------------------- 1 | import { defineEventHandler } from 'h3' 2 | 3 | export default defineEventHandler(async (event) => { 4 | // Check if realtime chat feature is enabled 5 | if (!isRealtimeChatEnabled()) { 6 | setResponseStatus(event, 403, 'Realtime chat feature is disabled') 7 | return { error: 'Realtime chat feature is disabled' } 8 | } 9 | 10 | try { 11 | const apiKey = event.context.keys.openai.key || '' 12 | const response = await fetch('https://api.openai.com/v1/realtime/sessions', { 13 | method: 'POST', 14 | headers: { 15 | 'Authorization': `Bearer ${apiKey}`, 16 | 'Content-Type': 'application/json' 17 | }, 18 | body: JSON.stringify({ 19 | model: 'gpt-4o-realtime-preview-2024-12-17', 20 | voice: 'alloy' 21 | }) 22 | }) 23 | 24 | const data = await response.json() 25 | return data 26 | 27 | } catch (error) { 28 | console.error('Error creating audio session:', error) 29 | throw createError({ 30 | statusCode: 500, 31 | message: 'Failed to create audio session' 32 | }) 33 | } 34 | }) 35 | -------------------------------------------------------------------------------- /components/MobileMenu.vue: -------------------------------------------------------------------------------- 1 | 4 | 5 | 28 
| -------------------------------------------------------------------------------- /server/api/instruction/index.get.ts: -------------------------------------------------------------------------------- 1 | import { type Instruction } from "@prisma/client" 2 | import prisma from "@/server/utils/prisma" 3 | 4 | const listInstructions = async (userId: number | null): Promise => { 5 | try { 6 | const whereClause = (userId !== null && userId !== undefined) ? 7 | { 8 | OR: [ 9 | { user_id: userId }, 10 | { is_public: true } 11 | ] 12 | } 13 | : { 14 | is_public: true 15 | } 16 | 17 | return await prisma.instruction.findMany({ 18 | where: whereClause, 19 | orderBy: { 20 | id: 'desc' 21 | }, 22 | include: { 23 | user: { 24 | select: { 25 | id: true, 26 | name: true 27 | } 28 | } 29 | } 30 | }) 31 | } catch (error) { 32 | console.error("Error fetching instructions: ", error) 33 | return null 34 | } 35 | } 36 | 37 | export default defineEventHandler(async (event) => { 38 | console.log("user:", event.context.user) 39 | const instructions = await listInstructions(event.context.user?.id) 40 | return { instructions } 41 | }) 42 | -------------------------------------------------------------------------------- /pages/agents/index.vue: -------------------------------------------------------------------------------- 1 | 22 | 23 | 40 | -------------------------------------------------------------------------------- /server/utils/vectorstores.ts: -------------------------------------------------------------------------------- 1 | import { Chroma } from "@langchain/community/vectorstores/chroma" 2 | import { Milvus } from "@langchain/community/vectorstores/milvus" 3 | import { Embeddings } from "@langchain/core/embeddings" 4 | import type { VectorStoreInterface } from "@langchain/core/vectorstores" 5 | 6 | const createChromaVectorStore = (embeddings: Embeddings, collectionName: string): VectorStoreInterface => { 7 | console.log("Creating Chroma vector store") 8 | return new 
Chroma(embeddings, { 9 | collectionName, 10 | url: process.env.CHROMADB_URL 11 | }) 12 | } 13 | 14 | const createMilvusVectorStore = (embeddings: Embeddings, collectionName: string): VectorStoreInterface => { 15 | console.log("Creating Milvus vector store") 16 | return new Milvus(embeddings, { 17 | collectionName, 18 | url: process.env.MILVUS_URL 19 | }) 20 | } 21 | 22 | export const createVectorStore = (embeddings: Embeddings, collectionName: string): VectorStoreInterface => { 23 | if (process.env.VECTOR_STORE === 'milvus') { 24 | return createMilvusVectorStore(embeddings, collectionName) 25 | } else { 26 | return createChromaVectorStore(embeddings, collectionName) 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /server/api/instruction/index.post.ts: -------------------------------------------------------------------------------- 1 | import prisma from "@/server/utils/prisma" 2 | 3 | const saveInstructions = async (name: string, instruction: string, userId: number, isPublic: boolean = false) => { 4 | try { 5 | return await prisma.instruction.create({ 6 | data: { 7 | name, 8 | instruction, 9 | user_id: userId, 10 | is_public: isPublic 11 | } 12 | }) 13 | } catch (error) { 14 | console.error("Error saving instructions: ", error) 15 | return null 16 | } 17 | } 18 | 19 | export default defineEventHandler(async (event) => { 20 | // Require authentication for instruction creation 21 | if (!event.context.user) { 22 | throw createError({ 23 | statusCode: 401, 24 | statusMessage: 'Authentication required to create instructions' 25 | }) 26 | } 27 | 28 | const { name, instruction, is_public = false } = await readBody(event) 29 | if (!name || !instruction) { 30 | throw createError({ 31 | statusCode: 400, 32 | statusMessage: 'Name and instruction are required' 33 | }) 34 | } 35 | 36 | const result = await saveInstructions(name, instruction, event.context.user.id, is_public) 37 | return result 38 | }) 39 | 
-------------------------------------------------------------------------------- /server/api/models/pull/index.post.ts: -------------------------------------------------------------------------------- 1 | import { Readable } from 'stream' 2 | import { setEventStreamResponse } from '@/server/utils' 3 | import { getOllama } from '@/server/utils/ollama' 4 | 5 | export default defineEventHandler(async (event) => { 6 | // Check if models management feature is enabled 7 | if (!isModelsManagementEnabled()) { 8 | setResponseStatus(event, 403, 'Models management feature is disabled') 9 | return { error: 'Models management feature is disabled' } 10 | } 11 | 12 | const { model, stream } = await readBody(event) 13 | 14 | setEventStreamResponse(event) 15 | 16 | const ollama = await getOllama(event, true) 17 | if (!ollama) return 18 | 19 | const response = await ollama.pull({ model, stream }) 20 | 21 | const readableStream = Readable.from((async function* () { 22 | try { 23 | for await (const chunk of response) { 24 | yield `${JSON.stringify(chunk)}\n\n` 25 | } 26 | } catch (error: any) { 27 | const error_response = JSON.stringify({ "error": error.message }) 28 | yield `${error_response}\n\n`// You can choose to yield an empty string or any other value to indicate the error 29 | } 30 | })()) 31 | return sendStream(event, readableStream) 32 | }) 33 | -------------------------------------------------------------------------------- /server/api/instruction/[id].put.ts: -------------------------------------------------------------------------------- 1 | import prisma from "@/server/utils/prisma" 2 | import { requireInstruction, requireInstructionOwner } from "@/server/utils/instructions" 3 | 4 | const updateInstructions = async ( 5 | id: string, 6 | name: string, 7 | instruction: string, 8 | isPublic?: boolean 9 | ) => { 10 | try { 11 | const updateData: any = { name, instruction } 12 | if (isPublic !== undefined) { 13 | updateData.is_public = isPublic 14 | } 15 | 16 | return await 
prisma.instruction.update({ 17 | where: { id: parseInt(id) }, 18 | data: updateData 19 | }) 20 | } catch (error) { 21 | console.error("Error editing instructions: ", error) 22 | return null 23 | } 24 | } 25 | 26 | export default defineEventHandler(async (event) => { 27 | const id = event?.context?.params?.id 28 | const { name, instruction, is_public } = await readBody(event) 29 | if (!id || !name || !instruction) { 30 | return 31 | } 32 | 33 | // Check if instruction exists and user has permission 34 | const existingInstruction = await requireInstruction(id) 35 | requireInstructionOwner(event, existingInstruction) 36 | 37 | const result = await updateInstructions(id, name, instruction, is_public) 38 | return result 39 | }) 40 | -------------------------------------------------------------------------------- /.github/workflows/claude.yml: -------------------------------------------------------------------------------- 1 | name: Claude Code 2 | 3 | on: 4 | issue_comment: 5 | types: [created] 6 | pull_request_review_comment: 7 | types: [created] 8 | issues: 9 | types: [opened, assigned] 10 | pull_request_review: 11 | types: [submitted] 12 | 13 | jobs: 14 | claude: 15 | if: | 16 | (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) || 17 | (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) || 18 | (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) || 19 | (github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude'))) 20 | runs-on: ubuntu-latest 21 | permissions: 22 | contents: read 23 | pull-requests: read 24 | issues: read 25 | id-token: write 26 | steps: 27 | - name: Checkout repository 28 | uses: actions/checkout@v4 29 | with: 30 | fetch-depth: 1 31 | 32 | - name: Run Claude Code 33 | id: claude 34 | uses: anthropics/claude-code-action@beta 35 | with: 36 | 
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} 37 | 38 | -------------------------------------------------------------------------------- /components/settings/CreateCustomServer.vue: -------------------------------------------------------------------------------- 1 | 25 | 26 | 44 | -------------------------------------------------------------------------------- /.github/workflows/docker-image.yaml: -------------------------------------------------------------------------------- 1 | 2 | name: Build and push Docker Images for multiple platforms 3 | 4 | 5 | on: 6 | push: 7 | branches: [ "main" ] 8 | 9 | jobs: 10 | build_and_push: 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | 15 | - uses: actions/checkout@v4 16 | 17 | # Generate a timestamp and store it in a variable 18 | - name: Set up Docker Buildx 19 | uses: docker/setup-buildx-action@v3 20 | - name: Cache Docker layers 21 | uses: actions/cache@v4 22 | with: 23 | path: /tmp/.buildx-cache 24 | key: ${{ runner.os }}-buildx-${{ github.sha }} 25 | restore-keys: | 26 | ${{ runner.os }}-buildx- 27 | 28 | - name: Login to DockerHub 29 | uses: docker/login-action@v3 30 | with: 31 | username: ${{ secrets.DOCKER_USERNAME }} 32 | password: ${{ secrets.DOCKER_PASSWORD }} 33 | 34 | - name: Build and push 35 | uses: docker/build-push-action@v5 36 | with: 37 | context: . 
38 | file: ./Dockerfile 39 | platforms: linux/amd64,linux/arm64/v8 40 | push: true 41 | tags: 0001coder/chatollama:latest 42 | cache-from: type=local,src=/tmp/.buildx-cache 43 | cache-to: type=local,dest=/tmp/.buildx-cache 44 | 45 | 46 | -------------------------------------------------------------------------------- /server/models/openai/tools/custom.ts: -------------------------------------------------------------------------------- 1 | import { 2 | patchConfig, 3 | pickRunnableConfigKeys, 4 | RunnableFunc, 5 | } from "@langchain/core/runnables"; 6 | import { AsyncLocalStorageProviderSingleton } from "@langchain/core/singletons"; 7 | import { DynamicTool, ToolRunnableConfig } from "@langchain/core/tools"; 8 | import OpenAI from "openai"; 9 | 10 | export type CustomToolFields = Omit; 11 | 12 | export function customTool( 13 | func: RunnableFunc, 14 | fields: CustomToolFields 15 | ): DynamicTool { 16 | return new DynamicTool({ 17 | ...fields, 18 | description: "", 19 | metadata: { 20 | customTool: fields, 21 | }, 22 | func: async (input, runManager, config) => 23 | new Promise((resolve, reject) => { 24 | const childConfig = patchConfig(config, { 25 | callbacks: runManager?.getChild(), 26 | }); 27 | void AsyncLocalStorageProviderSingleton.runWithConfig( 28 | pickRunnableConfigKeys(childConfig), 29 | async () => { 30 | try { 31 | resolve(func(input, childConfig)); 32 | } catch (e) { 33 | reject(e); 34 | } 35 | } 36 | ); 37 | }), 38 | }); 39 | } 40 | -------------------------------------------------------------------------------- /server/models/openai/tests/azure/chat_models.standard.test.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable no-process-env */ 2 | import { test, expect } from "@jest/globals"; 3 | import { ChatModelUnitTests } from "@langchain/standard-tests"; 4 | import { AIMessageChunk } from "@langchain/core/messages"; 5 | import { AzureChatOpenAI } from "../../azure/chat_models.js"; 6 | import { 
ChatOpenAICallOptions } from "../../chat_models.js"; 7 | 8 | class AzureChatOpenAIStandardUnitTests extends ChatModelUnitTests< 9 | ChatOpenAICallOptions, 10 | AIMessageChunk 11 | > { 12 | constructor() { 13 | super({ 14 | Cls: AzureChatOpenAI, 15 | chatModelHasToolCalling: true, 16 | chatModelHasStructuredOutput: true, 17 | constructorArgs: {}, 18 | }); 19 | process.env.AZURE_OPENAI_API_KEY = "test"; 20 | process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME = "test"; 21 | process.env.AZURE_OPENAI_API_VERSION = "test"; 22 | process.env.AZURE_OPENAI_BASE_PATH = "test"; 23 | } 24 | 25 | testChatModelInitApiKey() { 26 | console.warn( 27 | "AzureChatOpenAI does not require a single API key. Skipping..." 28 | ); 29 | } 30 | } 31 | 32 | const testClass = new AzureChatOpenAIStandardUnitTests(); 33 | 34 | test("AzureChatOpenAIStandardUnitTests", () => { 35 | const testResults = testClass.runTests(); 36 | expect(testResults).toBe(true); 37 | }); 38 | -------------------------------------------------------------------------------- /pages/chat/index.vue: -------------------------------------------------------------------------------- 1 | 31 | 32 | 40 | -------------------------------------------------------------------------------- /plugins/features.server.ts: -------------------------------------------------------------------------------- 1 | export default defineNuxtPlugin(async () => { 2 | // Only run on server 3 | if (process.client) return 4 | 5 | try { 6 | const config = useRuntimeConfig() 7 | const features = { 8 | knowledgeBaseEnabled: Boolean(config.knowledgeBaseEnabled), 9 | realtimeChatEnabled: Boolean(config.realtimeChatEnabled), 10 | modelsManagementEnabled: Boolean(config.modelsManagementEnabled), 11 | mcpEnabled: Boolean(config.mcpEnabled) 12 | } 13 | 14 | // Inject features into SSR context and payload 15 | const nuxtApp = useNuxtApp() 16 | if (nuxtApp.ssrContext) { 17 | nuxtApp.ssrContext.features = features 18 | } 19 | nuxtApp.payload.features = features 20 | } 
catch (error) { 21 | console.error('Error setting up features:', error) 22 | // Provide safe defaults 23 | const nuxtApp = useNuxtApp() 24 | const fallbackFeatures = { 25 | knowledgeBaseEnabled: false, 26 | realtimeChatEnabled: false, 27 | modelsManagementEnabled: false, 28 | mcpEnabled: false 29 | } 30 | if (nuxtApp.ssrContext) { 31 | nuxtApp.ssrContext.features = fallbackFeatures 32 | } 33 | nuxtApp.payload.features = fallbackFeatures 34 | } 35 | }) 36 | -------------------------------------------------------------------------------- /server/api/knowledgebases/index.get.ts: -------------------------------------------------------------------------------- 1 | import type { KnowledgeBase } from '@prisma/client' 2 | import prisma from '@/server/utils/prisma' 3 | 4 | const listKnowledgeBases = async (userId: number | null): Promise => { 5 | try { 6 | const whereClause = (userId !== null && userId !== undefined) ? 7 | { 8 | OR: [ 9 | { user_id: userId }, 10 | { user_id: null }, 11 | { is_public: true } 12 | ] 13 | } 14 | : { 15 | OR: [ 16 | { user_id: null }, 17 | { is_public: true } 18 | ] 19 | } 20 | 21 | return await prisma.knowledgeBase.findMany({ 22 | where: whereClause, 23 | orderBy: { 24 | id: 'desc' 25 | }, 26 | include: { 27 | files: true 28 | } 29 | }) 30 | } catch (error) { 31 | console.error("Error fetching knowledge bases: ", error) 32 | return null 33 | } 34 | } 35 | 36 | export default defineEventHandler(async (event) => { 37 | // Check if knowledge base feature is enabled 38 | if (!isKnowledgeBaseEnabled()) { 39 | setResponseStatus(event, 403, 'Knowledge base feature is disabled') 40 | return { error: 'Knowledge base feature is disabled' } 41 | } 42 | 43 | const knowledgeBases = await listKnowledgeBases(event.context.user?.id) 44 | return { knowledgeBases } 45 | }) 46 | -------------------------------------------------------------------------------- /server/api/mcp-servers/index.post.ts: 
-------------------------------------------------------------------------------- 1 | import { McpServiceSingleton } from '~/server/utils/mcp' 2 | import { McpServerCreateInput } from '~/server/types/mcp' 3 | import { requireAdminIfAclEnabled } from '~/server/utils/auth' 4 | 5 | export default defineEventHandler(async (event) => { 6 | // Check if MCP feature is enabled 7 | if (!isMcpEnabled()) { 8 | setResponseStatus(event, 403, 'MCP feature is disabled') 9 | return { error: 'MCP feature is disabled' } 10 | } 11 | 12 | // Require admin privileges for MCP server management (if ACL is enabled) 13 | requireAdminIfAclEnabled(event) 14 | 15 | try { 16 | const body = await readBody(event) as McpServerCreateInput 17 | 18 | const result = await McpServiceSingleton.createServer(body) 19 | 20 | if (result.success) { 21 | return { 22 | success: true, 23 | data: result.server 24 | } 25 | } else { 26 | throw createError({ 27 | statusCode: 400, 28 | statusMessage: result.errors?.join(', ') || 'Failed to create server' 29 | }) 30 | } 31 | } catch (error: any) { 32 | console.error('Failed to create MCP server:', error) 33 | 34 | if (error.statusCode) { 35 | throw error 36 | } 37 | 38 | throw createError({ 39 | statusCode: 500, 40 | statusMessage: 'Failed to create MCP server' 41 | }) 42 | } 43 | // No need to close singleton 44 | }) 45 | -------------------------------------------------------------------------------- /components/Sources.vue: -------------------------------------------------------------------------------- 1 | 13 | 14 | 40 | -------------------------------------------------------------------------------- /server/api/mcp-servers/[id].get.ts: -------------------------------------------------------------------------------- 1 | import { McpService } from '~/server/utils/mcp' 2 | import { requireAdminIfAclEnabled } from '~/server/utils/auth' 3 | 4 | export default defineEventHandler(async (event) => { 5 | // Check if MCP feature is enabled 6 | if (!isMcpEnabled()) { 7 | 
setResponseStatus(event, 403, 'MCP feature is disabled') 8 | return { error: 'MCP feature is disabled' } 9 | } 10 | 11 | // Require admin privileges for MCP server management (if ACL is enabled) 12 | requireAdminIfAclEnabled(event) 13 | 14 | const mcpService = new McpService() 15 | 16 | try { 17 | const id = parseInt(getRouterParam(event, 'id') || '0') 18 | 19 | if (!id || isNaN(id)) { 20 | throw createError({ 21 | statusCode: 400, 22 | statusMessage: 'Invalid server ID' 23 | }) 24 | } 25 | 26 | const server = await mcpService.getServerById(id) 27 | 28 | if (!server) { 29 | throw createError({ 30 | statusCode: 404, 31 | statusMessage: 'Server not found' 32 | }) 33 | } 34 | 35 | return { 36 | success: true, 37 | data: server 38 | } 39 | } catch (error: any) { 40 | console.error('Failed to fetch MCP server:', error) 41 | 42 | if (error.statusCode) { 43 | throw error 44 | } 45 | 46 | throw createError({ 47 | statusCode: 500, 48 | statusMessage: 'Failed to fetch MCP server' 49 | }) 50 | } finally { 51 | await mcpService.close() 52 | } 53 | }) 54 | -------------------------------------------------------------------------------- /server/api/auth/signup.post.ts: -------------------------------------------------------------------------------- 1 | import prisma from "@/server/utils/prisma" 2 | import bcrypt from "bcryptjs" 3 | 4 | export enum Role { 5 | USER = 0, 6 | ADMIN = 1, 7 | SUPERADMIN = 2 8 | } 9 | 10 | const signUp = async (name: string, email: string, password: string) => { 11 | if (!name || !password) { 12 | throw createError({ 13 | statusCode: 400, 14 | statusMessage: 'Name and password cannot be empty' 15 | }) 16 | } 17 | 18 | const exist = await prisma.user.count({ where: { name: name } }) > 0 19 | if (exist) { 20 | throw createError({ 21 | statusCode: 409, 22 | statusMessage: `User ${name} already exist` 23 | }) 24 | } 25 | 26 | const hashedPassword = await bcrypt.hash(password, 10) 27 | return await prisma.user.create({ 28 | data: { 29 | name, 30 | email, 31 
| password: hashedPassword, 32 | role: process.env.SUPER_ADMIN_NAME === name ? Role.SUPERADMIN : Role.USER 33 | } 34 | }) 35 | } 36 | 37 | export default defineEventHandler(async (event) => { 38 | const { name, email, password } = await readBody(event) 39 | try { 40 | const result = await signUp(name, email, password) 41 | return { 42 | status: "success", 43 | user: { 44 | id: result?.id 45 | } 46 | } 47 | } catch (error) { 48 | throw error 49 | } 50 | }) 51 | -------------------------------------------------------------------------------- /composables/useTools.ts: -------------------------------------------------------------------------------- 1 | import { ref, type Ref } from 'vue' 2 | 3 | export interface Tool { 4 | type: 'function' 5 | name: string 6 | description: string 7 | parameters?: { 8 | type: 'object' 9 | properties: Record 13 | } 14 | handler: (args: any) => Promise 15 | } 16 | 17 | const globalTools: Ref = ref([]) 18 | 19 | export function useTools() { 20 | const registerTool = (tool: Tool) => { 21 | const existingIndex = globalTools.value.findIndex(t => t.name === tool.name) 22 | if (existingIndex >= 0) { 23 | globalTools.value[existingIndex] = tool 24 | } else { 25 | globalTools.value.push(tool) 26 | } 27 | } 28 | 29 | const unregisterTool = (toolName: string) => { 30 | const index = globalTools.value.findIndex(t => t.name === toolName) 31 | if (index >= 0) { 32 | globalTools.value.splice(index, 1) 33 | } 34 | } 35 | 36 | const getTools = () => globalTools.value 37 | 38 | const executeToolHandler = async (toolName: string, args: any) => { 39 | console.log('Executing tool handler:', toolName, args) 40 | const tool = globalTools.value.find(t => t.name === toolName) 41 | if (!tool) { 42 | throw new Error(`Tool ${toolName} not found`) 43 | } 44 | return await tool.handler(args) 45 | } 46 | 47 | return { 48 | registerTool, 49 | unregisterTool, 50 | getTools, 51 | executeToolHandler 52 | } 53 | } 54 | 
-------------------------------------------------------------------------------- /server/models/openai/tests/chat_models.standard.test.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable no-process-env */ 2 | import { test, expect } from "@jest/globals"; 3 | import { ChatModelUnitTests } from "@langchain/standard-tests"; 4 | import { AIMessageChunk } from "@langchain/core/messages"; 5 | import { ChatOpenAI, ChatOpenAICallOptions } from "../chat_models.js"; 6 | 7 | class ChatOpenAIStandardUnitTests extends ChatModelUnitTests< 8 | ChatOpenAICallOptions, 9 | AIMessageChunk 10 | > { 11 | constructor() { 12 | super({ 13 | Cls: ChatOpenAI, 14 | chatModelHasToolCalling: true, 15 | chatModelHasStructuredOutput: true, 16 | constructorArgs: {}, 17 | }); 18 | // This must be set so method like `.bindTools` or `.withStructuredOutput` 19 | // which we call after instantiating the model will work. 20 | // (constructor will throw if API key is not set) 21 | process.env.OPENAI_API_KEY = "test"; 22 | } 23 | 24 | testChatModelInitApiKey() { 25 | // Unset the API key env var here so this test can properly check 26 | // the API key class arg. 27 | process.env.OPENAI_API_KEY = ""; 28 | super.testChatModelInitApiKey(); 29 | // Re-set the API key env var here so other tests can run properly. 
30 | process.env.OPENAI_API_KEY = "test"; 31 | } 32 | } 33 | 34 | const testClass = new ChatOpenAIStandardUnitTests(); 35 | 36 | test("ChatOpenAIStandardUnitTests", () => { 37 | const testResults = testClass.runTests(); 38 | expect(testResults).toBe(true); 39 | }); 40 | -------------------------------------------------------------------------------- /server/models/openai/tests/chat_models_responses.standard.int.test.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable no-process-env */ 2 | import { test } from "@jest/globals"; 3 | import { ChatModelIntegrationTests } from "@langchain/standard-tests"; 4 | import { AIMessageChunk } from "@langchain/core/messages"; 5 | import { ChatOpenAI, ChatOpenAICallOptions } from "../chat_models.js"; 6 | 7 | class ChatOpenAIResponsesStandardIntegrationTests extends ChatModelIntegrationTests< 8 | ChatOpenAICallOptions, 9 | AIMessageChunk 10 | > { 11 | constructor() { 12 | if (!process.env.OPENAI_API_KEY) { 13 | throw new Error( 14 | "OPENAI_API_KEY must be set to run standard integration tests." 
15 | ); 16 | } 17 | super({ 18 | Cls: ChatOpenAI, 19 | chatModelHasToolCalling: true, 20 | chatModelHasStructuredOutput: true, 21 | supportsParallelToolCalls: true, 22 | constructorArgs: { 23 | model: "gpt-4o-mini", 24 | useResponsesApi: true, 25 | }, 26 | }); 27 | } 28 | 29 | async testInvokeMoreComplexTools() { 30 | this.skipTestMessage( 31 | "testInvokeMoreComplexTools", 32 | "ChatOpenAI", 33 | "OpenAI Responses API does not support Record" 34 | ); 35 | } 36 | } 37 | 38 | const testClass = new ChatOpenAIResponsesStandardIntegrationTests(); 39 | 40 | test("ChatOpenAIResponsesStandardIntegrationTests", async () => { 41 | const testResults = await testClass.runTests(); 42 | expect(testResults).toBe(true); 43 | }); 44 | -------------------------------------------------------------------------------- /server/models/openai/tests/chat_models_responses.standard.test.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable no-process-env */ 2 | import { test, expect } from "@jest/globals"; 3 | import { ChatModelUnitTests } from "@langchain/standard-tests"; 4 | import { AIMessageChunk } from "@langchain/core/messages"; 5 | import { ChatOpenAI, ChatOpenAICallOptions } from "../chat_models.js"; 6 | 7 | class ChatOpenAIResponsesStandardUnitTests extends ChatModelUnitTests< 8 | ChatOpenAICallOptions, 9 | AIMessageChunk 10 | > { 11 | constructor() { 12 | super({ 13 | Cls: ChatOpenAI, 14 | chatModelHasToolCalling: true, 15 | chatModelHasStructuredOutput: true, 16 | constructorArgs: { useResponsesApi: true }, 17 | }); 18 | // This must be set so method like `.bindTools` or `.withStructuredOutput` 19 | // which we call after instantiating the model will work. 20 | // (constructor will throw if API key is not set) 21 | process.env.OPENAI_API_KEY = "test"; 22 | } 23 | 24 | testChatModelInitApiKey() { 25 | // Unset the API key env var here so this test can properly check 26 | // the API key class arg. 
27 | process.env.OPENAI_API_KEY = ""; 28 | super.testChatModelInitApiKey(); 29 | // Re-set the API key env var here so other tests can run properly. 30 | process.env.OPENAI_API_KEY = "test"; 31 | } 32 | } 33 | 34 | const testClass = new ChatOpenAIResponsesStandardUnitTests(); 35 | 36 | test("ChatOpenAIResponsesStandardUnitTests", () => { 37 | const testResults = testClass.runTests(); 38 | expect(testResults).toBe(true); 39 | }); 40 | -------------------------------------------------------------------------------- /server/api/mcp-servers/[id].delete.ts: -------------------------------------------------------------------------------- 1 | import { McpService } from '~/server/utils/mcp' 2 | import { requireAdminIfAclEnabled } from '~/server/utils/auth' 3 | 4 | export default defineEventHandler(async (event) => { 5 | // Check if MCP feature is enabled 6 | if (!isMcpEnabled()) { 7 | setResponseStatus(event, 403, 'MCP feature is disabled') 8 | return { error: 'MCP feature is disabled' } 9 | } 10 | 11 | // Require admin privileges for MCP server management (if ACL is enabled) 12 | requireAdminIfAclEnabled(event) 13 | 14 | const mcpService = new McpService() 15 | 16 | try { 17 | const id = parseInt(getRouterParam(event, 'id') || '0') 18 | 19 | if (!id || isNaN(id)) { 20 | throw createError({ 21 | statusCode: 400, 22 | statusMessage: 'Invalid server ID' 23 | }) 24 | } 25 | 26 | const result = await mcpService.deleteServer(id) 27 | 28 | if (result.success) { 29 | return { 30 | success: true, 31 | message: 'Server deleted successfully' 32 | } 33 | } else { 34 | throw createError({ 35 | statusCode: 400, 36 | statusMessage: result.errors?.join(', ') || 'Failed to delete server' 37 | }) 38 | } 39 | } catch (error: any) { 40 | console.error('Failed to delete MCP server:', error) 41 | 42 | if (error.statusCode) { 43 | throw error 44 | } 45 | 46 | throw createError({ 47 | statusCode: 500, 48 | statusMessage: 'Failed to delete MCP server' 49 | }) 50 | } finally { 51 | await 
mcpService.close() 52 | } 53 | }) 54 | -------------------------------------------------------------------------------- /assets/svg/azure.svg: -------------------------------------------------------------------------------- 1 | Azure -------------------------------------------------------------------------------- /server/models/openai/utils/prompts.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable import/no-extraneous-dependencies */ 2 | import type { BasePromptValue } from "@langchain/core/prompt_values"; 3 | import type { OpenAI } from "openai"; 4 | 5 | import { _convertMessagesToOpenAIParams } from "../chat_models.js"; 6 | 7 | /** 8 | * Convert a formatted LangChain prompt (e.g. pulled from the hub) into 9 | * a format expected by OpenAI's JS SDK. 10 | * 11 | * Requires the "@langchain/openai" package to be installed in addition 12 | * to the OpenAI SDK. 13 | * 14 | * @example 15 | * ```ts 16 | * import { convertPromptToOpenAI } from "langsmith/utils/hub/openai"; 17 | * import { pull } from "langchain/hub"; 18 | * 19 | * import OpenAI from 'openai'; 20 | * 21 | * const prompt = await pull("jacob/joke-generator"); 22 | * const formattedPrompt = await prompt.invoke({ 23 | * topic: "cats", 24 | * }); 25 | * 26 | * const { messages } = convertPromptToOpenAI(formattedPrompt); 27 | * 28 | * const openAIClient = new OpenAI(); 29 | * 30 | * const openaiResponse = await openAIClient.chat.completions.create({ 31 | * model: "gpt-4o-mini", 32 | * messages, 33 | * }); 34 | * ``` 35 | * @param formattedPrompt 36 | * @returns A partial OpenAI payload. 
37 | */ 38 | export function convertPromptToOpenAI(formattedPrompt: BasePromptValue): { 39 | messages: OpenAI.Chat.ChatCompletionMessageParam[]; 40 | } { 41 | const messages = formattedPrompt.toChatMessages(); 42 | return { 43 | messages: _convertMessagesToOpenAIParams( 44 | messages 45 | ) as OpenAI.Chat.ChatCompletionMessageParam[], 46 | }; 47 | } 48 | -------------------------------------------------------------------------------- /assets/katex.scss: -------------------------------------------------------------------------------- 1 | /* KaTeX styles */ 2 | @import 'katex/dist/katex.min.css'; 3 | 4 | /* Custom KaTeX styling */ 5 | .katex-block { 6 | overflow-x: auto; 7 | overflow-y: hidden; 8 | padding: 1rem 0; 9 | } 10 | 11 | /* Custom styling for LaTeX code blocks */ 12 | .md-body { 13 | .katex-block { 14 | background-color: var(--color-canvas-subtle); 15 | border-radius: 0.375rem; 16 | padding: 1rem; 17 | margin: 1rem 0; 18 | text-align: center; 19 | } 20 | 21 | .katex-block-ssr { 22 | background-color: var(--color-canvas-subtle); 23 | border-radius: 0.375rem; 24 | padding: 1rem; 25 | margin: 1rem 0; 26 | text-align: center; 27 | min-height: 2rem; 28 | display: flex; 29 | align-items: center; 30 | justify-content: center; 31 | 32 | &::before { 33 | content: "Loading LaTeX..."; 34 | color: var(--color-gray-500); 35 | font-style: italic; 36 | } 37 | } 38 | 39 | .katex-error { 40 | color: #cc0000; 41 | background-color: #ffeeee; 42 | padding: 0.5rem; 43 | border-radius: 0.25rem; 44 | white-space: pre-wrap; 45 | font-family: monospace; 46 | font-size: 0.875rem; 47 | } 48 | } 49 | 50 | /* Dark mode styling */ 51 | .dark { 52 | .katex { 53 | .katex-html { 54 | color: var(--color-gray-200); 55 | } 56 | } 57 | 58 | .md-body { 59 | 60 | .katex-block, 61 | .katex-block-ssr { 62 | background-color: var(--color-gray-800); 63 | } 64 | 65 | .katex-error { 66 | background-color: rgba(204, 0, 0, 0.2); 67 | color: #ff6666; 68 | } 69 | } 70 | } 71 | 
-------------------------------------------------------------------------------- /blogs/2025-09-18-smart-quick-chat-dialog-positioning-zh.md: -------------------------------------------------------------------------------- 1 | # 智能快速聊天对话框定位:更好的用户体验 2 | 3 | 我们刚刚推出了 ChatOllama 快速聊天功能的重大改进,解决了用户在屏幕边缘选择文本时遇到的常见问题。快速聊天对话框现在可以智能定位,确保始终保持在视口内,同时为 AI 回复提供更多空间。 4 | 5 | ## 问题所在 6 | 7 | 之前,当用户在屏幕右下角或视口边缘附近选择文本时,快速聊天对话框会部分出现在可见区域之外或完全被截断。这使得阅读 AI 回复和与对话框交互变得困难。此外,对话框相当狭窄(320px),限制了可以舒适显示的文本量。 8 | 9 | ## 解决方案 10 | 11 | 我们的新智能定位算法通过几个关键改进解决了这些问题: 12 | 13 | ### 1. 智能定位逻辑 14 | 15 | 对话框现在遵循复杂的定位策略: 16 | 17 | - **水平定位**:首先尝试定位在选定文本的右侧,如果空间不足则定位在左侧,如果两侧都不行则水平居中 18 | - **垂直定位**:首先尝试定位在选择区域下方,如果需要则定位在上方,最后选择垂直居中 19 | - **视口感知**:始终确保对话框保持在屏幕边界内,具有适当的边距 20 | 21 | ### 2. 更大的对话框尺寸 22 | 23 | - **宽度增加**:从 320px 增加到 480px,提高可读性 24 | - **动态高度**:根据回复内容长度自动调整 25 | - **回复区域**:最大高度从 160px 增加到 320px 26 | - **更好的排版**:回复文本大小从超小号增加到小号,提高可读性 27 | 28 | ### 3. 动态内容适应 29 | 30 | 对话框现在根据 AI 回复长度计算最佳尺寸,确保较长的回复有足够的空间,同时保持较短回复的紧凑性。 31 | 32 | ## 技术实现 33 | 34 | 定位算法使用几个关键常量: 35 | 36 | ```typescript 37 | const DIALOG_WIDTH = 480 // 从 320px 增加 38 | const DIALOG_MIN_HEIGHT = 280 39 | const DIALOG_MAX_HEIGHT = 600 // 回复较长时的最大高度 40 | const VIEWPORT_PADDING = 20 41 | const OFFSET_FROM_SELECTION = 10 42 | ``` 43 | 44 | 智能定位逻辑确保对话框: 45 | - 与视口边缘保持 20px 边距 46 | - 与选定文本保持 10px 距离 47 | - 根据回复内容动态调整高度 48 | - 永远不会被截断或出现在可见区域之外 49 | 50 | ## 对用户体验的影响 51 | 52 | 这些改进带来了几个实质性的好处: 53 | 54 | 1. **更好的可访问性**:用户现在可以在屏幕的任何地方选择文本,无需担心对话框定位问题 55 | 2. **改善的可读性**:更大的对话框和文本大小使 AI 回复更容易阅读 56 | 3. **更多可见内容**:回复区域高度翻倍,允许显示更长的回复而无需过度滚动 57 | 4. **更智能的行为**:对话框自动适应不同的屏幕尺寸和选择位置 58 | 59 | ## 接下来的计划 60 | 61 | 此次更新体现了我们对改善 ChatOllama 用户体验的持续承诺。我们持续收集反馈并进行增量改进,确保我们的 AI 聊天界面尽可能直观和实用。 62 | 63 | 通过在屏幕不同区域选择文本来尝试新的快速聊天定位功能 - 您会注意到对话框现在智能地定位自己以获得最佳的可见性和可用性! 
-------------------------------------------------------------------------------- /components/settings/LanguageSelectMenu.vue: -------------------------------------------------------------------------------- 1 | 25 | 26 | 49 | -------------------------------------------------------------------------------- /server/api/mcp-servers/[id]/toggle.post.ts: -------------------------------------------------------------------------------- 1 | import { McpService } from '~/server/utils/mcp' 2 | import { requireAdminIfAclEnabled } from '~/server/utils/auth' 3 | 4 | export default defineEventHandler(async (event) => { 5 | // Check if MCP feature is enabled 6 | if (!isMcpEnabled()) { 7 | setResponseStatus(event, 403, 'MCP feature is disabled') 8 | return { error: 'MCP feature is disabled' } 9 | } 10 | 11 | // Require admin privileges for MCP server management (if ACL is enabled) 12 | requireAdminIfAclEnabled(event) 13 | 14 | const mcpService = new McpService() 15 | 16 | try { 17 | const id = parseInt(getRouterParam(event, 'id') || '0') 18 | 19 | if (!id || isNaN(id)) { 20 | throw createError({ 21 | statusCode: 400, 22 | statusMessage: 'Invalid server ID' 23 | }) 24 | } 25 | 26 | const result = await mcpService.toggleServer(id) 27 | 28 | if (result.success) { 29 | return { 30 | success: true, 31 | data: result.server, 32 | message: `Server ${result.server?.enabled ? 
'enabled' : 'disabled'} successfully` 33 | } 34 | } else { 35 | throw createError({ 36 | statusCode: 400, 37 | statusMessage: result.errors?.join(', ') || 'Failed to toggle server' 38 | }) 39 | } 40 | } catch (error: any) { 41 | console.error('Failed to toggle MCP server:', error) 42 | 43 | if (error.statusCode) { 44 | throw error 45 | } 46 | 47 | throw createError({ 48 | statusCode: 500, 49 | statusMessage: 'Failed to toggle MCP server' 50 | }) 51 | } finally { 52 | await mcpService.close() 53 | } 54 | }) 55 | -------------------------------------------------------------------------------- /server/coref/index.ts: -------------------------------------------------------------------------------- 1 | import { ChatPromptTemplate, MessagesPlaceholder } from "@langchain/core/prompts" 2 | import { BaseMessage } from '@langchain/core/messages' 3 | import { ChatOpenAI } from '~/server/models/openai' 4 | import { JsonOutputParser } from "@langchain/core/output_parsers" 5 | import { BaseChatModel } from '@langchain/core/language_models/chat_models' 6 | 7 | const PROMPT = ` 8 | Given a chat history and the latest user question which might reference context in the chat history, formulate a standalone question which can be understood without the chat history. 9 | Do NOT answer the question, just reformulate it if needed and otherwise return it as is. 10 | 11 | Respond with the following JSON format: 12 | 13 | {{ 14 | "input": "What is its capital?", 15 | "output": "What is the capital of France?" 
16 | }} 17 | ` 18 | const CoreferenceResolutionPrompt = ChatPromptTemplate.fromMessages([ 19 | ["system", PROMPT], 20 | new MessagesPlaceholder("chat_history"), 21 | ["human", "{input}"] 22 | ]) 23 | 24 | export type CorefResult = { 25 | input: string 26 | output: string 27 | } 28 | 29 | export const resolveCoreference = async ( 30 | userInput: string, 31 | chatHistory: BaseMessage[], 32 | chatModel: BaseChatModel 33 | ): Promise => { 34 | if (chatModel !== undefined) { 35 | const prompt = await CoreferenceResolutionPrompt.format({ 36 | chat_history: chatHistory, 37 | input: userInput 38 | }) 39 | return await chatModel.pipe(new JsonOutputParser()).invoke(prompt) 40 | } else { 41 | return { 42 | input: userInput, 43 | output: userInput 44 | } 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /components/ColorMode.vue: -------------------------------------------------------------------------------- 1 | 32 | 33 | 51 | -------------------------------------------------------------------------------- /components/settings/SettingsChatSettings.vue: -------------------------------------------------------------------------------- 1 | 8 | 9 | 44 | -------------------------------------------------------------------------------- /assets/svg/openai.svg: -------------------------------------------------------------------------------- 1 | OpenAI -------------------------------------------------------------------------------- /server/middleware/keys.ts: -------------------------------------------------------------------------------- 1 | import { tryParseJson } from '~/composables/utils' 2 | 3 | export interface ContextKeys { 4 | ollama: { 5 | endpoint: string 6 | username: string 7 | password: string 8 | }, 9 | openai: { 10 | key: string 11 | endpoint: string 12 | proxy: boolean 13 | }, 14 | azureOpenai: { 15 | key: string 16 | endpoint: string 17 | deploymentName: string 18 | proxy: boolean 19 | }, 20 | anthropic: { 21 | key: 
string 22 | endpoint: string 23 | proxy: boolean 24 | }, 25 | moonshot: { 26 | key: string 27 | endpoint: string 28 | }, 29 | gemini: { 30 | key: string 31 | endpoint: string 32 | proxy: boolean 33 | }, 34 | groq: { 35 | key: string 36 | endpoint: string 37 | proxy: boolean 38 | }, 39 | mistral: { 40 | key: string 41 | endpoint: string 42 | proxy: boolean 43 | }, 44 | /** custom model base on OpenAI API */ 45 | custom: Array<{ 46 | name: string 47 | aiType: Exclude 48 | key: string 49 | endpoint: string 50 | modelsEndpoint: string | undefined 51 | proxy: boolean 52 | models: string[] 53 | }> 54 | } 55 | 56 | export default defineEventHandler((event) => { 57 | const headers = getRequestHeaders(event) 58 | const value = headers['x-chat-ollama-keys'] 59 | const data = (value ? tryParseJson(decodeURIComponent(value), {}) : {}) as ContextKeys 60 | 61 | event.context.keys = { 62 | ...data, 63 | ollama: { 64 | ...data.ollama, 65 | endpoint: (data.ollama?.endpoint || 'http://127.0.0.1:11434').replace(/\/$/, ''), 66 | } 67 | } 68 | }) 69 | -------------------------------------------------------------------------------- /server/api/mcp-servers/[id].put.ts: -------------------------------------------------------------------------------- 1 | import { McpService } from '~/server/utils/mcp' 2 | import { McpServerUpdateInput } from '~/server/types/mcp' 3 | import { requireAdminIfAclEnabled } from '~/server/utils/auth' 4 | 5 | export default defineEventHandler(async (event) => { 6 | // Check if MCP feature is enabled 7 | if (!isMcpEnabled()) { 8 | setResponseStatus(event, 403, 'MCP feature is disabled') 9 | return { error: 'MCP feature is disabled' } 10 | } 11 | 12 | // Require admin privileges for MCP server management (if ACL is enabled) 13 | requireAdminIfAclEnabled(event) 14 | 15 | const mcpService = new McpService() 16 | 17 | try { 18 | const id = parseInt(getRouterParam(event, 'id') || '0') 19 | 20 | if (!id || isNaN(id)) { 21 | throw createError({ 22 | statusCode: 400, 23 | 
statusMessage: 'Invalid server ID' 24 | }) 25 | } 26 | 27 | const body = await readBody(event) as McpServerUpdateInput 28 | 29 | const result = await mcpService.updateServer(id, body) 30 | 31 | if (result.success) { 32 | return { 33 | success: true, 34 | data: result.server 35 | } 36 | } else { 37 | throw createError({ 38 | statusCode: 400, 39 | statusMessage: result.errors?.join(', ') || 'Failed to update server' 40 | }) 41 | } 42 | } catch (error: any) { 43 | console.error('Failed to update MCP server:', error) 44 | 45 | if (error.statusCode) { 46 | throw error 47 | } 48 | 49 | throw createError({ 50 | statusCode: 500, 51 | statusMessage: 'Failed to update MCP server' 52 | }) 53 | } finally { 54 | await mcpService.close() 55 | } 56 | }) 57 | -------------------------------------------------------------------------------- /server/api/sessions/[id]/title.post.ts: -------------------------------------------------------------------------------- 1 | import { createChatModel } from '@/server/utils/models' 2 | 3 | interface RequestBody { 4 | model: string 5 | family: string 6 | userMessage: string 7 | systemPrompt?: string 8 | maxWords?: number 9 | style?: 'concise' | 'descriptive' | 'technical' | 'casual' 10 | } 11 | 12 | const TITLE_PROMPTS = { 13 | concise: (maxWords: number) => `Generate a ${maxWords}-word title for this chat. Respond with only the title.`, 14 | descriptive: (maxWords: number) => `Generate a descriptive ${maxWords}-word title that captures the main topic of this chat. Respond with only the title.`, 15 | technical: (maxWords: number) => `Generate a technical ${maxWords}-word title focusing on the specific subject matter. Respond with only the title.`, 16 | casual: (maxWords: number) => `Generate a casual, friendly ${maxWords}-word title for this chat. 
/**
 * AudioWorklet processor that buffers incoming float samples and posts
 * fixed-size 16-bit PCM chunks (as transferable ArrayBuffers) to the
 * main thread.
 */
class AudioRecordingProcessor extends AudioWorkletProcessor {
  constructor() {
    super()
    // Accumulates samples across process() calls until a full chunk exists.
    this.buffer = new Float32Array()
    this.bufferSize = 1024
    this.bytesPerSample = 2
    this.maxValue = 32767 // Max value for a signed 16-bit integer
  }

  process(inputs, outputs, parameters) {
    // First channel of the first input; absent input keeps the node alive.
    const input = inputs[0][0]
    if (!input) return true

    // Append new data to the accumulation buffer.
    const newBuffer = new Float32Array(this.buffer.length + input.length)
    newBuffer.set(this.buffer)
    newBuffer.set(input, this.buffer.length)
    this.buffer = newBuffer

    // Emit fixed-size chunks while enough samples are buffered.
    while (this.buffer.length >= this.bufferSize) {
      const chunk = this.buffer.slice(0, this.bufferSize)
      this.buffer = this.buffer.slice(this.bufferSize)

      // Convert clamped float [-1, 1] samples to 16-bit PCM using the
      // standard asymmetric scaling: negatives map onto [-32768, 0),
      // positives onto [0, 32767]. (The previous code scaled positives by
      // maxValue - 1, capping them at 32766, and never reached -32768.)
      const int16Array = new Int16Array(chunk.length)
      for (let i = 0; i < chunk.length; i++) {
        const s = Math.max(-1, Math.min(1, chunk[i]))
        int16Array[i] = s < 0 ? s * 32768 : s * this.maxValue
      }

      // Transfer (not copy) the chunk's buffer to the main thread.
      this.port.postMessage({
        data: {
          int16arrayBuffer: int16Array.buffer
        }
      }, [int16Array.buffer])
    }

    return true
  }
}

registerProcessor('audio-recorder-worklet', AudioRecordingProcessor)
| } 52 | } 53 | } 54 | 55 | registerProcessor('audio-recorder-worklet', AudioProcessingWorklet) 56 | ` 57 | 58 | export default AudioRecordingWorklet 59 | -------------------------------------------------------------------------------- /components/FileSelector.vue: -------------------------------------------------------------------------------- 1 | 21 | 22 | 45 | 46 | 55 | -------------------------------------------------------------------------------- /server/models/openai/utils/headers.ts: -------------------------------------------------------------------------------- 1 | type HeaderValue = string | undefined | null; 2 | export type HeadersLike = 3 | | Headers 4 | | readonly HeaderValue[][] 5 | | Record 6 | | undefined 7 | | null 8 | // NullableHeaders 9 | | { values: Headers; [key: string]: unknown }; 10 | 11 | const iife = (fn: () => T) => fn(); 12 | 13 | export function isHeaders(headers: unknown): headers is Headers { 14 | return ( 15 | typeof Headers !== "undefined" && 16 | headers !== null && 17 | typeof headers === "object" && 18 | Object.prototype.toString.call(headers) === "[object Headers]" 19 | ); 20 | } 21 | 22 | export function normalizeHeaders( 23 | headers: HeadersLike 24 | ): Record { 25 | const output = iife(() => { 26 | // If headers is a Headers instance 27 | if (isHeaders(headers)) { 28 | return headers; 29 | } 30 | // If headers is an array of [key, value] pairs 31 | else if (Array.isArray(headers)) { 32 | return new Headers(headers); 33 | } 34 | // If headers is a NullableHeaders-like object (has 'values' property that is a Headers) 35 | else if ( 36 | typeof headers === "object" && 37 | headers !== null && 38 | "values" in headers && 39 | isHeaders(headers.values) 40 | ) { 41 | return headers.values; 42 | } 43 | // If headers is a plain object 44 | else if (typeof headers === "object" && headers !== null) { 45 | const entries: [string, string][] = Object.entries(headers) 46 | .filter(([, v]) => typeof v === "string") 47 | .map(([k, v]) => 
[k, v as string]); 48 | return new Headers(entries); 49 | } 50 | return new Headers(); 51 | }); 52 | 53 | return Object.fromEntries(output.entries()); 54 | } 55 | -------------------------------------------------------------------------------- /server/utils/instructions.ts: -------------------------------------------------------------------------------- 1 | import type { Instruction } from '@prisma/client' 2 | import prisma from '@/server/utils/prisma' 3 | 4 | // Instructions feature is now always enabled 5 | 6 | /** 7 | * Require an instruction to exist and return it 8 | */ 9 | export async function requireInstruction(id?: string): Promise { 10 | if (!id) { 11 | throw createError({ 12 | statusCode: 400, 13 | statusMessage: 'Instruction ID is required' 14 | }) 15 | } 16 | 17 | const instruction = await prisma.instruction.findUnique({ 18 | where: { id: parseInt(id) } 19 | }) 20 | 21 | if (!instruction) { 22 | throw createError({ 23 | statusCode: 404, 24 | statusMessage: 'Instruction not found' 25 | }) 26 | } 27 | 28 | return instruction 29 | } 30 | 31 | /** 32 | * Require the current user to be the owner of the instruction 33 | */ 34 | export function requireInstructionOwner(event: any, instruction: Instruction): void { 35 | const currentUser = event.context.user 36 | 37 | // If instruction has no owner (user_id is null), it's a legacy instruction - only allow modification if user is authenticated 38 | if (instruction.user_id === null) { 39 | if (!currentUser) { 40 | throw createError({ 41 | statusCode: 401, 42 | statusMessage: 'Authentication required' 43 | }) 44 | } 45 | return 46 | } 47 | 48 | // If user is not authenticated 49 | if (!currentUser) { 50 | throw createError({ 51 | statusCode: 401, 52 | statusMessage: 'Authentication required' 53 | }) 54 | } 55 | 56 | // If user is not the owner 57 | if (instruction.user_id !== currentUser.id) { 58 | throw createError({ 59 | statusCode: 403, 60 | statusMessage: 'Access denied: You are not the owner of this 
instruction' 61 | }) 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /assets/index.scss: -------------------------------------------------------------------------------- 1 | @import 'highlight.js/scss/github.scss'; 2 | @import './markdown.scss'; 3 | @import './katex.scss'; 4 | 5 | .dark { 6 | @import 'highlight.js/scss/github-dark.scss'; 7 | } 8 | 9 | html, 10 | body, 11 | #__nuxt { 12 | height: 100%; 13 | font-family: "Inter", SF Pro SC, SF Pro Text, SF Pro Icons, PingFang SC, Helvetica Neue, Helvetica, Arial, sans-serif; 14 | } 15 | 16 | body { 17 | 18 | code, 19 | pre, 20 | kbd, 21 | samp { 22 | font-family: inherit; 23 | } 24 | } 25 | 26 | .md-body { 27 | --color-neutral-muted: rgb(var(--color-primary-400) / 0.1); 28 | --color-canvas-subtle: rgb(var(--color-primary-50)); 29 | 30 | background-color: transparent; 31 | font-size: 0.875rem; 32 | line-height: 1.5rem; 33 | 34 | code { 35 | white-space: pre-wrap; 36 | margin: 0 0.4em; 37 | } 38 | 39 | .hljs { 40 | background-color: transparent; 41 | } 42 | 43 | ol, 44 | ul { 45 | list-style: initial; 46 | } 47 | 48 | li>p { 49 | display: inline; 50 | } 51 | } 52 | 53 | .dark .md-body { 54 | --color-neutral-muted: rgb(var(--color-primary-200) / 0.2); 55 | --color-canvas-subtle: rgb(var(--color-gray-900)); 56 | } 57 | 58 | .table-list { 59 | .action-btn { 60 | visibility: hidden; 61 | display: inline-flex; 62 | transition: all 0.3s ease-in-out; 63 | opacity: 0; 64 | transform-origin: center; 65 | transform: scale(0); 66 | } 67 | 68 | tr:hover { 69 | .action-btn { 70 | visibility: visible; 71 | opacity: 1; 72 | transform: scale(1); 73 | } 74 | } 75 | } 76 | 77 | @supports (-webkit-touch-callout: none) and (overflow: -webkit-paged-x) { 78 | 79 | textarea:focus, 80 | input[type="input"]:focus, 81 | input[type="password"]:focus, 82 | input[type="number"]:focus { 83 | font-size: 16px !important; 84 | } 85 | } 86 | 
-------------------------------------------------------------------------------- /server/utils/knowledgeBase.ts: -------------------------------------------------------------------------------- 1 | import type { KnowledgeBase } from '@prisma/client' 2 | import prisma from '@/server/utils/prisma' 3 | 4 | /** 5 | * Server-side utility to check if knowledge base feature is enabled 6 | */ 7 | export function isKnowledgeBaseEnabled(): boolean { 8 | const config = useRuntimeConfig() 9 | return config.knowledgeBaseEnabled 10 | } 11 | 12 | /** 13 | * Require a knowledge base to exist and return it 14 | */ 15 | export async function requireKnowledgeBase(id?: string): Promise { 16 | if (!id) { 17 | throw createError({ 18 | statusCode: 400, 19 | statusMessage: 'Knowledge base ID is required' 20 | }) 21 | } 22 | 23 | const knowledgeBase = await prisma.knowledgeBase.findUnique({ 24 | where: { id: parseInt(id) } 25 | }) 26 | 27 | if (!knowledgeBase) { 28 | throw createError({ 29 | statusCode: 404, 30 | statusMessage: 'Knowledge base not found' 31 | }) 32 | } 33 | 34 | return knowledgeBase 35 | } 36 | 37 | /** 38 | * Require the current user to be the owner of the knowledge base 39 | */ 40 | export function requireKnowledgeBaseOwner(event: any, knowledgeBase: KnowledgeBase): void { 41 | const currentUser = event.context.user 42 | 43 | // If knowledge base has no owner (user_id is null), it's accessible to all 44 | if (knowledgeBase.user_id === null) { 45 | return 46 | } 47 | 48 | // If user is not authenticated 49 | if (!currentUser) { 50 | throw createError({ 51 | statusCode: 401, 52 | statusMessage: 'Authentication required' 53 | }) 54 | } 55 | 56 | // If user is not the owner 57 | if (knowledgeBase.user_id !== currentUser.id) { 58 | throw createError({ 59 | statusCode: 403, 60 | statusMessage: 'Access denied: You are not the owner of this knowledge base' 61 | }) 62 | } 63 | } 64 | -------------------------------------------------------------------------------- 
/server/utils/ollama.ts: -------------------------------------------------------------------------------- 1 | import { Ollama } from 'ollama' 2 | import type { H3Event, EventHandlerRequest } from 'h3' 3 | import { FetchWithAuth } from '@/server/utils' 4 | 5 | let ollamaConfig: Array = [] 6 | let ollama: Ollama | null = null 7 | 8 | function createOllama(endpoint: string, username: string | null, password: string | null) { 9 | const isConfigChanged = ollamaConfig[0] !== endpoint || ollamaConfig[1] !== username || ollamaConfig[2] !== password 10 | if (ollama && !isConfigChanged) { 11 | return ollama 12 | } 13 | 14 | if (isConfigChanged) { 15 | ollamaConfig = [endpoint, username, password] 16 | } 17 | 18 | console.log("Ollama: ", { host: endpoint, username, password }) 19 | return new Ollama({ host: endpoint, fetch: FetchWithAuth.bind({ username, password }) }) 20 | } 21 | 22 | async function pingOllama(endpoint: string) { 23 | const res = await $fetch.raw(endpoint, { ignoreResponseError: true }).catch(() => null) 24 | if (res?.status !== 200) { 25 | const errMsg = [ 26 | `ChatOllama is unable to establish a connection with ${endpoint}, please check:`, 27 | ' 1. Is Ollama server running ? (run `ollama serve` in terminal to start the server)', 28 | ` 2. 
Can the server where ChatOllama is located connect to \`${endpoint}\` ?` 29 | ].join('\n') 30 | 31 | console.error(errMsg) 32 | return errMsg 33 | } 34 | return null 35 | } 36 | 37 | export async function getOllama(event: H3Event, interceptResponse = false) { 38 | const { endpoint, username, password } = event.context.keys.ollama 39 | const result = await pingOllama(endpoint) 40 | if (result !== null) { 41 | if (interceptResponse) 42 | event.respondWith(new Response(result, { status: 500 })) 43 | return null 44 | } 45 | 46 | return createOllama(endpoint, username, password) 47 | } 48 | -------------------------------------------------------------------------------- /docs/guide/README.md: -------------------------------------------------------------------------------- 1 | # Developer Guides 2 | 3 | Welcome to the ChatOllama developer documentation. This section contains comprehensive guides for developers working with the codebase. 4 | 5 | ## 📚 Available Guides 6 | 7 | ### [Session Title Generation](./session-title-generation.md) 8 | Complete developer guide for the automatic session title generation system. 9 | 10 | **Topics covered:** 11 | - Architecture overview 12 | - Integration patterns 13 | - API reference 14 | - Advanced usage 15 | - Testing strategies 16 | - Troubleshooting 17 | 18 | ### [Session Title Generation - Quick Reference](./session-title-quick-reference.md) 19 | Quick reference card for common title generation patterns and configurations. 20 | 21 | **What you'll find:** 22 | - Copy-paste code examples 23 | - Common use cases 24 | - Configuration options 25 | - Debug checklist 26 | - API endpoints 27 | 28 | ## 🚀 Getting Started 29 | 30 | If you're new to the codebase: 31 | 32 | 1. **Read the main guide** for comprehensive understanding 33 | 2. **Use the quick reference** for daily development 34 | 3. **Check existing components** for implementation examples 35 | 4. 
**Run tests** to ensure your changes work correctly 36 | 37 | ## 🤝 Contributing 38 | 39 | When adding new features or guides: 40 | 41 | 1. **Follow existing patterns** - Check how similar features are implemented 42 | 2. **Update documentation** - Keep guides current with code changes 43 | 3. **Add tests** - Ensure reliability and prevent regressions 44 | 4. **Consider modularity** - Design for reuse and maintainability 45 | 46 | ## 💡 Need Help? 47 | 48 | - Check the **troubleshooting sections** in individual guides 49 | - Review **existing component implementations** 50 | - Look at **test files** for usage examples 51 | - Check the **browser console** for runtime errors 52 | 53 | --- 54 | 55 | *Last updated: January 2025* -------------------------------------------------------------------------------- /components/ChatMessageActionMore.vue: -------------------------------------------------------------------------------- 1 | 61 | 62 | 71 | -------------------------------------------------------------------------------- /composables/useMenus.ts: -------------------------------------------------------------------------------- 1 | 2 | export const DEFAULT_PAGE_LINK = '/welcome' 3 | 4 | export function useMenus() { 5 | const { t } = useI18n() 6 | const features = useFeatures() 7 | 8 | const isKnowledgeBaseEnabled = computed(() => features.knowledgeBaseEnabled) 9 | const isRealtimeChatEnabled = computed(() => features.realtimeChatEnabled) 10 | const isModelsManagementEnabled = computed(() => features.modelsManagementEnabled) 11 | 12 | return computed(() => { 13 | const menus = [ 14 | { label: t('menu.home'), icon: 'i-heroicons-home', to: '/welcome' }, 15 | ] 16 | 17 | // Only add models menu if feature is enabled 18 | if (isModelsManagementEnabled.value) { 19 | menus.push({ label: t('menu.models'), icon: 'i-heroicons-rectangle-stack', to: '/models' }) 20 | } 21 | 22 | // Instructions menu is always enabled 23 | menus.push({ label: t('menu.instructions'), icon: 
'i-iconoir-terminal', to: '/instructions' }) 24 | 25 | // Only add knowledge base menu if feature is enabled 26 | if (isKnowledgeBaseEnabled.value) { 27 | menus.push({ label: t('menu.knowledgeBases'), icon: 'i-heroicons-book-open', to: '/knowledgebases' }) 28 | } 29 | 30 | menus.push({ label: t('menu.chat'), icon: 'i-iconoir-chat-lines', to: '/chat' }) 31 | 32 | // Only add realtime menu if feature is enabled 33 | if (isRealtimeChatEnabled.value) { 34 | menus.push({ label: t('menu.realtime'), icon: 'i-iconoir-microphone', to: '/realtime' }) 35 | } 36 | 37 | menus.push({ label: t('menu.settings'), icon: 'i-heroicons-cog-6-tooth', to: '/settings' }) 38 | menus.push({ label: t('menu.blog'), icon: 'i-heroicons-document-text', to: 'https://blog.chatollama.cloud', external: true }) 39 | menus.push({ label: 'GitHub', icon: 'i-mdi-github', to: 'https://github.com/sugarforever/chat-ollama', external: true }) 40 | 41 | return menus 42 | }) 43 | } 44 | -------------------------------------------------------------------------------- /composables/useOpenAIModels.ts: -------------------------------------------------------------------------------- 1 | import { ref } from 'vue' 2 | import { updateOpenAIModels } from '~/config/models' 3 | 4 | const isLoadingModels = ref(false) 5 | const lastLoadTime = ref(0) 6 | const CACHE_DURATION = 1000 * 60 * 60 // 1 hour cache 7 | 8 | export function useOpenAIModels() { 9 | const { t } = useI18n() 10 | const toast = useToast() 11 | 12 | async function loadOpenAIModels(apiKey: string, forceRefresh = false, silent = true) { 13 | // Return early if we're already loading 14 | if (isLoadingModels.value) return 15 | 16 | // Check cache unless force refresh is requested 17 | const now = Date.now() 18 | if (!forceRefresh && (now - lastLoadTime.value) < CACHE_DURATION) { 19 | return 20 | } 21 | 22 | try { 23 | isLoadingModels.value = true 24 | const response = await fetch('https://api.openai.com/v1/models', { 25 | headers: { 26 | 'Authorization': 
`Bearer ${apiKey}`, 27 | } 28 | }) 29 | 30 | if (!response.ok) { 31 | throw new Error(`Failed to load OpenAI models: ${response.statusText}`) 32 | } 33 | 34 | const data = await response.json() 35 | const models = data.data 36 | .sort((a: any, b: any) => a.id.localeCompare(b.id)) 37 | .map((model: any) => model.id) 38 | 39 | updateOpenAIModels(models) 40 | lastLoadTime.value = now 41 | if (!silent) { 42 | toast.add({ title: t('settings.modelsLoaded'), type: 'success' }) 43 | } 44 | } catch (error: any) { 45 | console.error('Error loading OpenAI models:', error) 46 | if (!silent) { 47 | toast.add({ 48 | title: t('settings.failedToLoadModels'), 49 | description: error.message, 50 | type: 'error' 51 | }) 52 | } 53 | throw error 54 | } finally { 55 | isLoadingModels.value = false 56 | } 57 | } 58 | 59 | return { 60 | isLoadingModels, 61 | loadOpenAIModels 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /server/models/openai/tests/chat_models-vision.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test } from "@jest/globals"; 2 | import { HumanMessage } from "@langchain/core/messages"; 3 | import * as fs from "node:fs/promises"; 4 | import { fileURLToPath } from "node:url"; 5 | import * as path from "node:path"; 6 | import { ChatOpenAI } from "../chat_models.js"; 7 | 8 | test("Test ChatOpenAI with a file", async () => { 9 | const __filename = fileURLToPath(import.meta.url); 10 | const __dirname = path.dirname(__filename); 11 | const imageData = await fs.readFile(path.join(__dirname, "/data/hotdog.jpg")); 12 | const chat = new ChatOpenAI({ 13 | model: "gpt-4o-mini", 14 | maxTokens: 1024, 15 | }); 16 | const message = new HumanMessage({ 17 | content: [ 18 | { 19 | type: "text", 20 | text: "What's in this image?", 21 | }, 22 | { 23 | type: "image_url", 24 | image_url: { 25 | url: `data:image/jpeg;base64,${imageData.toString("base64")}`, 26 | }, 27 | }, 28 | ], 29 | 
}); 30 | // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment 31 | // @ts-expect-error unused var 32 | const res = await chat.invoke([message]); 33 | // console.log({ res }); 34 | }); 35 | 36 | test("Test ChatOpenAI with a URL", async () => { 37 | const chat = new ChatOpenAI({ 38 | model: "gpt-4o-mini", 39 | maxTokens: 1024, 40 | }); 41 | const message = new HumanMessage({ 42 | content: [ 43 | { 44 | type: "text", 45 | text: "What's in this image?", 46 | }, 47 | { 48 | type: "image_url", 49 | image_url: { 50 | url: "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", 51 | }, 52 | }, 53 | ], 54 | }); 55 | // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment 56 | // @ts-expect-error unused var 57 | const res = await chat.invoke([message]); 58 | // console.log({ res }); 59 | }); 60 | -------------------------------------------------------------------------------- /composables/useInstructionsCache.ts: -------------------------------------------------------------------------------- 1 | import { loadOllamaInstructions } from '~/utils/settings' 2 | import type { Instruction } from '@prisma/client' 3 | 4 | interface InstructionsCache { 5 | instructions: Instruction[] 6 | lastFetched: number 7 | loading: boolean 8 | } 9 | 10 | const CACHE_TTL = 5 * 60 * 1000 // 5 minutes 11 | const cache = ref({ 12 | instructions: [], 13 | lastFetched: 0, 14 | loading: false 15 | }) 16 | 17 | export function useInstructionsCache() { 18 | const isCacheValid = computed(() => { 19 | return cache.value.lastFetched > 0 && 20 | (Date.now() - cache.value.lastFetched) < CACHE_TTL 21 | }) 22 | 23 | const instructions = computed(() => cache.value.instructions) 24 | const isLoading = computed(() => cache.value.loading) 25 | 26 | async function preloadInstructions(forceRefresh: boolean = false) { 27 | if (cache.value.loading) return cache.value.instructions 28 | if 
(!forceRefresh && isCacheValid.value) return cache.value.instructions 29 | 30 | cache.value.loading = true 31 | try { 32 | const fetchedInstructions = await loadOllamaInstructions() 33 | cache.value.instructions = fetchedInstructions 34 | cache.value.lastFetched = Date.now() 35 | return fetchedInstructions 36 | } catch (error) { 37 | console.error('Failed to preload instructions:', error) 38 | return [] 39 | } finally { 40 | cache.value.loading = false 41 | } 42 | } 43 | 44 | async function getInstructions(forceRefresh: boolean = false) { 45 | if (!forceRefresh && isCacheValid.value) { 46 | return cache.value.instructions 47 | } 48 | return await preloadInstructions(forceRefresh) 49 | } 50 | 51 | function clearCache() { 52 | cache.value = { 53 | instructions: [], 54 | lastFetched: 0, 55 | loading: false 56 | } 57 | } 58 | 59 | return { 60 | instructions, 61 | isLoading, 62 | preloadInstructions, 63 | getInstructions, 64 | clearCache, 65 | isCacheValid 66 | } 67 | } -------------------------------------------------------------------------------- /components/FileButton.vue: -------------------------------------------------------------------------------- 1 | 48 | 49 | 65 | -------------------------------------------------------------------------------- /composables/useCreateChatSession.ts: -------------------------------------------------------------------------------- 1 | interface ChatSessionBaseData extends Omit { } 2 | 3 | export function useCreateChatSession() { 4 | const { chatModels, loadModels } = useModels({ immediate: false }) 5 | const { t } = useI18n() 6 | const toast = useToast() 7 | const { preloadInstructions } = useInstructionsCache() 8 | 9 | return async function createChatSession(params?: Partial>) { 10 | const baseData: ChatSessionBaseData = { 11 | createTime: Date.now(), 12 | updateTime: Date.now(), 13 | title: params?.title || '', 14 | models: params?.models || chatDefaultSettings.value.models, 15 | instructionId: params?.instructionId || 0, 16 | 
knowledgeBaseId: params?.knowledgeBaseId || 0, 17 | attachedMessagesCount: chatDefaultSettings.value.attachedMessagesCount, 18 | enableToolUsage: params?.enableToolUsage ?? chatDefaultSettings.value.enableToolUsage, 19 | isTop: 0, 20 | } 21 | 22 | // set default model 23 | try { 24 | await loadModels() 25 | if (chatModels.value.length === 0) { 26 | toast.add({ title: t('chat.noModelFound'), description: t('chat.noModelFoundDesc'), color: 'red' }) 27 | baseData.models = undefined 28 | } else { 29 | const availableModels = baseData.models?.filter(m => chatModels.value.some(cm => cm.value === m)) 30 | baseData.models = availableModels 31 | } 32 | } catch (error) { 33 | console.warn('Failed to load models during session creation:', error) 34 | // Continue with session creation even if models fail to load 35 | baseData.models = baseData.models || [] 36 | } 37 | 38 | // Preload instructions for faster settings panel opening 39 | preloadInstructions().catch(error => { 40 | console.warn('Failed to preload instructions during chat creation:', error) 41 | }) 42 | 43 | const id = await clientDB.chatSessions.add(baseData) 44 | return { ...baseData, id, count: 0 } 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /config/models.ts: -------------------------------------------------------------------------------- 1 | export const MODEL_FAMILIES = { 2 | openai: 'OpenAI', 3 | azureOpenai: 'Azure OpenAI', 4 | anthropic: 'Anthropic', 5 | moonshot: 'Moonshot', 6 | gemini: 'Gemini', 7 | groq: 'Groq', 8 | mistral: 'Mistral' 9 | } 10 | 11 | // OpenAI models will be loaded dynamically 12 | export let OPENAI_GPT_MODELS: string[] = [] 13 | 14 | // Function to update OpenAI models 15 | export function updateOpenAIModels(models: string[]) { 16 | OPENAI_GPT_MODELS = models 17 | } 18 | 19 | export const AZURE_OPENAI_GPT_MODELS = [ 20 | "gpt-3.5-turbo", 21 | "gpt-35-turbo-16k", 22 | "gpt-35-turbo-instruct", 23 | "gpt-4", 24 | "gpt-4-32k" 25 | ] 
26 | 27 | export const OPENAI_EMBEDDING_MODELS = [ 28 | "text-embedding-3-large", 29 | "text-embedding-3-small", 30 | "text-embedding-ada-002" 31 | ] 32 | 33 | export const GEMINI_EMBEDDING_MODELS = [ 34 | "text-embedding-004" 35 | ] 36 | 37 | export const MISTRAL_EMBEDDING_MODELS = [ 38 | "mistral-embed" 39 | ] 40 | 41 | export const ANTHROPIC_MODELS = [ 42 | "claude-3-5-sonnet-latest", 43 | "claude-3-5-haiku-latest", 44 | "claude-3-5-sonnet-20241022", 45 | "claude-3-5-haiku-20241022", 46 | "claude-3-haiku-20240307", 47 | "claude-3-opus-20240229", 48 | "claude-3-sonnet-20240229", 49 | "claude-2.1", 50 | "claude-2.0", 51 | "claude-instant-1.2" 52 | ] 53 | 54 | export const MOONSHOT_MODELS = [ 55 | "moonshot-v1-8k", 56 | "moonshot-v1-32k", 57 | "moonshot-v1-128k" 58 | ] 59 | 60 | export const GEMINI_MODELS = [ 61 | "gemini-2.0-flash", 62 | "gemini-2.0-flash-lite-preview-02-05", 63 | "gemini-2.0-pro-exp", 64 | "gemini-2.0-flash-thinking-exp-01-21", 65 | "gemini-2.0-flash-thinking-exp-1219", 66 | "gemini-2.0-flash-exp", 67 | "gemini-1.5-flash", 68 | "gemini-1.5-flash-8b", 69 | "gemini-1.5-pro", 70 | "gemini-1.0-pro", 71 | ] 72 | 73 | export const GROQ_MODELS = [ 74 | "llama-3.1-405b-reasoning", 75 | "llama-3.1-70b-versatile", 76 | "llama-3.1-8b-instant", 77 | "llama3-8b-8192", 78 | "llama3-70b-8192", 79 | "llama2-70b-4096", 80 | "mixtral-8x7b-32768", 81 | "gemma-7b-it", 82 | "gemma2-9b-it", 83 | ] -------------------------------------------------------------------------------- /assets/svg/deepseek.svg: -------------------------------------------------------------------------------- 1 | DeepSeek -------------------------------------------------------------------------------- /server/utils/http.ts: -------------------------------------------------------------------------------- 1 | import { MultiPartData, type H3Event } from 'h3' 2 | import type { KnowledgeBaseFormData, PageParser } from '@/server/types' 3 | 4 | export const parseKnowledgeBaseFormRequest = async (event: 
H3Event): Promise => { 5 | const items = await readMultipartFormData(event) 6 | 7 | const decoder = new TextDecoder("utf-8") 8 | const uploadedFiles: MultiPartData[] = [] 9 | const _knowledgeBaseId = event?.context?.params?.id 10 | 11 | const formData: KnowledgeBaseFormData = { 12 | name: '', 13 | description: '', 14 | embedding: '', 15 | isPublic: true, 16 | knowledgeBaseId: _knowledgeBaseId ? parseInt(_knowledgeBaseId) : null, 17 | uploadedFiles, 18 | urls: [], 19 | pageParser: 'default', 20 | maxDepth: 0, 21 | excludeGlobs: [], 22 | } 23 | 24 | items?.forEach((item) => { 25 | const key = (item.name || '') as keyof KnowledgeBaseFormData 26 | const decodedData = decoder.decode(item.data) 27 | 28 | if (key.startsWith("file_")) { 29 | formData.uploadedFiles.push(item) 30 | } 31 | 32 | switch (key) { 33 | case 'isPublic': 34 | formData.isPublic = decodedData === 'true' 35 | break 36 | 37 | case 'urls': 38 | formData.urls.push(decodedData) 39 | break 40 | 41 | case 'name': 42 | formData.name = decodedData 43 | break 44 | 45 | case 'description': 46 | formData.description = decodedData 47 | break 48 | 49 | case 'embedding': 50 | formData.embedding = decodedData 51 | break 52 | 53 | case 'pageParser': 54 | formData.pageParser = decodedData as PageParser 55 | break 56 | 57 | case 'maxDepth': 58 | formData.maxDepth = parseInt(decodedData) 59 | break 60 | 61 | case 'excludeGlobs': 62 | formData.excludeGlobs = decodedData.split(/[\n]+/g).filter(Boolean).map((glob) => glob.trim()) 63 | break 64 | 65 | case 'chunking': 66 | formData.chunking = JSON.parse(decodedData) 67 | break 68 | } 69 | }) 70 | 71 | return formData 72 | } 73 | -------------------------------------------------------------------------------- /components/MessageActionBar.vue: -------------------------------------------------------------------------------- 1 | 19 | 20 | -------------------------------------------------------------------------------- /server/models/openai/tools/tests/custom.int.test.ts: 
-------------------------------------------------------------------------------- 1 | import { describe, test, expect, jest } from "@jest/globals"; 2 | import { 3 | AIMessage, 4 | BaseMessage, 5 | HumanMessage, 6 | ToolMessage, 7 | } from "@langchain/core/messages"; 8 | import { ToolCall } from "@langchain/core/messages/tool"; 9 | import { customTool } from "../custom.js"; 10 | import { ChatOpenAI } from "../../chat_models.js"; 11 | 12 | describe("customTool", () => { 13 | test("invoking a custom tool will keep tool metadata", async () => { 14 | const tool = customTool(async (input) => input, { 15 | name: "text_tool", 16 | description: "A tool that returns the input", 17 | }); 18 | const toolCall: ToolCall = { 19 | id: "123", 20 | type: "tool_call", 21 | name: "text_tool", 22 | args: { input: "Hello" }, 23 | }; 24 | const result = await tool.invoke(toolCall); 25 | expect(result).toBeInstanceOf(ToolMessage); 26 | expect(result.metadata).toEqual({ 27 | customTool: { 28 | name: "text_tool", 29 | description: "A tool that returns the input", 30 | }, 31 | }); 32 | }); 33 | 34 | test("responding with a tool message from a custom tool will be used correctly", async () => { 35 | const fn = jest.fn(async (input: string) => input); 36 | const tool = customTool(fn, { 37 | name: "text_tool", 38 | description: "A tool that returns the input", 39 | }); 40 | const model = new ChatOpenAI({ 41 | model: "gpt-5", 42 | reasoning: { effort: "minimal" }, 43 | }); 44 | const modelWithTools = model.bindTools([tool]); 45 | 46 | const history: BaseMessage[] = [ 47 | new HumanMessage("Invoke the tool with 'Hello'"), 48 | ]; 49 | 50 | const result = await modelWithTools.invoke(history); 51 | expect(result).toBeInstanceOf(AIMessage); 52 | history.push(result); 53 | 54 | const toolOutput = await tool.invoke(result.tool_calls![0]); 55 | expect(toolOutput).toBeInstanceOf(ToolMessage); 56 | history.push(toolOutput); 57 | 58 | const result2 = await modelWithTools.invoke(history); 59 | 
import { ChromaClient, ChromaClientParams, DeleteCollectionParams } from 'chromadb'
import { type KnowledgeBase } from "@prisma/client"
import prisma from "@/server/utils/prisma"
import { RedisDocstore } from '~/server/docstore/redis'
import { requireKnowledgeBase, requireKnowledgeBaseOwner } from '~/server/utils/knowledgeBase'

/**
 * Delete a knowledge base row plus its derived storage: the Chroma vector
 * collection and, when Redis is configured, the docstore namespace — both
 * keyed by `collection_<id>`.
 *
 * Returns the deleted row, or null when `id` is missing or any step throws.
 *
 * NOTE(review): the database row is deleted first; if the Chroma or Redis
 * cleanup then throws, the catch below still returns null, so the caller
 * reports failure even though the row is already gone. Consider reordering
 * the steps or surfacing partial failures explicitly.
 */
const deleteKnowledgeBase = async (
  id?: string
): Promise<KnowledgeBase | null> => {
  try {
    let deletedKnowledgeBase = null
    if (id) {
      // Delete knowledge base from database
      deletedKnowledgeBase = await prisma.knowledgeBase.delete({
        where: {
          id: parseInt(id),
        },
      })

      // Delete vector storage (one Chroma collection per knowledge base)
      const collectionName = `collection_${id}`

      console.log("Deleting Chroma collection: ", collectionName)
      const dbConfig: ChromaClientParams = {
        path: process.env.CHROMADB_URL
      }
      const chromaClient = new ChromaClient(dbConfig)
      await chromaClient.deleteCollection({ name: collectionName })

      // Delete documents storage — only when a Redis docstore is configured
      if (process.env.REDIS_HOST) {
        console.log("Deleting documents from docstore namespace: ", collectionName)
        await new RedisDocstore(collectionName).deleteAll()
      }
    }

    return deletedKnowledgeBase
  } catch (error) {
    // Best-effort: any failure is logged and reported to the caller as null.
    console.error(`Error deleting knowledge base with id ${id}:`, error)
    return null
  }
}

/**
 * DELETE /api/knowledgebases/:id
 *
 * Guards: feature flag must be on (403 otherwise), the knowledge base must
 * exist (404), and the requester must be its owner (401/403). On success the
 * deleted row is returned; `deletedKnowledgeBase` is null when cleanup failed.
 */
export default defineEventHandler(async (event) => {
  // Check if knowledge base feature is enabled
  if (!isKnowledgeBaseEnabled()) {
    setResponseStatus(event, 403, 'Knowledge base feature is disabled')
    return { error: 'Knowledge base feature is disabled' }
  }

  const id = event?.context?.params?.id

  // 400/404 on missing id or record; 401/403 when the requester is not the owner.
  const knowledgeBase = await requireKnowledgeBase(id)
  requireKnowledgeBaseOwner(event, knowledgeBase)

  const deletedKnowledgeBase = await deleteKnowledgeBase(id)
  return { deletedKnowledgeBase }
})
41 | 44 | 45 | ` 46 | return jsHTML 47 | } 48 | 49 | export function renderCSS(css: string): string { 50 | // Create a demo HTML with the CSS applied 51 | const cssHTML = ` 52 | 53 | 54 | 55 | 56 | CSS Demo 57 | 61 | 62 | 63 |
64 |

CSS Demo

65 |

This is a preview of your CSS styles.

66 |
67 | 68 | 69 |
Card element
70 |
71 |
72 | 73 | ` 74 | return cssHTML 75 | } -------------------------------------------------------------------------------- /components/ModelsSelectMenu.vue: -------------------------------------------------------------------------------- 1 | 53 | 54 | 73 | -------------------------------------------------------------------------------- /docker-compose_arm.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | postgres: 3 | image: postgres:16-alpine 4 | environment: 5 | - POSTGRES_DB=chatollama 6 | - POSTGRES_USER=chatollama 7 | - POSTGRES_PASSWORD=chatollama_password 8 | ports: 9 | - "5432:5432" 10 | restart: always 11 | volumes: 12 | - postgres_data:/var/lib/postgresql/data 13 | healthcheck: 14 | test: ["CMD-SHELL", "pg_isready -U chatollama -d chatollama"] 15 | interval: 10s 16 | timeout: 5s 17 | retries: 5 18 | start_period: 30s 19 | 20 | chromadb: 21 | image: chromadb/chroma 22 | ports: 23 | - "8000:8000" 24 | restart: always 25 | volumes: 26 | - chromadb_data:/chroma/.chroma/index 27 | 28 | chatollama: 29 | environment: 30 | # Access Control Settings 31 | - ACL_ENABLED=false # Set to 'true' to enable admin-only MCP management 32 | # Feature Flags - Use NUXT_ prefix for runtime configuration 33 | - NUXT_KNOWLEDGE_BASE_ENABLED=true 34 | - NUXT_REALTIME_CHAT_ENABLED=false 35 | - NUXT_MODELS_MANAGEMENT_ENABLED=true 36 | - NUXT_MCP_ENABLED=true 37 | # Database and Service URLs 38 | - CHROMADB_URL=http://chromadb:8000 39 | - DATABASE_URL=postgresql://chatollama:chatollama_password@postgres:5432/chatollama 40 | - DIRECT_URL=postgresql://chatollama:chatollama_password@postgres:5432/chatollama 41 | - REDIS_HOST=redis 42 | # Migration settings 43 | - SKIP_MIGRATION=false # Set to 'true' to skip automatic migration 44 | - MIGRATION_TIMEOUT=300 # Migration timeout in seconds 45 | image: chatollama:latest 46 | pull_policy: always 47 | extra_hosts: 48 | - "host.docker.internal:host-gateway" 49 | ports: 50 | - "3000:3000" 51 | 
restart: always 52 | depends_on: 53 | postgres: 54 | condition: service_healthy 55 | chromadb: 56 | condition: service_started 57 | redis: 58 | condition: service_started 59 | volumes: 60 | - ~/.chatollama:/app/data 61 | 62 | redis: 63 | image: redis:latest 64 | restart: always 65 | volumes: 66 | - redis_data:/data 67 | 68 | volumes: 69 | postgres_data: 70 | chromadb_data: 71 | redis_data: 72 | 73 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | # System 2 | # Access Control Lists (ACL) - Controls whether admin privileges are required for MCP server management 3 | # When ACL_ENABLED=false: All users can manage MCP servers (backward compatible) 4 | # When ACL_ENABLED=true: Only admin/superadmin users can create, modify, or delete MCP servers 5 | # For production deployments, set this to 'true' for security 6 | # For development and personal Docker deployments, 'false' provides better UX 7 | ACL_ENABLED=false 8 | 9 | DISABLE_VERCEL_ANALYTICS=true 10 | DATABASE_URL= 11 | DIRECT_URL= 12 | 13 | # Authentication 14 | SECRET=changeit 15 | AUTH_SECRET=your-long-random-secret-key-here-change-this-in-production 16 | 17 | # Google OAuth Configuration 18 | GOOGLE_CLIENT_ID=your-google-client-id.apps.googleusercontent.com 19 | GOOGLE_CLIENT_SECRET=your-google-client-secret 20 | 21 | # Optional: Custom base URL (defaults to http://localhost:3000) 22 | APP_BASE_URL=http://localhost:3000 23 | 24 | # Chat-ollama server's port 25 | PORT=3000 26 | HOST= 27 | 28 | # ChatOllama Server Side Settings 29 | REDIS_HOST= 30 | MOONSHOT_API_KEY= 31 | ANTHROPIC_API_KEY= 32 | TAVILY_API_KEY= 33 | 34 | # Supported values: chroma, milvus 35 | VECTOR_STORE=chroma 36 | CHROMADB_URL=http://localhost:8000 37 | MILVUS_URL=http://localhost:19530 38 | 39 | # Cohere API Key - Reranking 40 | COHERE_API_KEY= 41 | COHERE_MODEL= 42 | COHERE_BASE_URL= 43 | 44 | # Super admin 
name 45 | SUPER_ADMIN_NAME= 46 | 47 | # API model proxy 48 | NUXT_PUBLIC_MODEL_PROXY_ENABLED=false 49 | # HTTP / HTTPS / SOCKS4 / SOCKS5 protocols are supported, e.g. 50 | # http://127.0.0.1:1080 51 | # socks5://127.0.0.1:1080 52 | # http://username:password@127.0.0.1:1080 53 | # socks5://username:password@127.0.0.1:1080 54 | NUXT_MODEL_PROXY_URL= 55 | 56 | # Chat 57 | NUXT_PUBLIC_CHAT_MAX_ATTACHED_MESSAGES=50 58 | 59 | # Knowledge Base Feature Flag (server-side only) 60 | KNOWLEDGE_BASE_ENABLED=true 61 | 62 | # Realtime Chat Feature Flag (server-side only) 63 | REALTIME_CHAT_ENABLED=true 64 | 65 | # Models Management Feature Flag (server-side only) 66 | MODELS_MANAGEMENT_ENABLED=true 67 | 68 | # MCP (Model Context Protocol) Feature Flag (server-side only) 69 | MCP_ENABLED=true 70 | 71 | # Chrome Origin Trial Token for Prompt API 72 | CHROME_TRIAL_TOKEN_PROMPT_API= 73 | -------------------------------------------------------------------------------- /server/retriever/index.ts: -------------------------------------------------------------------------------- 1 | import { RecursiveCharacterTextSplitter } from "langchain/text_splitter" 2 | import { Embeddings } from "@langchain/core/embeddings" 3 | import { Document } from "@langchain/core/documents" 4 | import { ParentDocumentRetriever } from "langchain/retrievers/parent_document" 5 | import { RedisDocstore } from '@/server/docstore/redis' 6 | import { createVectorStore } from '@/server/utils/vectorstores' 7 | import { Chroma } from '@langchain/community/vectorstores/chroma' 8 | 9 | export const createRetriever = async ( 10 | embeddings: Embeddings, 11 | collectionName: string, 12 | documents: Document[] | null = null, 13 | parentChunkSize: number = 3000, 14 | parentChunkOverlap: number = 200, 15 | childChunkSize: number = 1000, 16 | childChunkOverlap: number = 50, 17 | parentK: number = 10, 18 | childK: number = 20 19 | ) => { 20 | const vectorStore = createVectorStore(embeddings, collectionName) 21 | if 
(process.env.VECTOR_STORE === 'chroma') { 22 | await (vectorStore as Chroma).ensureCollection() 23 | } 24 | 25 | let retriever = null 26 | 27 | if (process.env.REDIS_HOST) { 28 | console.log("Initializing ParentDocumentRetriever with RedisDocstore") 29 | retriever = new ParentDocumentRetriever({ 30 | vectorstore: vectorStore, 31 | docstore: new RedisDocstore(collectionName), 32 | parentSplitter: new RecursiveCharacterTextSplitter({ 33 | chunkOverlap: parentChunkOverlap, 34 | chunkSize: parentChunkSize, 35 | }), 36 | childSplitter: new RecursiveCharacterTextSplitter({ 37 | chunkOverlap: childChunkOverlap, 38 | chunkSize: childChunkSize, 39 | }), 40 | childK: childK, 41 | parentK: parentK, 42 | }) 43 | 44 | if (documents !== null) { 45 | await retriever.addDocuments(documents) 46 | } 47 | 48 | return retriever 49 | } else { 50 | console.log("Initializing vector store retriever") 51 | 52 | if (documents !== null) { 53 | const splitter = new RecursiveCharacterTextSplitter({ 54 | chunkOverlap: childChunkOverlap, 55 | chunkSize: childChunkSize, 56 | }) 57 | const splits = await splitter.splitDocuments(documents) 58 | await vectorStore.addDocuments(splits) 59 | } 60 | 61 | return vectorStore.asRetriever(4) 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /components/ComponentPreview.vue: -------------------------------------------------------------------------------- 1 | 57 | 58 | 84 | -------------------------------------------------------------------------------- /composables/useFeatures.ts: -------------------------------------------------------------------------------- 1 | interface FeatureFlags { 2 | knowledgeBaseEnabled: boolean 3 | realtimeChatEnabled: boolean 4 | modelsManagementEnabled: boolean 5 | mcpEnabled: boolean 6 | } 7 | 8 | // Global reactive state to ensure consistent values across hydration 9 | const _featuresState = ref({ 10 | knowledgeBaseEnabled: false, 11 | realtimeChatEnabled: false, 12 | 
modelsManagementEnabled: false, 13 | mcpEnabled: false 14 | }) 15 | 16 | // Flag to track if features have been initialized 17 | const _featuresInitialized = ref(false) 18 | 19 | export function useFeatures(): FeatureFlags { 20 | // Initialize features if not already done 21 | if (!_featuresInitialized.value) { 22 | let features: FeatureFlags 23 | 24 | // Server-side: get from runtime config 25 | if (process.server) { 26 | const config = useRuntimeConfig() 27 | features = { 28 | knowledgeBaseEnabled: Boolean(config.knowledgeBaseEnabled), 29 | realtimeChatEnabled: Boolean(config.realtimeChatEnabled), 30 | modelsManagementEnabled: Boolean(config.modelsManagementEnabled), 31 | mcpEnabled: Boolean(config.mcpEnabled) 32 | } 33 | } else { 34 | // Client-side: get from SSR payload with proper fallback during hydration 35 | const nuxtApp = useNuxtApp() 36 | 37 | // Try to get features from payload first (set by server plugin) 38 | let payloadFeatures = nuxtApp.payload.features as FeatureFlags | undefined 39 | 40 | // If not available yet, try from ssrContext 41 | if (!payloadFeatures) { 42 | payloadFeatures = nuxtApp.ssrContext?.features as FeatureFlags | undefined 43 | } 44 | 45 | // Final fallback for hydration safety - ensure we always have valid FeatureFlags 46 | if (!payloadFeatures || 47 | typeof payloadFeatures.knowledgeBaseEnabled === 'undefined') { 48 | features = { 49 | knowledgeBaseEnabled: false, 50 | realtimeChatEnabled: false, 51 | modelsManagementEnabled: false, 52 | mcpEnabled: false 53 | } 54 | } else { 55 | features = payloadFeatures 56 | } 57 | } 58 | 59 | // Update the reactive state 60 | _featuresState.value = features 61 | _featuresInitialized.value = true 62 | } 63 | 64 | return _featuresState.value 65 | } 66 | -------------------------------------------------------------------------------- /composables/useKatexClient.ts: -------------------------------------------------------------------------------- 1 | import { onMounted } from 'vue' 2 | 3 | 
export function useKatexClient() { 4 | onMounted(() => { 5 | // Only run in client-side 6 | if (typeof window === 'undefined') return 7 | 8 | // Find all server-side rendered LaTeX blocks 9 | const ssrBlocks = document.querySelectorAll('.katex-block-ssr') 10 | 11 | if (ssrBlocks.length === 0) return 12 | 13 | // Function to render blocks once KaTeX is loaded 14 | const renderBlocks = (katex: any) => { 15 | ssrBlocks.forEach((block) => { 16 | try { 17 | const latex = decodeURIComponent(block.getAttribute('data-latex') || '') 18 | if (!latex) return 19 | 20 | // Create a new div to replace the SSR block 21 | const newBlock = document.createElement('div') 22 | newBlock.className = 'katex-block' 23 | 24 | // Render the LaTeX 25 | newBlock.innerHTML = katex.renderToString(latex, { 26 | throwOnError: false, 27 | errorColor: '#cc0000', 28 | displayMode: true 29 | }) 30 | 31 | // Replace the SSR block with the rendered block 32 | block.parentNode?.replaceChild(newBlock, block) 33 | } catch (error: any) { 34 | console.error('Error rendering LaTeX client-side:', error) 35 | // Show error message 36 | block.innerHTML = `
Error rendering LaTeX: ${error.message}
` 37 | block.className = 'katex-error-block' 38 | } 39 | }) 40 | } 41 | 42 | // Dynamically import KaTeX 43 | import('katex').then((katex) => { 44 | renderBlocks(katex.default || katex) 45 | }).catch((error: any) => { 46 | console.error('Failed to load KaTeX:', error) 47 | // Show error message on all blocks 48 | ssrBlocks.forEach((block) => { 49 | block.innerHTML = `
Failed to load KaTeX: ${error.message}
` 50 | block.className = 'katex-error-block' 51 | }) 52 | }) 53 | }) 54 | } 55 | -------------------------------------------------------------------------------- /docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | postgres: 3 | image: postgres:16-alpine 4 | environment: 5 | - POSTGRES_DB=chatollama 6 | - POSTGRES_USER=chatollama 7 | - POSTGRES_PASSWORD=chatollama_password 8 | ports: 9 | - "5432:5432" 10 | restart: always 11 | volumes: 12 | - postgres_data:/var/lib/postgresql/data 13 | healthcheck: 14 | test: ["CMD-SHELL", "pg_isready -U chatollama -d chatollama"] 15 | interval: 10s 16 | timeout: 5s 17 | retries: 5 18 | start_period: 30s 19 | 20 | chromadb: 21 | image: chromadb/chroma 22 | ports: 23 | - "8000:8000" 24 | restart: always 25 | volumes: 26 | - chromadb_volume:/chroma/chroma 27 | 28 | chatollama: 29 | environment: 30 | - ACL_ENABLED=false # Set to 'true' to enable admin-only MCP management 31 | - NUXT_KNOWLEDGE_BASE_ENABLED=true 32 | - NUXT_REALTIME_CHAT_ENABLED=false 33 | - NUXT_MODELS_MANAGEMENT_ENABLED=true 34 | - NUXT_MCP_ENABLED=true 35 | - CHROMADB_URL=http://chromadb:8000 36 | - DATABASE_URL=postgresql://chatollama:chatollama_password@postgres:5432/chatollama 37 | - DIRECT_URL=postgresql://chatollama:chatollama_password@postgres:5432/chatollama 38 | - REDIS_HOST=redis 39 | # Migration settings 40 | - SKIP_MIGRATION=false # Set to 'true' to skip automatic migration 41 | - MIGRATION_TIMEOUT=300 # Migration timeout in seconds 42 | - COHERE_API_KEY=xxxxx 43 | - COHERE_MODEL=ms-marco-MiniLM-L-6-v2 44 | - COHERE_BASE_URL=http://peanutshell:8000/v1 45 | image: 0001coder/chatollama:latest 46 | pull_policy: always 47 | #extra_hosts: 48 | # - "host.docker.internal:host-gateway" 49 | ports: 50 | - "3000:3000" 51 | restart: always 52 | depends_on: 53 | postgres: 54 | condition: service_healthy 55 | chromadb: 56 | condition: service_started 57 | redis: 58 | condition: service_started 59 | 
volumes: 60 | - ~/.chatollama:/app/data 61 | 62 | redis: 63 | image: redis:latest 64 | restart: always 65 | volumes: 66 | - redis_data:/data 67 | 68 | peanutshell: 69 | image: ghcr.io/sugarforever/peanut-shell:latest 70 | volumes: 71 | - hf_data:/root/.cache 72 | 73 | volumes: 74 | postgres_data: 75 | chromadb_volume: 76 | redis_data: 77 | hf_data: -------------------------------------------------------------------------------- /components/ToolCallDisplay.vue: -------------------------------------------------------------------------------- 1 | 20 | 21 | -------------------------------------------------------------------------------- /components/QuickChatButton.vue: -------------------------------------------------------------------------------- 1 | 54 | 55 | 77 | -------------------------------------------------------------------------------- /blogs/20250819-chatollama-deepagents-integration_zh.md: -------------------------------------------------------------------------------- 1 | # ChatOllama 集成 DeepAgents:为开源 AI 聊天带来深度研究能力 2 | 3 | 大家好!今天想和大家分享一个令人兴奋的更新 —— 我为 ChatOllama 集成了 DeepAgents,这让我们的开源 AI 聊天应用具备了强大的深度研究能力。 4 | 5 | ## 什么是 DeepAgents? 6 | 7 | 在开始之前,让我先介绍一下 DeepAgents。传统的 AI 智能体通常采用简单的"LLM + 工具调用"模式,虽然能完成基本任务,但在面对复杂、多步骤的研究工作时往往力不从心。这些"浅层"智能体缺乏规划能力,无法有效地分解和执行复杂任务。 8 | 9 | DeepAgents 的出现改变了这一现状。它借鉴了 Claude Code、Deep Research 等成功应用的设计理念,通过四个核心组件构建真正"深度"的智能体: 10 | 11 | - **🎯 规划工具**:帮助智能体制定和跟踪结构化计划 12 | - **🤖 子智能体**:专门处理特定任务,实现上下文隔离 13 | - **📁 文件系统**:提供持久化状态管理 14 | - **📝 精细提示**:基于成功案例优化的系统提示 15 | 16 | 这种架构让智能体能够像人类研究员一样工作:分解复杂问题、制定研究计划、调用专门工具、整理和分析信息,最终产出高质量的研究报告。 17 | 18 | ## 为什么选择集成到 ChatOllama? 19 | 20 | 作为一个专注于本地化 AI 体验的开源项目,ChatOllama 一直致力于为用户提供强大而易用的 AI 工具。DeepAgents 的加入让我们能够: 21 | 22 | ### 1. **提供专业级研究能力** 23 | 现在用户可以直接在 ChatOllama 中进行深度研究,智能体会自动: 24 | - 制定研究计划 25 | - 搜索相关信息 26 | - 分析和整合数据 27 | - 生成结构化报告 28 | 29 | ### 2. 
**无缝的 MCP 集成** 30 | DeepAgents 原生支持 MCP(模型上下文协议),这意味着集成过程异常顺畅。我们只需要: 31 | ```javascript 32 | // 简单的集成代码 33 | const agent = createDeepAgent({ 34 | tools: mcpTools, 35 | instructions: researchInstructions 36 | }) 37 | ``` 38 | 39 | ### 3. **保持开源精神** 40 | DeepAgents 本身就是开源的,这与 ChatOllama 的理念完美契合。用户可以完全控制自己的数据和研究过程。 41 | 42 | ## 技术实现亮点 43 | 44 | ### 智能的流式处理 45 | 我们实现了服务器端的智能内容处理,确保: 46 | - AI 响应内容在服务器端累积,避免客户端的复杂逻辑 47 | - 每个对话轮次使用唯一 UUID 分组,保持上下文清晰 48 | - 工具调用结果以可折叠的 UI 组件展示,用户体验更佳 49 | 50 | ### 工具调用可视化 51 | 当智能体使用工具时,用户可以清楚地看到: 52 | - 调用了哪个工具(搜索、浏览器、文件操作等) 53 | - 工具的执行结果 54 | - 可以展开查看详细信息 55 | 56 | ### 多语言支持 57 | 我们为新功能添加了完整的中英文支持,确保不同语言用户都能获得良好体验。 58 | 59 | ## 实际使用场景 60 | 61 | 想象一下这些使用场景: 62 | 63 | **学术研究**:询问"帮我研究一下量子计算在密码学中的应用",智能体会自动搜索最新论文、分析技术趋势、整理关键观点。 64 | 65 | **市场分析**:请求"分析一下 2024 年 AI 芯片市场的竞争格局",智能体会收集市场数据、分析竞争对手、生成详细报告。 66 | 67 | **技术调研**:提问"比较不同的容器编排方案",智能体会研究各种方案的优缺点、使用场景、最佳实践。 68 | 69 | ## 开发体验 70 | 71 | 得益于 DeepAgents 优秀的架构设计和 MCP 的标准化,整个集成过程非常顺畅: 72 | 73 | 1. **快速集成**:几行代码就能启用深度研究功能 74 | 2. **灵活配置**:可以根据需要调整智能体的指令和工具 75 | 3. **易于扩展**:通过 MCP 可以轻松添加新的工具和能力 76 | 77 | ## 未来展望 78 | 79 | 这只是开始。接下来我们计划: 80 | - 添加更多专业领域的研究模板 81 | - 支持自定义研究工作流 82 | - 集成更多专业工具和数据源 83 | - 优化长时间研究任务的性能 84 | 85 | ## 总结 86 | 87 | DeepAgents 的集成为 ChatOllama 带来了质的飞跃。我们不再只是一个简单的聊天工具,而是成为了一个强大的研究助手。这种能力的提升,加上开源的特性和本地化的优势,让 ChatOllama 在 AI 应用领域更具竞争力。 88 | 89 | 如果你对这个功能感兴趣,欢迎试用最新版本的 ChatOllama,体验 AI 深度研究的魅力。也欢迎在 GitHub 上给我们反馈,让我们一起把这个功能做得更好! 
90 | 91 | --- 92 | 93 | *ChatOllama 是一个开源的本地 AI 聊天应用,致力于为用户提供私密、强大、易用的 AI 体验。* 94 | -------------------------------------------------------------------------------- /components/Auth.vue: -------------------------------------------------------------------------------- 1 | 42 | 64 | -------------------------------------------------------------------------------- /server/models/openai/tests/azure/chat_models.standard.int.test.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable no-process-env */ 2 | import { test, expect, afterAll } from "@jest/globals"; 3 | import { ChatModelIntegrationTests } from "@langchain/standard-tests"; 4 | import { AIMessageChunk } from "@langchain/core/messages"; 5 | import { AzureChatOpenAI } from "../../azure/chat_models.js"; 6 | import { ChatOpenAICallOptions } from "../../chat_models.js"; 7 | 8 | let openAIAPIKey: string | undefined; 9 | 10 | afterAll(() => { 11 | if (openAIAPIKey) { 12 | process.env.OPENAI_API_KEY = openAIAPIKey; 13 | } 14 | }); 15 | 16 | class AzureChatOpenAIStandardIntegrationTests extends ChatModelIntegrationTests< 17 | ChatOpenAICallOptions, 18 | AIMessageChunk 19 | > { 20 | constructor() { 21 | super({ 22 | Cls: AzureChatOpenAI, 23 | chatModelHasToolCalling: true, 24 | chatModelHasStructuredOutput: true, 25 | supportsParallelToolCalls: true, 26 | constructorArgs: { 27 | model: "gpt-3.5-turbo", 28 | maxRetries: 0, 29 | }, 30 | }); 31 | } 32 | 33 | async testUsageMetadataStreaming() { 34 | this.skipTestMessage( 35 | "testUsageMetadataStreaming", 36 | "AzureChatOpenAI", 37 | "Streaming tokens is not currently supported." 38 | ); 39 | } 40 | 41 | async testStreamTokensWithToolCalls() { 42 | this.skipTestMessage( 43 | "testStreamTokensWithToolCalls", 44 | "AzureChatOpenAI", 45 | "Streaming tokens is not currently supported." 
46 | ); 47 | } 48 | 49 | async testInvokeMoreComplexTools() { 50 | this.skipTestMessage( 51 | "testInvokeMoreComplexTools", 52 | "AzureChatOpenAI", 53 | "AzureChatOpenAI does not support tool schemas which contain object with unknown/any parameters." + 54 | "AzureChatOpenAI only supports objects in schemas when the parameters are defined." 55 | ); 56 | } 57 | 58 | async testParallelToolCalling() { 59 | // Pass `true` in the second argument to only verify it can support parallel tool calls in the message history. 60 | // This is because the model struggles to actually call parallel tools. 61 | await super.testParallelToolCalling(undefined, true); 62 | } 63 | } 64 | 65 | const testClass = new AzureChatOpenAIStandardIntegrationTests(); 66 | 67 | test("AzureChatOpenAIStandardIntegrationTests", async () => { 68 | const testResults = await testClass.runTests(); 69 | expect(testResults).toBe(true); 70 | }); 71 | -------------------------------------------------------------------------------- /server/models/openai/tools/tests/dalle.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import { DallEAPIWrapper } from "../dalle.js"; 3 | 4 | test.skip("Dalle can generate images", async () => { 5 | const dalle = new DallEAPIWrapper(); 6 | 7 | const res = await dalle.invoke("A painting of a cat"); 8 | expect(res).toBeDefined(); 9 | expect(res).toContain("https://"); 10 | }); 11 | 12 | test.skip("Dalle can generate images with base 64 response format", async () => { 13 | const dalle = new DallEAPIWrapper({ 14 | responseFormat: "b64_json", 15 | }); 16 | 17 | const res = await dalle.invoke("A painting of a cat"); 18 | expect(res).toBeDefined(); 19 | expect(res).not.toContain("https://"); 20 | }); 21 | 22 | test.skip("Dalle returns multiple image URLs if n > 1", async () => { 23 | const dalle = new DallEAPIWrapper({ 24 | n: 2, 25 | }); 26 | const res = await dalle.invoke("A painting of a 
cat"); 27 | expect(res).toBeDefined(); 28 | expect(res).toBeInstanceOf(Array); 29 | if (!Array.isArray(res)) return; 30 | expect(res).toHaveLength(2); 31 | 32 | // The types for each should be `image_url` with an `image_url` field containing the URL 33 | expect(res[0].type).toBe("image_url"); 34 | expect(res[1].type).toBe("image_url"); 35 | 36 | expect(res[0]).toHaveProperty("image_url"); 37 | expect(res[1]).toHaveProperty("image_url"); 38 | 39 | expect(res[0].image_url.startsWith("https://")).toBe(true); 40 | expect(res[1].image_url.startsWith("https://")).toBe(true); 41 | }); 42 | 43 | test.skip("Dalle returns multiple base64 image strings if n > 1", async () => { 44 | const dalle = new DallEAPIWrapper({ 45 | n: 2, 46 | dallEResponseFormat: "b64_json", 47 | }); 48 | const res = await dalle.invoke("A painting of a cat"); 49 | expect(res).toBeDefined(); 50 | expect(res).toBeInstanceOf(Array); 51 | if (!Array.isArray(res)) return; 52 | expect(res).toHaveLength(2); 53 | 54 | // The types for each should be `b64_json` with an `b64_json` field containing the URL 55 | expect(res[0].type).toBe("image_url"); 56 | expect(res[1].type).toBe("image_url"); 57 | 58 | expect(res[0]).toHaveProperty("image_url"); 59 | expect(res[1]).toHaveProperty("image_url"); 60 | 61 | expect(res[0].image_url).toHaveProperty("url"); 62 | expect(res[1].image_url).toHaveProperty("url"); 63 | 64 | expect(res[0].image_url.url).toBeDefined(); 65 | expect(res[1].image_url.url).toBeDefined(); 66 | 67 | expect(res[0].image_url.url).not.toBe(""); 68 | expect(res[1].image_url.url).not.toBe(""); 69 | }); 70 | -------------------------------------------------------------------------------- /components/ChatSessionListActionMore.vue: -------------------------------------------------------------------------------- 1 | 42 | 43 | 72 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | # 
Open Source License 2 | 3 | This project is licensed under a modified version of the Apache License 2.0, with the following additional conditions: 4 | 5 | ## 1. Commercial Use Restrictions 6 | 7 | This software may be utilized commercially, including as a backend service for other applications or as an application development platform for enterprises. However, should the conditions below be met, a commercial license must be obtained from the producer: 8 | 9 | ### a. Multi-tenant service and Cloud Hosting 10 | Unless explicitly authorized in writing, you may not use this source code to: 11 | - Operate a multi-tenant environment 12 | - Provide this software as a hosted service or Software-as-a-Service (SaaS) 13 | - Deploy this software on cloud platforms for commercial hosting purposes 14 | 15 | **Tenant Definition**: Within the context of this software, one tenant corresponds to one workspace or user environment that provides separated data and configurations. 16 | 17 | ### b. LOGO and copyright information 18 | In the process of using this software's frontend, you may not remove or modify the LOGO or copyright information in the console or applications. This restriction is inapplicable to uses that do not involve the frontend. 19 | 20 | **Frontend Definition**: For the purposes of this license, the "frontend" includes all user interface components and client-side code. 21 | 22 | ## 2. Contributor Agreement 23 | 24 | As a contributor, you should agree that: 25 | 26 | a. The producer can adjust the open-source agreement to be more strict or relaxed as deemed necessary. 27 | 28 | b. Your contributed code may be used for commercial purposes, including but not limited to business operations. 29 | 30 | ## 3. Apache License 2.0 31 | 32 | Apart from the specific conditions mentioned above, all other rights and restrictions follow the Apache License 2.0. Detailed information about the Apache License 2.0 can be found at http://www.apache.org/licenses/LICENSE-2.0. 
33 | 34 | --- 35 | 36 | Copyright (c) 2025 sugarforever 37 | 38 | Licensed under the Apache License, Version 2.0 (the "License"); 39 | you may not use this file except in compliance with the License. 40 | You may obtain a copy of the License at 41 | 42 | http://www.apache.org/licenses/LICENSE-2.0 43 | 44 | Unless required by applicable law or agreed to in writing, software 45 | distributed under the License is distributed on an "AS IS" BASIS, 46 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 47 | See the License for the specific language governing permissions and 48 | limitations under the License. 49 | -------------------------------------------------------------------------------- /docker-compose_gpu.yaml: -------------------------------------------------------------------------------- 1 | # version: '3.1' 2 | 3 | services: 4 | postgres: 5 | image: postgres:16-alpine 6 | environment: 7 | - POSTGRES_DB=chatollama 8 | - POSTGRES_USER=chatollama 9 | - POSTGRES_PASSWORD=chatollama_password 10 | ports: 11 | - "5432:5432" 12 | restart: always 13 | volumes: 14 | - postgres_data:/var/lib/postgresql/data 15 | healthcheck: 16 | test: ["CMD-SHELL", "pg_isready -U chatollama -d chatollama"] 17 | interval: 10s 18 | timeout: 5s 19 | retries: 5 20 | start_period: 30s 21 | 22 | ollama: 23 | image: ollama/ollama 24 | ports: 25 | - "11434:11434" 26 | restart: always 27 | volumes: 28 | - ollama_data:/root/.ollama 29 | deploy: 30 | resources: 31 | reservations: 32 | devices: 33 | - driver: nvidia 34 | count: all 35 | capabilities: [ gpu ] 36 | chromadb: 37 | image: chromadb/chroma 38 | ports: 39 | - "8000:8000" 40 | restart: always 41 | volumes: 42 | - chromadb_data:/chroma/.chroma/index 43 | 44 | chatollama: 45 | environment: 46 | # Access Control Settings 47 | - ACL_ENABLED=false # Set to 'true' to enable admin-only MCP management 48 | # Feature Flags - Use NUXT_ prefix for runtime configuration 49 | - NUXT_KNOWLEDGE_BASE_ENABLED=true 50 | - 
NUXT_REALTIME_CHAT_ENABLED=false 51 | - NUXT_MODELS_MANAGEMENT_ENABLED=true 52 | - NUXT_MCP_ENABLED=true 53 | # Database and Service URLs 54 | - CHROMADB_URL=http://chromadb:8000 55 | - DATABASE_URL=postgresql://chatollama:chatollama_password@postgres:5432/chatollama 56 | - DIRECT_URL=postgresql://chatollama:chatollama_password@postgres:5432/chatollama 57 | - REDIS_HOST=redis 58 | # Migration settings 59 | - SKIP_MIGRATION=false # Set to 'true' to skip automatic migration 60 | - MIGRATION_TIMEOUT=300 # Migration timeout in seconds 61 | image: 0001coder/chatollama:latest 62 | ports: 63 | - "3000:3000" 64 | pull_policy: always 65 | restart: always 66 | depends_on: 67 | postgres: 68 | condition: service_healthy 69 | chromadb: 70 | condition: service_started 71 | redis: 72 | condition: service_started 73 | volumes: 74 | - ~/.chatollama:/app/data 75 | 76 | redis: 77 | image: redis:latest 78 | restart: always 79 | volumes: 80 | - redis_data:/data 81 | 82 | volumes: 83 | postgres_data: 84 | redis_data: 85 | chromadb_data: 86 | ollama_data: 87 | -------------------------------------------------------------------------------- /types/multimodal-live-types.ts: -------------------------------------------------------------------------------- 1 | import { Content, GenerativeContentBlob, Part } from "@google/generative-ai" 2 | 3 | export interface StreamingLog { 4 | date: Date 5 | type: string 6 | message: string | object 7 | } 8 | 9 | export interface LiveConfig { 10 | model: string 11 | generationConfig?: { 12 | temperature?: number 13 | topP?: number 14 | topK?: number 15 | } 16 | tools?: any[] 17 | } 18 | 19 | export interface SetupMessage { 20 | setup: LiveConfig 21 | } 22 | 23 | export interface RealtimeInputMessage { 24 | realtimeInput: { 25 | mediaChunks: GenerativeContentBlob[] 26 | } 27 | } 28 | 29 | export interface ClientContentMessage { 30 | clientContent: { 31 | turns: Content[] 32 | turnComplete: boolean 33 | } 34 | } 35 | 36 | export interface ModelTurn { 37 | 
modelTurn: { 38 | parts: Part[] 39 | } 40 | } 41 | 42 | export interface ServerContent { 43 | interrupted?: boolean 44 | end_of_turn?: boolean 45 | modelTurn?: { 46 | parts: Part[] 47 | } 48 | } 49 | 50 | export interface ToolCall { 51 | name: string 52 | arguments: string 53 | call_id: string 54 | } 55 | 56 | export interface ToolCallCancellation { 57 | call_id: string 58 | } 59 | 60 | export interface ToolResponseMessage { 61 | toolResponse: { 62 | call_id: string 63 | output: string 64 | } 65 | } 66 | 67 | export interface LiveIncomingMessage { 68 | serverContent?: ServerContent 69 | toolCall?: ToolCall 70 | toolCallCancellation?: ToolCallCancellation 71 | setupComplete?: boolean 72 | } 73 | 74 | export function isServerContentMessage(msg: LiveIncomingMessage): boolean { 75 | return 'serverContent' in msg 76 | } 77 | 78 | export function isToolCallMessage(msg: LiveIncomingMessage): boolean { 79 | return 'toolCall' in msg 80 | } 81 | 82 | export function isToolCallCancellationMessage(msg: LiveIncomingMessage): boolean { 83 | return 'toolCallCancellation' in msg 84 | } 85 | 86 | export function isSetupCompleteMessage(msg: LiveIncomingMessage): boolean { 87 | return 'setupComplete' in msg 88 | } 89 | 90 | export function isInterrupted(content: ServerContent): boolean { 91 | return content.interrupted === true 92 | } 93 | 94 | export function isTurnComplete(content: ServerContent): boolean { 95 | return content.end_of_turn === true 96 | } 97 | 98 | export function isModelTurn(content: ServerContent): boolean { 99 | return 'modelTurn' in content 100 | } 101 | -------------------------------------------------------------------------------- /server/models/openai/tests/azure/embeddings.int.test.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable no-process-env */ 2 | import { test, expect } from "@jest/globals"; 3 | import { AzureOpenAIEmbeddings as OpenAIEmbeddings } from "../../azure/embeddings.js"; 4 | 5 | 
test("Test AzureOpenAIEmbeddings.embedQuery", async () => { 6 | const embeddings = new OpenAIEmbeddings(); 7 | const res = await embeddings.embedQuery("Hello world"); 8 | expect(typeof res[0]).toBe("number"); 9 | }); 10 | 11 | test("Test AzureOpenAIEmbeddings.embedDocuments", async () => { 12 | const embeddings = new OpenAIEmbeddings(); 13 | const res = await embeddings.embedDocuments(["Hello world", "Bye bye"]); 14 | expect(res).toHaveLength(2); 15 | expect(typeof res[0][0]).toBe("number"); 16 | expect(typeof res[1][0]).toBe("number"); 17 | }); 18 | 19 | test("Test AzureOpenAIEmbeddings concurrency", async () => { 20 | const embeddings = new OpenAIEmbeddings({ 21 | batchSize: 1, 22 | maxConcurrency: 2, 23 | }); 24 | const res = await embeddings.embedDocuments([ 25 | "Hello world", 26 | "Bye bye", 27 | "Hello world", 28 | "Bye bye", 29 | "Hello world", 30 | "Bye bye", 31 | ]); 32 | expect(res).toHaveLength(6); 33 | expect(res.find((embedding) => typeof embedding[0] !== "number")).toBe( 34 | undefined 35 | ); 36 | }); 37 | 38 | test("Test timeout error thrown from SDK", async () => { 39 | await expect(async () => { 40 | const model = new OpenAIEmbeddings({ 41 | timeout: 1, 42 | maxRetries: 0, 43 | }); 44 | await model.embedDocuments([ 45 | "Hello world", 46 | "Bye bye", 47 | "Hello world", 48 | "Bye bye", 49 | "Hello world", 50 | "Bye bye", 51 | ]); 52 | }).rejects.toThrow(); 53 | }); 54 | 55 | test("Test AzureOpenAIEmbeddings.embedQuery with v3 and dimensions", async () => { 56 | const embeddings = new OpenAIEmbeddings({ 57 | modelName: "text-embedding-3-small", 58 | dimensions: 127, 59 | }); 60 | const res = await embeddings.embedQuery("Hello world"); 61 | expect(typeof res[0]).toBe("number"); 62 | expect(res.length).toBe(127); 63 | }); 64 | 65 | test("Test AzureOpenAIEmbeddings.embedDocuments with v3 and dimensions", async () => { 66 | const embeddings = new OpenAIEmbeddings({ 67 | modelName: "text-embedding-3-small", 68 | dimensions: 127, 69 | }); 70 | const res 
= await embeddings.embedDocuments(["Hello world", "Bye bye"]); 71 | expect(res).toHaveLength(2); 72 | expect(typeof res[0][0]).toBe("number"); 73 | expect(typeof res[1][0]).toBe("number"); 74 | expect(res[0].length).toBe(127); 75 | expect(res[1].length).toBe(127); 76 | }); 77 | -------------------------------------------------------------------------------- /blogs/20250825-langchain-upgrade-chat-fix_zh.md: -------------------------------------------------------------------------------- 1 | # LangChain 核心包版本升级导致聊天功能中断:快速修复记录 2 | 3 | **日期:** 2025年8月25日 4 | **问题:** LangChain 依赖升级后聊天功能中断 5 | **解决时间:** 约4小时 6 | 7 | ## 🐛 问题描述 8 | 9 | 原本是一次常规的 `LangChain` 依赖升级(`0.3.49` -> `0.3.72`),目的是解决 Docker 模块解析问题,但很快就演变成了一个严重的事故。在升级 LangChain 包之后,整个平台的聊天功能完全停止工作。用户无法发送消息或从任何 AI 模型获得响应,这实际上使得 ChatOllama 的核心功能完全不可用。 10 | 11 | 这个问题特别令人沮丧,因为在升级过程中没有明显的错误消息或警告。应用程序正常启动,但每次聊天尝试都会静默失败。 12 | 13 | ## 🔍 根本原因调查 14 | 15 | 通过深入研究日志和跟踪代码,我们发现 LangChain 升级在聊天模型构造函数中引入了破坏性的 API 更改。使这个问题特别棘手的是,这些不是编译时错误——旧的参数名称被简单地忽略了,导致模型使用未定义的配置进行初始化。 16 | 17 | 在 LangChain 升级过程中,ChatOpenAI 模型构造函数中的参数名称发生了一些变化。虽然仅仅被标记为 `deprecated`,但参数在下游的使用中已经发生了变化。deprecated 参数包括: 18 | 19 | - `modelName` 20 | - `openAIApiKey` 21 | 22 | 这些破坏性更改影响了多个模型提供商,每个都需要特定的参数名称更新: 23 | 24 | ### 修复前(有效的): 25 | ```typescript 26 | new ChatOpenAI({ 27 | configuration: { baseURL }, 28 | openAIApiKey: params.key, // ❌ 已弃用 29 | modelName: modelName, // ❌ 已弃用 30 | }) 31 | 32 | new ChatAnthropic({ 33 | anthropicApiUrl: endpoint, 34 | anthropicApiKey: params.key, // ❌ 已弃用 35 | modelName: modelName, // ❌ 已弃用 36 | }) 37 | ``` 38 | 39 | ### 修复后(已修复): 40 | ```typescript 41 | new ChatOpenAI({ 42 | configuration: { baseURL }, 43 | apiKey: params.key, // ✅ 新 API 44 | model: modelName, // ✅ 新 API 45 | }) 46 | 47 | new ChatAnthropic({ 48 | anthropicApiUrl: endpoint, 49 | apiKey: params.key, // ✅ 新 API 50 | model: modelName, // ✅ 新 API 51 | }) 52 | ``` 53 | 54 | ## 🔧 修复实施 55 | 56 | 一旦我们确定了根本原因,修复就相对简单,但需要仔细注意细节。我们需要在所有受影响的模型提供商中更新参数名称,同时确保向后兼容性并添加更好的错误处理。 57 | 
58 | 以下模型需要更新: 59 | - **OpenAI (ChatOpenAI)** - 最常用的提供商 60 | - **Anthropic (ChatAnthropic)** - AI 代理功能的关键组件 61 | - **Gemini (ChatGoogleGenerativeAI)** - 用于多模态功能 62 | - **Groq (ChatGroq)** - 高性能推理选项 63 | 64 | 实施的关键更改包括: 65 | 1. 将 `openAIApiKey` 和 `anthropicApiKey` 标准化为统一的 `apiKey` 参数 66 | 2. 在所有提供商中将 `modelName` 更新为更简洁的 `model` 参数 67 | 3. 增强错误处理,在配置缺失时提供清晰的反馈 68 | 69 | 除了修复参数名称,我们还借此机会添加了强大的回退逻辑。现在,当外部 API 提供商由于缺少密钥或配置问题而失败时,系统会优雅地回退到 Ollama,确保用户即使在首选提供商配置错误的情况下也能继续聊天。 70 | 71 | ## 📚 经验教训 72 | 73 | 这次事件强化了在生产应用程序中管理依赖项的几个重要原则: 74 | 75 | **主要升级后彻底测试:** 即使看似微小的版本更新也可能引入不明显的破坏性更改。对所有功能进行全面测试是必要的,不仅仅是您期望受到影响的区域。 76 | 77 | **拥抱 API 标准化:** 虽然最初会造成干扰,但 LangChain 在提供商之间标准化参数名称的举措是一个积极的长期变化,将减少混乱并使代码库更易于维护。 78 | 79 | **始终实施优雅降级:** 拥有强大的回退机制不仅仅是良好的实践——当外部依赖项失败或意外更改时,这对于维护用户信任至关重要。 80 | 81 | ## 🚀 影响和解决方案 82 | 83 | 修复在识别后立即部署,为用户实现了零停机时间。更新的实现在利用新的标准化 API 的同时保持完全的向后兼容性。作为额外的好处,增强的错误处理和回退机制实际上提高了聊天系统的整体可靠性。 84 | 85 | 这次事件提醒我们,在 AI 和机器学习库快速发展的世界中,保持依赖项的最新状态需要持续的警惕和彻底的测试实践。 86 | 87 | --- 88 | 89 | *这是主要升级中"静默"破坏性更改的典型案例——这种情况使经验丰富的开发人员总是会仔细阅读变更日志两遍。一旦确定,修复就很简单,但这次经历突出了为什么我们永远不会把看似常规的更新视为理所当然。* 90 | -------------------------------------------------------------------------------- /components/ModelMentionText.vue: -------------------------------------------------------------------------------- 1 | 51 | 52 | 55 | 56 | 68 | -------------------------------------------------------------------------------- /components/MermaidRenderer.vue: -------------------------------------------------------------------------------- 1 | 66 | 67 | 86 | 87 | -------------------------------------------------------------------------------- /server/api/proxy.ts: -------------------------------------------------------------------------------- 1 | import http from 'node:http' 2 | import https from 'node:https' 3 | import { SocksProxyAgent } from 'socks-proxy-agent' 4 | import { HttpProxyAgent } from 'http-proxy-agent' 5 | import { HttpsProxyAgent } from 'https-proxy-agent' 6 | import { type H3Event } from 'h3' 
import { omit } from '~/composables/helpers'

type Protocol = 'http:' | 'https:'

// Agent cache keyed by "<target-protocol><proxy-url>". Generic arguments
// restored — they were stripped in the extracted copy of this file.
const proxyCacheMap = new Map<string, InstanceType<typeof SocksProxyAgent> | InstanceType<typeof HttpProxyAgent> | InstanceType<typeof HttpsProxyAgent>>()

/**
 * Returns a (cached) agent for `proxyUrl`.
 *
 * HTTP proxies need a different agent class depending on the protocol of
 * the *target* endpoint (HttpsProxyAgent for https: targets,
 * HttpProxyAgent for http: targets); any non-http proxy URL is treated as
 * a SOCKS proxy. The cache is therefore keyed by (protocol, proxyUrl) —
 * caching by proxyUrl alone could hand an HttpProxyAgent to an https
 * request once both target protocols had been seen.
 */
function createProxyAgent(proxyUrl: string, protocol: Protocol) {
  const cacheKey = `${protocol}${proxyUrl}`
  const cached = proxyCacheMap.get(cacheKey)
  if (cached) return cached

  // Keep the cache bounded. The proxy URL comes from runtime config, so
  // in practice at most a couple of entries ever exist; this guard only
  // protects against pathological config churn.
  if (proxyCacheMap.size > 16) proxyCacheMap.clear()

  const agent = proxyUrl.startsWith('http:')
    ? protocol === 'https:' ? new HttpsProxyAgent(proxyUrl) : new HttpProxyAgent(proxyUrl)
    : new SocksProxyAgent(proxyUrl)

  proxyCacheMap.set(cacheKey, agent)

  return agent
}

/**
 * Forwards the incoming request to `apiEndpoint` through `proxyUrl`,
 * streaming the upstream response (status, headers, body) back to the
 * client. Host/origin and x-forwarded-* headers are stripped before
 * forwarding so the upstream sees a clean request.
 * Resolves once the upstream response starts; rejects on socket errors.
 */
async function proxyFetch(event: H3Event, apiEndpoint: string, proxyUrl: string) {
  return new Promise((resolve, reject) => {
    const uri = new URL(apiEndpoint)
    const request = (uri.protocol === 'https:' ? https : http).request(apiEndpoint, {
      headers: omit(event.node.req.headers, ['host', 'origin', 'referer', 'x-forwarded-for', 'x-forwarded-host', 'x-forwarded-port', 'x-forwarded-proto']),
      method: event.node.req.method,
      agent: createProxyAgent(proxyUrl, uri.protocol as Protocol)
    }, response => {
      Object.entries(response.headers).forEach(([key, value]) => setResponseHeader(event, key, value!))
      if (response.statusCode) setResponseStatus(event, response.statusCode)
      response.pipe(event.node.res)
      resolve(undefined)
    }).on('error', e => reject(e))

    // Stream the incoming request body straight into the upstream request.
    event.node.req.pipe(request)
  })
}

export default defineEventHandler(async (event) => {
  const url = event.node.req.url
  const config = useRuntimeConfig()

  // The proxy is an opt-in feature; 404 when disabled by config.
  if (!config.public.modelProxyEnabled) {
    setResponseStatus(event, 404)
    return 'Proxy is disabled'
  }

  if (!url) {
    setResponseStatus(event, 400)
    return 'Invalid URL'
  }

  const { endpoint } = getQuery(event) as { endpoint: string }
  const proxyUrl = config.modelProxyUrl

  if (endpoint && proxyUrl) {
    try {
      await
proxyFetch(event, endpoint, proxyUrl) 66 | } catch (e: any) { 67 | console.error(e.message) 68 | setResponseStatus(event, 400) 69 | return e.message ?? 'Proxy error' 70 | } 71 | } else { 72 | setResponseStatus(event, 400) 73 | return 'Invalid request' 74 | } 75 | }) 76 | -------------------------------------------------------------------------------- /utils/settings.ts: -------------------------------------------------------------------------------- 1 | import { useStorage } from '@vueuse/core' 2 | import type { KnowledgeBase } from '@prisma/client' 3 | import type { ContextKeys } from '~/server/middleware/keys' 4 | 5 | // todo: only for compatibility with old localStorage values, will be removed in the future 6 | function getLocalValue(key: string) { 7 | return process.server ? '' : (localStorage.getItem(key) || '') 8 | } 9 | 10 | export const DEFAULT_KEYS_STORE: ContextKeys = { 11 | ollama: { 12 | endpoint: getLocalValue('ollama.host'), 13 | username: getLocalValue('ollama.username'), 14 | password: getLocalValue('ollama.password'), 15 | }, 16 | openai: { 17 | key: getLocalValue('keys.openai_api_key'), 18 | endpoint: getLocalValue('keys.openai_api_host'), 19 | proxy: false, 20 | }, 21 | azureOpenai: { 22 | key: getLocalValue('keys.azure_openai_api_key'), 23 | endpoint: getLocalValue('keys.azure_openai_endpoint'), 24 | deploymentName: getLocalValue('keys.azure_openai_deployment_name'), 25 | proxy: false, 26 | }, 27 | anthropic: { 28 | key: getLocalValue('keys.anthropic_api_key'), 29 | endpoint: getLocalValue('keys.anthropic_api_host'), 30 | proxy: false, 31 | }, 32 | moonshot: { 33 | key: getLocalValue('keys.moonshot_api_key'), 34 | endpoint: getLocalValue('keys.moonshot_api_host'), 35 | }, 36 | gemini: { 37 | key: getLocalValue('keys.gemini_api_key'), 38 | proxy: false, 39 | endpoint: '', 40 | }, 41 | groq: { 42 | key: getLocalValue('keys.groq_api_key'), 43 | endpoint: getLocalValue('keys.groq_api_host'), 44 | proxy: false, 45 | }, 46 | mistral: { 47 | key: 
getLocalValue('keys.mistral_api_key'), 48 | endpoint: getLocalValue('keys.mistral_api_host'), 49 | proxy: false, 50 | }, 51 | custom: [] 52 | } 53 | 54 | export const keysStore = useStorage('keys', DEFAULT_KEYS_STORE) 55 | 56 | export const getKeysHeader = () => ({ 'x-chat-ollama-keys': encodeURIComponent(JSON.stringify(keysStore.value)) }) 57 | 58 | export const loadOllamaInstructions = async () => { 59 | try { 60 | // Check if user is authenticated and use appropriate fetch method 61 | const { token } = useAuth() 62 | const fetchMethod = token.value ? $fetchWithAuth : $fetch 63 | 64 | const { instructions } = await fetchMethod>(`/api/instruction/`) 65 | return instructions || [] 66 | } catch (e) { 67 | console.error("Failed to fetch Ollama instructions", e) 68 | return [] 69 | } 70 | } 71 | 72 | export async function loadKnowledgeBases() { 73 | const response = await $fetchWithAuth('/api/knowledgebases/').catch(() => null) 74 | return (response?.knowledgeBases || []) as KnowledgeBase[] 75 | } 76 | -------------------------------------------------------------------------------- /assets/svg/gemini.svg: -------------------------------------------------------------------------------- 1 | Gemini -------------------------------------------------------------------------------- /server/models/genai/output_parsers.ts: -------------------------------------------------------------------------------- 1 | import type { z } from "zod" 2 | import { 3 | BaseLLMOutputParser, 4 | OutputParserException, 5 | } from "@langchain/core/output_parsers" 6 | import { JsonOutputKeyToolsParserParams } from "@langchain/core/output_parsers/openai_tools" 7 | import { ChatGeneration } from "@langchain/core/outputs" 8 | import { ToolCall } from "@langchain/core/messages/tool" 9 | 10 | interface GoogleGenerativeAIToolsOutputParserParams< 11 | // eslint-disable-next-line @typescript-eslint/no-explicit-any 12 | T extends Record 13 | > extends JsonOutputKeyToolsParserParams { } 14 | 15 | export class 
GoogleGenerativeAIToolsOutputParser<
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  T extends Record<string, any> = Record<string, any>
> extends BaseLLMOutputParser<T> {
  static lc_name() {
    return "GoogleGenerativeAIToolsOutputParser"
  }

  lc_namespace = ["langchain", "google_genai", "output_parsers"];

  // Whether to include the tool-call id in the output.
  // NOTE(review): not consulted by parseResult below — presumably kept
  // for parity with the OpenAI tools parser; confirm before removing.
  returnId = false;

  /** The type of tool calls to return. */
  keyName: string

  /** Whether to return only the first tool call. */
  returnSingle = false;

  // Optional zod schema used to validate the parsed tool arguments.
  // Type arguments restored — they were stripped in the extracted copy.
  zodSchema?: z.ZodType<T>

  constructor(params: GoogleGenerativeAIToolsOutputParserParams<T>) {
    super(params)
    this.keyName = params.keyName
    this.returnSingle = params.returnSingle ?? this.returnSingle
    this.zodSchema = params.zodSchema
  }

  /**
   * Validates `result` against `zodSchema` when one is configured,
   * otherwise returns it unchecked (cast to T).
   * @throws OutputParserException carrying the offending JSON when
   *         validation fails.
   */
  protected async _validateResult(result: unknown): Promise<T> {
    if (this.zodSchema === undefined) {
      return result as T
    }
    const zodParsedResult = await this.zodSchema.safeParseAsync(result)
    if (zodParsedResult.success) {
      return zodParsedResult.data
    } else {
      throw new OutputParserException(
        `Failed to parse. Text: "${JSON.stringify(
          result,
          null,
          2
        )}". Error: ${JSON.stringify(zodParsedResult.error.errors)}`,
        JSON.stringify(result, null, 2)
      )
    }
  }

  /**
   * Collects tool calls across all generations and validates the
   * arguments of the first one found.
   * @throws Error when no generation carried any tool calls.
   */
  async parseResult(generations: ChatGeneration[]): Promise<T> {
    const tools = generations.flatMap((generation) => {
      const { message } = generation
      // Messages without a tool_calls array contribute nothing.
      if (!("tool_calls" in message) || !Array.isArray(message.tool_calls)) {
        return []
      }
      return message.tool_calls as ToolCall[]
    })
    if (tools[0] === undefined) {
      throw new Error(
        "No parseable tool calls provided to GoogleGenerativeAIToolsOutputParser."
      )
    }
    const [tool] = tools
    const validatedResult = await this._validateResult(tool.args)
    return validatedResult
  }
}
--------------------------------------------------------------------------------