├── docker-webhook
│   ├── .example.env
│   ├── docker-compose.yml
│   └── webhook
│       └── webhook.ts
├── banner.png
├── static
│   ├── intro.mp3
│   ├── logo.png
│   ├── favicon.ico
│   ├── intro-en.mp3
│   ├── styles.css
│   └── lines.svg
├── docker-compose
│   ├── caddy
│   │   └── .example.Caddyfile
│   ├── docker-rebuild.sh
│   ├── .example.env
│   └── docker-compose.yml
├── fresh.config.ts
├── dev.ts
├── .example.env
├── main.ts
├── .gitignore
├── routes
│   ├── _app.tsx
│   ├── _404.tsx
│   ├── index.tsx
│   ├── api
│   │   ├── stt.ts
│   │   ├── wikipedia.ts
│   │   ├── bildungsplan.ts
│   │   ├── papers.ts
│   │   ├── tts.ts
│   │   └── chat.ts
│   └── about.tsx
├── tailwind.config.ts
├── islands
│   ├── ChatAgreementOrIsland.tsx
│   ├── Header.tsx
│   ├── Menu.tsx
│   └── ChatAgreement.tsx
├── components
│   ├── Warning.tsx
│   ├── ChatSubmitButton.tsx
│   ├── ImageUploadButton.tsx
│   ├── VoiceRecordButton.tsx
│   ├── ChatTemplate.tsx
│   └── Settings.tsx
├── License.txt
├── deno.json
├── fresh.gen.ts
├── types.d.ts
├── README.md
└── internalization
    ├── agreement-content.ts
    └── content.ts

/docker-webhook/.example.env:
--------------------------------------------------------------------------------
1 | WEBHOOK_SECRET=abcdef123456
--------------------------------------------------------------------------------
/banner.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LAION-AI/school-bud-e-frontend-old/HEAD/banner.png
--------------------------------------------------------------------------------
/static/intro.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LAION-AI/school-bud-e-frontend-old/HEAD/static/intro.mp3
--------------------------------------------------------------------------------
/static/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LAION-AI/school-bud-e-frontend-old/HEAD/static/logo.png
--------------------------------------------------------------------------------
/docker-compose/caddy/.example.Caddyfile:
--------------------------------------------------------------------------------
1 | mywebsite.com {
2 |     reverse_proxy school-bud-e-frontend:8000
3 | }
--------------------------------------------------------------------------------
/static/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LAION-AI/school-bud-e-frontend-old/HEAD/static/favicon.ico
--------------------------------------------------------------------------------
/static/intro-en.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LAION-AI/school-bud-e-frontend-old/HEAD/static/intro-en.mp3
--------------------------------------------------------------------------------
/static/styles.css:
--------------------------------------------------------------------------------
1 | @tailwind base;
2 | @tailwind components;
3 | @tailwind utilities;
4 | 
5 | html, body {
6 |   background-color: #f4eecf;
7 | }
8 | 
--------------------------------------------------------------------------------
/fresh.config.ts:
--------------------------------------------------------------------------------
1 | import { defineConfig } from "$fresh/server.ts";
2 | import tailwind from "$fresh/plugins/tailwind.ts";
3 | 
4 | export default defineConfig({
5 |   plugins: [tailwind()],
6 | });
7 | 
--------------------------------------------------------------------------------
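The `.example.env` files in this tree are templates: copy each one to `.env` next to it and fill in real values. At runtime they are loaded via `$std/dotenv` (see `dev.ts` and `main.ts` below) and read with `Deno.env.get`. A minimal sketch of that flow, for orientation only:

```ts
// Sketch only (not a file from this repo): how the env vars configured above
// reach the API handlers.
import "$std/dotenv/load.ts";

const groqKey = Deno.env.get("GROQ_API_KEY") ?? "";
if (!groqKey) console.warn("GROQ_API_KEY is not set; speech-to-text will fail");
```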
/dev.ts:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env -S deno run -A --watch=static/,routes/
2 | 
3 | import dev from "$fresh/dev.ts";
4 | import config from "./fresh.config.ts";
5 | 
6 | import "$std/dotenv/load.ts";
7 | 
8 | await dev(import.meta.url, "./main.ts", config);
9 | 
--------------------------------------------------------------------------------
/docker-compose/docker-rebuild.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | cd "$(dirname "$0")"
4 | 
5 | echo "Pulling latest changes..."
6 | docker compose pull school-bud-e-frontend
7 | 
8 | echo "Rebuilding and restarting frontend service..."
9 | docker compose up -d --build --force-recreate school-bud-e-frontend
--------------------------------------------------------------------------------
/.example.env:
--------------------------------------------------------------------------------
1 | ### SERVER URL
2 | SERVER_URL="http://...:8001"
3 | SERVER_API_KEY="..."
4 | 
5 | ### SPEECH TO TEXT
6 | GROQ_API_KEY="gsk_..."
7 | GROQ_API_MODEL="whisper-large-v3"
8 | 
9 | ### LARGE LANGUAGE MODELS
10 | API_URL="https://.../v1/chat/completions"
11 | API_KEY="api-key"
12 | API_MODEL="gpt-4o"
--------------------------------------------------------------------------------
/docker-compose/.example.env:
--------------------------------------------------------------------------------
1 | ### Rename to .env and fill in the variables
2 | 
3 | GROQ_API_KEY="gsk_XXXX"
4 | GROQ_API_MODEL="whisper-large-v3"
5 | 
6 | TTS_API=...
7 | TTS_URL=https://api.fish.audio/v1/tts
8 | TTS_MODEL=61561f50f41046e0b267aa4cb30e4957
9 | 
10 | API_URL="https://api.sambanova.ai/v1/chat/completions"
11 | API_KEY="..."
12 | API_MODEL="Meta-Llama-3.1-405B-Instruct"
--------------------------------------------------------------------------------
/main.ts:
--------------------------------------------------------------------------------
1 | /// <reference no-default-lib="true" />
2 | /// <reference lib="dom" />
3 | /// <reference lib="dom.iterable" />
4 | /// <reference lib="dom.asynciterable" />
5 | /// <reference lib="deno.ns" />
6 | 
7 | import "$std/dotenv/load.ts";
8 | 
9 | import { start } from "$fresh/server.ts";
10 | import manifest from "./fresh.gen.ts";
11 | import config from "./fresh.config.ts";
12 | 
13 | await start(manifest, config);
14 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # dotenv environment variable files
2 | .env
3 | .env.development.local
4 | .env.test.local
5 | .env.production.local
6 | .env.local
7 | .DS_Store
8 | .vscode
9 | 
10 | # Fresh build directory
11 | _fresh/
12 | # npm dependencies
13 | node_modules/
14 | 
15 | docker-compose/caddy/Caddyfile
16 | docker-compose/caddy/data
17 | docker-compose/caddy/data/*
18 | docker-compose/caddy/config
19 | docker-compose/caddy/config/*
20 | docker-compose/app/*
--------------------------------------------------------------------------------
/docker-webhook/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 |   webhook:
3 |     image: docker:latest
4 |     container_name: webhook
5 |     privileged: true
6 |     environment:
7 |       - WEBHOOK_SECRET=${WEBHOOK_SECRET}
8 |     volumes:
9 |       - ./webhook:/app
10 |       - /var/run/docker.sock:/var/run/docker.sock
11 |     command: sh -c "apk update && apk add deno && deno run --allow-env --allow-net --allow-run /app/webhook.ts"
12 |     network_mode: host
13 |     restart: unless-stopped
--------------------------------------------------------------------------------
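A hypothetical local smoke test for this webhook service (not a file from the repo). It signs the body the same way `docker-webhook/webhook/webhook.ts`, shown further below, verifies it: HMAC-SHA-256 over the raw form-encoded body, sent as `X-Hub-Signature-256: sha256=<hex digest>`:

```ts
// Run with: deno run --allow-env --allow-net test-webhook.ts (hypothetical file name)
const secret = Deno.env.get("WEBHOOK_SECRET") ?? "abcdef123456";
const body = "payload=" +
  encodeURIComponent(JSON.stringify({ ref: "refs/heads/main" }));

const key = await crypto.subtle.importKey(
  "raw",
  new TextEncoder().encode(secret),
  { name: "HMAC", hash: "SHA-256" },
  false,
  ["sign"],
);
const mac = await crypto.subtle.sign("HMAC", key, new TextEncoder().encode(body));
const hex = Array.from(new Uint8Array(mac))
  .map((b) => b.toString(16).padStart(2, "0"))
  .join("");

// The server listens on port 9000 and only rebuilds on pushes to main.
const res = await fetch("http://localhost:9000", {
  method: "POST",
  headers: {
    "Content-Type": "application/x-www-form-urlencoded",
    "X-Hub-Signature-256": `sha256=${hex}`,
  },
  body,
});
console.log(res.status, await res.text());
```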
/routes/_app.tsx:
--------------------------------------------------------------------------------
1 | import { type PageProps } from "$fresh/server.ts";
2 | export default function App({ Component }: PageProps) {
3 |   return (
4 |     <html>
5 |       <head>
6 |         <meta charset="utf-8" />
7 |         <meta name="viewport" content="width=device-width, initial-scale=1.0" />
8 |         <title>School Bud-E</title>
9 |         <link rel="stylesheet" href="/styles.css" />
10 |       </head>
11 |       <body>
12 |         <Component />
13 |       </body>
14 |     </html>
15 |   );
16 | }
17 | 
--------------------------------------------------------------------------------
/tailwind.config.ts:
--------------------------------------------------------------------------------
1 | import { type Config } from "tailwindcss";
2 | 
3 | export default {
4 |   content: [
5 |     "{routes,islands,components}/**/*.{ts,tsx}",
6 |   ],
7 |   theme: {
8 |     extend: {
9 |       keyframes: {
10 |         fadeIn: {
11 |           "0%": { opacity: "0" },
12 |           "100%": { opacity: "1" },
13 |         },
14 |       },
15 |       animation: {
16 |         "fade-in": "fadeIn 1s ease-out",
17 |       },
18 |     },
19 |   },
20 |   plugins: [],
21 | } satisfies Config;
22 | 
--------------------------------------------------------------------------------
/islands/ChatAgreementOrIsland.tsx:
--------------------------------------------------------------------------------
1 | import ChatAgreement from "./ChatAgreement.tsx";
2 | import ChatIsland from "./ChatIsland.tsx";
3 | import ChatWarning from "../components/Warning.tsx";
4 | 
5 | interface ChatAgreementOrIslandProps {
6 |   lang: string;
7 | }
8 | 
9 | export default function ChatAgreementOrIsland(
10 |   { lang }: ChatAgreementOrIslandProps,
11 | ) {
12 |   const hasAgreed = localStorage.getItem("school-bud-e-agreement") === "true";
13 |   return (
14 |     <>
15 |       {hasAgreed
16 |         ? (
17 |           <>
18 |             <ChatWarning lang={lang} />
19 |             <ChatIsland lang={lang} />
20 |           </>
21 |         )
22 |         : <ChatAgreement lang={lang} />}
23 |     </>
24 |   );
25 | }
--------------------------------------------------------------------------------
/routes/_404.tsx:
--------------------------------------------------------------------------------
1 | import { Head } from "$fresh/runtime.ts";
2 | 
3 | export default function Error404() {
4 |   return (
5 |     <>
6 |       <Head>
7 |         <title>404 - Page not found</title>
8 |       </Head>
9 |       <div class="px-4 py-8 mx-auto bg-[#86efac]">
10 |         <div class="max-w-screen-md mx-auto flex flex-col items-center justify-center">
11 |           <img
12 |             class="my-6"
13 |             src="/logo.svg"
14 |             width="128"
15 |             height="128"
16 |             alt="the Fresh logo: a sliced lemon dripping with juice"
17 |           />
18 |           <h1 class="text-4xl font-bold">404 - Page not found</h1>
19 |           <p class="my-4">
20 |             The page you were looking for doesn't exist.
21 |           </p>
22 |           <a href="/" class="underline">Go back home</a>
23 |         </div>
24 |       </div>
25 |     </>
26 |   );
27 | }
28 | 
--------------------------------------------------------------------------------
/components/Warning.tsx:
--------------------------------------------------------------------------------
1 | import { warningContent } from "../internalization/content.ts";
2 | 
3 | function Warning({ lang }: { lang: string }) {
4 |   const formatBoldTextWhereDoubleAsterisk = (text: string) => {
5 |     const parts = text.split('**');
6 |     return parts.reduce((acc, part, i) => {
7 |       return i % 2 === 0 ? acc + part : acc + `<strong>${part}</strong>`;
8 |     }, '');
9 |   };
10 | 
11 |   return (
12 | 
27 |   );
28 | }
29 | 
30 | export default Warning;
31 | 
--------------------------------------------------------------------------------
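For illustration only (not code from the repo, and assuming the helper wraps the odd-numbered segments in `<strong>` tags, as its name suggests), this is what the formatter above produces:

```ts
// Standalone copy of formatBoldTextWhereDoubleAsterisk, runnable on its own:
const formatBold = (text: string) =>
  text
    .split("**")
    .reduce(
      (acc, part, i) => (i % 2 === 0 ? acc + part : acc + `<strong>${part}</strong>`),
      "",
    );

console.log(formatBold("Bitte **aufmerksam** lesen."));
// -> "Bitte <strong>aufmerksam</strong> lesen."
```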
/routes/index.tsx:
--------------------------------------------------------------------------------
1 | import Header from "../islands/Header.tsx";
2 | import ChatAgreementOrIsland from "../islands/ChatAgreementOrIsland.tsx";
3 | 
4 | export default function Home(req: Request) {
5 |   const url = new URL(req.url);
6 |   const lang = url.searchParams.get("lang") ?? "de";
7 | 
8 |   return (
9 | 
22 | 
23 | 
24 | 
25 | 
26 | 
27 |   );
28 | }
29 | 
--------------------------------------------------------------------------------
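A quick illustration (not code from the repo) of how the `lang` query parameter resolves in this route:

```ts
// Illustration only: http://localhost:8000/?lang=en switches to English,
// any request without ?lang=... falls back to German.
const lang = new URL("http://localhost:8000/?lang=en").searchParams.get("lang") ?? "de";
console.log(lang); // "en"
```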
/License.txt:
--------------------------------------------------------------------------------
1 | Copyright (c) 2024 LAION e.V.
2 | 
3 | Permission is hereby granted, free of charge, to any person obtaining a copy
4 | of this software and associated documentation files (the "Software"), to deal
5 | in the Software without restriction, including without limitation the rights
6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 | copies of the Software, and to permit persons to whom the Software is
8 | furnished to do so, subject to the following conditions:
9 | 
10 | The above copyright notice and this permission notice shall be included in all
11 | copies or substantial portions of the Software.
12 | 
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
14 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
15 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
16 | IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
17 | DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 | OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
19 | OR OTHER DEALINGS IN THE SOFTWARE.
20 | 
--------------------------------------------------------------------------------
/deno.json:
--------------------------------------------------------------------------------
1 | {
2 |   "lock": false,
3 |   "tasks": {
4 |     "check": "deno fmt --check && deno lint && deno check **/*.ts && deno check **/*.tsx",
5 |     "cli": "echo \"import '\\$fresh/src/dev/cli.ts'\" | deno run --unstable -A -",
6 |     "manifest": "deno task cli manifest $(pwd)",
7 |     "start": "deno run -A --watch=static/,routes/ dev.ts",
8 |     "build": "deno run -A dev.ts build",
9 |     "preview": "deno run -A main.ts",
10 |     "update": "deno run -A -r https://fresh.deno.dev/update ."
11 |   },
12 |   "lint": { "rules": { "tags": ["fresh", "recommended"] } },
13 |   "exclude": ["**/_fresh/*"],
14 |   "imports": {
15 |     "$fresh/": "https://deno.land/x/fresh@1.7.2/",
16 |     "preact": "https://esm.sh/preact@10.22.0",
17 |     "preact/": "https://esm.sh/preact@10.22.0/",
18 |     "@preact/signals": "https://esm.sh/*@preact/signals@1.2.2",
19 |     "@preact/signals-core": "https://esm.sh/*@preact/signals-core@1.5.1",
20 |     "tailwindcss": "npm:tailwindcss@3.4.1",
21 |     "tailwindcss/": "npm:/tailwindcss@3.4.1/",
22 |     "tailwindcss/plugin": "npm:/tailwindcss@3.4.1/plugin.js",
23 |     "$std/": "https://deno.land/std@0.216.0/"
24 |   },
25 |   "compilerOptions": { "jsx": "react-jsx", "jsxImportSource": "preact" },
26 |   "nodeModulesDir": "auto"
27 | }
28 | 
--------------------------------------------------------------------------------
/docker-compose/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 |   school-bud-e-frontend:
3 |     image: denoland/deno:latest
4 |     container_name: school-bud-e-frontend
5 |     build:
6 |       context: .
7 |       dockerfile: Dockerfile
8 |     ports:
9 |       - "8000:8000"
10 |     env_file:
11 |       - .env
12 |     volumes:
13 |       - ./app:/school-bud-e-frontend
14 |     command: sh -c "apt-get update && apt-get install -y git && git config --global --add safe.directory /school-bud-e-frontend && cd /school-bud-e-frontend && (git rev-parse --git-dir || git init) && if [ -d '.git' ]; then git remote add origin https://github.com/LAION-AI/school-bud-e-frontend.git || true && git fetch origin && git checkout -B main origin/main; else git clone -b main https://github.com/LAION-AI/school-bud-e-frontend.git .; fi && deno task build && deno task preview"
15 |     networks:
16 |       - app-network
17 |     restart: unless-stopped
18 | 
19 |   caddy:
20 |     image: caddy:latest
21 |     container_name: caddy
22 |     ports:
23 |       - "80:80"
24 |       - "443:443"
25 |     volumes:
26 |       - ./caddy/Caddyfile:/etc/caddy/Caddyfile
27 |       - ./caddy/data:/data
28 |       - ./caddy/config:/config
29 |     networks:
30 |       - app-network
31 | 
32 | networks:
33 |   app-network:
34 |     driver: bridge
35 | 
--------------------------------------------------------------------------------
/components/ChatSubmitButton.tsx:
--------------------------------------------------------------------------------
1 | // Button.tsx
2 | import { JSX } from "preact";
3 | import { IS_BROWSER } from "$fresh/runtime.ts";
4 | 
5 | export function ChatSubmitButton(props: JSX.HTMLAttributes<HTMLButtonElement>) {
6 |   // Destructure `class` from props to apply alongside Tailwind classes
7 |   const { class: className, ...buttonProps } = props;
8 | 
9 |   return (
10 | 
37 |   );
38 | }
39 | 
--------------------------------------------------------------------------------
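A hypothetical usage sketch for this button (the hosting component, `ChatTemplate.tsx`, is not included in this dump, so the surrounding code is assumed):

```tsx
// Sketch only: the `class` prop is merged with the button's own Tailwind
// classes; all other standard button attributes pass straight through.
import { ChatSubmitButton } from "../components/ChatSubmitButton.tsx";

export function ExampleChatForm(
  { onSubmit, busy }: { onSubmit: () => void; busy: boolean },
) {
  return <ChatSubmitButton class="ml-2" disabled={busy} onClick={onSubmit} />;
}
```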
/islands/Header.tsx:
--------------------------------------------------------------------------------
1 | import { JSX } from "preact/jsx-runtime";
2 | import { headerContent } from "../internalization/content.ts";
3 | import Menu from "./Menu.tsx";
4 | 
5 | /**
6 |  * Header Component
7 |  *
8 |  * This component renders the header section of the application, which includes:
9 |  * - A navigation menu
10 |  * - A logo image
11 |  * - Titles based on the selected language
12 |  *
13 |  * @param {Object} props - The properties object.
14 |  * @param {string} props.lang - The language code for content localization.
15 |  *
16 |  * @returns {JSX.Element} The rendered header component.
17 |  */
18 | function Header({ lang }: { lang: string }): JSX.Element {
19 |   return (
20 | 
21 |     {/* Render the navigation menu */}
22 |     <Menu lang={lang} />
23 | 
24 | 
25 |       {/* Logo Image */}
26 |       A little lion wearing a graduation cap.
32 | 
33 | 
34 |         {/* Over Title */}
35 | 
36 |           {headerContent[lang]["overTitle"]}
37 | 
38 | 
39 |         {/* Main Title */}
40 | 
41 |           {headerContent[lang]["title"]}
42 | 
43 | 
44 | 
45 | 
46 |   );
47 | }
48 | 
49 | export default Header;
50 | 
--------------------------------------------------------------------------------
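Header and Menu pull their strings from `internalization/content.ts`, which is not included in this dump. Judging from the lookups (`headerContent[lang]["title"]`, `menuContent[lang]["about"]`) and the `InternalizationContent` interface in `types.d.ts`, its exports plausibly look like this; the key names are taken from the components, the string values are placeholders:

```ts
// Hypothetical sketch of internalization/content.ts (the real file is not shown):
export const headerContent: InternalizationContent = {
  de: { overTitle: "...", title: "School Bud-E" },
  en: { overTitle: "...", title: "School Bud-E" },
};

export const menuContent: InternalizationContent = {
  de: { about: "...", imprint: "..." },
  en: { about: "...", imprint: "..." },
};
```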
/islands/Menu.tsx:
--------------------------------------------------------------------------------
1 | import { JSX } from "preact/jsx-runtime";
2 | import { menuContent } from "../internalization/content.ts";
3 | 
4 | /**
5 |  * Menu Component
6 |  *
7 |  * This component renders the navigation menu and language selector for the application.
8 |  *
9 |  * @param {Object} props - The properties object.
10 |  * @param {string} props.lang - The language code for content localization.
11 |  *
12 |  * @returns {JSX.Element} The rendered menu component.
13 |  */
14 | export default function Menu({ lang }: { lang: string }): JSX.Element {
15 |   // Define menu items with localized names and URLs
16 |   const menuItems = [
17 |     { name: menuContent[lang]["about"], href: "/about?lang=" + lang },
18 |     { name: menuContent[lang]["imprint"], href: "https://laion.ai/impressum" },
19 |   ];
20 | 
21 |   // Define available languages with their codes and symbols
22 |   const languages = [
23 |     { name: "Deutsch", code: "de", symbol: "🇩🇪" },
24 |     { name: "English", code: "en", symbol: "🇬🇧" },
25 |   ];
26 | 
27 |   return (
28 | 
29 |       {/* Language Selector */}
30 | 
45 | 
46 |       {/* Menu Items */}
47 |       {menuItems.map((item) => (
48 | 
52 |           {item.name}
53 | 
54 |       ))}
55 | 
56 | ); 57 | } 58 | -------------------------------------------------------------------------------- /fresh.gen.ts: -------------------------------------------------------------------------------- 1 | // DO NOT EDIT. This file is generated by Fresh. 2 | // This file SHOULD be checked into source version control. 3 | // This file is automatically updated during development when running `dev.ts`. 4 | 5 | import * as $_404 from "./routes/_404.tsx"; 6 | import * as $_app from "./routes/_app.tsx"; 7 | import * as $about from "./routes/about.tsx"; 8 | import * as $api_bildungsplan from "./routes/api/bildungsplan.ts"; 9 | import * as $api_chat from "./routes/api/chat.ts"; 10 | import * as $api_papers from "./routes/api/papers.ts"; 11 | import * as $api_stt from "./routes/api/stt.ts"; 12 | import * as $api_tts from "./routes/api/tts.ts"; 13 | import * as $api_wikipedia from "./routes/api/wikipedia.ts"; 14 | import * as $index from "./routes/index.tsx"; 15 | import * as $ChatAgreement from "./islands/ChatAgreement.tsx"; 16 | import * as $ChatAgreementOrIsland from "./islands/ChatAgreementOrIsland.tsx"; 17 | import * as $ChatIsland from "./islands/ChatIsland.tsx"; 18 | import * as $Header from "./islands/Header.tsx"; 19 | import * as $Menu from "./islands/Menu.tsx"; 20 | import type { Manifest } from "$fresh/server.ts"; 21 | 22 | const manifest = { 23 | routes: { 24 | "./routes/_404.tsx": $_404, 25 | "./routes/_app.tsx": $_app, 26 | "./routes/about.tsx": $about, 27 | "./routes/api/bildungsplan.ts": $api_bildungsplan, 28 | "./routes/api/chat.ts": $api_chat, 29 | "./routes/api/papers.ts": $api_papers, 30 | "./routes/api/stt.ts": $api_stt, 31 | "./routes/api/tts.ts": $api_tts, 32 | "./routes/api/wikipedia.ts": $api_wikipedia, 33 | "./routes/index.tsx": $index, 34 | }, 35 | islands: { 36 | "./islands/ChatAgreement.tsx": $ChatAgreement, 37 | "./islands/ChatAgreementOrIsland.tsx": $ChatAgreementOrIsland, 38 | "./islands/ChatIsland.tsx": $ChatIsland, 39 | "./islands/Header.tsx": $Header, 40 | "./islands/Menu.tsx": $Menu, 41 | }, 42 | baseUrl: import.meta.url, 43 | } satisfies Manifest; 44 | 45 | export default manifest; 46 | -------------------------------------------------------------------------------- /islands/ChatAgreement.tsx: -------------------------------------------------------------------------------- 1 | import { useState } from "preact/hooks"; 2 | 3 | import { agreementContent } from "../internalization/content.ts"; 4 | 5 | interface ChatAgreementProps { 6 | lang: string; 7 | } 8 | 9 | export default function ChatAgreement({ lang }: ChatAgreementProps) { 10 | const [agreed, setAgreed] = useState(false); 11 | 12 | const handleAgree = () => { 13 | localStorage.setItem("school-bud-e-agreement", "true"); 14 | globalThis.location.reload(); 15 | }; 16 | 17 | // title: "Welcome to School Bud-E!", 18 | // content: 19 | // "Please read and accept the following terms and conditions to continue using School Bud-E.", 20 | // accept: "Accept", 21 | // terms: "Terms and Conditions", 22 | // temsAndConditionsContent: "Placeholder Terms and Conditions", 23 | 24 | return ( 25 |
26 |

{agreementContent[lang].title}

27 |

{agreementContent[lang].content}

28 |

29 | 30 | {agreementContent[lang].terms} 31 | 32 |

33 |

37 |

38 | 47 |
48 | 57 |
58 | ); 59 | } 60 | -------------------------------------------------------------------------------- /routes/api/stt.ts: -------------------------------------------------------------------------------- 1 | import { Handlers } from "$fresh/server.ts"; 2 | 3 | const STT_KEY = Deno.env.get("STT_KEY") || ""; 4 | const STT_MODEL = Deno.env.get("STT_MODEL") || ""; 5 | const STT_URL = Deno.env.get("STT_URL") || ""; 6 | 7 | export const handler: Handlers = { 8 | async POST(req) { 9 | try { 10 | const formData = await req.formData(); 11 | const audioFile = formData.get("audio") as File; 12 | let sttUrl = formData.get("sttUrl") as string || STT_URL; 13 | const sttKey = formData.get("sttKey") as string || STT_KEY; 14 | let sttModel = formData.get("sttModel") as string || STT_MODEL; 15 | 16 | if (sttKey.startsWith("gsk_")) { 17 | sttUrl = sttUrl == "" ? "https://api.groq.com/openai/v1/audio/transcriptions" : sttUrl; 18 | sttModel = sttModel == "" ? "whisper-large-v3-turbo" : sttModel; 19 | } 20 | 21 | if (!audioFile) { 22 | return new Response("No audio file uploaded", { status: 400 }); 23 | } 24 | 25 | if (!sttKey) { 26 | return new Response("Missing STT API key", { status: 400 }); 27 | } 28 | 29 | // Create new FormData for the STT API request 30 | const sttFormData = new FormData(); 31 | sttFormData.append("file", audioFile); 32 | sttFormData.append("model", sttModel); 33 | 34 | // Make the fetch request to STT API 35 | const response = await fetch(sttUrl, { 36 | method: "POST", 37 | headers: { 38 | "Authorization": `Bearer ${sttKey}`, 39 | }, 40 | body: sttFormData, 41 | }); 42 | 43 | if (!response.ok) { 44 | throw new Error(`STT API responded with status: ${response.status}`); 45 | } 46 | 47 | const transcription = await response.json(); 48 | 49 | return new Response(transcription.text, { 50 | status: 200, 51 | headers: { "Content-Type": "application/text" }, 52 | }); 53 | } catch (error) { 54 | console.error("Error transcribing audio file:", error); 55 | return new Response("Internal Server Error", { status: 500 }); 56 | } 57 | }, 58 | }; 59 | -------------------------------------------------------------------------------- /routes/api/wikipedia.ts: -------------------------------------------------------------------------------- 1 | import { Handlers } from "$fresh/server.ts"; 2 | 3 | const WIKIPEDIA_API_URL = "http://37.27.128.150:9999/search"; 4 | 5 | function getErrorMessage(error: unknown): string { 6 | if (error instanceof Error) return error.message; 7 | return String(error); 8 | } 9 | 10 | export const handler: Handlers = { 11 | async GET(req: Request) { 12 | try { 13 | const url = new URL(req.url); 14 | const text = url.searchParams.get("text"); 15 | const collection = url.searchParams.get("collection") || "English-ConcatX-Abstract"; 16 | const n = parseInt(url.searchParams.get("n") || "2", 10); 17 | 18 | if (!text) { 19 | throw new Error("Text parameter is required"); 20 | } 21 | 22 | const response = await fetch(WIKIPEDIA_API_URL, { 23 | method: "POST", 24 | headers: { "Content-Type": "application/json" }, 25 | body: JSON.stringify({ text, collection, n }), 26 | }); 27 | 28 | if (!response.ok) { 29 | throw new Error(`HTTP error! 
status: ${response.status}`); 30 | } 31 | 32 | const data = await response.json(); 33 | return new Response(JSON.stringify(data), { 34 | headers: { "Content-Type": "application/json" }, 35 | }); 36 | } catch (error) { 37 | console.error("Error in wikipedia API:", error); 38 | return new Response(JSON.stringify({ error: getErrorMessage(error) }), { 39 | status: 500, 40 | headers: { "Content-Type": "application/json" }, 41 | }); 42 | } 43 | }, 44 | 45 | async POST(req: Request) { 46 | try { 47 | const payload = await req.json(); 48 | if (!payload.text) { 49 | throw new Error("Text parameter is required"); 50 | } 51 | 52 | console.log("Payload:", payload); 53 | 54 | const response = await fetch(WIKIPEDIA_API_URL, { 55 | method: "POST", 56 | headers: { "Content-Type": "application/json" }, 57 | body: JSON.stringify({ 58 | text: payload.text, 59 | collection: payload.collection || "English-ConcatX-Abstract", 60 | n: payload.n || 2, 61 | }), 62 | }); 63 | 64 | if (!response.ok) { 65 | throw new Error(`HTTP error! status: ${response.status}`); 66 | } 67 | 68 | const data = await response.json(); 69 | return new Response(JSON.stringify(data), { 70 | headers: { "Content-Type": "application/json" }, 71 | }); 72 | } catch (error) { 73 | console.error("Error in wikipedia API:", error); 74 | return new Response(JSON.stringify({ error: getErrorMessage(error) }), { 75 | status: 500, 76 | headers: { "Content-Type": "application/json" }, 77 | }); 78 | } 79 | }, 80 | }; -------------------------------------------------------------------------------- /routes/api/bildungsplan.ts: -------------------------------------------------------------------------------- 1 | import { Handlers } from "$fresh/server.ts"; 2 | 3 | const BILDUNGSPLAN_API_URL = "http://213.173.96.19:8020/query"; 4 | 5 | function getErrorMessage(error: unknown): string { 6 | if (error instanceof Error) return error.message; 7 | return String(error); 8 | } 9 | 10 | export const handler: Handlers = { 11 | async GET(req: Request) { 12 | try { 13 | const url = new URL(req.url); 14 | const query = url.searchParams.get("query"); 15 | const top_n = parseInt(url.searchParams.get("top_n") || "5", 10); 16 | 17 | if (!query) { 18 | throw new Error("Query parameter is required"); 19 | } 20 | 21 | const response = await fetch(BILDUNGSPLAN_API_URL, { 22 | method: "POST", 23 | headers: { 24 | "Content-Type": "application/json", 25 | }, 26 | body: JSON.stringify({ 27 | query, 28 | top_n, 29 | }), 30 | }); 31 | 32 | if (!response.ok) { 33 | throw new Error(`HTTP error! status: ${response.status}`); 34 | } 35 | 36 | const data = await response.json() as BildungsplanResponse; 37 | 38 | return new Response(JSON.stringify(data), { 39 | headers: { "Content-Type": "application/json" }, 40 | }); 41 | } catch (error) { 42 | console.error("Error in bildungsplan API:", error); 43 | return new Response(JSON.stringify({ error: getErrorMessage(error) }), { 44 | status: 500, 45 | headers: { "Content-Type": "application/json" }, 46 | }); 47 | } 48 | }, 49 | 50 | async POST(req: Request) { 51 | try { 52 | const payload = await req.json() as BildungsplanQuery; 53 | 54 | if (!payload.query) { 55 | throw new Error("Query parameter is required"); 56 | } 57 | 58 | const response = await fetch(BILDUNGSPLAN_API_URL, { 59 | method: "POST", 60 | headers: { 61 | "Content-Type": "application/json", 62 | }, 63 | body: JSON.stringify({ 64 | query: payload.query, 65 | top_n: payload.top_n || 5, 66 | }), 67 | }); 68 | 69 | if (!response.ok) { 70 | throw new Error(`HTTP error! 
status: ${response.status}`); 71 | } 72 | 73 | const data = await response.json() as BildungsplanResponse; 74 | 75 | return new Response(JSON.stringify(data), { 76 | headers: { "Content-Type": "application/json" }, 77 | }); 78 | } catch (error) { 79 | console.error("Error in bildungsplan API:", error); 80 | return new Response(JSON.stringify({ error: getErrorMessage(error) }), { 81 | status: 500, 82 | headers: { "Content-Type": "application/json" }, 83 | }); 84 | } 85 | }, 86 | }; 87 | --------------------------------------------------------------------------------
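An illustrative client-side call to this proxy route (not a file from the repo; the field names follow `BildungsplanQuery` and `BildungsplanResponse` in `types.d.ts`, and the query string is an invented example):

```ts
// Sketch only: POST /api/bildungsplan with a query and the number of hits to return.
const res = await fetch("/api/bildungsplan", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ query: "Medienbildung in der Grundschule", top_n: 3 }),
});
const data: BildungsplanResponse = await res.json();
for (const result of data.results) {
  console.log(result.score.toFixed(3), result.text.slice(0, 80));
}
```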
/routes/about.tsx:
--------------------------------------------------------------------------------
1 | import { aboutContent } from "../internalization/content.ts";
2 | 
3 | export default function About(req: Request) {
4 |   const url = new URL(req.url);
5 |   const lang = url.searchParams.get("lang") ?? "de";
6 | 
7 |   return (
8 | 
14 |         {aboutContent[lang]["title"]}
15 | 
17 |         {aboutContent[lang]["partOneOne"]}{" "}
18 | 
19 |           LAION
20 |         {" "}
21 |         {aboutContent[lang]["partOneTwo"]}
22 | 
25 |         {aboutContent[lang]["headingOne"]}
26 | 
28 |         {aboutContent[lang]["partTwoOne"]}
29 | 
31 |           • {aboutContent[lang]["partTwoTwo"]}
34 |           • {aboutContent[lang]["partTwoThree"]}
37 |           • {aboutContent[lang]["partTwoFour"]}
40 |           • {aboutContent[lang]["partTwoFive"]}
43 |           • {aboutContent[lang]["partTwoSix"]}
45 | 
49 |         {aboutContent[lang]["headingTwo"]}
50 | 
52 |         {aboutContent[lang]["partThreeOne"]}
53 | 
56 |         {aboutContent[lang]["partThreeTwo"]}
57 | 
60 |         {aboutContent[lang]["headingThree"]}
61 | 
63 |         {aboutContent[lang]["partFourOne"]}
64 | 
67 |         {aboutContent[lang]["partFourTwo"]}{" "}
72 |           Discord Server
73 |         {" "}
74 |         {aboutContent[lang]["partFourThree"]}{" "}
79 |           contact@laion.ai
80 |         .
84 |   );
85 | }
86 | 
--------------------------------------------------------------------------------
/routes/api/papers.ts:
--------------------------------------------------------------------------------
1 | import { Handlers } from "$fresh/server.ts";
2 | 
3 | const PAPERS_API_URL = "https://api.ask.orkg.org/index/search";
4 | 
5 | function getErrorMessage(error: unknown): string {
6 |   if (error instanceof Error) return error.message;
7 |   return String(error);
8 | }
9 | 
10 | export const handler: Handlers = {
11 |   async GET(req: Request) {
12 |     try {
13 |       const url = new URL(req.url);
14 |       const query = url.searchParams.get("query");
15 |       const limit = parseInt(url.searchParams.get("limit") || "5", 10);
16 | 
17 |       if (!query) {
18 |         throw new Error("Query parameter is required");
19 |       }
20 | 
21 |       const response = await fetch(
22 |         `${PAPERS_API_URL}?query=${encodeURIComponent(query)}&limit=${limit}`,
23 |         {
24 |           headers: { "accept": "application/json" },
25 |         },
26 |       );
27 | 
28 |       if (!response.ok) {
29 |         throw new Error(`HTTP error! status: ${response.status}`);
30 |       }
31 | 
32 |       const data = await response.json();
33 |       return new Response(JSON.stringify(data), {
34 |         headers: { "Content-Type": "application/json" },
35 |       });
36 |     } catch (error) {
37 |       console.error("Error in papers API:", error);
38 |       return new Response(JSON.stringify({ error: getErrorMessage(error) }), {
39 |         status: 500,
40 |         headers: { "Content-Type": "application/json" },
41 |       });
42 |     }
43 |   },
44 | 
45 |   async POST(req: Request) {
46 |     try {
47 |       const payload = await req.json();
48 |       if (!payload.query) {
49 |         throw new Error("Query parameter is required");
50 |       }
51 | 
52 |       const top_n = payload.limit || 5;
53 | 
54 |       const response = await fetch(
55 |         `${PAPERS_API_URL}?query=${encodeURIComponent(payload.query)}&limit=${
56 |           top_n * 4
57 |         }`,
58 |         {
59 |           headers: { "accept": "application/json" },
60 |         },
61 |       );
62 | 
63 |       if (!response.ok) {
64 |         throw new Error(`HTTP error! 
status: ${response.status}`); 65 | } 66 | 67 | const data = await response.json(); 68 | 69 | if (!data.payload) { 70 | throw new Error("Invalid response from papers API"); 71 | } 72 | 73 | data.payload.items = data.payload.items.filter((item: PapersItem) => { 74 | return item.abstract && item.title && item.doi && item.date_published; 75 | }); 76 | 77 | data.payload.items = data.payload.items.slice(0, top_n); 78 | 79 | return new Response(JSON.stringify(data), { 80 | headers: { "Content-Type": "application/json" }, 81 | }); 82 | } catch (error) { 83 | console.error("Error in papers API:", error); 84 | return new Response(JSON.stringify({ error: getErrorMessage(error) }), { 85 | status: 500, 86 | headers: { "Content-Type": "application/json" }, 87 | }); 88 | } 89 | }, 90 | }; 91 | -------------------------------------------------------------------------------- /docker-webhook/webhook/webhook.ts: -------------------------------------------------------------------------------- 1 | import { crypto } from "https://deno.land/std/crypto/mod.ts"; 2 | 3 | const WEBHOOK_SECRET = Deno.env.get("WEBHOOK_SECRET"); 4 | 5 | async function verifyGitHubSignature(payload: string, signature: string): Promise { 6 | if (!WEBHOOK_SECRET || !signature) return false; 7 | 8 | console.log('Verifying signature...'); 9 | 10 | const key = new TextEncoder().encode(WEBHOOK_SECRET); 11 | const message = new TextEncoder().encode(payload); 12 | 13 | const hmacKey = await crypto.subtle.importKey( 14 | "raw", 15 | key, 16 | { name: "HMAC", hash: "SHA-256" }, 17 | false, 18 | ["sign"] 19 | ); 20 | 21 | const signed = await crypto.subtle.sign("HMAC", hmacKey, message); 22 | const hash = Array.from(new Uint8Array(signed)) 23 | .map(b => b.toString(16).padStart(2, '0')) 24 | .join(''); 25 | 26 | return `sha256=${hash}` === signature; 27 | } 28 | 29 | function parseFormUrlEncoded(formData: string): Record { 30 | const params: Record = {}; 31 | const pairs = formData.split('&'); 32 | 33 | for (const pair of pairs) { 34 | const [key, value] = pair.split('='); 35 | if (key && value) { 36 | params[decodeURIComponent(key)] = decodeURIComponent(value); 37 | } 38 | } 39 | return params; 40 | } 41 | 42 | const server = Deno.listen({ port: 9000 }); 43 | 44 | async function handleWebhook(req: Request) { 45 | if (req.method === 'POST') { 46 | const signature = req.headers.get('X-Hub-Signature-256'); 47 | const payload = await req.text(); 48 | 49 | if (!await verifyGitHubSignature(payload, signature || '')) { 50 | return new Response('Invalid signature', { status: 403 }); 51 | } 52 | 53 | const formData = parseFormUrlEncoded(payload); 54 | const body = JSON.parse(formData.payload || '{}'); 55 | 56 | if (body.ref === 'refs/heads/main') { 57 | console.log('Main branch updated, rebuilding labeled services...'); 58 | 59 | try { 60 | const rebuild = new Deno.Command('/usr/local/bin/docker', { 61 | args: ['restart', 'school-bud-e-frontend'], 62 | cwd: '/app', 63 | stdout: "piped", 64 | stderr: "piped", 65 | }); 66 | 67 | const output = await rebuild.output(); 68 | console.log('Rebuild output:', new TextDecoder().decode(output.stdout)); 69 | if (output.stderr.length > 0) { 70 | console.error('Rebuild stderr:', new TextDecoder().decode(output.stderr)); 71 | } 72 | 73 | if (output.code === 0) { 74 | return new Response('Rebuild triggered successfully', { status: 200 }); 75 | } else { 76 | return new Response('Rebuild failed', { status: 500 }); 77 | } 78 | } catch (error) { 79 | console.error('Rebuild error:', error); 80 | return new Response('Rebuild 
error', { status: 500 }); 81 | } 82 | } 83 | } 84 | return new Response('OK', { status: 200 }); 85 | } 86 | 87 | console.log('Webhook server running on port 9000'); 88 | for await (const conn of server) { 89 | (async () => { 90 | const httpConn = Deno.serveHttp(conn); 91 | for await (const requestEvent of httpConn) { 92 | requestEvent.respondWith(handleWebhook(requestEvent.request)); 93 | } 94 | })(); 95 | } -------------------------------------------------------------------------------- /components/ImageUploadButton.tsx: -------------------------------------------------------------------------------- 1 | import { useRef, useState } from "preact/hooks"; 2 | import { IS_BROWSER } from "$fresh/runtime.ts"; 3 | 4 | function ImageUploadButton({ 5 | onImagesUploaded, 6 | }: { 7 | onImagesUploaded: (images: Image[]) => void; 8 | }) { 9 | // deno-lint-ignore no-explicit-any 10 | const [previewImages, setPreviewImages] = useState([]); 11 | const [imageFiles, _setImageFiles] = useState([]); 12 | const fileInputRef = useRef(null); 13 | 14 | const onButtonClick = () => { 15 | if (fileInputRef.current) { 16 | fileInputRef.current.click(); 17 | } 18 | }; 19 | 20 | // deno-lint-ignore no-explicit-any 21 | const handleImageUpload = (event: any) => { 22 | const files = Array.from(event.target.files); 23 | const newImages = files.map((file) => ({ 24 | file, 25 | preview: URL.createObjectURL(file as Blob), 26 | })); 27 | const previousImages = previewImages; 28 | setPreviewImages([...previousImages, ...newImages]); 29 | 30 | // deno-lint-ignore no-explicit-any 31 | const newPreviewImages: any[] = []; 32 | const promises = files.map((file) => { 33 | return new Promise((resolve) => { 34 | const FR = new FileReader(); 35 | 36 | FR.addEventListener("load", (e) => { 37 | const data_url = e.target!.result; 38 | // const type = data_url.split(";")[0].split(":")[1]; 39 | const imageObject = { 40 | type: "image_url", 41 | image_url: { 42 | url: data_url, 43 | detail: "high", 44 | }, 45 | }; 46 | 47 | newPreviewImages.push(imageObject); 48 | resolve(); 49 | }); 50 | 51 | FR.readAsDataURL(file as Blob); 52 | }); 53 | }); 54 | 55 | Promise.all(promises).then(() => { 56 | // All files have been processed and newImages is ready for postprocessing 57 | console.log("All files processed", newPreviewImages); 58 | const finalImages = [...imageFiles, ...newPreviewImages]; 59 | onImagesUploaded(finalImages); 60 | }); 61 | }; 62 | 63 | return ( 64 | <> 65 | 73 | 99 | 100 | ); 101 | } 102 | 103 | export default ImageUploadButton; 104 | -------------------------------------------------------------------------------- /types.d.ts: -------------------------------------------------------------------------------- 1 | // https://developer.mozilla.org/en-US/docs/Web/API/SpeechRecognition#events 2 | interface SpeechRecognitionEventMap { 3 | audioend: Event; 4 | audiostart: Event; 5 | end: Event; 6 | error: SpeechRecognitionErrorEvent; 7 | nomatch: SpeechRecognitionEvent; 8 | result: SpeechRecognitionEvent; 9 | soundend: Event; 10 | soundstart: Event; 11 | speechend: Event; 12 | speechstart: Event; 13 | start: Event; 14 | } 15 | 16 | interface WikipediaQuery { 17 | text: string; 18 | collection?: string; 19 | n?: number; 20 | } 21 | 22 | interface WikipediaResult { 23 | Title: string; 24 | content: string; 25 | URL: string; 26 | score: string; 27 | } 28 | 29 | interface PapersQuery { 30 | query: string; 31 | limit?: number; 32 | } 33 | 34 | interface PapersResponse { 35 | payload: PapersPayload; 36 | } 37 | 38 | interface PapersPayload { 
39 | items: PapersItem[]; 40 | total_hits: number; 41 | has_more: boolean; 42 | } 43 | 44 | interface PapersItem { 45 | id: string; 46 | doi: string; 47 | date_published: string; 48 | title: string; 49 | abstract: string; 50 | authors: string[]; 51 | subjects: string[]; 52 | } 53 | 54 | interface BildungsplanQuery { 55 | query: string; 56 | top_n: number; 57 | } 58 | 59 | interface BildungsplanResult { 60 | score: number; 61 | text: string; 62 | } 63 | 64 | interface BildungsplanResponse { 65 | results: BildungsplanResult[]; 66 | } 67 | 68 | // Adding a construct signature 69 | interface SpeechRecognitionConstructor { 70 | new (): SpeechRecognition; 71 | } 72 | 73 | interface RetriableError extends Error {} 74 | interface FatalError extends Error {} 75 | 76 | // https://wicg.github.io/speech-api/#speechreco-section 77 | interface SpeechRecognition extends EventTarget { 78 | continuous: boolean; 79 | interimResults: boolean; 80 | lang: string; 81 | maxAlternatives: number; 82 | abort(): void; 83 | start(): void; 84 | stop(): void; 85 | // deno-lint-ignore no-explicit-any 86 | onend: ((this: SpeechRecognition, ev: Event) => any) | null; 87 | } 88 | 89 | // https://wicg.github.io/speech-api/#speechrecognitionevent 90 | interface SpeechRecognitionEventInit extends EventInit { 91 | resultIndex?: number; 92 | results: SpeechRecognitionResultList; 93 | } 94 | 95 | // https://wicg.github.io/speech-api/#dictdef-speechrecognitioneventinit 96 | interface SpeechRecognitionEvent extends Event { 97 | readonly resultIndex: number; 98 | readonly results: SpeechRecognitionResultList; 99 | } 100 | 101 | // https://wicg.github.io/speech-api/#enumdef-speechrecognitionerrorcode 102 | type SpeechRecognitionErrorCode = 103 | | "aborted" 104 | | "audio-capture" 105 | | "bad-grammar" 106 | | "language-not-supported" 107 | | "network" 108 | | "no-speech" 109 | | "not-allowed" 110 | | "service-not-allowed"; 111 | 112 | // https://wicg.github.io/speech-api/#speechrecognitionerrorevent 113 | interface SpeechRecognitionErrorEvent extends Event { 114 | readonly error: SpeechRecognitionErrorCode; 115 | readonly message: string; 116 | } 117 | 118 | interface Window { 119 | SpeechRecognition: SpeechRecognitionConstructor; 120 | webkitSpeechRecognition: SpeechRecognitionConstructor; 121 | } 122 | 123 | interface Image { 124 | type: string; 125 | image_url: { 126 | url: string; 127 | detail: string; 128 | }; 129 | preview: string; 130 | } 131 | 132 | interface TextEvent { 133 | readonly data: string; 134 | } 135 | 136 | interface Message { 137 | role: string; 138 | content: string | string[]; 139 | } 140 | 141 | interface Image { 142 | type: string; 143 | image_url: { 144 | url: string; 145 | detail: string; 146 | }; 147 | preview: string; 148 | } 149 | 150 | // interface AudioFileDict { 151 | // [key: string]: HTMLAudioElement[]; 152 | // } 153 | 154 | // const [audioFileDict, setAudioFileDict] = useState< 155 | // Record> 156 | // >({}); 157 | 158 | // interface AudioFileDict { 159 | // [key: string]: Record; 160 | // } 161 | 162 | interface AudioItem { 163 | audio: HTMLAudioElement; 164 | played: boolean; 165 | } 166 | 167 | interface AudioFileDict { 168 | [groupIndex: number]: Record; 169 | } 170 | 171 | interface HeaderContent { 172 | [key: string]: { 173 | [key: string]: string; 174 | }; 175 | } 176 | 177 | interface InternalizationContent { 178 | [key: string]: { 179 | [key: string]: string; 180 | }; 181 | } 182 | -------------------------------------------------------------------------------- /README.md: 
--------------------------------------------------------------------------------
1 | # School Bud-E 🎓🤖
2 | 
3 | ![School Bud-E Banner](banner.png)
4 | 
5 | Welcome to School Bud-E, your AI-powered educational assistant! 🚀
6 | 
7 | [![Join us on Discord](https://img.shields.io/discord/823813159592001537?color=5865F2&logo=discord&logoColor=white)](https://discord.gg/xBPBXfcFHd)
8 | 
9 | ## 🌟 Overview
10 | 
11 | School Bud-E is an intelligent and empathetic learning assistant designed to revolutionize the educational experience. Developed by [LAION](https://laion.ai) in collaboration with the ELLIS Institute Tübingen, Collabora, the Tübingen AI Center, the German Research Center for Artificial Intelligence (DFKI), and Intel, School Bud-E focuses on empathy, natural interaction, and personalized learning. A working demo of the application is available at [school.bud-e.ai](https://school.bud-e.ai).
12 | 
13 | ## 🚀 Features (WIP)
14 | 
15 | - 💬 Real-time responses to student queries
16 | - 🧠 Emotionally intelligent interactions
17 | - 🔄 Continuous conversation context
18 | - 👥 Multi-speaker and multi-language support
19 | - 🖥️ Local operation on consumer-grade hardware
20 | - 🔒 Privacy-focused design
21 | 
22 | ## 🛠️ Technology Stack
23 | 
24 | - **Frontend**: Fresh framework (Preact-based)
25 | - **Styling**: Tailwind CSS
26 | - **Language Support**: Internationalization for English and German
27 | - **AI Models**:
28 |   - Speech-to-Text: Whisper Large V3 (via Groq API)
29 |   - Large Language Model: GPT-4o or equivalent
30 | 
31 | ## 🏗️ Project Structure
32 | 
33 | - `routes/`: Application routes
34 | - `components/`: Reusable UI components
35 | - `islands/`: Interactive components (Fresh islands)
36 | - `internalization/`: Language-specific content
37 | - `static/`: Static assets
38 | 
39 | ## 🚀 Getting Started: Development
40 | 
41 | 1. Clone the repository:
42 | 
43 | ```bash
44 | git clone https://github.com/LAION-AI/school-bud-e-frontend.git
45 | ```
46 | 
47 | 2. Set up environment variables:
48 |    - Copy `.example.env` to `.env`
49 |    - Fill in the required API keys and endpoints
50 | 
51 | 3. Run the development server:
52 | 
53 | ```bash
54 | cd school-bud-e-frontend
55 | deno task start
56 | ```
57 | 
58 | 4. Open `http://localhost:8000` in your browser
59 | 
60 | ## 🚀 Getting Started: Production
61 | 
62 | 1. Without Docker:
63 | 
64 | ```bash
65 | deno task build
66 | deno task preview
67 | ```
68 | 
69 | 2. With Docker:
70 | 
71 | ```bash
72 | git clone https://github.com/LAION-AI/school-bud-e-frontend.git
73 | cd school-bud-e-frontend
74 | cd docker-compose
75 | cp .example.env .env  # then adjust the environment variables accordingly
76 | docker compose up
77 | ```
78 | 
79 | Then open http://localhost:8000 in your browser.
80 | 
81 | ## Interaction Between API Routes and Chat Components
82 | 
83 | This section describes how the various API routes and chat components interact within the application.
84 | 
85 | ### API Routes
86 | 
87 | - **`tts.ts`**:
88 |   - **Description**: Handles Text-to-Speech (TTS) requests. It receives text input and returns an audio response.
89 |   - **Endpoint**: `/api/tts`
90 |   - **Example Usage**: Fetching audio data for a given text input.
91 | 
92 | - **`chat.ts`**:
93 |   - **Description**: Manages chat messages. It processes incoming chat messages and returns appropriate responses.
94 |   - **Endpoint**: `/api/chat`
95 |   - **Example Usage**: Sending and receiving chat messages.
96 | 
97 | - **`getClientId.ts`**:
98 |   - **Description**: Provides a unique client ID for each user session.
99 |   - **Endpoint**: `/api/getClientId`
100 |   - **Example Usage**: Generating a unique identifier for a new chat session.
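For orientation, here is a rough sketch of how a client might call two of these routes. The request fields are taken from the handlers in `routes/api/tts.ts` and `routes/api/stt.ts`; treat it as an illustration, not as code from the repository:

```ts
// TTS: routes/api/tts.ts expects { text, textPosition, ttsUrl, ttsKey, ttsModel };
// empty strings fall back to the TTS_* values from .env.
const ttsResponse = await fetch("/api/tts", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    text: "Hallo!",
    textPosition: "0",
    ttsUrl: "",
    ttsKey: "",
    ttsModel: "",
  }),
});
const audioUrl = URL.createObjectURL(await ttsResponse.blob());
new Audio(audioUrl).play();

// STT: routes/api/stt.ts expects multipart form data with an "audio" file plus
// optional sttUrl / sttKey / sttModel overrides (a key must come from the form
// or from the STT_KEY env var, otherwise the route answers 400).
const form = new FormData();
form.append("audio", new Blob([/* recorded bytes */]), "recording.wav");
const sttResponse = await fetch("/api/stt", { method: "POST", body: form });
console.log(await sttResponse.text()); // the transcript
```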
101 | 
102 | ### Chat Components
103 | 
104 | - **`ChatIsland.tsx`**:
105 |   - **Description**: Responsible for rendering the chat interface. It interacts with the chat API to send and receive messages.
106 |   - **Usage**: Uses the client ID obtained from the `getClientId` API to manage user sessions.
107 |   - **Example Usage**: Displaying the chat UI and handling user interactions.
108 | 
109 | - **`ChatTemplate.tsx`**:
110 |   - **Description**: Serves as a template for the chat interface. It defines the layout and structure of the chat UI.
111 |   - **Usage**: Used by `ChatIsland.tsx` to render the chat interface consistently.
112 |   - **Example Usage**: Providing a consistent layout for the chat interface.
113 | 
114 | ### Interaction Flow
115 | 
116 | 1. When a user opens the chat interface, `ChatIsland.tsx` requests a unique client ID from the `getClientId` API.
117 | 2. The user sends a chat message through the chat interface rendered by `ChatIsland.tsx`.
118 | 3. `ChatIsland.tsx` sends the message to the chat API endpoint.
119 | 4. The chat API processes the message and returns a **streaming** response.
120 | 5. `ChatIsland.tsx` updates the chat interface with the response reflected in `ChatTemplate.tsx`.
121 | 6. If the user requests a TTS response, `ChatIsland.tsx` sends the text to the `tts` API endpoint.
122 | 7. The `tts` API returns the audio data, which is then played back to the user.
123 | 
124 | By following this interaction flow, the application ensures a seamless chat experience for users.
125 | 
126 | For more details, refer to the following files:
127 | 
128 | - `routes/api/tts.ts`
129 | - `routes/api/chat.ts`
130 | - `routes/api/getClientId.ts`
131 | - `islands/ChatIsland.tsx`
132 | - `components/ChatTemplate.tsx`
133 | 
134 | ## 🤝 Contributing
135 | 
136 | We welcome contributions to School Bud-E! Please join our [Discord server](https://discord.com/invite/eq3cAMZtCC) or contact us at contact@laion.ai to get involved.
137 | 
138 | ## 🚧 Experimental Demo Version
139 | 
140 | Please note that this is an early prototype application that may provide inaccurate answers or generate content that is not suitable for all audiences. We advise caution and encourage you to report any issues you encounter to us.
141 | 
142 | ## 📄 License
143 | 
144 | This project is licensed under the MIT License. See the [License.txt](License.txt) file for details.
145 | 
146 | ## 🙏 Acknowledgements
147 | 
148 | Special thanks to LAION, the ELLIS Institute Tübingen, Collabora, the Tübingen AI Center, the German Research Center for Artificial Intelligence (DFKI), and Intel for their contributions and support of this project.
149 | 
150 | ---
151 | 
152 | Built with ❤️ for the future of education.
153 | 
--------------------------------------------------------------------------------
/components/VoiceRecordButton.tsx:
--------------------------------------------------------------------------------
1 | import { useEffect, useRef, useState } from "preact/hooks";
2 | import { IS_BROWSER } from "$fresh/runtime.ts";
3 | 
4 | /**
5 |  * VoiceRecordButton component.
6 |  *
7 |  * @component
8 |  * @param {Object} props - The component props.
9 |  * @param {Function} props.onFinishRecording - Callback function called when recording is finished. It receives the transcript as a parameter.
10 |  * @param {Function} props.onInterimTranscript - Callback function called when interim transcript is available.
It receives the interim transcript as a parameter. 11 | * @param {number} props.resetTranscript - A number used to trigger a reset of the transcript. 12 | * @param {string} props.sttUrl - The URL for the speech-to-text service. 13 | * @param {string} props.sttKey - The API key for the speech-to-text service. 14 | * @param {string} props.sttModel - The model to use for speech-to-text conversion. 15 | * @returns {JSX.Element} The VoiceRecordButton component. 16 | */ 17 | function VoiceRecordButton({ 18 | onFinishRecording, 19 | onInterimTranscript, 20 | resetTranscript, 21 | sttUrl, 22 | sttKey, 23 | sttModel, 24 | }: { 25 | onFinishRecording: (transcript: string) => void; 26 | onInterimTranscript: (transcript: string) => void; 27 | resetTranscript: number; 28 | sttUrl: string; 29 | sttKey: string; 30 | sttModel: string; 31 | }) { 32 | const [isRecording, setIsRecording] = useState(false); 33 | const mediaRecorderRef = useRef(null); 34 | const audioChunksRef = useRef([]); 35 | const recognitionRef = useRef(null); 36 | 37 | // deno-lint-ignore no-explicit-any 38 | (globalThis as any).SpeechRecognition = 39 | // deno-lint-ignore no-explicit-any 40 | (globalThis as any).SpeechRecognition || 41 | // deno-lint-ignore no-explicit-any 42 | (globalThis as any).webkitSpeechRecognition; 43 | 44 | useEffect(() => { 45 | if (resetTranscript > 0) { 46 | console.log("Resetting transcript due to reset signal change."); 47 | } 48 | restartRecording(); 49 | }, [resetTranscript]); 50 | 51 | function restartRecording() { 52 | if (recognitionRef.current) { 53 | recognitionRef.current.onend = null; 54 | recognitionRef.current.stop(); 55 | // deno-lint-ignore no-explicit-any 56 | recognitionRef.current = new (globalThis as any).SpeechRecognition(); 57 | (recognitionRef.current as SpeechRecognition).continuous = false; 58 | (recognitionRef.current as SpeechRecognition).lang = "de-DE"; 59 | (recognitionRef.current as SpeechRecognition).interimResults = true; 60 | (recognitionRef.current as SpeechRecognition).onend = onEnd; 61 | (recognitionRef.current as SpeechRecognition).addEventListener( 62 | "result", 63 | onSpeak, 64 | ); 65 | (recognitionRef.current as SpeechRecognition).start(); 66 | setIsRecording(true); 67 | } 68 | } 69 | 70 | async function toggleRecording() { 71 | if (isRecording) { 72 | // Stop recording 73 | mediaRecorderRef.current?.stop(); 74 | setIsRecording(false); 75 | } else { 76 | // Start recording 77 | const stream = await navigator.mediaDevices.getUserMedia({ audio: true }); 78 | const mediaRecorder = new MediaRecorder(stream); 79 | mediaRecorderRef.current = mediaRecorder; 80 | 81 | mediaRecorder.ondataavailable = (event) => { 82 | audioChunksRef.current.push(event.data); 83 | }; 84 | 85 | mediaRecorder.onstop = async () => { 86 | const audioBlob = new Blob(audioChunksRef.current, { 87 | type: "audio/wav", 88 | }); 89 | audioChunksRef.current = []; 90 | await sendAudioToServer(audioBlob); 91 | }; 92 | 93 | mediaRecorder.start(); 94 | setIsRecording(true); 95 | } 96 | } 97 | 98 | const sendAudioToServer = async (audioBlob: Blob) => { 99 | const formData = new FormData(); 100 | formData.append("audio", audioBlob, "recording.wav"); 101 | 102 | if (sttKey.startsWith("gsk_")) { 103 | sttUrl = sttUrl == "" ? "https://api.groq.com/openai/v1/audio/transcriptions" : sttUrl; 104 | sttModel = sttModel == "" ? 
"whisper-large-v3-turbo" : sttModel; 105 | } 106 | 107 | formData.append("sttUrl", sttUrl); 108 | formData.append("sttKey", sttKey); 109 | formData.append("sttModel", sttModel); 110 | 111 | try { 112 | const response = await fetch("/api/stt", { 113 | method: "POST", 114 | body: formData, 115 | }); 116 | 117 | if (response.ok) { 118 | console.log("Audio uploaded successfully"); 119 | const text = await response.text(); 120 | console.log("Text from VoiceRecordButton:", text); 121 | onFinishRecording(text); 122 | } else { 123 | console.error("Failed to upload audio"); 124 | } 125 | } catch (error) { 126 | console.error("Error uploading audio:", error); 127 | } 128 | }; 129 | 130 | function onEnd() { 131 | console.log("Speech recognition has stopped. Starting again ..."); 132 | setIsRecording(false); 133 | // restartRecording(); 134 | } 135 | 136 | const prependToTranscript = ""; 137 | // deno-lint-ignore no-explicit-any 138 | function onSpeak(event: any) { 139 | // console.log(resetTranscript); 140 | let interimTranscript = ""; 141 | for (let i = event.resultIndex; i < event.results.length; ++i) { 142 | if (event.results[i].isFinal) { 143 | console.log("Final transcript: ", event.results[i][0].transcript); 144 | interimTranscript = event.results[i][0].transcript; 145 | } else { 146 | interimTranscript += event.results[i][0].transcript; 147 | } 148 | } 149 | // Here, you call onInterimTranscript with the interimTranscript 150 | if (interimTranscript) { 151 | console.log("Interim transcript: ", prependToTranscript); 152 | onInterimTranscript(prependToTranscript + interimTranscript); 153 | } 154 | } 155 | 156 | return ( 157 | 184 | ); 185 | } 186 | 187 | export default VoiceRecordButton; 188 | -------------------------------------------------------------------------------- /routes/api/tts.ts: -------------------------------------------------------------------------------- 1 | import { Handlers } from "$fresh/server.ts"; 2 | import { Buffer } from "npm:buffer"; 3 | 4 | const TTS_KEY = Deno.env.get("TTS_KEY") || ""; 5 | const TTS_URL = Deno.env.get("TTS_URL") || ""; 6 | const TTS_MODEL = Deno.env.get("TTS_MODEL") || ""; 7 | 8 | async function callMARS6API( 9 | text: string, 10 | ttsUrl: string, 11 | ttsKey: string, 12 | ) { 13 | async function createTTSTask( 14 | ttsUrl: string, 15 | ttsKey: string, 16 | voiceID: number = 20299, 17 | language: number = 1, 18 | ) { 19 | try { 20 | const response = await fetch( 21 | `${ttsUrl}/tts`, 22 | { 23 | method: "POST", 24 | headers: { 25 | "Content-Type": "application/json", 26 | "x-api-key": ttsKey, 27 | }, 28 | body: JSON.stringify({ 29 | text: text, 30 | voice_id: voiceID, 31 | language: language, 32 | }), 33 | }, 34 | ); 35 | const responseJSON = await response.json(); 36 | console.log(`Status code for creating TTS: ${response.status}`); 37 | if (response.ok) { 38 | return responseJSON.task_id; 39 | } else { 40 | console.error( 41 | `Failed to create TTS task for MARS6. 
Status code: ${response.status}: ${response.statusText}`, 42 | ); 43 | } 44 | } catch (error) { 45 | console.error(`Error in createTTSTask: ${error}`); 46 | } 47 | } 48 | 49 | async function pollTTSTask(ttsUrl: string, ttsKey: string, taskID: string) { 50 | const delay = (ms: number) => 51 | new Promise((resolve) => setTimeout(resolve, ms)); 52 | try { 53 | const response = await fetch(`${ttsUrl}/tts/${taskID}`, { 54 | method: "GET", 55 | headers: { 56 | "Content-Type": "application/json", 57 | "x-api-key": ttsKey, 58 | }, 59 | }); 60 | 61 | const responseJSON = await response.json(); 62 | const status = responseJSON.status; 63 | console.log(`Polling: ${status}`); 64 | 65 | if (status === "SUCCESS") { 66 | return responseJSON.run_id; 67 | } 68 | await delay(1500); // Wait for 1.5 seconds before the next poll. 69 | return pollTTSTask(ttsUrl, ttsKey, taskID); // Recursive call for polling. 70 | } catch (error) { 71 | console.error("Error polling TTS task:", error); 72 | throw error; 73 | } 74 | } 75 | 76 | async function getTTSAudioResult( 77 | ttsUrl: string, 78 | ttsKey: string, 79 | runID: number, 80 | ) { 81 | try { 82 | const response = await fetch( 83 | `${ttsUrl}/tts-result/${runID}`, 84 | { 85 | method: "GET", 86 | headers: { 87 | "x-api-key": ttsKey, 88 | }, 89 | }, 90 | ); 91 | if (response.ok) { 92 | return await response.arrayBuffer(); 93 | } else { 94 | console.error( 95 | `Failed to fetch TTS audio file from MARS6. Status code: ${response.status}: ${response.statusText}`, 96 | ); 97 | } 98 | } catch (error) { 99 | console.error(`Error in fetching TTS audio file from MARS6: ${error}`); 100 | } 101 | } 102 | 103 | try { 104 | const taskID: string = await createTTSTask( 105 | ttsUrl, 106 | ttsKey, 107 | ); 108 | const runID = await pollTTSTask(ttsUrl, ttsKey, taskID); 109 | return await getTTSAudioResult(ttsUrl, ttsKey, runID); 110 | } catch (error) { 111 | console.error(`Failed to call MARS6: ${error}`); 112 | throw error; 113 | } 114 | } 115 | 116 | async function textToSpeech( 117 | text: string, 118 | textPosition: string, 119 | ttsUrl: string, 120 | ttsKey: string, 121 | ttsModel: string, 122 | ): Promise { 123 | const boldTextRegex = /\*\*(.*?)\*\*/g; 124 | text = String(text).replace(boldTextRegex, "$1"); 125 | 126 | const buddyRegex = /bud-e/gi; 127 | text = text.replace(buddyRegex, "buddy"); 128 | 129 | console.log("textToSpeech", text); 130 | console.log("textPosition", textPosition); 131 | console.log("ttsUrl", ttsUrl); 132 | console.log("ttsKey", ttsKey); 133 | console.log("ttsModel", ttsModel); 134 | 135 | const useThisTttsUrl = ttsUrl != "" ? ttsUrl : TTS_URL; 136 | const useThisTtsKey = ttsKey != "" ? ttsKey : TTS_KEY; 137 | const useThisTtsModel = ttsModel != "" ? ttsModel : TTS_MODEL; 138 | 139 | // Deepgram random with 40 chars 140 | // 9371dfaed6d8b42e9eaf9458ba8604126fb373d0 141 | // STT 142 | // curl \ 143 | // -X POST \ 144 | // -H "Authorization: Token 6c4fa34dac9fb4c3aa6bc0421ca805e173e85ed3" \ 145 | // -H "Content-Type: application/json" \ 146 | // -d '{"url":"https://static.deepgram.com/examples/Bueller-Life-moves-pretty-fast.wav"}' \ 147 | // "https://api.deepgram.com/v1/listen?language=en&model=nova-2" 148 | 149 | // TTS 150 | // curl \ 151 | // -X POST \ 152 | // -H "Authorization: Token YOUR_SECRET" \ 153 | // -H "Content-Type: text/plain" \ 154 | // -d "Deepgram is great for real-time conversations… and also, you can build apps for things like customer support, logistics, and more. What do you think of the voices?" 
\ 155 | // "https://api.deepgram.com/v1/speak?model=aura-helios-en" \ 156 | // -o audio.mp3 157 | 158 | try { 159 | switch (useThisTtsModel) { 160 | case "MARS6": { 161 | const audioData = await callMARS6API( 162 | text, 163 | ttsUrl, 164 | ttsKey, 165 | ); 166 | if (audioData) { 167 | return Buffer.from(audioData); 168 | } else { 169 | console.error(`Failed to synthesize speech.`); 170 | break; 171 | } 172 | } 173 | case "aura-helios-en": { 174 | const startTime = Date.now(); 175 | const response = await fetch(useThisTttsUrl, { 176 | method: "POST", 177 | headers: { 178 | "Content-Type": "text/plain", 179 | "Authorization": `Token ${useThisTtsKey}`, 180 | }, 181 | body: text, 182 | }); 183 | if (response.ok) { 184 | const audioData = await response.arrayBuffer(); 185 | console.log( 186 | `Audio file received for ${textPosition}, Latency:`, 187 | Date.now() - startTime, 188 | ); 189 | return Buffer.from(audioData); 190 | } else { 191 | console.error( 192 | `Failed to synthesize speech. Status code: ${response.status}: ${response.statusText}`, 193 | ); 194 | } 195 | break; 196 | } 197 | default: { 198 | const startTime = Date.now(); 199 | const response = await fetch(useThisTttsUrl, { 200 | method: "POST", 201 | headers: { 202 | "Content-Type": "application/json", 203 | "Authorization": `Bearer ${useThisTtsKey}`, 204 | }, 205 | body: JSON.stringify({ 206 | text: text, 207 | normalize: true, 208 | format: "mp3", 209 | reference_id: useThisTtsModel, 210 | mp3_bitrate: 64, 211 | opus_bitrate: -1000, 212 | latency: "normal", 213 | }), 214 | }); 215 | 216 | if (response.ok) { 217 | const audioData = await response.arrayBuffer(); 218 | console.log( 219 | `Audio file received for ${textPosition}, Latency:`, 220 | Date.now() - startTime, 221 | ); 222 | return Buffer.from(audioData); 223 | } else { 224 | console.error( 225 | `Failed to synthesize speech. 
Status code: ${response.status}: ${response.statusText}`, 226 | ); 227 | } 228 | } 229 | } 230 | } catch (error) { 231 | console.error(`Error in textToSpeech: ${error}`); 232 | } 233 | return null; 234 | } 235 | 236 | export const handler: Handlers = { 237 | async POST(req) { 238 | const { text, textPosition, ttsUrl, ttsKey, ttsModel } = await req.json(); 239 | // console.log("Text:", text); 240 | 241 | if (!text) { 242 | return new Response("No text provided", { status: 400 }); 243 | } 244 | 245 | const audioData = await textToSpeech( 246 | text, 247 | textPosition, 248 | ttsUrl, 249 | ttsKey, 250 | ttsModel, 251 | ); 252 | 253 | if (audioData) { 254 | const response = new Response(audioData, { 255 | status: 200, 256 | headers: { 257 | "Content-Type": "audio/mp3", // Changed from audio/wav to audio/mp3 258 | }, 259 | }); 260 | return response; 261 | } else { 262 | return new Response("Failed to synthesize speech", { 263 | status: 500, 264 | }); 265 | } 266 | }, 267 | }; 268 | -------------------------------------------------------------------------------- /routes/api/chat.ts: -------------------------------------------------------------------------------- 1 | import { Handlers } from "$fresh/server.ts"; 2 | import { ServerSentEventStream } from "https://deno.land/std@0.210.0/http/server_sent_event_stream.ts"; 3 | 4 | import { chatContent } from "../../internalization/content.ts"; 5 | 6 | // const API_URL = Deno.env.get("API_URL_TOGETHER") || ""; 7 | // const API_KEY = Deno.env.get("API_KEY_TOGETHER") || ""; 8 | // const API_MODEL = "MISTRALAI/MIXTRAL-8X22B-INSTRUCT-V0.1"; 9 | 10 | const API_URL = Deno.env.get("LLM_URL") || ""; 11 | const API_KEY = Deno.env.get("LLM_KEY") || ""; 12 | const API_MODEL = Deno.env.get("LLM_MODEL") || ""; 13 | const API_IMAGE_URL = Deno.env.get("VLM_URL") || ""; 14 | const API_IMAGE_KEY = Deno.env.get("VLM_KEY") || ""; 15 | const API_IMAGE_MODEL = Deno.env.get("VLM_MODEL") || ""; 16 | const API_IMAGE_CORRECTION_MODEL = Deno.env.get("VLM_CORRECTION_MODEL") || ""; 17 | 18 | // const CURRENT_DATETIME = new Date().toISOString(); 19 | // console.log(CURRENT_DATETIME, API_MODEL); 20 | 21 | interface Message { 22 | role: string; 23 | content: string; 24 | } 25 | 26 | async function getModelResponseStream(messages: Message[], lang: string, universalApiKey: string,llmApiUrl: string, llmApiKey: string, llmApiModel: string, systemPrompt: string, vlmApiUrl: string, vlmApiKey: string, vlmApiModel: string, vlmCorrectionModel: string) { 27 | 28 | if (universalApiKey != '' && !universalApiKey.startsWith("sbe-")) { 29 | return new Response("Invalid Universal API Key. It needs to start with '**sbe-**'.", { status: 400 }); 30 | } 31 | 32 | 33 | let isLastMessageAssistant = 34 | messages[messages.length - 1].role === "assistant"; 35 | while (isLastMessageAssistant) { 36 | messages.pop(); 37 | isLastMessageAssistant = messages[messages.length - 1].role === "assistant"; 38 | } 39 | 40 | // check if the LAST message has #correction or #korrektur in the content (case insensitive) 41 | const isCorrectionInLastMessage = hasKorrekturHashtag(messages); 42 | 43 | console.log("isCorrectionInLastMessage", isCorrectionInLastMessage); 44 | 45 | let useThisSystemPrompt = isCorrectionInLastMessage ? 
chatContent[lang].correctionSystemPrompt : chatContent[lang].systemPrompt; 46 | 47 | if (systemPrompt != "") { 48 | useThisSystemPrompt = systemPrompt; 49 | } 50 | 51 | console.log(useThisSystemPrompt); 52 | 53 | messages.unshift({ 54 | role: "system", 55 | content: useThisSystemPrompt, 56 | }); 57 | 58 | // looks for messages with array content that contains objects with a 'type' property set to 'image_url' 59 | 60 | const isImageInMessages = messages.some((message) => { 61 | if (Array.isArray(message.content)) { 62 | // Check if any item in the array has type "image_url" 63 | return message.content.some((item) => item.type === "image_url"); 64 | } else if ( 65 | typeof message.content === "object" && message.content !== null 66 | ) { 67 | // Check if single object has type "image_url" 68 | return (message.content as { type?: string }).type === "image_url"; 69 | } 70 | return false; 71 | }); 72 | 73 | 74 | let api_url = ""; 75 | let api_key = ""; 76 | let api_model = ""; 77 | if (isImageInMessages) { 78 | api_url = ""; 79 | api_key = ""; 80 | api_model = ""; 81 | } 82 | if (isCorrectionInLastMessage) { 83 | api_model = ""; 84 | } 85 | 86 | if (universalApiKey != '' && universalApiKey.startsWith("sbe-")) { 87 | api_url = llmApiUrl != '' ? llmApiUrl : API_URL; 88 | api_key = llmApiKey != '' ? llmApiKey : API_KEY; 89 | api_model = llmApiModel != '' ? llmApiModel : API_MODEL; 90 | if (isImageInMessages) { 91 | api_url = vlmApiUrl != '' ? vlmApiUrl : API_IMAGE_URL; 92 | api_key = vlmApiKey != '' ? vlmApiKey : API_IMAGE_KEY; 93 | api_model = vlmApiModel != '' ? vlmApiModel : API_IMAGE_MODEL; 94 | } 95 | if (isCorrectionInLastMessage) { 96 | api_model = vlmCorrectionModel != '' ? vlmCorrectionModel : API_IMAGE_CORRECTION_MODEL; 97 | } 98 | } else { 99 | api_url = llmApiUrl != '' ? llmApiUrl : ''; 100 | api_key = llmApiKey != '' ? llmApiKey : ''; 101 | api_model = llmApiModel != '' ? llmApiModel : ''; 102 | if (isImageInMessages) { 103 | api_url = vlmApiUrl != '' ? vlmApiUrl : ''; 104 | api_key = vlmApiKey != '' ? vlmApiKey : ''; 105 | api_model = vlmApiModel != '' ? vlmApiModel : ''; 106 | } 107 | } 108 | 109 | console.log("Using this API URL: ", api_url); 110 | console.log("Using this API Key: ", api_key); 111 | console.log("Using this API Model: ", api_model); 112 | 113 | if (api_url == '' || api_key == '' || api_model == '') { 114 | const missingSettingsText = "The following settings are missing: " + (api_url == '' ? "api_url " : "") + (api_key == '' ? "api_key " : "") + (api_model == '' ? "api_model " : "") + ". The current generation mode is: " + (isImageInMessages ? "VLM" : "LLM") + ". The current correction mode is: " + (isCorrectionInLastMessage ? "Running with correction" : "Running without correction"); 115 | return new Response(missingSettingsText, { status: 400 }); 116 | } 117 | 118 | // let api_url = llmApiUrl != '' ? llmApiUrl : API_URL; 119 | // let api_key = llmApiKey != '' ? llmApiKey : API_KEY; 120 | // let api_model = llmApiModel != '' ? llmApiModel : API_MODEL; 121 | // if (isImageInMessages) { 122 | // api_url = vlmApiUrl != '' ? vlmApiUrl : API_IMAGE_URL; 123 | // api_key = vlmApiKey != '' ? vlmApiKey : API_IMAGE_KEY; 124 | // api_model = vlmApiModel != '' ? vlmApiModel : API_IMAGE_MODEL; 125 | // } 126 | // if (isCorrectionInLastMessage) { 127 | // api_model = vlmCorrectionModel != '' ? 
vlmCorrectionModel : API_IMAGE_CORRECTION_MODEL;
128 |   // }
129 |
130 |
131 |   const fetchOptions: RequestInit = {
132 |     method: "POST",
133 |     headers: {
134 |       "Authorization": `Bearer ${api_key}`,
135 |       "Content-Type": "application/json",
136 |     },
137 |     body: JSON.stringify({
138 |       "messages": messages,
139 |       "model": api_model,
140 |       "stream": true,
141 |     }),
142 |   };
143 |
144 |   // console.log("body", {
145 |   //   "messages": messages,
146 |   //   "model": API_MODEL,
147 |   //   "stream": true,
148 |   // });
149 |   const response = await fetch(api_url, fetchOptions);
150 |
151 |   console.log("response", response);
152 |   console.log("response status", response.status);
153 |
154 |   if (response.status !== 200) {
155 |     // const res = await response.json();
156 |     // console.log(res);
157 |     return new Response(response.statusText, { status: response.status });
158 |   }
159 |
160 |   // if (!response.body) {
161 |   //   return new Response("Failed to get response body from external API", {
162 |   //     status: 500,
163 |   //   });
164 |   // }
165 |
166 |   // if (response.status === 400) {
167 |   //   console.log("Bad request");
168 |   //   const res = await response.json();
169 |   //   console.log(res);
170 |   //   return new Response("Bad request", { status: 400 });
171 |   // }
172 |
173 |   const reader = response.body!.getReader();
174 |   const decoder = new TextDecoder();
175 |   let buffer = "";
176 |
177 |   return new Response(
178 |     new ReadableStream({
179 |       async start(controller) {
180 |         try {
181 |           while (true) {
182 |             const { value, done } = await reader.read();
183 |             if (done) break;
184 |             const chunk = decoder.decode(value, { stream: true });
185 |             buffer += chunk;
186 |
187 |             const lines = buffer.split("\n");
188 |             buffer = lines.pop() || "";
189 |
190 |             lines.forEach((line: string) => {
191 |               // console.log(line);
192 |               if (line.startsWith("data: ") && line !== "data: [DONE]") {
193 |                 const jsonStr = line.substring(6); // strip the "data: " prefix (6 characters) before parsing
194 |                 try {
195 |                   const data = JSON.parse(jsonStr);
196 |                   if (
197 |                     data.choices[0] !== undefined &&
198 |                     data.choices[0].delta.content !== undefined &&
199 |                     data.choices[0].delta.content !== null
200 |                   ) {
201 |                     if (data.choices[0].delta.content === "<|im_end|>") {
202 |                       console.log("End of model response!");
203 |                       controller.close();
204 |                     } else {
205 |                       controller.enqueue(
206 |                         {
207 |                           data: JSON.stringify(
208 |                             data.choices[0].delta.content,
209 |                           ),
210 |                           id: Date.now(),
211 |                           event: "message",
212 |                         },
213 |                       );
214 |                     }
215 |                   }
216 |                 } catch (error: unknown) { // catch variables may only be annotated as any/unknown
217 |                   console.error("Error parsing JSON:", error, jsonStr);
218 |                   controller.close();
219 |                 }
220 |               } else if (line === "data: [DONE]") {
221 |                 console.log("Closing controller!");
222 |                 controller.close();
223 |               }
224 |             });
225 |           }
226 |         } catch (_error) {
227 |           // console.error("Error reading the stream", error);
228 |           controller.close();
229 |         }
230 |       },
231 |       cancel(err) {
232 |         console.log("cancel", err);
233 |         console.log("cancel");
234 |       },
235 |     }).pipeThrough(new ServerSentEventStream()),
236 |     {
237 |       headers: {
238 |         "Content-Type": "text/event-stream",
239 |       },
240 |     },
241 |   );
242 | }
243 |
244 | // deno-lint-ignore no-explicit-any
245 | function hasKorrekturHashtag(messages: any[]): boolean {
246 |   if (!messages || messages.length === 0) return false;
247 |
248 |   const lastMessage = messages[messages.length - 1];
249 |   if (!lastMessage || !lastMessage.content) return false;
250 |
251 |   let content = '';
252 |
253 |   // Handle different content formats
254 |   if (typeof 
lastMessage.content === 'string') { 255 | content = lastMessage.content; 256 | } else if (Array.isArray(lastMessage.content)) { 257 | // Handle array of content objects 258 | const textContent = lastMessage.content.find( 259 | // deno-lint-ignore no-explicit-any 260 | (item: any) => item.type === 'text' 261 | ); 262 | content = textContent?.text || ''; 263 | } 264 | 265 | return content.toLowerCase().includes('#korrektur') || 266 | content.toLowerCase().includes('#correction'); 267 | } 268 | 269 | export const handler: Handlers = { 270 | async POST(req: Request) { 271 | const payload = await req.json(); 272 | 273 | // console.log(payload.lang); 274 | 275 | // payload.messages.unshift({ 276 | // role: "system", 277 | // content: 278 | // "You are an intelligent and empathetic learning assistant. Always respond empathetically, friendly, curiously and appropriately to the school context. Respond briefly and to the point. Your name is BUD-E and you would be created by LAION. LAION is a non-profit organization for the democratization of open source AI. Try to keep the conversation friendly, educational and entertaining and to keep it running while taking into account previously said information. Respond briefly, concisely and to the point.", 279 | // }); 280 | 281 | // console.log("Model used: ", API_MODEL); 282 | // console.log("payload messages", payload.messages); 283 | return getModelResponseStream(payload.messages, payload.lang, payload.universalApiKey, payload.llmApiUrl, payload.llmApiKey, payload.llmApiModel, payload.systemPrompt, payload.vlmApiUrl, payload.vlmApiKey, payload.vlmApiModel, payload.vlmCorrectionModel); 284 | }, 285 | }; 286 | -------------------------------------------------------------------------------- /components/ChatTemplate.tsx: -------------------------------------------------------------------------------- 1 | import { useEffect, useState } from "preact/hooks"; 2 | import { chatTemplateContent } from "../internalization/content.ts"; 3 | 4 | function downloadAudioFiles( 5 | items: { [key: string]: { audio: HTMLAudioElement } }, 6 | ) { 7 | const timestamp = new Date().getTime(); 8 | const nicelyFormattedTimestamp = new Date(timestamp).toISOString().slice(0, 19) 9 | .replace(/[-:]/g, "-"); 10 | 11 | // If there's only one item, download it directly 12 | if (Object.keys(items).length === 1) { 13 | const singleAudio = Object.values(items)[0].audio; 14 | fetch(singleAudio.src) 15 | .then(response => response.blob()) 16 | .then(blob => { 17 | const url = URL.createObjectURL(blob); 18 | const a = document.createElement("a"); 19 | a.href = url; 20 | a.download = `audio-${nicelyFormattedTimestamp}.mp3`; 21 | a.click(); 22 | URL.revokeObjectURL(url); 23 | }); 24 | return; 25 | } 26 | 27 | // For multiple items, download all MP3s first 28 | const mp3Promises = Object.values(items).map(item => 29 | fetch(item.audio.src) 30 | .then(response => response.blob()) 31 | ); 32 | 33 | Promise.all(mp3Promises) 34 | .then(blobs => { 35 | // Combine all MP3 blobs into a single blob 36 | const combinedBlob = new Blob(blobs, { type: 'audio/mp3' }); 37 | 38 | // Create download link for combined file 39 | const url = URL.createObjectURL(combinedBlob); 40 | const a = document.createElement("a"); 41 | a.href = url; 42 | a.download = `audio-${nicelyFormattedTimestamp}.mp3`; 43 | a.click(); 44 | URL.revokeObjectURL(url); 45 | }); 46 | } 47 | 48 | function convertDoiToUrl(doi: string): string { 49 | // Remove 'DOI: ' if present and handle null DOIs 50 | const cleanDoi = doi.replace(/^DOI:\s*/, 
''); 51 | return cleanDoi === 'null' ? '#' : `https://doi.org/${cleanDoi}`; 52 | } 53 | 54 | function renderTextWithLinksAndBold(text: string) { 55 | // Updated regex to catch DOIs in the format "DOI: 10.1234/xxx" or just "10.1234/xxx" 56 | const parts = text.split(/((?:\*\*.*?\*\*)|(?:https?:\/\/[^\s]+)|(?:www\.[^\s]+)|(?:DOI:\s*(?:null|[\d.]+\/[^\s]+))|(?:(? { 59 | if (part.startsWith('**') && part.endsWith('**')) { 60 | return {part.slice(2, -2)}; 61 | } else if (part.startsWith('DOI:') || part.match(/^10\.\d+\//)) { 62 | return ( 63 | 70 | {part} 71 | 72 | ); 73 | } else if (part.startsWith('http://') || part.startsWith('https://') || part.startsWith('www.')) { 74 | const url = part.startsWith('www.') ? `https://${part}` : part; 75 | return ( 76 | 83 | {part} 84 | 85 | ); 86 | } 87 | return {part}; 88 | }); 89 | } 90 | 91 | function ChatTemplate( 92 | { 93 | lang, 94 | parentImages, 95 | messages, 96 | readAlways, 97 | autoScroll, 98 | currentEditIndex, 99 | audioFileDict, 100 | onRefreshAction, 101 | onEditAction, 102 | onSpeakAtGroupIndexAction, 103 | onImageChange, 104 | onToggleAutoScrollAction, 105 | onToggleReadAlwaysAction, 106 | }: { 107 | lang: string; 108 | parentImages: Image[]; 109 | messages: Message[]; 110 | isComplete: boolean; 111 | readAlways: boolean; 112 | autoScroll: boolean; 113 | currentEditIndex: number; 114 | audioFileDict: AudioFileDict; 115 | onToggleAutoScrollAction: () => void; 116 | onToggleReadAlwaysAction: () => void; 117 | onSpeakAtGroupIndexAction: (groupIndex: number) => void; 118 | onRefreshAction: (groupIndex: number) => void; 119 | onEditAction: (groupIndex: number) => void; 120 | onUploadActionToMessages: (uploadedMessages: Message[]) => void; 121 | onImageChange: (images: Image[]) => void; 122 | onTrashAction: () => void; 123 | }, 124 | ) { 125 | const [images, setImages] = useState([]); 126 | const [imageFiles, setImageFiles] = useState([]); 127 | 128 | // deno-lint-ignore no-explicit-any 129 | const deleteImage = (event: any) => { 130 | const index = images.findIndex((image) => 131 | image.preview === event.target.src 132 | ); 133 | const newImages = [...images]; 134 | const newImageFiles = [...imageFiles]; 135 | newImages.splice(index, 1); 136 | newImageFiles.splice(index, 1); 137 | setImages(newImages); 138 | setImageFiles(newImageFiles); 139 | onImageChange(newImageFiles); 140 | }; 141 | 142 | useEffect(() => { 143 | setImages(parentImages); 144 | }, [parentImages]); 145 | 146 | return ( 147 |
152 | 160 | 168 | {messages?.map((item, groupIndex) => { 169 | return ( 170 |
175 | 179 | {item.role === "user" ? "Du" : "Bud-E"} 180 | {groupIndex !== 0 && ( 181 | 191 | )} 192 | 193 | {item.role !== "user" && groupIndex !== 0 && ( 194 | 204 | )} 205 | {item.role !== "user" && ( 206 | 234 | )} 235 | {item.role !== "user" && audioFileDict[groupIndex] && 236 | Object.keys(audioFileDict[groupIndex]).length > 0 && ( 237 | // download audio file audioFileDict[groupIndex][0].audio.src to local files 238 | 251 | )} 252 | 253 |
260 | {typeof item.content === "string" 261 | ? {renderTextWithLinksAndBold(item.content)} 262 | : ( 263 | 264 | {typeof item.content[0] === "string" 265 | ? renderTextWithLinksAndBold(item.content.join("")) 266 | : ( 267 |
268 | {(item.content as unknown as { 269 | "type": string; 270 | "text": string; 271 | "image_url": { url: string }; 272 | }[]).map((content, contentIndex) => { 273 | if (content.type === "text") { 274 | return ( 275 | 276 | {renderTextWithLinksAndBold(content.text)} 277 | 278 | ); 279 | } else if (content.type === "image_url") { 280 | return ( 281 | User uploaded image 287 | ); 288 | } 289 | })} 290 |
291 | )} 292 |
293 | )} 294 |
295 |
296 | ); 297 | })} 298 | {images.length > 0 && ( 299 |
300 |
301 | {images.map((image, index) => ( 302 | {`Thumbnail 309 | ))} 310 |
311 |
312 | )} 313 |
314 |   );
315 | }
316 |
317 | export default ChatTemplate;
318 |
--------------------------------------------------------------------------------
/static/lines.svg:
--------------------------------------------------------------------------------
(SVG markup not preserved in this listing)
--------------------------------------------------------------------------------
/components/Settings.tsx:
--------------------------------------------------------------------------------
1 | import { useState } from "preact/hooks";
2 | import { settingsContent } from "../internalization/content.ts";
3 |
4 | export default function Settings({
5 |   settings,
6 |   onSave,
7 |   onClose,
8 |   lang = "en",
9 | }: {
10 |   settings: {
11 |     universalApiKey: string;
12 |     apiUrl: string;
13 |     apiKey: string;
14 |     apiModel: string;
15 |     ttsUrl: string;
16 |     ttsKey: string;
17 |     ttsModel: string;
18 |     sttUrl: string;
19 |     sttKey: string;
20 |     sttModel: string;
21 |     systemPrompt: string;
22 |     vlmUrl: string;
23 |     vlmKey: string;
24 |     vlmModel: string;
25 |     vlmCorrectionModel: string;
26 |   };
27 |   onSave: (newSettings: typeof settings) => void;
28 |   onClose: () => void;
29 |   lang?: string;
30 | }) {
31 |   const [newSettings, setNewSettings] = useState({
32 |     ...settings,
33 |   });
34 |   const [showAdvanced, setShowAdvanced] = useState(false);
35 |
36 |   const providerConfigs = {
37 |     googleai: {
38 |       keyCharacteristics: { startsWith: "AI" },
39 |       config: {
40 |         api: {
41 |           url:
42 |             "https://generativelanguage.googleapis.com/v1beta/openai/chat/completions",
43 |           model: "gemini-1.5-flash",
44 |         },
45 |         vlm: {
46 |           url:
47 |             "https://generativelanguage.googleapis.com/v1beta/openai/chat/completions",
48 |           model: "gemini-1.5-flash",
49 |         },
50 |       },
51 |     },
52 |     hyprlab: {
53 |       keyCharacteristics: { startsWith: "hypr-lab" },
54 |       config: {
55 |         api: {
56 |           url: "https://api.hyprlab.io/v1/chat/completions",
57 |           model: "gemini-1.5-pro",
58 |         },
59 |         vlm: {
60 |           url: "https://api.hyprlab.io/v1/chat/completions",
61 |           model: "gemini-1.5-pro",
62 |         },
63 |       },
64 |     },
65 |     groq: {
66 |       keyCharacteristics: { startsWith: "gsk_" },
67 |       config: {
68 |         api: {
69 |           url: "https://api.groq.com/openai/v1/chat/completions",
70 |           model: "llama-3.3-70b-versatile",
71 |         },
72 |         vlm: {
73 |           url: "https://api.groq.com/openai/v1/chat/completions",
74 |           model: "llama-3.2-90b-vision-preview",
75 |         },
76 |         stt: {
77 |           url: "https://api.groq.com/openai/v1/audio/transcriptions",
78 |           model: "whisper-large-v3-turbo",
79 |         },
80 |       },
81 |     },
82 |     sambanova: {
83 |       keyCharacteristics: { length: 36 },
84 |       config: {
85 |         api: {
86 |           url: "https://api.sambanova.ai/v1/chat/completions",
87 |           model: "Meta-Llama-3.3-70B-Instruct",
88 |         },
89 |         vlm: {
90 |           url: "https://api.sambanova.ai/v1/chat/completions",
91 |           model: "Meta-Llama-3.2-90B-Vision-Instruct",
92 |         },
93 |       },
94 |     },
95 |     fish: {
96 |       keyCharacteristics: { length: 32 },
97 |       config: {
98 |         tts: {
99 |           url: "https://api.fish.audio/v1/tts",
100 |           model: lang === "de" ? 
"61561f50f41046e0b267aa4cb30e4957" : "6f45f4694ff54d6980337a68902e20d7", 101 | }, 102 | }, 103 | }, 104 | deepgram: { 105 | keyCharacteristics: { length: 40 }, 106 | config: { 107 | stt: { 108 | url: `https://api.deepgram.com/v1/listen?language=en&model=nova-2`, 109 | model: "nova-2", 110 | }, 111 | tts: { 112 | url: `https://api.deepgram.com/v1/speak?model=aura-helios-en`, 113 | model: "aura-helios-en", 114 | }, 115 | }, 116 | }, 117 | }; 118 | 119 | function updateSettings(key: string, value: string) { 120 | const updatedSettings = { ...newSettings }; 121 | 122 | if (key !== "universalApiKey") { 123 | if (key.endsWith("Key") && value !== "") { 124 | const serviceType = key.slice(0, -3); 125 | const urlKey = `${serviceType}Url` as keyof typeof settings; 126 | const modelKey = `${serviceType}Model` as keyof typeof settings; 127 | 128 | // Find matching provider based on key characteristics 129 | const provider = Object.values(providerConfigs).find((provider) => { 130 | const { keyCharacteristics } = provider; 131 | return ( 132 | ("startsWith" in keyCharacteristics && 133 | value.startsWith(keyCharacteristics.startsWith)) || 134 | ("length" in keyCharacteristics && 135 | keyCharacteristics.length === value.length) 136 | ); 137 | }); 138 | 139 | if (provider?.config[serviceType as keyof typeof provider.config]) { 140 | const serviceConfig = provider 141 | .config[serviceType as keyof typeof provider.config] as { 142 | url: string; 143 | model: string; 144 | }; 145 | updatedSettings[urlKey] = serviceConfig.url; 146 | updatedSettings[modelKey] = serviceConfig.model; 147 | } 148 | } 149 | } 150 | 151 | updatedSettings[key as keyof typeof settings] = value; 152 | setNewSettings(updatedSettings); 153 | } 154 | 155 | return ( 156 |
157 |
158 |
159 |

⚙️ {settingsContent[lang].title}

160 | 166 |
167 | 168 | {/* Basic Settings */} 169 |
170 | 173 | 177 | updateSettings( 178 | "universalApiKey", 179 | (e.target as HTMLInputElement).value, 180 | )} 181 | class="w-full p-2 border rounded focus:ring-2 focus:ring-blue-500 bg-yellow-50" 182 | placeholder={settingsContent[lang].universalApiKeyPlaceholder} 183 | /> 184 |
185 | 186 | {/* Advanced Settings Toggle Button */} 187 | 195 | 196 | {/* Advanced Settings */} 197 | {showAdvanced && ( 198 | <> 199 | {/* Chat API Settings */} 200 |
201 |

202 | 💬 {settingsContent[lang].chatApiTitle} 203 |

204 |
205 |
206 | 209 | 213 | updateSettings( 214 | "apiKey", 215 | (e.target as HTMLInputElement).value, 216 | )} 217 | class="w-full p-2 border rounded focus:ring-2 focus:ring-blue-500 bg-yellow-50" 218 | placeholder={settingsContent[lang].apiKeyPlaceholder} 219 | /> 220 |
221 |
222 | 225 | 229 | updateSettings( 230 | "apiUrl", 231 | (e.target as HTMLInputElement).value, 232 | )} 233 | class="w-full p-2 border rounded focus:ring-2 focus:ring-blue-500" 234 | placeholder={settingsContent[lang].apiUrlPlaceholder} 235 | /> 236 |
237 | 238 |
239 | 242 | 246 | updateSettings( 247 | "apiModel", 248 | (e.target as HTMLInputElement).value, 249 | )} 250 | class="w-full p-2 border rounded focus:ring-2 focus:ring-blue-500" 251 | placeholder={settingsContent[lang].modelPlaceholder} 252 | /> 253 |
254 |
255 |
256 | 257 | {/* TTS Settings */} 258 |
259 |

260 | 🗣️ {settingsContent[lang].ttsTitle} 261 |

262 |
263 | 267 | updateSettings( 268 | "ttsKey", 269 | (e.target as HTMLInputElement).value, 270 | )} 271 | class="w-full p-2 border rounded focus:ring-2 focus:ring-blue-500 bg-yellow-50" 272 | placeholder={settingsContent[lang].ttsKeyPlaceholder} 273 | /> 274 | 278 | updateSettings( 279 | "ttsUrl", 280 | (e.target as HTMLInputElement).value, 281 | )} 282 | class="w-full p-2 border rounded focus:ring-2 focus:ring-blue-500" 283 | placeholder={settingsContent[lang].ttsUrlPlaceholder} 284 | /> 285 | 289 | updateSettings( 290 | "ttsModel", 291 | (e.target as HTMLInputElement).value, 292 | )} 293 | class="w-full p-2 border rounded focus:ring-2 focus:ring-blue-500" 294 | placeholder={settingsContent[lang].ttsModelPlaceholder} 295 | /> 296 |
297 |
298 | 299 | {/* STT Settings */} 300 |
301 |

302 | 👂 {settingsContent[lang].sttTitle} 303 |

304 |
305 | 309 | updateSettings( 310 | "sttKey", 311 | (e.target as HTMLInputElement).value, 312 | )} 313 | class="w-full p-2 border rounded focus:ring-2 focus:ring-blue-500 bg-yellow-50" 314 | placeholder={settingsContent[lang].sttKeyPlaceholder} 315 | /> 316 | 320 | updateSettings( 321 | "sttUrl", 322 | (e.target as HTMLInputElement).value, 323 | )} 324 | class="w-full p-2 border rounded focus:ring-2 focus:ring-blue-500" 325 | placeholder={settingsContent[lang].sttUrlPlaceholder} 326 | /> 327 | 331 | updateSettings( 332 | "sttModel", 333 | (e.target as HTMLInputElement).value, 334 | )} 335 | class="w-full p-2 border rounded focus:ring-2 focus:ring-blue-500" 336 | placeholder={settingsContent[lang].sttModelPlaceholder} 337 | /> 338 |
339 |
340 | 341 | {/* VLM Settings */} 342 |
343 |

344 | 👀 {settingsContent[lang].vlmTitle} 345 |

346 |
347 | 351 | updateSettings( 352 | "vlmKey", 353 | (e.target as HTMLInputElement).value, 354 | )} 355 | class="w-full p-2 border rounded focus:ring-2 focus:ring-blue-500 bg-yellow-50" 356 | placeholder={settingsContent[lang].vlmKeyPlaceholder} 357 | /> 358 | 362 | updateSettings( 363 | "vlmUrl", 364 | (e.target as HTMLInputElement).value, 365 | )} 366 | class="w-full p-2 border rounded focus:ring-2 focus:ring-blue-500" 367 | placeholder={settingsContent[lang].vlmUrlPlaceholder} 368 | /> 369 | 373 | updateSettings( 374 | "vlmModel", 375 | (e.target as HTMLInputElement).value, 376 | )} 377 | class="w-full p-2 border rounded focus:ring-2 focus:ring-blue-500" 378 | placeholder={settingsContent[lang].vlmModelPlaceholder} 379 | /> 380 | 384 | updateSettings( 385 | "vlmCorrectionModel", 386 | (e.target as HTMLInputElement).value, 387 | )} 388 | class="w-full p-2 border rounded focus:ring-2 focus:ring-blue-500" 389 | placeholder={settingsContent[lang] 390 | .vlmCorrectionModelPlaceholder} 391 | /> 392 |
393 |
394 | 395 | )} 396 | 397 |
398 | 404 | 410 |
411 |
412 |
413 | ); 414 | } 415 | -------------------------------------------------------------------------------- /internalization/agreement-content.ts: -------------------------------------------------------------------------------- 1 | export const agreementContentRaw: InternalizationContent = { 2 | en: { 3 | title: "Welcome to School Bud-E!", 4 | content: 5 | "Please read and accept the following terms and conditions to continue using School Bud-E.", 6 | temsAndConditionsContent: 7 | `
8 |
9 |

Definitions

10 |
11 |

Portal: the website at https://school.bud-e.ai/ including all services available there

12 |

Language Model: an algorithm trained on text data analysis to understand and generate human-like language

13 |

Chatbot: an AI-powered, language model-based text dialogue system

14 |

Prompt: refers to the input request sent by a user to the chatbot to trigger a specific response or text generation.

15 |

User: Any natural person who uses the offered service.

16 |
17 |
18 | 19 |
20 |

Scope, Changes

21 |
22 |

These terms of use govern the contractual relationship between LAION e.V., Herman-Lange-Weg 26, 21035 Hamburg (hereinafter referred to as "LAION") and the users of the services on the website at https://school.bud-e.ai/. They apply in their current version at the time of contract conclusion.

23 |

LAION reserves the right to modify these terms of use at any time - including for existing users - particularly in case of legal changes, changes in supreme court jurisprudence, or changes in LAION's offerings. Existing users will be informed of any changes to the terms of use when accessing the website.

24 |
25 |
26 | 27 |
28 |

Subject Matter, Contract Conclusion

29 |
30 | 31 |

32 | 1. LAION aims to advance self-learning algorithms in artificial intelligence and make training datasets and AI models available to the general public. LAION provides users with a portal at https://school.bud-e.ai where users can utilize a school assistant for students. School Bud-E is a chatbot specialized in educational purposes that can be used via both prompts and voice input to provide students with individualized assistance for various school activities. 33 |

34 | 35 | 36 |

37 | 2. The usage contract between LAION and the user is concluded by accessing the portal and accepting these terms of use. 38 |

39 | 40 | 41 |

42 | 3. School Bud-E may only be used by users who are 16 years of age or older. Users who have not yet reached the age of 16 require the express consent of a legal guardian. The provider reserves the right to request proof of this consent. 43 |

44 | 45 | 46 |

47 | 4. The use of the chatbot is only permitted for educational purposes. Any misuse, particularly for illegal or non-educational purposes, is prohibited. 48 |

49 | 50 |

51 | 5. The use of the portal is free of charge. Users have no legal claim to use the portal against LAION. Within the scope of contractual freedom, LAION is entitled to exclude individual users from use without giving reasons, to restrict the offer, or to discontinue it entirely. 52 |

53 |
54 | 55 |
56 |

DISCLAIMER

57 |
58 |

LAION's language model is still under development and may therefore be faulty in function and content - it is a demo or test version.

59 |

LAION therefore assumes no guarantee for the correctness, currentness and/or completeness of the content.

60 |
61 |
62 | 63 |
64 |

User Obligations

65 |
66 |

1. The user agrees not to misuse the Portal. The user is prohibited from making false or misleading statements on the Portal. The user must refrain from all activities in connection with the Portal that constitute a violation of:

67 |
    68 |
  • a. applicable law,
  • 69 |
  • b. third-party rights,
  • 70 |
  • c. principles of youth protection,
  • 71 |
  • d. public morality
  • 72 |
  • e. school internal regulations.
  • 73 |
74 | 75 |

2. Furthermore, the user agrees not to enter or upload any content to the Portal that:

76 |
    77 |
  • a. could harm LAION or any associated legal or natural person,
  • 78 |
  • b. could violate third-party copyrights,
  • 79 |
  • c. does not align with or contradicts educational purposes,
  • 80 |
  • d. contains personal data,
  • 81 |
  • e. contains or represents intellectual property or trade secrets of third parties,
  • 82 |
  • f. glorifies or incites violence,
  • 83 |
  • g. is of National Socialist nature,
  • 84 |
  • h. promotes or glorifies child pornography,
  • 85 |
  • i. incites or promotes hatred,
  • 86 |
  • j. discriminates against people based on their religion, sexuality, gender, political views, worldview, or other grounds,
  • 87 |
  • k. could be interpreted as bullying, threats, or blackmail,
  • 88 |
  • l. calls for harm to others or self-harm, or
  • 89 |
  • m. promotes or condones animal cruelty.
  • 90 |
91 | 92 |

3. The user further agrees not to submit prompts or language inputs that violate these terms of use and could particularly result in potential liability. The user also agrees to refrain from creating content that has no educational connection or is clearly intended to harm a third party.

93 | 94 |

4. The use of School Bud-E is permitted exclusively for educational purposes.

95 | 96 |

5. LAION expressly reserves the right to report violations of these terms of use to the appropriate authorities, particularly law enforcement agencies, when such violations become known.

97 | 98 |

6. The user is obligated to immediately report any obvious errors or misuse of the Portal to the provider. This serves to protect other users and improve the Portal.

99 |
100 |
101 | 102 |
103 |

Contract Duration and Termination

104 |
105 |

1. The user agreement begins when the user accepts these terms of use and is concluded for an indefinite period. The agreement may be terminated by either party at any time without notice.

106 | 107 |

2. The right to extraordinary termination for good cause remains unaffected.

108 | 109 |

3. The user agreement automatically terminates when LAION discontinues the services on the Portal.

110 | 111 |

4. In the event of loss of school-related status, the user is obligated to inform the provider. The agreement automatically terminates as soon as the user loses their school status.

112 |
113 |
114 | 115 |
116 |

Intellectual Property

117 |
118 |

1. The Portal and its contents regularly constitute protected intellectual property. In particular, all logos, layouts, graphics, texts, images, and videos are protected by copyright and trademark law.

119 | 120 |

2. LAION grants the user a simple, non-transferable right to use the Portal and its contents. This right of use is temporally limited to the duration of the user agreement. In terms of content, the right of use is limited to uses appropriate for the intended use of the Portal.

121 | 122 |

3. The user is specifically prohibited from reproducing, modifying, decompiling, or making the Portal, in whole or in part, accessible to unauthorized third parties.

123 | 124 |

4. The user grants LAION a simple, temporally and spatially unlimited right to use the content uploaded by them to the Portal – particularly texts. In terms of content, the right of use is limited to the purpose of operating the Portal and the (further) development of language models and training datasets. The right of use specifically includes:

125 |
    126 |
  • a. the storage of content and its reproduction,
  • 127 |
  • b. making it publicly accessible, and
  • 128 |
  • c. sharing it with third parties.
  • 129 |
130 | 131 |

5. The user assures that the content uploaded by them to the Portal is free from third-party rights and that they are authorized to grant rights to the content. Should the operator be held liable by third parties due to the infringement of intellectual property rights and if this claim is based on culpable conduct by the user, the user shall indemnify the operator against these claims and bear the reimbursable costs of the operator's legal defense according to the German Lawyers' Remuneration Act (RVG).

132 |
133 |
134 | 135 |
136 |

Data Protection

137 |
138 |

1. Users may not enter any personal data concerning themselves when using the services for improving the language model and using the chatbot.

139 | 140 |

2. Personal data processed in the context of using the chatbot includes data entered by the user and metadata used to improve the language model. The provider stores this data exclusively for the duration of the Portal use and deletes it as soon as the purpose of processing ceases, unless legal retention obligations prevent this.

141 | 142 |

3. LAION processes the user's personal data in accordance with data protection regulations, particularly the European General Data Protection Regulation (GDPR). Detailed information can be found in the Privacy Policy, available at: https://laion.ai/privacy-policy/

143 |
144 | 145 |

Liability

146 |
147 |

1. LAION assumes unlimited liability for intent and gross negligence. For simple negligence, LAION is only liable for damages resulting from:

148 |
    149 |
  • a. injury to life, body, and health,
  • 150 |
  • b. breach of an essential contractual obligation (obligation whose fulfillment is essential for the proper execution of the contract and on whose compliance the contractual partner regularly relies and may rely),
  • 151 |
  • c. violation of provisions of the General Data Protection Regulation (GDPR),
  • 152 |
  • d. violation of provisions of the Product Liability Act (ProdHaftG), and
  • 153 |
  • e. violation of other statutory provisions, if and to the extent that these do not permit an exclusion or limitation of liability.
  • 154 |
155 | 156 |

2. In the case of simple negligent breach of essential contractual obligations, LAION's liability is limited in amount to the foreseeable, typically occurring damage.

157 | 158 |

3. In all other respects, LAION's liability is excluded.

159 | 160 |

4. The above limitations of liability also apply in favor of LAION's legal representatives and agents.

161 | 162 |

5. LAION assumes no liability for the prompts or language inputs entered by users, or for incorrect application of the chatbot. Furthermore, LAION assumes no liability for damages resulting from incorrect or improper use of content generated by the chatbot.

163 |
164 | 165 |

Final Provisions

166 |
167 |

1. The contractual relationship is exclusively governed by the law of the Federal Republic of Germany, excluding the UN Convention on Contracts for the International Sale of Goods.

168 | 169 |

2. Should individual provisions of these Terms of Use, including this provision, be or become invalid in whole or in part, the validity of the remaining provisions shall remain unaffected. The respective statutory provisions shall take the place of the invalid or missing provisions.

170 | 171 |

3. If the customer is a merchant, a legal entity under public law, or a special fund under public law, the place of jurisdiction for all disputes arising from and in connection with contracts concluded under these Terms of Use shall be LAION's registered office.

172 | 173 |

4. These Terms of Use were drafted in German and subsequently translated into English. In case of contradictions or doubts about the interpretation, the German original version shall always prevail.

174 |
175 |
`, 176 | agree: "I agree to the terms and conditions", 177 | accept: "Accept", 178 | }, 179 | de: { 180 | title: "Willkommen bei School Bud-E!", 181 | content: 182 | "Bitte lesen und akzeptieren Sie die folgenden Nutzungsbedingungen, um mit der Nutzung von School Bud-E fortzufahren.", 183 | temsAndConditionsContent: 184 | `
185 |
186 |

Begriffsbestimmungen

187 |
188 |

Portal: die Website unter https://school.bud-e.ai/ mitsamt der dort zur Verfügung stehenden Dienste

189 |

Sprachmodell: ein Algorithmus, der auf der Analyse von Textdaten trainiert wird, um menschenähnliche Sprache zu verstehen und zu generieren

190 |

Chatbot: ein KI-gestütztes, auf einem Sprachmodell basierendes, textbasiertes Dialogsystem

191 |

Prompt: bezeichnet die Eingabeaufforderung, die von einem Nutzer an den Chatbot gesendet wird, um eine bestimmte Antwort oder Texterzeugung auszulösen.

192 |

Nutzer: Jede natürliche Person, die den angebotenen Dienst nutzt.

193 |
194 |
195 | 196 |
197 |

Geltungsbereich, Änderungen

198 |
199 |

Die vorliegenden Nutzungsbedingungen regeln das Vertragsverhältnis zwischen dem LAION e.V., Herman-Lange-Weg 26, 21035 Hamburg (nachfolgend bezeichnet als „LAION") und den Nutzern der Dienste auf der Website unter https://school.bud-e.ai/. Sie gelten in ihrer jeweils zum Zeitpunkt des Vertragsschlusses aktuellen Fassung.

200 |

LAION behält sich vor, diese Nutzungsbedingungen jederzeit - auch gegenüber bestehenden Nutzern - zu ändern, insbesondere bei Gesetzesänderungen, Änderungen der höchstrichterlichen Rechtsprechung sowie bei Veränderungen des Angebots von LAION. Bei Änderungen der Nutzungsbedingungen werden bestehende Nutzer hierüber beim Aufruf der Website informiert.

201 |
202 |
203 | 204 | 205 |
206 |

1. Vertragsgegenstand, Vertragsschluss

207 |
208 | 209 |

210 | 1. LAION hat sich zum Ziel gesetzt, selbstlernende Algorithmen im Sinne der künstlichen Intelligenz weiterzuentwickeln und Trainingsdatensätze sowie KI-Modelle der breiten Öffentlichkeit zur Verfügung zu stellen. LAION stellt dem Nutzer unter https://school.bud-e.ai/ ein Portal zur Verfügung, innerhalb dessen der Nutzer einen Schul-Assistenten für Schüler nutzen kann. School Bud-E ist ein auf schulische Zwecke spezialisierter Chatbot, welcher sowohl mittels Prompts als auch Spracheingabe verwendet werden kann, um Schülern eine individuelle Hilfestellung für verschiedene schulische Aktivitäten zu bieten. 211 |

212 | 213 |

214 | 2. Der Nutzungsvertrag zwischen LAION und dem Nutzer kommt durch Aufruf des Portals und das Akzeptieren der vorliegenden Nutzungsbedingungen zustande. 215 |

216 |

217 | 3. School Bud-E darf nur von Nutzern verwendet werden, die das 16. Lebensjahr vollendet haben. Nutzer, die das 16. Lebensjahr noch nicht vollendet haben, benötigen die ausdrückliche Zustimmung eines Erziehungsberechtigten. Der Anbieter behält sich das Recht vor, den Nachweis dieser Zustimmung zu verlangen. 218 |

219 | 220 |

221 | 4. Die Nutzung des Chatbots ist nur zu schulischen Zwecken erlaubt. Jede missbräuchliche Verwendung, insbesondere für rechtswidrige oder nicht bildungsbezogene Zwecke, ist verboten. 222 |

223 | 224 |

225 | 5. Die Nutzung des Portals ist kostenlos. Es besteht kein Rechtsanspruch auf Nutzung des Portals seitens des Nutzers gegenüber LAION. LAION ist im Rahmen der Vertragsfreiheit berechtigt einzelne Nutzer, ohne Angabe von Gründen, von der Nutzung auszuschließen, das Angebot einzuschränken oder gänzlich einzustellen. 226 |

227 |
228 | 229 |
230 |

DISCLAIMER

231 |
232 |

Das Sprachmodell von LAION befindet sich noch in der Entwicklung und kann daher fehlerhaft in Funktion und Inhalt sein - es handelt sich um eine Demo- bzw. Testversion.

233 |

LAION übernimmt daher insbesondere keinerlei Gewähr für die Richtigkeit, Aktualität und/oder Vollständigkeit der Inhalte.

234 |
235 |
236 | 237 |
238 |

Nutzerpflichten

239 |
240 |

1. Der Nutzer verpflichtet sich, das Portal nicht missbräuchlich zu verwenden. Es ist dem Nutzer untersagt, auf dem Portal falsche oder irreführende Angaben zu machen. Der Nutzer hat alle Aktivitäten in Zusammenhang mit dem Portal zu unterlassen, die eine Verletzung:

241 |
    242 |
  • a. des geltenden Rechts,
  • 243 |
  • b. von Rechten Dritter,
  • 244 |
  • c. der Grundsätze über den Jugendschutz sowie
  • 245 |
  • d. der guten Sitten
  • 246 |
  • e. schulinterner Regelungen darstellen.
  • 247 |
248 | 249 |

2. Der Nutzer verpflichtet sich darüber hinaus, keine Inhalte im Portal einzugeben oder über das Portal hochzuladen, die:

250 |
    251 |
  • a. LAION oder einer damit verbundenen juristischen oder natürlichen Person schaden könnten,
  • 252 |
  • b. das Urheberrecht Dritter verletzen könnten,
  • 253 |
  • c. nicht den schulischen Zwecken entsprechen oder solchen zuwiderlaufen könnten,
  • 254 |
  • d. personenbezogene Daten enthalten,
  • 255 |
  • e. geistiges Eigentum oder Geschäftsgeheimnisse Dritter enthalten oder darstellen,
  • 256 |
  • f. gewaltverherrlichend sind oder zu Gewalt aufrufen,
  • 257 |
  • g. nationalsozialistischer Art sind,
  • 258 |
  • h. zu Kinderpornografie aufrufen oder diese verherrlichen,
  • 259 |
  • i. Hass schüren oder zu Hass aufrufen,
  • 260 |
  • j. Menschen aufgrund ihrer Religionszugehörigkeit, ihrer Sexualität, ihres Geschlechts, ihrer politischen Einstellung, ihrer Weltanschauung oder aus anderen Gründen diskriminieren,
  • 261 |
  • k. als Mobbing, Drohung oder Erpressung aufgefasst werden können,
  • 262 |
  • l. dazu aufrufen, andere oder sich selbst zu verletzen oder
  • 263 |
  • m. zu Tierquälerei aufrufen oder gutheißen.
  • 264 |
265 | 266 |

3. Der Nutzer verpflichtet sich weiterhin keine Prompts oder Spracheingaben vorzunehmen, die gegen diese Nutzungspflichten verstoßen und insbesondere eine potenzielle Haftung nach sich ziehen können. Der Nutzer verpflichtet sich zudem, die Erstellung von Inhalten zu unterlassen, die keinen schulischen Bezug aufweisen oder erkennbar einem Dritten schaden sollen.

267 | 268 |

4. Die Nutzung von School Bud-E ist ausschließlich zu schulischen Zwecken erlaubt.

269 | 270 |

5. LAION behält sich ausdrücklich das Recht vor, bei Kenntnis über eine Nutzung des Portals entgegen dieser Nutzungsbedingungen Verstöße den zuständigen Behörden, insbesondere Strafverfolgungsbehörden, zur Kenntnis zu bringen.

271 | 272 |

6. Der Nutzer ist verpflichtet, offensichtliche Fehler oder missbräuchliche Nutzung des Portals unverzüglich dem Anbieter zu melden. Dies dient dem Schutz der weiteren Nutzer und der Verbesserung des Portals.

273 |
274 |
275 | 276 |
277 |

Vertragslaufzeit, Kündigung

278 |
279 |

1. Der Nutzungsvertrag beginnt mit dem Akzeptieren der vorliegenden Nutzungsbedingungen durch den Nutzer und wird auf unbestimmte Zeit geschlossen. Der Vertrag kann von beiden Parteien jederzeit ohne Einhaltung einer Frist gekündigt werden.

280 | 281 |

2. Das Recht zur außerordentlichen Kündigung aus wichtigem Grund bleibt unberührt.

282 | 283 |

3. Der Nutzungsvertrag endet automatisch, wenn LAION die Dienste auf dem Portal einstellt.

284 | 285 |

4. Bei Verlust des schulbezogenen Status ist der Nutzer verpflichtet, den Anbieter darüber zu informieren. Eine automatische Beendigung des Vertrags tritt ein, sobald der Nutzer seinen schulischen Status verliert.

286 |
287 |
288 | 289 |
290 |

Geistiges Eigentum

291 |
292 |

1. Das Portal und seine Inhalte stellen regelmäßig geschütztes geistiges Eigentum dar. Insbesondere sind alle Logos, Layouts, Grafiken, Texte, Bilder und Videos durch das Urheber- und Markenrecht geschützt.

293 | 294 |

2. LAION räumt dem Nutzer ein einfaches, nicht übertragbares Nutzungsrecht an dem Portal und seinen Inhalten ein. In zeitlicher Hinsicht ist das Nutzungsrecht auf die Dauer des Nutzungsvertrages beschränkt. Inhaltlich ist das Nutzungsrecht beschränkt auf die zur bestimmungsgemäßen Nutzung des Portals zweckmäßigen Nutzungen.

295 | 296 |

3. Dem Nutzer ist es insbesondere untersagt, das Portal im Ganzen oder Teile davon zu vervielfältigen, zu verändern, zu dekompilieren oder unberechtigten Dritten zugänglich zu machen.

297 | 298 |

4. Der Nutzer räumt LAION ein einfaches, zeitlich und räumlich unbeschränktes Nutzungsrecht an den von ihm auf dem Portal hochgeladenen Inhalten – insbesondere Texten – ein. Inhaltlich ist das Nutzungsrecht auf den Zweck des Betriebs des Portals und der (Weiter-)Entwicklung von Sprachmodellen und Trainingsdatensätzen beschränkt. Das Nutzungsrecht umfasst insbesondere:

299 |
    300 |
  • a. die Speicherung der Inhalte sowie deren Vervielfältigung,
  • 301 |
  • b. die öffentliche Zugänglichmachung und
  • 302 |
  • c. die Weitergabe an Dritte.
  • 303 |
304 | 305 |

5. Der Nutzer sichert zu, dass die von ihm auf dem Portal hochgeladenen Inhalte frei von Rechten Dritter sind und er zur Einräumung der Rechte an den Inhalten befugt ist. Sollte die Betreiberin von Dritten aufgrund der Verletzung von geistigen Eigentumsrechten in Anspruch genommen werden und beruht die Inanspruchnahme auf einem schuldhaften Handeln des Nutzers, so hat der Nutzer die Betreiberin von diesen Ansprüchen freizustellen und die nach dem Rechtsanwaltsvergütungsgesetz (RVG) erstattungsfähigen Kosten der Rechtsverteidigung der Betreiberin zu übernehmen.

306 |
307 |
308 | 309 |
310 |

Datenschutz

311 |
312 |

1. Nutzer dürfen im Rahmen der Nutzung der Dienste zur Verbesserung des Sprachmodells und der Nutzung des Chatbots keine die eigene Person betreffenden personenbezogenen Daten eingeben.

313 | 314 |

2. Personenbezogene Daten, die im Rahmen der Nutzung des Chatbots verarbeitet werden, umfassen die vom Nutzer eingegebenen Daten sowie Metadaten, die zur Verbesserung des Sprachmodells verwendet werden. Der Anbieter speichert diese Daten ausschließlich für die Dauer der Nutzung des Portals und löscht sie, sobald der Zweck der Verarbeitung entfällt, es sei denn, gesetzliche Aufbewahrungspflichten stehen dem entgegen.

315 | 316 |

3. Die personenbezogenen Daten des Nutzers verarbeitet LAION im Einklang mit den datenschutzrechtlichen Bestimmungen, insbesondere der Europäischen Datenschutzgrundverordnung (DSGVO). Detaillierte Informationen finden sich in der Datenschutzerklärung, abrufbar unter: https://laion.ai/privacy-policy/

317 |
318 | 319 |

Haftung

320 |
321 |

1. LAION haftet unbeschränkt für Vorsatz und grobe Fahrlässigkeit. Für einfache Fahrlässigkeit haftet LAION nur für Schäden aus:

322 |
    323 |
  • a. der Verletzung des Lebens, des Körpers und der Gesundheit,
  • 324 |
  • b. der Verletzung einer wesentlichen Vertragspflicht (Verpflichtung, deren Erfüllung die ordnungsgemäße Durchführung des Vertrags überhaupt erst ermöglicht und auf deren Einhaltung der Vertragspartner regelmäßig vertraut und vertrauen darf),
  • 325 |
  • c. der Verletzung von Bestimmungen der Datenschutzgrundverordnung (DSGVO),
  • 326 |
  • d. der Verletzung von Bestimmungen des Produkthaftungsgesetzes (ProdHaftG) sowie
  • 327 |
  • e. aus der Verletzung sonstiger gesetzlicher Vorschriften, wenn und soweit diese einen Ausschluss bzw. eine Beschränkung der Haftung nicht erlauben.
  • 328 |
329 | 330 |

2. Im Fall der einfach fahrlässigen Verletzung wesentlicher Vertragspflichten ist die Haftung von LAION der Höhe nach begrenzt auf den vorhersehbaren, typischerweise eintretenden Schaden.

331 | 332 |

3. Im Übrigen ist die Haftung von LAION ausgeschlossen.

333 | 334 |

4. Die vorstehenden Haftungsbeschränkungen gelten auch zugunsten der gesetzlichen Vertreter und Erfüllungsgehilfen von LAION.

335 | 336 |

5. LAION übernimmt keine Haftung für die eingegebenen Prompts oder Spracheingaben der Nutzer, sowie bei falscher Anwendung des Chatbots. Weiterhin übernimmt LAION keine Haftung für Schäden, die durch eine unkorrekte oder missbräuchliche Verwendung der vom Chatbot generierten Inhalte entstehen.

337 |
338 | 339 |

Schlussbestimmungen

340 |
341 |

1. Für das Vertragsverhältnis gilt ausschließlich das Recht der Bundesrepublik Deutschland unter Ausschluss des UN-Kaufrechts.

342 | 343 |

2. Sollten einzelne Bestimmungen dieser Nutzungsbedingungen einschließlich dieser Bestimmung ganz oder teilweise unwirksam sein oder werden, bleibt die Wirksamkeit der übrigen Regelungen unberührt. Anstelle der unwirksamen oder fehlenden Bestimmungen treten die jeweiligen gesetzlichen Regelungen.

344 | 345 |

3. Ist der Kunde Kaufmann, juristische Person des öffentlichen Rechts oder öffentlich-rechtliches Sondervermögen, so ist für den Gerichtsstand aller Streitigkeiten aus und in Zusammenhang mit unter Einbeziehung dieser Nutzungsbedingungen geschlossenen Verträgen, der Sitz von LAION maßgeblich.

346 | 347 |

4. Die vorliegenden Nutzungsbedingungen wurden in deutscher Sprache verfasst und sodann ins Englische übersetzt. Bei Widersprüchen oder Zweifeln über die Auslegungen geht stets die deutsche Originalfassung vor.

348 |
349 |
`, 350 | agree: "Ich stimme den Nutzungsbedingungen zu", 351 | accept: "Akzeptieren", 352 | }, 353 | }; 354 | -------------------------------------------------------------------------------- /internalization/content.ts: -------------------------------------------------------------------------------- 1 | import { agreementContentRaw } from "./agreement-content.ts"; 2 | 3 | export const headerContent: InternalizationContent = { 4 | en: { 5 | overTitle: "Experimental", 6 | title: "School Bud-E!", 7 | }, 8 | de: { 9 | overTitle: "Experimenteller", 10 | title: "School Bud-E!", 11 | }, 12 | }; 13 | 14 | export const agreementContent: InternalizationContent = agreementContentRaw; 15 | 16 | export const menuContent: InternalizationContent = { 17 | en: { 18 | about: "About School Bud-E", 19 | imprint: "Imprint", 20 | }, 21 | de: { 22 | about: "Über School Bud-E", 23 | imprint: "Impressum", 24 | }, 25 | }; 26 | 27 | export const warningContent: InternalizationContent = { 28 | en: { 29 | title: "🚧 Experimental Demo Version 🚧", 30 | content: 31 | "Please note that this is an early prototype application that may provide inaccurate answers or generate content that is not suitable for all audiences. We advise caution and encourage you to report any issues you encounter to us.", 32 | usage: 33 | "**School Bud-E Features**\n1.: Wikipedia search (**#wikipedia**: search term)\n2.: Paper search (**#papers**: search term)\n3.: Search the Hamburger Bildungsplan (**#bildungsplan**: search term)\n4.: Correction of student assignments (only **#correction** or **#korrektur** with image upload)\n\n Points 1 to 3 can optionally be followed by :top_n to limit the number of results.\nExample: **#wikipedia: Artificial Intelligence:3**\n\n Alternatively to #wikipedia, you can also use **#wikipedia_de** or **#wikipedia_en** to set the language of the Wikipedia search.\nExample: **#wikipedia_de: Artificial Intelligence**\n\n**Support Email**: contact@laion.ai", 34 | }, 35 | de: { 36 | title: "🚧 Experimentelle Demoversion 🚧", 37 | content: 38 | "Bitte beachten Sie, dass dies eine frühe Prototyp-Anwendung ist, die möglicherweise ungenaue Antworten liefert oder Inhalte erzeugt, die nicht für alle Zielgruppen geeignet sind. Wir raten zur Vorsicht und raten Ihnen uns alle Probleme, die Sie feststellen, mitzuteilen.", 39 | usage: 40 | "**School Bud-E Funktionen**\n1.: Wikipedia-Suche (**#wikipedia**: Suchbegriff)\n2.: Paper-Suche (**#papers**: Suchbegriff)\n3.: Suche im Hamburger Bildungsplan (**#bildungsplan**: Suchbegriff)\n4.: Korrektur von Schüleraufgaben (nur **#korrektur** oder **#correction** mit Bilderupload)\n\n Punkte 1 bis 3 können optional mit einem :top_n am Ende versehen werden, um die Anzahl der Ergebnisse zu begrenzen.\nBeispiel: **#wikipedia: Künstliche Intelligenz:3**\n\n Alternativ zu #wikipedia, kann auch **#wikipedia_de** oder **#wikipedia_en** verwendet werden, um die Sprache der Wikipedia-Suche festzulegen.\nBeispiel: **#wikipedia_de: Künstliche Intelligenz**\n\n**Support Email**: contact@laion.ai", 41 | }, 42 | }; 43 | 44 | export const chatIslandContent: InternalizationContent = { 45 | en: { 46 | welcomeMessage: 47 | "Hello! I am School Bud-E, your personal AI assistant. 
How can I help you today?",
48 |     deleteCurrentChat: "current chat",
49 |     deleteAllChats: "all chats",
50 |     backupChat: "Download",
51 |     restoreChat: "Upload",
52 |     placeholderText: "Chat with the School Bud-E...",
53 |     wikipediaTitle: "Title",
54 |     wikipediaContent: "Content",
55 |     wikipediaURL: "URL",
56 |     wikipediaScore: "Score",
57 |     papersDOI: "DOI",
58 |     papersTitle: "Title",
59 |     papersSubjects: "Type",
60 |     papersAuthors: "Authors",
61 |     papersAbstract: "Abstract",
62 |     papersDate: "Date",
63 |     result: "Result",
64 |     of: "of",
65 |     noSettings:
66 |       "⚠️ The minimum requirement to run the chat is missing. You need to open the settings and either provide a Universal API key or a custom API key with the corresponding URL and model. ⚠️",
67 |   },
68 |   de: {
69 |     welcomeMessage:
70 |       "Hallo! Ich bin School Bud-E, dein persönlicher Assistent. Wie kann ich dir helfen?",
71 |     deleteCurrentChat: "diesen Chat",
72 |     deleteAllChats: "alle Chats",
73 |     backupChat: "Download",
74 |     restoreChat: "Upload",
75 |     placeholderText: "Schreibe mit dem School Bud-E...",
76 |     wikipediaTitle: "Titel",
77 |     wikipediaContent: "Inhalt",
78 |     wikipediaURL: "URL",
79 |     wikipediaScore: "Score",
80 |     papersDOI: "DOI",
81 |     papersTitle: "Titel",
82 |     papersSubjects: "Typ",
83 |     papersAuthors: "Autoren",
84 |     papersAbstract: "Abstract",
85 |     papersDate: "Datum",
86 |     result: "Ergebnis",
87 |     of: "von",
88 |     noSettings:
89 |       "⚠️ Die Mindestanforderung zum Ausführen des Chats fehlt. Du musst die Einstellungen öffnen und entweder einen Universal-API-Schlüssel oder einen benutzerdefinierten API-Schlüssel mit der entsprechenden URL und dem Modell eintragen. ⚠️",
90 |   },
91 | };
92 |
93 | export const chatTemplateContent: InternalizationContent = {
94 |   "en": {
95 |     readOutText: "Read out text",
96 |     silent: "Silent",
97 |     autoScrollOn: "Auto scroll",
98 |     autoScrollOff: "Manual scroll",
99 |   },
100 |   "de": {
101 |     readOutText: "Text vorlesen",
102 |     silent: "Stumm",
103 |     autoScrollOn: "Automatisch scrollen",
104 |     autoScrollOff: "Manuelles scrollen",
105 |   },
106 | };
107 |
108 | export const chatContent: InternalizationContent = {
109 |   en: {
110 |     systemPrompt:
111 |       "You are an intelligent and empathetic learning assistant. Always respond empathetically, friendly, curiously and appropriately to the school context. Respond briefly and to the point. Your name is School Bud-E and you were created by LAION. LAION is a non-profit organization for the democratization of open source AI. Try to keep the conversation friendly, educational and entertaining and to keep it running while taking into account previously said information. Respond briefly, concisely and to the point. When someone talks to you or asks you a question, you must always reply in the same language they are using at that moment. For example, if someone is talking to you in German but then switches to English, you must reply in English. If someone asks you something in Chinese, you must reply in Chinese. It's important to always use the language the person last spoke in. Try your best to be inspiring and to spark curiosity and a sense of wonder at the beauty of the world.",
112 |     correctionSystemPrompt:
113 |       `This Vision Language Model is specialized in supporting teachers in correcting tests, exams, and assessments. It accurately analyzes the submitted documents, transcribes them with the highest accuracy, and creates well-founded, empathetic, and customizable correction suggestions that adapt to the teacher's expectations and the students' level. 
This Vision Language Model is specifically designed to provide correction suggestions for tests, exams, possibly university tests, or any other performance and learning assessments, for teachers who upload tests, exams, or other work written by their students. This Vision Language Model should take a very close look at what is contained in the uploads, transcribe everything (in the sense of Optical Character Recognition), and then, for each page of the task and the student's work, with page number and description, i.e. with a headline for each document, write down exactly, meticulously, and factually what is written there. If there is, for example, the student's handwriting, it should be written down exactly as the student wrote it. After this has been done for each document, a correction suggestion should be made for each task; it should be intelligent and empathetic, fact-based, and grounded in the information provided by the user, that is, the teacher: their expectations and the age group involved. For example, you cannot expect as much in terms of content from a 12-year-old student as from an 18-year-old student, and not as much performance-wise from a student at a comprehensive school or district school as from a grammar school student. All of this should be taken into account, and if, for example, the task specifies how many points can be achieved in a task or part of a task, it should be justified how many of the maximum achievable points are awarded and why. In general, detailed step-by-step explanations and justifications should be given by referring specifically to what the student has written. If the teacher, that is, the user of the chatbot and the Vision Language Model, does not provide information about the grade level and what to expect, try to infer it from the task sheet. Check whether there is any indication of the grade level and whether it is a grammar school, a comprehensive school, or a district school (a district school is a comprehensive school in Hamburg). Try to adapt the correction suggestions to the student's age, the grade level, and the type of school. Carry out the correction factually and with high accuracy, but at the same time with empathy and goodwill towards the student. Start with a neutral, objective, meticulously accurate transcription of the task descriptions on all the sheets the user sent, together with the student's work. Try your best to decipher the student's handwriting. Then, after these transcriptions of the sent images, make your correction suggestions. Be intelligent and precise, work step by step, and always justify your assessment with references to what the student has written. Instruction to the Vision Language Model: 1. Transcription of the submitted documents: Analyze each uploaded document (images or scans) meticulously. Use Optical Character Recognition (OCR) to transcribe all content. Capture each page and each task in detail, including: Page number Title of the document (if available) Task Student work (as accurately as possible, including spelling errors or special spellings). Carefully decipher handwriting and make an effort to interpret it as best as possible. Explicitly note illegible or unclear passages. 2.
Contextualization of the correction suggestions: Analyze the document to extract relevant information about the target group if no further information is provided: Age group of the students (e.g., "12 years old") Grade level (e.g., "Class 6") Type of school (e.g., grammar school, comprehensive school, district school). Use hints in the document, such as the difficulty level of the task, the language level, or specific notes, to adapt the assessment and correction to the age-appropriate and school-type-appropriate level. 3. Creation of the correction suggestions: Proceed systematically and step by step for each task and each part of the task. Justify in detail: The point allocation in relation to the maximum possible points (if specified). Your decision with references to specific content of the student's response. Create your correction suggestions based on the following criteria: Fact-based correctness of the answer. Clarity and structure of the student's response. Comprehensibility of the thought processes and solution paths. Provide constructive feedback that supports and motivates the student. Explicitly show improvement potential and possible solution paths. Take into account, if specified, the specific expectations of the teacher (e.g., special requirements for methodology or solution approaches). 4. Adaptation of the assessment: Adapt your correction suggestions and feedback to the age group, the type of school, and the expected performance level. Example: A student in the 6th grade at a comprehensive school is expected to have a lower level than a grammar school student in the 12th grade. If no precise information is provided, assume that the average level of the specified school level applies. 5. Presentation of the results: Ensure that the results are clear and concise: Start with the transcription of the task descriptions and the student's work, sorted by pages and tasks. Then add the correction suggestions, clearly separated by tasks. Formulate the feedback in a professional, empathetic, and supportive tone. 6. Additional notes: If a task or student response is unclear, explicitly state this and explain possible interpretations. Refer, where possible, to relevant learning objectives or educational standards to support the teacher. Example of the output: Page 1: Task 1: Task description: "Calculate: 12 + 8 = ?" Student response: "22" Correction: "The answer is incorrect. The correct result is 20. The calculation method was not provided, therefore no partial points. 0/2 points." Feedback: "Make sure to write down the calculation method to receive partial points if the final result is incorrect." Page 2: Task 2: Task description: "Describe the water cycle." Student response: "The sun makes water go up then clouds come." Correction: "Approach is recognizable, but the answer is too imprecise. Important terms like evaporation, condensation, and precipitation are missing. 2/5 points." Feedback: "Try to formulate more precisely and use the technical terms from the lesson." The VLM is designed to combine maximum accuracy, empathy, and utility to provide teachers with high-quality support in evaluating student work. ALWAYS RESPOND IN ENGLISH, WITH CORRECT SPELLING AND GRAMMAR. FORMAT YOUR ANSWERS IN MARKUP LANGUAGE WITH CLEAR HEADINGS AND PARAGRAPHS SO THAT THE ANSWER CAN BE RENDERED NICELY.`, 114 | }, 115 | de: { 116 | systemPrompt: 117 | "Du bist ein sehr intelligenter, empathischer, geduldiger Lernassistent.
Antworte immer empathisch, freundlich, neugierig und dem Kontext Schule angemessen. Antworte kurz und auf den Punkt gebracht. Dein Name ist School Bud-E und Du wurdest von LAION erschaffen. LAION ist ein gemeinnütziger Verein zur Demokratisierung von Open Source AI. Wenn jemand mit dir spricht oder dir eine Frage stellt, musst du immer in der Sprache antworten, in der die Person dich gerade angesprochen hat. Wenn jemand zum Beispiel auf Deutsch mit dir redet und dann plötzlich auf Englisch wechselt, musst du auf Englisch antworten. Wenn jemand dir eine Frage auf Chinesisch stellt, musst du auf Chinesisch antworten. Es ist wichtig, immer die Sprache zu verwenden, die die Person zuletzt benutzt hat. Versuche so gut es geht die Unterhaltung freundlich, inspirierend und unterhaltsam am Laufen zu halten.", 118 | correctionSystemPrompt: 119 | `Dieses Vision Language Model ist darauf spezialisiert, Lehrkräfte bei der Korrektur von Tests, Klassenarbeiten und Prüfungen zu unterstützen. Es analysiert präzise die eingereichten Dokumente, transkribiert sie mit höchster Genauigkeit und erstellt auf dieser Grundlage fundierte, empathische und anpassbare Korrekturvorschläge, die sich an die Erwartungen der Lehrkraft und das Niveau der Schüler anpassen. Dieses Vision Language Model soll spezifisch gut dafür sein, Korrekturvorschläge für Tests, Klassenarbeiten und eventuell Universitätstests oder andere Leistungs- und Lernstandskontrollen zu machen, für Lehrkräfte, die von ihren Schülern geschriebene Tests oder Klassenarbeiten hochladen. Dieses Vision Language Model soll sich dann ganz genau angucken, was darin enthalten ist, das Ganze transkribieren, also im Sinne von Optical Character Recognition, und dann für jede Seite der Aufgabe und der Bearbeitung des Schülers mit Seitenzahl und Beschreibung, also mit einer Überschrift für jedes Dokument, ganz genau, akribisch und faktengetreu aufschreiben, was dort steht. Wenn da zum Beispiel Handschrift des Schülers ist, soll das genau so aufgeschrieben werden, wie der Schüler es geschrieben hat. Und nachdem das für jedes Dokument durchgeführt wurde, soll anschließend für jede Aufgabe ein Korrekturvorschlag gemacht werden, der intelligent und empathisch sein soll, faktenbezogen und basierend auf den Angaben des Nutzers, also des Lehrers: was er erwartet und um welche Altersgruppe es sich handelt. Von einem zwölfjährigen Schüler kann man zum Beispiel inhaltlich nicht so viel erwarten wie von einem 18-jährigen Schüler, und von einem Schüler auf einer Gesamtschule oder Stadtteilschule leistungsmäßig nicht so viel wie von einem Gymnasialschüler. Das soll alles berücksichtigt werden, und wenn in der Aufgabenstellung angegeben ist, wie viele Punkte man bei einer Aufgabe oder einem Aufgabenteil erreichen kann, soll begründet werden, wie viele der maximal erreichbaren Punkte vergeben werden und warum. Grundsätzlich soll detailliert Schritt für Schritt erklärt und begründet werden, indem man sich konkret auf das bezieht, was der Schüler geschrieben hat. Falls der Lehrende, also der Nutzer des Chatbots bzw. des Vision Language Models, keine Angabe dazu macht, um welche Klassenstufe es sich handelt und was zu erwarten ist, versuche das dem Aufgabenzettel zu entnehmen.
Gucke, ob da irgendwo steht, was für eine Klassenstufe das ist und ob es ein Gymnasium, eine Gesamtschule oder eine Stadtteilschule ist (eine Stadtteilschule ist eine Gesamtschule in Hamburg). Und versuche die Korrekturvorschläge dem Niveau des Alters des Schülers, der Klassenstufe und der Schulform anzupassen. Führe die Korrektur faktisch korrekt, mit hoher Genauigkeit, aber auch gleichzeitig mit Empathie und Wohlwollen gegenüber dem Schüler durch. Und beginne das Ganze mit einer neutralen, objektiven, akribisch genauen Transkription der Aufgabenstellungen und der sämtlichen Zettel, die der Nutzer geschickt hat, sowie der Bearbeitungen des Schülers. Versuche, so gut du kannst, die Handschrift des Schülers zu entziffern. Und mache anschließend, nach diesen Transkriptionen der geschickten Bilder, deine Korrekturvorschläge. Sei dabei intelligent, genau und arbeite dich Schritt für Schritt voran. Und begründe deine Beurteilung immer mit Referenzen zu dem, was der Schüler geschrieben hat. Instruktion an das Vision Language Model: 1. Transkription der eingereichten Dokumente: Analysiere jedes hochgeladene Dokument (Bilder oder Scans) akribisch. Nutze Optical Character Recognition (OCR), um sämtliche Inhalte zu transkribieren. Erfasse jede Seite und jede Aufgabe detailliert, einschließlich: Seitenzahl Überschrift des Dokuments (falls vorhanden) Aufgabenstellung Schülerbearbeitung (so exakt wie möglich, einschließlich Rechtschreibfehler oder besonderer Schreibweisen). Entziffere Handschrift sorgfältig und bemühe dich, diese bestmöglich zu interpretieren. Vermerke unleserliche oder unklare Passagen explizit. 2. Kontextualisierung der Korrekturvorschläge: Analysiere das Dokument, um relevante Informationen zur Zielgruppe zu extrahieren, falls keine weiteren Angaben gemacht wurden: Altersgruppe der Schüler (z. B. "12 Jahre alt") Klassenstufe (z. B. "Klasse 6") Schulform (z. B. Gymnasium, Gesamtschule, Stadtteilschule). Nutze Hinweise im Dokument, wie etwa den Schwierigkeitsgrad der Aufgabenstellung, das Sprachniveau oder spezifische Anmerkungen, um die Beurteilung und Korrektur altersgerecht und schultypgerecht anzupassen. 3. Erstellung der Korrekturvorschläge: Gehe systematisch und schrittweise für jede Aufgabe und jeden Aufgabenteil vor. Begründe detailliert: Die Punktevergabe in Relation zu den maximal möglichen Punkten (falls diese angegeben sind). Deine Entscheidung mit Verweisen auf spezifische Inhalte der Schülerantwort. Erstelle deine Korrekturvorschläge basierend auf den folgenden Kriterien: Faktenbezogene Richtigkeit der Antwort. Klarheit und Struktur der Schülerantwort. Nachvollziehbarkeit der Gedankengänge und Lösungswege. Gib konstruktives Feedback, das den Schüler unterstützt und motiviert. Zeige explizit Verbesserungspotenziale und mögliche Lösungswege auf. Berücksichtige, falls angegeben, die spezifischen Erwartungen der Lehrkraft (z. B. besondere Anforderungen an Methodik oder Lösungsansätze). 4. Anpassung der Beurteilung: Passe deine Korrekturvorschläge und das Feedback an die Altersgruppe, die Schulform und das erwartete Leistungsniveau an. Beispiel: Von einem Schüler der 6. Klasse auf einer Gesamtschule ist ein geringeres Niveau zu erwarten als von einem Gymnasialschüler der 12. Klasse. Wenn keine genauen Angaben gemacht werden, gehe davon aus, dass das Durchschnittsniveau der angegebenen Schulstufe gilt. 5.
Präsentation der Ergebnisse: Stelle sicher, dass die Ergebnisse klar und übersichtlich sind: Beginne mit der Transkription der Aufgabenstellungen und Schülerbearbeitungen, sortiert nach Seiten und Aufgaben. Füge anschließend die Korrekturvorschläge hinzu, klar getrennt nach Aufgaben. Formuliere das Feedback in einem professionellen, empathischen und unterstützenden Ton. 6. Zusätzliche Hinweise: Sollte eine Aufgabenstellung oder Schülerantwort unklar sein, gib dies explizit an und erläutere mögliche Interpretationen. Verweise, wo möglich, auf relevante Lernziele oder Bildungsstandards, um die Lehrkraft zu unterstützen. Beispiel für die Ausgabe: Seite 1: Aufgabe 1: Aufgabenstellung: "Rechne: 12 + 8 = ?" Schülerantwort: "22" Korrektur: "Die Antwort ist falsch. Das korrekte Ergebnis lautet 20. Der Rechenweg wurde nicht angegeben, daher keine Teilpunkte. 0/2 Punkte." Feedback: "Achte darauf, den Rechenweg aufzuschreiben, um Teilpunkte zu erhalten, falls das Endergebnis falsch ist." Seite 2: Aufgabe 2: Aufgabenstellung: "Beschreibe den Wasserkreislauf." Schülerantwort: "Die Sonne macht Wasser geht hoch dann Wolken kommen." Korrektur: "Ansatz ist erkennbar, aber die Antwort ist zu ungenau. Wichtige Begriffe wie Verdunstung, Kondensation und Niederschlag fehlen. 2/5 Punkte." Feedback: "Versuche, präziser zu formulieren und die Fachbegriffe aus dem Unterricht zu verwenden." Das VLM ist darauf ausgelegt, höchstmögliche Genauigkeit, Empathie und Nützlichkeit zu kombinieren, um Lehrkräften eine hochwertige Unterstützung bei der Bewertung von Schülerarbeiten zu bieten. ANTWORTE IMMER AUF DEUTSCH, MIT KORREKTER RECHTSCHREIBUNG UND KORREKTER GRAMMATIK. FORMATIERE DEINE ANTWORTEN IN MARKUP LANGUAGE MIT ÜBERSICHTLICHEN ÜBERSCHRIFTEN UND ABSÄTZEN, SO DASS DIE ANTWORT HÜBSCH GERENDERT WERDEN KANN.`, 120 | }, 121 | }; 122 | 123 | export const aboutContent: InternalizationContent = { 124 | en: { 125 | title: "About School Bud-E", 126 | partOneOne: 127 | "In today's world, where education is increasingly intertwined with technology, School Bud-E emerges as an empathetic AI voice assistant specifically designed for the dynamic needs of the education sector. Developed by", 128 | partOneTwo: 129 | "in collaboration with the ELLIS Institute Tübingen, Collabora, and the Tübingen AI Center, School Bud-E enhances the learning experience with a focus on empathy, natural interaction, and", 130 | headingOne: "Redefining Education with AI", 131 | partTwoOne: 132 | "School Bud-E is not just an AI voice assistant; it is a digital companion that supports educational growth through:", 133 | partTwoTwo: 134 | "Real-time responses to student queries that facilitate immediate learning opportunities.", 135 | partTwoThree: 136 | "Emotionally intelligent interactions that recognize the learner's emotional state and adapt to it to foster a supportive learning environment.", 137 | partTwoFour: 138 | "Maintaining conversation context across sessions, enabling personalized learning experiences that build over time.", 139 | partTwoFive: 140 | "Handling complex multi-speaker scenarios, such as classroom discussions in multiple languages.", 141 | partTwoSix: 142 | "Operating on local, consumer-grade hardware, ensuring privacy and accessibility.", 143 | headingTwo: "Technological Innovation for Education", 144 | partThreeOne: 145 | "At the core of School Bud-E's development is the pursuit of low latency and maximum conversational naturalness.
Through rigorous testing and evaluation of various speech-to-text, speech understanding, and text-to-speech models, the team has achieved remarkable responsiveness and quality on devices common in schools.", 146 | partThreeTwo: 147 | "Since January 2024, School Bud-E has been operating with latencies between 300 and 500 ms, promising near-instant interaction that is crucial to keeping students engaged and supporting educators in real time.", 148 | headingThree: "Supporting the Education Revolution", 149 | partFourOne: 150 | "The development of School Bud-E is an ongoing collaboration. We are committed to continuously enhancing its capabilities to better serve students and educators alike. From reducing system requirements and latency to enriching its understanding of conversational nuances, each update aims to make School Bud-E an indispensable asset in educational institutions. At the same time, we are building an architecture that enables the technology to be implemented in various educational environments, to scale, and to integrate modules tailored to the specific needs of students and educators in different learning settings.", 151 | partFourTwo: 152 | "Are you interested in contributing to the School Bud-E project or integrating it into your suite of educational technologies? Then join our", 153 | partFourThree: "or contact us directly at", 154 | }, 155 | de: { 156 | title: "Über School Bud-E", 157 | partOneOne: 158 | "In der heutigen Zeit, in der Bildung zunehmend mit Technologie verflochten ist, tritt School Bud-E als empathischer KI-Sprachassistent hervor, der speziell für die dynamischen Bedürfnisse im Bildungsbereich entwickelt wurde. Entwickelt von", 159 | partOneTwo: 160 | "in Zusammenarbeit mit dem ELLIS-Institut Tübingen, Collabora und dem Tübinger KI-Zentrum, verbessert School Bud-E das Lernerlebnis mit einem Schwerpunkt auf Empathie, natürlicher Interaktion und", 161 | headingOne: "Bildung mit KI neu definieren", 162 | partTwoOne: 163 | "School Bud-E ist nicht nur ein KI-Sprachassistent; es ist ein digitaler Begleiter, der das Bildungswachstum unterstützt durch:", 164 | partTwoTwo: 165 | "Echtzeit-Antworten auf Schüleranfragen, die sofortige Lernmöglichkeiten erleichtern.", 166 | partTwoThree: 167 | "Emotional intelligente Interaktionen, die den emotionalen Zustand des Lernenden erkennen und sich an diesen anpassen, um eine unterstützende Lernumgebung zu fördern.", 168 | partTwoFour: 169 | "Beibehaltung des Gesprächskontexts über Sitzungen hinweg, was personalisierte Lernerfahrungen ermöglicht, die sich im Laufe der Zeit aufbauen.", 170 | partTwoFive: 171 | "Bewältigung von komplexen Mehrsprecher-Szenarien, wie Klassenzimmerdiskussionen in mehreren Sprachen.", 172 | partTwoSix: 173 | "Betrieb auf lokaler, handelsüblicher Hardware, was Datenschutz und Zugänglichkeit gewährleistet.", 174 | headingTwo: "Technologische Innovation für die Bildung", 175 | partThreeOne: 176 | "Im Mittelpunkt der Entwicklung von School Bud-E steht das Streben nach geringer Latenz und maximaler Natürlichkeit im Gespräch.
Durch rigoroses Testen und Evaluieren verschiedener Sprach-zu-Text-, Sprachverständnis- und Text-zu-Sprach-Modelle hat das Team eine bemerkenswerte Reaktionsfähigkeit und Qualität auf Geräten erreicht, die in Schulen üblich sind.", 177 | partThreeTwo: 178 | "Seit Januar 2024 arbeitet School Bud-E mit Latenzen zwischen 300 und 500 ms und verspricht eine nahezu sofortige Interaktion, die entscheidend ist, um Schüler engagiert zu halten und Pädagogen in Echtzeit zu unterstützen.", 179 | headingThree: "Unterstützung der Bildungsrevolution", 180 | partFourOne: 181 | "Die Entwicklung von School Bud-E ist eine fortwährende Zusammenarbeit. Wir sind darauf bedacht, seine Fähigkeiten kontinuierlich zu verbessern, um Schülern und Pädagogen gleichermaßen besser zu dienen. Von der Reduzierung der Systemanforderungen und Latenz bis zur Bereicherung seines Verständnisses für konversationelle Nuancen zielt jedes Update darauf ab, School Bud-E zu einem unverzichtbaren Hilfsmittel in Bildungseinrichtungen zu machen. Gleichzeitig bauen wir eine Architektur, die es ermöglicht, die Technologie in verschiedenen Bildungsumgebungen zu implementieren, zu skalieren und Module zu integrieren, die auf die spezifischen Bedürfnisse von Schülern und Pädagogen in verschiedenen Lernsettings zugeschnitten sind.", 182 | partFourTwo: 183 | "Sind Sie interessiert, am School Bud-E Projekt mitzuarbeiten oder es in Ihre Suite von Bildungstechnologien zu integrieren? Dann treten Sie unserem", 184 | partFourThree: "bei oder kontaktieren Sie uns direkt unter", 185 | }, 186 | }; 187 | 188 | export const settingsContent: InternalizationContent = { 189 | en: { 190 | title: "Settings", 191 | apiUrlLabel: "LLM API URL (URL containing /v1/chat/completions)", 192 | apiUrlPlaceholder: "Enter API URL", 193 | apiKeyLabel: "LLM API Key", 194 | apiKeyPlaceholder: "Enter API Key", 195 | modelLabel: "LLM Model", 196 | modelPlaceholder: "Enter Model Name", 197 | ttsUrlLabel: "TTS API URL", 198 | ttsUrlPlaceholder: "Enter TTS API URL", 199 | ttsKeyLabel: "TTS API Key", 200 | ttsKeyPlaceholder: "Enter TTS API Key", 201 | ttsModelLabel: "TTS Model", 202 | ttsModelPlaceholder: "Enter TTS Model Name", 203 | sttUrlLabel: "STT API URL (URL containing /v1/audio/transcriptions)", 204 | sttUrlPlaceholder: "Enter STT API URL", 205 | sttKeyLabel: "STT API Key", 206 | sttKeyPlaceholder: "Enter STT API Key", 207 | sttModelLabel: "STT Model", 208 | sttModelPlaceholder: "Enter STT Model Name", 209 | vlmUrlLabel: "VLM API URL (URL containing /v1/chat/completions)", 210 | vlmUrlPlaceholder: "Enter VLM API URL", 211 | vlmKeyLabel: "VLM API Key", 212 | vlmKeyPlaceholder: "Enter VLM API Key", 213 | vlmModelLabel: "VLM Model", 214 | vlmModelPlaceholder: "Enter VLM Model Name", 215 | vlmCorrectionModelLabel: "VLM Correction Model", 216 | vlmCorrectionModelPlaceholder: "Enter VLM Correction Model Name", 217 | cancel: "Cancel", 218 | save: "Save", 219 | systemPromptLabel: "System Prompt", 220 | back: "Back", 221 | universalApiKeyLabel: "Universal API Key", 222 | universalApiKeyPlaceholder: "Enter your API key", 223 | advancedSettings: "Advanced Settings", 224 | lessSettings: "Fewer Settings", 225 | chatApiTitle: "Chat API", 226 | ttsTitle: "Text-to-Speech", 227 | sttTitle: "Speech-to-Text", 228 | vlmTitle: "Vision Language Model", 229 | }, 230 | de: { 231 | title: "Einstellungen", 232 | apiUrlLabel: "LLM API URL (URL mit /v1/chat/completions)", 233 | apiUrlPlaceholder: "API URL eingeben", 234 | apiKeyLabel: "LLM API Schlüssel", 235 | apiKeyPlaceholder: "API
Schlüssel eingeben", 236 | modelLabel: "LLM Modell", 237 | modelPlaceholder: "Modellname eingeben", 238 | ttsUrlLabel: "TTS API URL", 239 | ttsUrlPlaceholder: "TTS API URL eingeben", 240 | ttsKeyLabel: "TTS API Schlüssel", 241 | ttsKeyPlaceholder: "TTS API Schlüssel eingeben", 242 | ttsModelLabel: "TTS Modell", 243 | ttsModelPlaceholder: "TTS Modellname eingeben", 244 | sttUrlLabel: "STT API URL (URL mit /v1/audio/transcriptions)", 245 | sttUrlPlaceholder: "STT API URL eingeben", 246 | sttKeyLabel: "STT API Schlüssel", 247 | sttKeyPlaceholder: "STT API Schlüssel eingeben", 248 | sttModelLabel: "STT Modell", 249 | sttModelPlaceholder: "STT Modellname eingeben", 250 | vlmUrlLabel: "VLM API URL (URL mit /v1/chat/completions)", 251 | vlmUrlPlaceholder: "VLM API URL eingeben", 252 | vlmKeyLabel: "VLM API Schlüssel", 253 | vlmKeyPlaceholder: "VLM API Schlüssel eingeben", 254 | vlmModelLabel: "VLM Modell", 255 | vlmModelPlaceholder: "VLM Modellname eingeben", 256 | vlmCorrectionModelLabel: "VLM Korrektur Modell", 257 | vlmCorrectionModelPlaceholder: "VLM Korrektur Modellname eingeben", 258 | cancel: "Abbrechen", 259 | save: "Speichern", 260 | systemPromptLabel: "System Prompt", 261 | back: "Zurück", 262 | universalApiKeyLabel: "Universal API-Schlüssel", 263 | universalApiKeyPlaceholder: "Geben Sie Ihren API-Schlüssel ein", 264 | advancedSettings: "Erweiterte Einstellungen", 265 | lessSettings: "Weniger Einstellungen", 266 | chatApiTitle: "Chat API", 267 | ttsTitle: "Text-zu-Sprache", 268 | sttTitle: "Sprache-zu-Text", 269 | vlmTitle: "Bildverarbeitung", 270 | }, 271 | }; 272 | --------------------------------------------------------------------------------
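The `InternalizationContent` type annotating every export in `internalization/content.ts` is declared in `types.d.ts`, which this dump does not include. The sketch below is a minimal assumption of its shape, inferred from how the exported objects are structured (a language code first, then a flat map of string keys to strings), together with a hypothetical lookup; the interface body and the `lang` variable are illustrative, not code from the repository.

```ts
// Minimal sketch, not part of the repository: the real declaration of
// InternalizationContent lives in types.d.ts (not shown in this dump).
// Every export in internalization/content.ts is a two-level string map,
// language code first, then a flat key -> text table, so a plausible shape is:
interface InternalizationContent {
  [lang: string]: {
    [key: string]: string;
  };
}

// Hypothetical lookup, mirroring how a component might read a label.
// "lang" is illustrative; the real app may store it in settings or the URL.
const menuContent: InternalizationContent = {
  en: { about: "About School Bud-E", imprint: "Imprint" },
  de: { about: "Über School Bud-E", imprint: "Impressum" },
};

const lang = "de";
console.log(menuContent[lang].about); // "Über School Bud-E"
```

One design note implied by this shape: an index-signature type does not enforce that the `en` and `de` tables carry the same keys, so a stricter declaration such as `Record<"en" | "de", Record<string, string>>` (or a shared key union) could catch missing translations at compile time.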