├── .env ├── .eslintignore ├── .eslintrc.cjs ├── .github └── workflows │ └── publish.yml ├── .gitignore ├── .npmrc ├── .prettierignore ├── .prettierrc ├── LICENSE ├── README.md ├── images ├── chat-graph.png ├── dark-mode.png ├── folders.png └── light-mode.png ├── package.json ├── pnpm-lock.yaml ├── postcss.config.cjs ├── src-tauri ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── build.rs ├── icons │ ├── 128x128.png │ ├── 128x128@2x.png │ ├── 32x32.png │ ├── Square107x107Logo.png │ ├── Square142x142Logo.png │ ├── Square150x150Logo.png │ ├── Square284x284Logo.png │ ├── Square30x30Logo.png │ ├── Square310x310Logo.png │ ├── Square44x44Logo.png │ ├── Square71x71Logo.png │ ├── Square89x89Logo.png │ ├── StoreLogo.png │ ├── icon.icns │ ├── icon.ico │ └── icon.png ├── src │ └── main.rs └── tauri.conf.json ├── src ├── app.d.ts ├── app.html ├── app.postcss ├── lib │ ├── assets │ │ └── xpress-ai-logo-white.png │ ├── backend │ │ ├── Anthropic.ts │ │ ├── BackendFactory.ts │ │ ├── OpenAI.ts │ │ └── types.ts │ ├── components │ │ ├── CodeRenderer.svelte │ │ ├── ConversationGraph.svelte │ │ ├── EditableString.svelte │ │ ├── Folder.svelte │ │ ├── Menu.svelte │ │ ├── MessageCard.svelte │ │ ├── SubMenu.svelte │ │ └── dialogs.ts │ ├── stores │ │ ├── schema.ts │ │ ├── technologicStores.ts │ │ └── utils.ts │ └── translations │ │ ├── index.ts │ │ ├── lang.json │ │ ├── translations.ts │ │ └── util.ts └── routes │ ├── +layout.svelte │ ├── +layout.ts │ ├── +page.svelte │ ├── [conversationId] │ ├── +page.svelte │ └── conversationBroker.ts │ └── settings │ ├── backends │ ├── +page.svelte │ ├── [backendName] │ │ └── +page.svelte │ └── new │ │ └── +page.svelte │ └── backup │ └── +page.svelte ├── static └── favicon.svg ├── svelte.config.js ├── tailwind.config.cjs ├── tsconfig.json └── vite.config.ts /.env: -------------------------------------------------------------------------------- 1 | #BACKEND=https://api.openai.com 2 | #PUBLIC_MODEL=gpt-3.5-turbo 3 | 4 | BACKEND=http://100.82.217.33:5000 5 | 
PUBLIC_MODEL=rwkv-raven-14b-eng-more 6 | 7 | -------------------------------------------------------------------------------- /.eslintignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | node_modules 3 | /build 4 | /.svelte-kit 5 | /package 6 | .env 7 | .env.* 8 | !.env.example 9 | 10 | # Ignore files for PNPM, NPM and YARN 11 | pnpm-lock.yaml 12 | package-lock.json 13 | yarn.lock 14 | -------------------------------------------------------------------------------- /.eslintrc.cjs: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | root: true, 3 | parser: '@typescript-eslint/parser', 4 | extends: ['eslint:recommended', 'plugin:@typescript-eslint/recommended', 'prettier'], 5 | plugins: ['svelte3', '@typescript-eslint'], 6 | ignorePatterns: ['*.cjs'], 7 | overrides: [{ files: ['*.svelte'], processor: 'svelte3/svelte3' }], 8 | settings: { 9 | 'svelte3/typescript': () => require('typescript') 10 | }, 11 | parserOptions: { 12 | sourceType: 'module', 13 | ecmaVersion: 2020 14 | }, 15 | env: { 16 | browser: true, 17 | es2017: true, 18 | node: true 19 | } 20 | }; 21 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish To Technologic Site 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | 7 | jobs: 8 | build: 9 | runs-on: self-hosted 10 | 11 | steps: 12 | - uses: actions/checkout@v2 13 | 14 | - name: Install dependencies 15 | run: pnpm install --no-frozen-lockfile 16 | 17 | - name: Build site 18 | run: pnpm run build 19 | 20 | - name: Copying to technologic.xpress.ai 21 | run: rsync -r -v build/* ${{ secrets.SSH_USER }}@${{ secrets.SSH_HOST }}:/var/www/technologic.xpress.ai/ 22 | -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | .DS_Store 2 | node_modules 3 | /build 4 | /.svelte-kit 5 | /package 6 | .env.* 7 | !.env.example 8 | vite.config.js.timestamp-* 9 | vite.config.ts.timestamp-* 10 | /.idea 11 | /src-tauri/target 12 | .env -------------------------------------------------------------------------------- /.npmrc: -------------------------------------------------------------------------------- 1 | engine-strict=true 2 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | node_modules 3 | /build 4 | /.svelte-kit 5 | /package 6 | .env 7 | .env.* 8 | !.env.example 9 | 10 | # Ignore files for PNPM, NPM and YARN 11 | pnpm-lock.yaml 12 | package-lock.json 13 | yarn.lock 14 | -------------------------------------------------------------------------------- /.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "useTabs": true, 3 | "singleQuote": true, 4 | "trailingComma": "none", 5 | "printWidth": 100, 6 | "plugins": ["prettier-plugin-svelte"], 7 | "pluginSearchDirs": ["."], 8 | "overrides": [{ "files": "*.svelte", "options": { "parser": "svelte" } }] 9 | } 10 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Xpress AI 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the 
following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Technologic - AI Chat 2 | 3 | ![GitHub release](https://img.shields.io/github/release/XpressAI/technologic) 4 | ![GitHub issues](https://img.shields.io/github/issues/XpressAI/technologic) 5 | ![License](https://img.shields.io/github/license/XpressAI/technologic) 6 | 7 | Technologic is a powerful, feature-rich AI Chatbot Client that is designed to work seamlessly with OpenAI's API or any 8 | compatible backend (such as your [Xpress AI](https://www.xpress.ai) agents). With a user-friendly interface and the ability to organize, modify, and manage your conversations, 9 | Technologic brings you a next-level chatting experience with your AI assistant. 10 | 11 | **[Demo: https://technologic.xpress.ai/](https://technologic.xpress.ai/)** 12 | 13 | ![Technologic Screenshot](./images/light-mode.png) 14 | 15 | ## Features 16 | 17 | - **Secure Storage**: Your conversations are stored locally on your computer using your browser's IndexedDB storage. 18 | - **Backend Compatibility**: Works with any backend compatible with OpenAI's API. 19 | - **Bring Your Own API Key**: Easily configure your OpenAI API key or any other compatible backend (e.g. 
[xai-llm-server](https://github.com/XpressAI/xai-llm-server)). 20 | - **Organize Conversations**: Keep your conversations tidy by organizing them into folders. 21 | - **Message Modification**: Edit and modify messages, both sent and received, as needed. 22 | - **Custom Personality**: Support for "System Messages" to give your chatbot a unique personality (if supported by the backend). 23 | - **Fork Conversations**: Easily branch off into different topics without losing the context of previous conversations. 24 | - **Elaborate**: Use the "Go on" feature to prompt the bot to expand on its last message. 25 | - **Merge Messages**: Combine messages to avoid fragmentation or incomplete code blocks. 26 | - **View Raw Message**: Access the raw text of any message with the flip of a switch. 27 | 28 | ## Screenshots 29 | 30 | ### Light Mode 31 | ![Technologic Screenshot](./images/light-mode.png) 32 | 33 | ### Dark Mode 34 | ![Technologic Screenshot](./images/dark-mode.png) 35 | 36 | ### Folders 37 | ![Technologic Screenshot](./images/folders.png) 38 | 39 | ### Conversation Graph 40 | ![Technologic Screenshot](./images/chat-graph.png) 41 | 42 | ## Installation 43 | 44 | Clone the repository and navigate to the project directory: 45 | 46 | ``` 47 | git clone https://github.com/XpressAI/technologic.git 48 | cd technologic 49 | ``` 50 | 51 | Install the dependencies: 52 | 53 | ``` 54 | pnpm install 55 | ``` 56 | 57 | Start the development server: 58 | ``` 59 | pnpm run dev 60 | ``` 61 | 62 | ## Configuration 63 | 64 | The entire configuration happens through the browser. Just click the little cog icon on the bottom right corner of the 65 | sidebar. 
66 | 67 | 68 | ## Developing 69 | 70 | Once you've cloned the repository and installed dependencies with `pnpm install` (this project uses pnpm — see `.npmrc`), start a development server: 71 | 72 | ```bash 73 | pnpm run dev 74 | 75 | # or start the server and open the app in a new browser tab 76 | pnpm run dev -- --open 77 | ``` 78 | 79 | ## Usage 80 | 81 | 1. Configure the backend by pasting your OpenAI API key or any other compatible API key into the backend configuration. 82 | 83 | 2. Start chatting with the AI by typing your message in the input field and hitting "Send". 84 | 85 | 3. Organize your conversations by creating folders and moving conversations into them. 86 | 87 | 4. Modify messages by clicking on the message you want to edit. 88 | 89 | 5. Use the "Go on" feature by hitting "Send" with an empty message to prompt the bot to elaborate on its last message. 90 | 91 | 6. Merge messages by selecting two or more fragmented messages and clicking on the "Merge" button. 92 | 93 | 7. View the raw text of a message by clicking the "Flip" button on the message. 94 | 95 | ## FAQs 96 | 97 | **Q: Is my data secure?** 98 | 99 | A: Yes, Technologic stores all your conversations locally on your computer using your browser's IndexedDB storage. 100 | 101 | **Q: Can I use Technologic with other backends apart from OpenAI's API?** 102 | 103 | A: Absolutely! Technologic is designed to work with any backend that is compatible with OpenAI's API. Just configure the backend and you're good to go. 104 | 105 | **Q: How can I give my AI chatbot a custom personality?** 106 | 107 | A: Technologic supports "System Messages" that allow you to give your chatbot a unique personality. This feature is available if your chosen backend supports it. 108 | 109 | ## Contributing 110 | 111 | We welcome contributions to Technologic! 112 | 113 | ## License 114 | Technologic is licensed under the MIT License. See the [LICENSE](LICENSE) file for more details.
115 | -------------------------------------------------------------------------------- /images/chat-graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XpressAI/technologic/HEAD/images/chat-graph.png -------------------------------------------------------------------------------- /images/dark-mode.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XpressAI/technologic/HEAD/images/dark-mode.png -------------------------------------------------------------------------------- /images/folders.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XpressAI/technologic/HEAD/images/folders.png -------------------------------------------------------------------------------- /images/light-mode.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XpressAI/technologic/HEAD/images/light-mode.png -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "technologic", 3 | "version": "0.2.0", 4 | "private": true, 5 | "scripts": { 6 | "dev": "vite dev", 7 | "build": "vite build", 8 | "preview": "vite preview", 9 | "check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json", 10 | "check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch", 11 | "lint": "prettier --plugin-search-dir . --check . && eslint .", 12 | "format": "prettier --plugin-search-dir . 
--write .", 13 | "native-dev": "tauri dev", 14 | "native-build": "tauri build" 15 | }, 16 | "devDependencies": { 17 | "@floating-ui/dom": "^1.3.0", 18 | "@fontsource/quicksand": "^4.5.12", 19 | "@skeletonlabs/skeleton": "^1.7.1", 20 | "@sveltejs/adapter-auto": "^2.1.0", 21 | "@sveltejs/adapter-static": "^2.0.2", 22 | "@sveltejs/kit": "^1.20.2", 23 | "@tailwindcss/typography": "^0.5.9", 24 | "@tauri-apps/cli": "^1.4.0", 25 | "@typescript-eslint/eslint-plugin": "^5.59.11", 26 | "@typescript-eslint/parser": "^5.59.11", 27 | "autoprefixer": "^10.4.14", 28 | "eslint": "^8.42.0", 29 | "eslint-config-prettier": "^8.8.0", 30 | "eslint-plugin-svelte3": "^4.0.0", 31 | "highlight.js": "^11.8.0", 32 | "localforage": "^1.10.0", 33 | "postcss": "^8.4.24", 34 | "postcss-load-config": "^4.0.1", 35 | "prettier": "^2.8.8", 36 | "prettier-plugin-svelte": "^2.10.1", 37 | "process": "^0.11.10", 38 | "svelte": "^3.59.1", 39 | "svelte-check": "^3.4.3", 40 | "svelte-dnd-action": "^0.9.22", 41 | "svelte-legos": "^0.1.9", 42 | "svelte-markdown": "^0.2.3", 43 | "svelte-preprocess": "^4.10.7", 44 | "sveltekit-i18n": "^2.4.2", 45 | "tailwindcss": "^3.3.2", 46 | "tslib": "^2.5.3", 47 | "typescript": "^5.1.3", 48 | "vite": "^4.3.9" 49 | }, 50 | "type": "module", 51 | "dependencies": { 52 | "@tabler/icons-svelte": "^2.22.0" 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /postcss.config.cjs: -------------------------------------------------------------------------------- 1 | const tailwindcss = require('tailwindcss'); 2 | const autoprefixer = require('autoprefixer'); 3 | 4 | const config = { 5 | plugins: [ 6 | //Some plugins, like tailwindcss/nesting, need to run before Tailwind, 7 | tailwindcss(), 8 | //But others, like autoprefixer, need to run after, 9 | autoprefixer 10 | ] 11 | }; 12 | 13 | module.exports = config; 14 | -------------------------------------------------------------------------------- /src-tauri/.gitignore: 
-------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | -------------------------------------------------------------------------------- /src-tauri/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "technologic" 3 | version = "0.1.0" 4 | description = "Technologic is a user-friendly AI Chatbot Client packed with features to enhance your chatting experience. Securely store conversations, modify messages, and easily organize them - all while enjoying compatibility with various backends, including OpenAI's API." 5 | authors = ["Paul Dubs", "Eduardo Gonzalez"] 6 | license = "MIT" 7 | repository = "https://github.com/XpressAI/technologic/" 8 | default-run = "technologic" 9 | edition = "2021" 10 | rust-version = "1.59" 11 | 12 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 13 | 14 | [build-dependencies] 15 | tauri-build = { version = "1.2.1", features = [] } 16 | 17 | [dependencies] 18 | serde_json = "1.0" 19 | serde = { version = "1.0", features = ["derive"] } 20 | tauri = { version = "1.2.5", features = [] } 21 | 22 | [features] 23 | # by default Tauri runs in production mode 24 | # when `tauri dev` runs it is executed with `cargo run --no-default-features` if `devPath` is an URL 25 | default = [ "custom-protocol" ] 26 | # this feature is used for production builds where `devPath` points to the filesystem 27 | # DO NOT remove this 28 | custom-protocol = [ "tauri/custom-protocol" ] 29 | -------------------------------------------------------------------------------- /src-tauri/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | tauri_build::build() 3 | } 4 | -------------------------------------------------------------------------------- /src-tauri/icons/128x128.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/XpressAI/technologic/HEAD/src-tauri/icons/128x128.png -------------------------------------------------------------------------------- /src-tauri/icons/128x128@2x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XpressAI/technologic/HEAD/src-tauri/icons/128x128@2x.png -------------------------------------------------------------------------------- /src-tauri/icons/32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XpressAI/technologic/HEAD/src-tauri/icons/32x32.png -------------------------------------------------------------------------------- /src-tauri/icons/Square107x107Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XpressAI/technologic/HEAD/src-tauri/icons/Square107x107Logo.png -------------------------------------------------------------------------------- /src-tauri/icons/Square142x142Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XpressAI/technologic/HEAD/src-tauri/icons/Square142x142Logo.png -------------------------------------------------------------------------------- /src-tauri/icons/Square150x150Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XpressAI/technologic/HEAD/src-tauri/icons/Square150x150Logo.png -------------------------------------------------------------------------------- /src-tauri/icons/Square284x284Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XpressAI/technologic/HEAD/src-tauri/icons/Square284x284Logo.png 
-------------------------------------------------------------------------------- /src-tauri/icons/Square30x30Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XpressAI/technologic/HEAD/src-tauri/icons/Square30x30Logo.png -------------------------------------------------------------------------------- /src-tauri/icons/Square310x310Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XpressAI/technologic/HEAD/src-tauri/icons/Square310x310Logo.png -------------------------------------------------------------------------------- /src-tauri/icons/Square44x44Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XpressAI/technologic/HEAD/src-tauri/icons/Square44x44Logo.png -------------------------------------------------------------------------------- /src-tauri/icons/Square71x71Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XpressAI/technologic/HEAD/src-tauri/icons/Square71x71Logo.png -------------------------------------------------------------------------------- /src-tauri/icons/Square89x89Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XpressAI/technologic/HEAD/src-tauri/icons/Square89x89Logo.png -------------------------------------------------------------------------------- /src-tauri/icons/StoreLogo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XpressAI/technologic/HEAD/src-tauri/icons/StoreLogo.png -------------------------------------------------------------------------------- /src-tauri/icons/icon.icns: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/XpressAI/technologic/HEAD/src-tauri/icons/icon.icns -------------------------------------------------------------------------------- /src-tauri/icons/icon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XpressAI/technologic/HEAD/src-tauri/icons/icon.ico -------------------------------------------------------------------------------- /src-tauri/icons/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XpressAI/technologic/HEAD/src-tauri/icons/icon.png -------------------------------------------------------------------------------- /src-tauri/src/main.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr( 2 | all(not(debug_assertions), target_os = "windows"), 3 | windows_subsystem = "windows" 4 | )] 5 | 6 | fn main() { 7 | tauri::Builder::default() 8 | .run(tauri::generate_context!()) 9 | .expect("error while running tauri application"); 10 | } 11 | -------------------------------------------------------------------------------- /src-tauri/tauri.conf.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "../node_modules/@tauri-apps/cli/schema.json", 3 | "build": { 4 | "beforeBuildCommand": "pnpm run build", 5 | "beforeDevCommand": "pnpm run dev", 6 | "devPath": "http://localhost:5173", 7 | "distDir": "../build" 8 | }, 9 | "package": { 10 | "productName": "technologic", 11 | "version": "0.1.0" 12 | }, 13 | "tauri": { 14 | "allowlist": { 15 | "all": false 16 | }, 17 | "bundle": { 18 | "active": true, 19 | "category": "DeveloperTool", 20 | "copyright": "", 21 | "deb": { 22 | "depends": [] 23 | }, 24 | "externalBin": [], 25 | "icon": [ 26 | "icons/32x32.png", 27 | "icons/128x128.png", 28 | "icons/128x128@2x.png", 29 | "icons/icon.icns", 30 | "icons/icon.ico" 31 | ], 32 | "identifier": 
"ai.xpress.technologic", 33 | "longDescription": "", 34 | "macOS": { 35 | "entitlements": null, 36 | "exceptionDomain": "", 37 | "frameworks": [], 38 | "providerShortName": null, 39 | "signingIdentity": null 40 | }, 41 | "resources": [], 42 | "shortDescription": "", 43 | "targets": "all", 44 | "windows": { 45 | "certificateThumbprint": null, 46 | "digestAlgorithm": "sha256", 47 | "timestampUrl": "" 48 | } 49 | }, 50 | "security": { 51 | "csp": null 52 | }, 53 | "updater": { 54 | "active": false 55 | }, 56 | "windows": [ 57 | { 58 | "fullscreen": false, 59 | "height": 600, 60 | "resizable": true, 61 | "title": "Technologic", 62 | "width": 800 63 | } 64 | ] 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /src/app.d.ts: -------------------------------------------------------------------------------- 1 | // See https://kit.svelte.dev/docs/types#app 2 | // for information about these interfaces 3 | declare global { 4 | namespace App { 5 | // interface Error {} 6 | // interface Locals {} 7 | // interface PageData {} 8 | // interface Platform {} 9 | } 10 | } 11 | 12 | export {}; 13 | -------------------------------------------------------------------------------- /src/app.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | %sveltekit.head% 8 | 9 | 10 |
%sveltekit.body%
11 | 12 | 13 | -------------------------------------------------------------------------------- /src/app.postcss: -------------------------------------------------------------------------------- 1 | /* Write your global styles here, in PostCSS syntax */ 2 | 3 | html, 4 | body { 5 | @apply h-full overflow-hidden; 6 | } 7 | 8 | body { 9 | @apply bg-surface-50-900-token; 10 | } 11 | 12 | body { 13 | background-image: radial-gradient( 14 | at 0% 0%, 15 | rgba(var(--color-secondary-500) / 0.33) 0px, 16 | transparent 50% 17 | ), 18 | radial-gradient(at 98% 1%, rgba(var(--color-error-500) / 0.33) 0px, transparent 50%); 19 | } 20 | 21 | :root { 22 | --theme-font-family-base: 'Quicksand', sans-serif; 23 | --theme-font-family-heading: 'Quicksand', sans-serif; 24 | /* ... */ 25 | } 26 | 27 | .streaming-message { 28 | margin-left: 0.5em; 29 | width: 0.5em; 30 | height: 1em; 31 | display: inline-block; 32 | background-color: rgb(var(--color-surface-500)); 33 | } 34 | -------------------------------------------------------------------------------- /src/lib/assets/xpress-ai-logo-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XpressAI/technologic/HEAD/src/lib/assets/xpress-ai-logo-white.png -------------------------------------------------------------------------------- /src/lib/backend/Anthropic.ts: -------------------------------------------------------------------------------- 1 | import type { BackendConfiguration } from '$lib/stores/schema'; 2 | import type { Backend, Message } from '$lib/backend/types'; 3 | import type { ConversationStore } from "$lib/stores/schema"; 4 | import {get} from "svelte/store"; 5 | import type { BackendFactory } from "./types"; 6 | 7 | export const anthropicBackendFactory: BackendFactory = { 8 | createBackend 9 | }; 10 | 11 | export function createBackend(configuration: BackendConfiguration, model: string): Backend { 12 | // according to docs: 
https://docs.anthropic.com/claude/reference/messages_post 13 | // temperature default is 1.0, I set it to 0.9 to make it slightly less random 14 | const temperature = 0.9; 15 | 16 | function request(payload: any) { 17 | 18 | let baseUrl = configuration.url; 19 | if(baseUrl.startsWith("http://0.0.0.0")){ 20 | baseUrl = ""; 21 | } 22 | 23 | const headers = new Headers(); 24 | headers.append('Content-Type', 'application/json'); 25 | headers.append('x-api-key',`${configuration.token}`); 26 | headers.append('anthropic-version', '2023-06-01'); 27 | 28 | return fetch(`${baseUrl}/messages`, { 29 | method: 'POST', 30 | headers: headers, 31 | body: JSON.stringify(payload) 32 | }); 33 | } 34 | 35 | async function sendMessage(history: Message[]): Promise { 36 | const payload = { 37 | model: model, 38 | max_tokens: 1024, 39 | messages: history.filter((h) => h.role !== 'system'), 40 | system: history.find((h) => h.role == 'system')?.content 41 | }; 42 | 43 | const response = await request(payload); 44 | 45 | const out = await response.json(); 46 | const content = out.content[0]; 47 | 48 | return { 49 | role: out.role, 50 | content: content.text, 51 | }; 52 | } 53 | 54 | async function sendMessageAndStream( 55 | history: Message[], 56 | onMessage: (message: string, done: boolean) => Promise 57 | ) { 58 | const payload = { 59 | model: model, 60 | max_tokens: 1024, 61 | temperature: temperature, 62 | messages: history.filter((h) => h.content.length > 0), 63 | stream: true 64 | }; 65 | 66 | const response = await request(payload); 67 | 68 | const reader = response.body?.getReader(); 69 | if (!reader) { 70 | throw new Error('Could not get reader from response body'); 71 | } 72 | 73 | const decoder = new TextDecoder('utf-8'); 74 | let out = ''; 75 | while (true) { 76 | const { done, value } = await reader.read(); 77 | if (done) { 78 | break; 79 | } 80 | 81 | const chunk = decoder.decode(value, { stream: true }); 82 | out += chunk; 83 | 84 | let eventSeparatorIndex; 85 | while 
((eventSeparatorIndex = out.indexOf('\n\n')) !== -1) { 86 | const data = out.slice(0, eventSeparatorIndex); 87 | 88 | if (data.match(/^data: \[DONE\]/)) { 89 | await onMessage('', true); // send end message. 90 | return; 91 | } 92 | const json = data.match(/data: (.*)/); 93 | if (json && json.length >= 1) { 94 | 95 | const event = JSON.parse(json[1]); 96 | 97 | out = out.slice(eventSeparatorIndex + 2); 98 | 99 | switch (event.type) { 100 | // case 'message_start': 101 | case 'content_block_start': 102 | await onMessage('', false); // send start message. 103 | break; 104 | 105 | // case 'content_block_stop': 106 | case 'message_stop': 107 | await onMessage('', true); // send end message. 108 | return; 109 | 110 | case 'content_block_delta': 111 | await onMessage(event.delta.text, false); 112 | break; 113 | 114 | case 'message_delta': 115 | case 'ping': 116 | // ignore 117 | } 118 | } else { 119 | console.warn('no json match found.'); 120 | await onMessage('', false); // send start message. 121 | return; 122 | } 123 | } 124 | } 125 | } 126 | 127 | async function renameConversationWithSummary(currentConversation: ConversationStore) { 128 | const summarizeMessage = 'Using the same language, in at most 3 words summarize the conversation between assistant and user.' 129 | 130 | const systemMessage: Message = { 131 | role: 'system', 132 | content: summarizeMessage 133 | }; 134 | 135 | // system prompt alone might not be enough, especially not with other OpenAI-API-compatible models... 136 | // therefore we just add a "user" message that is the same as the system prompt, to "trigger" the model 137 | // to write an "assistant" message to the user's request.
138 | const userMessage: Message = { 139 | role: 'user', 140 | content: summarizeMessage, 141 | }; 142 | 143 | const history = get(currentConversation.history); 144 | const filteredHistory = history.filter((msg) => msg.role === 'user' || msg.role === 'assistant'); 145 | 146 | const response = await sendMessage([...filteredHistory, systemMessage, userMessage]); 147 | 148 | const newTitle = response.content; 149 | if (newTitle) { 150 | await currentConversation.rename(newTitle); 151 | } 152 | } 153 | 154 | 155 | return { 156 | get api() { 157 | return configuration.api; 158 | }, 159 | get name() { 160 | return configuration.name; 161 | }, 162 | get model() { 163 | return model; 164 | }, 165 | get temperature() { 166 | return temperature; 167 | }, 168 | 169 | sendMessage, 170 | sendMessageAndStream, 171 | renameConversationWithSummary 172 | }; 173 | } 174 | -------------------------------------------------------------------------------- /src/lib/backend/BackendFactory.ts: -------------------------------------------------------------------------------- 1 | import type { Backend, BackendFactory } from "./types"; 2 | import type { BackendConfiguration } from "$lib/stores/schema"; 3 | import { openAIBackendFactory } from '$lib/backend/OpenAI'; 4 | import { anthropicBackendFactory } from '$lib/backend/Anthropic'; 5 | 6 | const backends: { [key: string]: BackendFactory } = { 7 | openai: openAIBackendFactory, 8 | anthropic: anthropicBackendFactory, 9 | openchat: openAIBackendFactory, // can use the same API as OpenAI 10 | } 11 | 12 | export function createBackend(config: BackendConfiguration, model: string): Backend { 13 | let backendFactory = backends[config.api]; 14 | 15 | if (!backendFactory) { 16 | console.warn(`No matching backend factory found for api type '${config.api}', using OpenAI API backend instead`); 17 | backendFactory = backends.openai; 18 | } 19 | 20 | return backendFactory.createBackend(config, model); 21 | } 22 | 
-------------------------------------------------------------------------------- /src/lib/backend/OpenAI.ts: -------------------------------------------------------------------------------- 1 | import type { BackendConfiguration } from '$lib/stores/schema'; 2 | import type { Backend, Message } from '$lib/backend/types'; 3 | import type { ConversationStore } from "$lib/stores/schema"; 4 | import {get} from "svelte/store"; 5 | import type { BackendFactory } from "./types"; 6 | 7 | export const openAIBackendFactory: BackendFactory = { 8 | createBackend 9 | }; 10 | 11 | export function createBackend(configuration: BackendConfiguration, model: string): Backend { 12 | const temperature = 0.7; 13 | 14 | function request(payload: any) { 15 | let baseUrl = configuration.url; 16 | if(baseUrl.startsWith("http://0.0.0.0")){ 17 | baseUrl = ""; 18 | } 19 | return fetch(`${baseUrl}/chat/completions`, { 20 | method: 'POST', 21 | body: JSON.stringify(payload), 22 | headers: { 23 | 'Content-Type': 'application/json', 24 | Authorization: `Bearer ${configuration.token}` 25 | } 26 | }); 27 | } 28 | 29 | async function sendMessage(history: Message[]): Promise { 30 | if (model.startsWith('o1') || model.startsWith('o3')) { 31 | const payload = { 32 | model: model, 33 | reasoning_effort: 'medium', 34 | response_format: { 35 | type: 'text' 36 | }, 37 | messages: history 38 | }; 39 | const response = await request(payload); 40 | 41 | const out = await response.json(); 42 | return out.choices[0].message; 43 | } else { 44 | const payload = { 45 | model: model, 46 | temperature: temperature, 47 | messages: history 48 | }; 49 | const response = await request(payload); 50 | 51 | const out = await response.json(); 52 | return out.choices[0].message; 53 | } 54 | } 55 | 56 | async function sendMessageAndStream( 57 | history: Message[], 58 | onMessage: (message: string, done: boolean) => Promise 59 | ) { 60 | let payload = null; 61 | 62 | if (model.startsWith('o1') || model.startsWith('o3')) { 63 | 
payload = { 64 | model: model, 65 | reasoning_effort: 'medium', 66 | response_format: { 67 | type: 'text' 68 | }, 69 | messages: history, 70 | stream: true 71 | }; 72 | } else { 73 | payload = { 74 | model: model, 75 | temperature: temperature, 76 | messages: history, 77 | stream: true 78 | }; 79 | } 80 | 81 | const response = await request(payload); 82 | 83 | const reader = response.body?.getReader(); 84 | if (!reader) { 85 | throw new Error('Could not get reader from response body'); 86 | } 87 | 88 | const decoder = new TextDecoder('utf-8'); 89 | let out = ''; 90 | while (true) { 91 | const { done, value } = await reader.read(); 92 | if (done) { 93 | break; 94 | } 95 | const chunk = decoder.decode(value, { stream: true }); 96 | out += chunk; 97 | 98 | let eventSeparatorIndex; 99 | while ((eventSeparatorIndex = out.indexOf('\n\n')) !== -1) { 100 | const data = out.slice(0, eventSeparatorIndex); 101 | 102 | if (data.match(/^data: \[DONE\]/)) { 103 | await onMessage('', true); // send end message. 104 | return; 105 | } 106 | 107 | const event = JSON.parse(data.replace(/^data: /, '')); 108 | 109 | out = out.slice(eventSeparatorIndex + 2); 110 | 111 | if (event.choices[0].finish_reason === 'stop') { 112 | await onMessage('', true); // send end message. 113 | } else if (event.choices[0].role === 'assistant') { 114 | await onMessage('', false); // send start message. 
115 | } else { 116 | await onMessage(event.choices[0].delta.content, false); 117 | } 118 | } 119 | } 120 | } 121 | 122 | function escapeHtml(unsafeText: string) { 123 | const div = document.createElement('div'); 124 | div.textContent = unsafeText; 125 | return div.innerHTML; 126 | } 127 | 128 | function limitTitle(title: string) { 129 | const words = title.split(' '); 130 | if (words.length <= 7) { 131 | return title; 132 | } 133 | return words.slice(0, 7).join(' ') + '...'; 134 | } 135 | 136 | async function renameConversationWithSummary(currentConversation: ConversationStore) { 137 | const summarizeMessage = 'Using the same language, in at most 7 words summarize the conversation between assistant and user before this message to serve as a title. Respond with ONLY the summary, no other commentary or acknowledgements of this instruction.' 138 | 139 | const systemMessage: Message = { 140 | role: 'system', 141 | content: summarizeMessage, 142 | }; 143 | 144 | // system prompt alone might not be enough, specially not with other OpenAI-API-compatible models... 145 | // therefore we just add a "user" message that is the same as the system prompt, to "trigger" the model 146 | // to write an "assistant" message to the users request. 
147 | const userMessage: Message = { 148 | role: 'user', 149 | content: summarizeMessage, 150 | }; 151 | 152 | const history = get(currentConversation.history); 153 | const filteredHistory = history.filter((msg) => msg.role === 'user' || msg.role === 'assistant'); 154 | 155 | const response = await sendMessage([...filteredHistory, systemMessage, userMessage]); 156 | const newTitle = limitTitle(escapeHtml(response.content.replace(/[\s\S]*?<\/think>/g, ''))); 157 | if (newTitle) { 158 | await currentConversation.rename(newTitle); 159 | } 160 | } 161 | 162 | 163 | return { 164 | get api() { 165 | return configuration.api; 166 | }, 167 | get name() { 168 | return configuration.name; 169 | }, 170 | get model() { 171 | return model; 172 | }, 173 | get temperature() { 174 | return temperature; 175 | }, 176 | 177 | sendMessage, 178 | sendMessageAndStream, 179 | renameConversationWithSummary 180 | }; 181 | } 182 | -------------------------------------------------------------------------------- /src/lib/backend/types.ts: -------------------------------------------------------------------------------- 1 | import type { BackendConfiguration } from "$lib/stores/schema"; 2 | import type { ConversationStore } from "$lib/stores/schema"; 3 | 4 | export interface TextContent { 5 | type: 'text'; 6 | text: string; 7 | } 8 | 9 | export interface ImageContent { 10 | type: 'image_url'; 11 | image_url: { 12 | url: string; // url or f"data:image/jpeg;base64,{base64_image}" 13 | }; 14 | } 15 | 16 | export type ContentItem = TextContent | ImageContent; 17 | 18 | export interface Message { 19 | role: string; 20 | content: string | ContentItem[]; 21 | name?: string; 22 | } 23 | 24 | export interface Backend { 25 | readonly api: string; 26 | readonly name: string; 27 | readonly model: string; 28 | readonly temperature: number; 29 | 30 | sendMessage(history: Message[]): Promise; 31 | sendMessageAndStream( 32 | history: Message[], 33 | onMessage: (message: string, done: boolean) => Promise 34 | 
): Promise; 35 | renameConversationWithSummary(currentConversation: ConversationStore): Promise; 36 | } 37 | 38 | export interface BackendFactory { 39 | createBackend(configuration: BackendConfiguration, model: string): Backend; 40 | } 41 | -------------------------------------------------------------------------------- /src/lib/components/CodeRenderer.svelte: -------------------------------------------------------------------------------- 1 | 28 | 29 | {#if loadedLangs[actualLang]} 30 | 31 | {:else} 32 | 33 | {/if} 34 | 35 | -------------------------------------------------------------------------------- /src/lib/components/ConversationGraph.svelte: -------------------------------------------------------------------------------- 1 | 79 | 80 |
81 |
82 |

Chat Graph

83 |
84 | 89 | {#each $currentConversation?.graph as link} 90 | {#if link.from} 91 | 100 | {/if} 101 | {/each} 102 | {#each Object.entries(messagePositions) as [messageId, position]} 103 | {@const msg = $currentConversation.messages[messageId]} 104 | (hoveredMessageId = messageId)} 109 | on:mouseout={() => (hoveredMessageId = null)} 110 | on:click={() => currentConversation.selectMessageThreadThrough(msg)} 111 | > 112 | 113 | 114 | {#if msg.message.role === 'user'} 115 | 124 | 125 | 126 | 127 | {/if} 128 | {#if msg.message.role === 'assistant'} 129 | 138 | 139 | 140 | 141 | 142 | 143 | {/if} 144 | {#if msg.message.role === 'system'} 145 | 154 | 157 | 158 | 159 | {/if} 160 | 161 | {/each} 162 | 163 |
164 |
165 |
169 |

 

170 |
171 | {#if hoveredMessageId} 172 | 173 | {:else} 174 | Hover over a node to preview its message. 175 | {/if} 176 |
177 |
178 |
179 | -------------------------------------------------------------------------------- /src/lib/components/EditableString.svelte: -------------------------------------------------------------------------------- 1 | 13 | 14 | {#if isEditing} 15 |
16 | 17 | 26 |
27 | {:else} 28 |
29 |
{value}
30 | 35 |
36 | {/if} 37 | -------------------------------------------------------------------------------- /src/lib/components/Folder.svelte: -------------------------------------------------------------------------------- 1 | 59 | 60 | 129 | 130 | 140 | -------------------------------------------------------------------------------- /src/lib/components/Menu.svelte: -------------------------------------------------------------------------------- 1 | 16 | 17 | 27 | -------------------------------------------------------------------------------- /src/lib/components/MessageCard.svelte: -------------------------------------------------------------------------------- 1 | 83 | 84 | {#if placeholder} 85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
98 |
99 |
100 |
101 | {:else if isEditing} 102 |
106 |