├── .prettierignore ├── .npmrc ├── .eslintignore ├── src ├── types │ └── sortablejs.d.ts ├── i18n │ ├── index.ts │ ├── zh.json │ ├── en.json │ ├── ru.json │ └── de.json ├── utils.ts ├── processors │ └── pdf.ts ├── indexedDB.ts ├── ui │ ├── actionPaletteHistory.ts │ └── actionPalettePlugin.ts ├── interfaces.ts ├── defaultSettings.ts ├── logger.ts ├── spinnerPlugin.ts ├── rag.ts ├── LocalGPTSettingTab.ts └── main.ts ├── tests ├── __mocks__ │ ├── pdf.worker.js │ ├── pdfjs-dist.ts │ ├── electron.ts │ ├── logger.ts │ └── obsidian.ts ├── setupTests.ts ├── LocalGPT.test.ts ├── i18n.test.ts ├── Utils.test.ts ├── indexedDB.test.ts ├── actionPaletteHistory.test.ts ├── ActionPalette.vitest.ts └── RAG.test.ts ├── svelte.config.cjs ├── .editorconfig ├── manifest.json ├── .gitignore ├── copy-files-plugin.mjs ├── version-bump.mjs ├── tsconfig.json ├── .github ├── workflows │ └── main.yml └── ISSUE_TEMPLATE │ └── bug_report.md ├── versions.json ├── LICENSE ├── vitest.config.mts ├── .eslintrc ├── esbuild.config.mjs ├── package.json ├── docs └── prompt-templating.md ├── README.md └── styles.css /.prettierignore: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.npmrc: -------------------------------------------------------------------------------- 1 | tag-version-prefix="" -------------------------------------------------------------------------------- /.eslintignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | 3 | main.js 4 | -------------------------------------------------------------------------------- /src/types/sortablejs.d.ts: -------------------------------------------------------------------------------- 1 | declare module "sortablejs"; 2 | -------------------------------------------------------------------------------- /tests/__mocks__/pdf.worker.js: 
-------------------------------------------------------------------------------- 1 | module.exports = { 2 | WorkerMessageHandler: {} 3 | }; -------------------------------------------------------------------------------- /tests/__mocks__/pdfjs-dist.ts: -------------------------------------------------------------------------------- 1 | import { vi } from "vitest"; 2 | 3 | export const getDocument = vi.fn(); 4 | -------------------------------------------------------------------------------- /svelte.config.cjs: -------------------------------------------------------------------------------- 1 | const sveltePreprocess = require("svelte-preprocess"); 2 | 3 | module.exports = { 4 | preprocess: sveltePreprocess(), 5 | }; 6 | -------------------------------------------------------------------------------- /tests/__mocks__/electron.ts: -------------------------------------------------------------------------------- 1 | import { vi } from "vitest"; 2 | 3 | export const remote = { 4 | net: { 5 | request: vi.fn(), 6 | }, 7 | }; 8 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # top-most EditorConfig file 2 | root = true 3 | 4 | [*] 5 | charset = utf-8 6 | end_of_line = lf 7 | insert_final_newline = true 8 | indent_style = tab 9 | indent_size = 4 10 | tab_width = 4 11 | -------------------------------------------------------------------------------- /tests/__mocks__/logger.ts: -------------------------------------------------------------------------------- 1 | import { vi } from "vitest"; 2 | 3 | export const logger = { 4 | debug: vi.fn(), 5 | info: vi.fn(), 6 | warn: vi.fn(), 7 | error: vi.fn(), 8 | table: vi.fn(), 9 | time: vi.fn(), 10 | timeEnd: vi.fn(), 11 | }; 12 | -------------------------------------------------------------------------------- /manifest.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "id": "local-gpt", 3 | "name": "Local GPT", 4 | "version": "3.6.0", 5 | "minAppVersion": "0.15.0", 6 | "description": "Local GPT assistance for maximum privacy and offline access", 7 | "author": "Pavel Frankov", 8 | "authorUrl": "https://github.com/pfrankov", 9 | "isDesktopOnly": false, 10 | "css": [ 11 | "styles.css" 12 | ] 13 | } -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # vscode 2 | .vscode 3 | 4 | # Intellij 5 | *.iml 6 | .idea 7 | 8 | # npm 9 | node_modules 10 | 11 | # Don't include the compiled main.js file in the repo. 12 | # They should be uploaded to GitHub releases instead. 13 | main.js 14 | 15 | # Exclude sourcemaps 16 | *.map 17 | 18 | # obsidian 19 | data.json 20 | 21 | # Exclude macOS Finder (System Explorer) View States 22 | .DS_Store 23 | 24 | dist 25 | obsidian-ai-providers 26 | AGENTS.md 27 | .kilocodemodes 28 | coverage -------------------------------------------------------------------------------- /copy-files-plugin.mjs: -------------------------------------------------------------------------------- 1 | import { copyFileSync, mkdirSync, existsSync } from 'fs'; 2 | 3 | export const copyFilesPlugin = (files = [ 4 | { from: './styles.css', to: './dist/styles.css' }, 5 | { from: './manifest.json', to: './dist/manifest.json' } 6 | ]) => ({ 7 | name: 'copy-files', 8 | setup(build) { 9 | build.onEnd(() => { 10 | if (!existsSync('./dist')) { 11 | mkdirSync('./dist'); 12 | } 13 | 14 | for (const file of files) { 15 | copyFileSync(file.from, file.to); 16 | } 17 | }); 18 | }, 19 | }); -------------------------------------------------------------------------------- /version-bump.mjs: -------------------------------------------------------------------------------- 1 | import { readFileSync, writeFileSync } from "fs"; 2 | 3 | const 
targetVersion = process.env.npm_package_version; 4 | 5 | // read minAppVersion from manifest.json and bump version to target version 6 | let manifest = JSON.parse(readFileSync("manifest.json", "utf8")); 7 | const { minAppVersion } = manifest; 8 | manifest.version = targetVersion; 9 | writeFileSync("manifest.json", JSON.stringify(manifest, null, "\t")); 10 | 11 | // update versions.json with target version and minAppVersion from manifest.json 12 | let versions = JSON.parse(readFileSync("versions.json", "utf8")); 13 | versions[targetVersion] = minAppVersion; 14 | writeFileSync("versions.json", JSON.stringify(versions, null, "\t")); 15 | -------------------------------------------------------------------------------- /tests/setupTests.ts: -------------------------------------------------------------------------------- 1 | import { vi } from "vitest"; 2 | import "@testing-library/jest-dom/vitest"; 3 | 4 | vi.mock("@obsidian-ai-providers/sdk", () => ({ 5 | initAI: vi.fn(async (_app?: unknown, _plugin?: unknown, onLoad?: () => void | Promise) => { 6 | if (onLoad) { 7 | await onLoad(); 8 | } 9 | }), 10 | waitForAI: vi.fn(() => 11 | Promise.resolve({ 12 | promise: Promise.resolve({ 13 | providers: [], 14 | execute: vi.fn(), 15 | retrieve: vi.fn(), 16 | fetchModels: vi.fn(), 17 | }), 18 | }), 19 | ), 20 | IAIProvider: class {}, 21 | IAIProvidersService: class {}, 22 | })); 23 | 24 | // Ensure global AbortController exists for jsdom environments 25 | if (!(globalThis as any).AbortController) { 26 | (globalThis as any).AbortController = AbortController; 27 | } 28 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "baseUrl": "./src", 4 | "outDir": "./dist", 5 | "inlineSourceMap": true, 6 | "inlineSources": true, 7 | "module": "ESNext", 8 | "target": "ES6", 9 | "allowJs": true, 10 | "noImplicitAny": true, 11 | 
"moduleResolution": "node", 12 | "skipLibCheck": true, 13 | "importHelpers": true, 14 | "isolatedModules": true, 15 | "strictNullChecks": true, 16 | "esModuleInterop": true, 17 | "resolveJsonModule": true, 18 | "lib": [ 19 | "DOM", 20 | "ES2020", 21 | "ES2021.WeakRef" 22 | ], 23 | "types": [ 24 | "svelte", 25 | "node", 26 | "vitest/globals", 27 | "@testing-library/jest-dom" 28 | ] 29 | }, 30 | "include": ["src/**/*.ts", "src/**/*.svelte", "src/types/**/*.d.ts"] 31 | } 32 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Release Local GPT plugin 2 | 3 | on: 4 | push: 5 | tags: 6 | - "*" 7 | 8 | jobs: 9 | build: 10 | runs-on: ubuntu-latest 11 | permissions: 12 | contents: write 13 | steps: 14 | - uses: actions/checkout@v4 15 | - uses: actions/setup-node@v4 16 | with: 17 | node-version: 20 18 | 19 | - name: Build plugin 20 | run: | 21 | npm install 22 | npm run build 23 | 24 | - name: Create release 25 | env: 26 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 27 | run: | 28 | tag="${GITHUB_REF#refs/tags/}" 29 | 30 | gh release create "$tag" \ 31 | --title="$tag" \ 32 | --draft \ 33 | dist/main.js dist/manifest.json dist/styles.css 34 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 
22 | 23 | **Screenshots or videos** 24 | If applicable, add screenshots or videos to help explain your problem. 25 | 26 | **How did you verify that the plugin is the problem?** 27 | Please double check settings, limits, work in other Obsidian plugins. 28 | 29 | **Desktop (please complete the following information):** 30 | - [ ] Desktop 31 | - [ ] Mobile 32 | 33 | **Additional context** 34 | Add any other context about the problem here. 35 | -------------------------------------------------------------------------------- /versions.json: -------------------------------------------------------------------------------- 1 | { 2 | "1.0.0": "0.15.0", 3 | "1.1.0": "0.15.0", 4 | "1.1.1": "0.15.0", 5 | "1.2.0": "0.15.0", 6 | "1.3.0": "0.15.0", 7 | "1.4.0": "0.15.0", 8 | "1.4.1": "0.15.0", 9 | "1.4.2": "0.15.0", 10 | "1.5.0": "0.15.0", 11 | "1.6.0": "0.15.0", 12 | "1.6.1": "0.15.0", 13 | "1.6.2": "0.15.0", 14 | "1.6.3": "0.15.0", 15 | "1.7.0": "0.15.0", 16 | "1.8.0": "0.15.0", 17 | "1.8.1": "0.15.0", 18 | "1.9.0": "0.15.0", 19 | "1.10.0": "0.15.0", 20 | "1.11.0": "0.15.0", 21 | "1.12.0": "0.15.0", 22 | "1.13.0": "0.15.0", 23 | "1.13.1": "0.15.0", 24 | "1.14.0": "0.15.0", 25 | "1.14.1": "0.15.0", 26 | "1.14.2": "0.15.0", 27 | "1.14.3": "0.15.0", 28 | "1.14.4": "0.15.0", 29 | "1.14.5": "0.15.0", 30 | "1.14.6": "0.15.0", 31 | "1.14.7": "0.15.0", 32 | "2.0.0": "0.15.0", 33 | "2.0.1": "0.15.0", 34 | "2.1.0": "0.15.0", 35 | "3.0.0": "0.15.0", 36 | "3.0.1": "0.15.0", 37 | "3.1.0": "0.15.0", 38 | "3.2.0": "0.15.0", 39 | "3.2.1": "0.15.0", 40 | "3.2.2": "0.15.0", 41 | "3.3.0": "0.15.0", 42 | "3.4.0": "0.15.0", 43 | "3.5.0": "0.15.0", 44 | "3.6.0": "0.15.0" 45 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Pavel Frankov 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this 
software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 

--------------------------------------------------------------------------------
/src/i18n/index.ts:
--------------------------------------------------------------------------------

import de from "./de.json";
import en from "./en.json";
import ru from "./ru.json";
import zh from "./zh.json";
import { logger } from "../logger";

const locales: { [key: string]: any } = {
	en,
	ru,
	de,
	zh,
};

/**
 * Minimal i18n helper. Resolves dot-separated keys against the locale
 * stored in `localStorage.language`, falling back to English and,
 * failing that, to the key itself.
 */
export class I18n {
	/**
	 * Translates `key` (e.g. "commands.actionPalette.placeholder").
	 * `params` are interpolated into `{{name}}` placeholders.
	 */
	static t(key: string, params?: { [key: string]: string }): string {
		const locale = window.localStorage.getItem("language") || "en";
		const keys = key.split(".");

		let translations = locales[locale] || locales["en"];

		for (const k of keys) {
			if (translations?.[k] === undefined) {
				logger.warn(`Translation missing: ${key}`);
				let engValue: any = locales["en"];
				for (const ek of keys) {
					engValue = engValue?.[ek];
				}
				// Bug fix: the English-fallback path previously skipped
				// param interpolation entirely.
				return interpolate(engValue || key, params);
			}
			translations = translations[k];
		}

		return interpolate(translations, params);
	}
}

/** Replaces EVERY `{{name}}` placeholder in `text` with its param value. */
function interpolate(text: string, params?: { [key: string]: string }): string {
	let result = text;
	if (params) {
		Object.entries(params).forEach(([name, value]) => {
			// Bug fix: String.replace with a string pattern only replaces
			// the first match; split/join replaces all occurrences
			// (replaceAll needs ES2021 lib, which tsconfig doesn't enable).
			result = result.split(`{{${name}}}`).join(value);
		});
	}
	return result;
}

--------------------------------------------------------------------------------
/tests/__mocks__/obsidian.ts:
--------------------------------------------------------------------------------

import { vi } from "vitest";

export const Plugin = vi.fn();
export const Notice = vi.fn();
export const Menu = vi.fn();
export const Editor = vi.fn();
export const App = vi.fn();
export const PluginManifest = vi.fn();
export class TFile {
	path: string = 'mock/path.md';
	extension: string = 'md';
	stat: { mtime: number } = { mtime: 123456789 };
	basename: string = 'mock';

	constructor() {}
} 17 | export const Vault = vi.fn().mockImplementation(() => ({ 18 | cachedRead: vi.fn().mockImplementation((file) => { 19 | if (file.extension === 'unsupported') { 20 | throw new Error('Unsupported file type'); 21 | } 22 | return Promise.resolve('Mocked content'); 23 | }), 24 | getAbstractFileByPath: vi.fn().mockReturnValue(new TFile()), 25 | readBinary: vi.fn().mockResolvedValue(new ArrayBuffer(8)) 26 | })); 27 | export const MetadataCache = vi.fn().mockImplementation(() => ({ 28 | getFirstLinkpathDest: vi.fn().mockReturnValue(new TFile()), 29 | resolvedLinks: { 'mock/backlink.md': { 'mock/path.md': 1 } } 30 | })); 31 | export const requestUrl = vi.fn(); 32 | export const PluginSettingTab = vi.fn().mockImplementation(() => { 33 | return { 34 | display: vi.fn(), 35 | hide: vi.fn(), 36 | }; 37 | }); 38 | -------------------------------------------------------------------------------- /vitest.config.mts: -------------------------------------------------------------------------------- 1 | import { defineConfig } from "vitest/config"; 2 | import { svelte } from "@sveltejs/vite-plugin-svelte"; 3 | import sveltePreprocess from "svelte-preprocess"; 4 | import path from "path"; 5 | 6 | export default defineConfig({ 7 | resolve: { 8 | alias: { 9 | obsidian: path.resolve(__dirname, "tests/__mocks__/obsidian.ts"), 10 | electron: path.resolve(__dirname, "tests/__mocks__/electron.ts"), 11 | defaultSettings: path.resolve( 12 | __dirname, 13 | "src/defaultSettings.ts", 14 | ), 15 | "../logger.js": path.resolve(__dirname, "src/logger.ts"), 16 | "./pdf.worker.js": path.resolve( 17 | __dirname, 18 | "tests/__mocks__/pdf.worker.js", 19 | ), 20 | }, 21 | }, 22 | plugins: [ 23 | svelte({ 24 | preprocess: sveltePreprocess(), 25 | }), 26 | ], 27 | test: { 28 | environment: "jsdom", 29 | include: ["tests/**/*.{test,vitest}.ts"], 30 | setupFiles: ["./tests/setupTests.ts"], 31 | coverage: { 32 | provider: "v8", 33 | all: true, 34 | reporter: ["text", "lcov"], 35 | include: [ 36 | 
"src/rag.ts", 37 | "src/utils.ts", 38 | "src/ui/actionPaletteHistory.ts", 39 | "src/indexedDB.ts", 40 | "src/i18n/index.ts", 41 | "src/defaultSettings.ts", 42 | ], 43 | exclude: ["**/*.d.ts"], 44 | lines: 100, 45 | functions: 100, 46 | branches: 100, 47 | statements: 100, 48 | }, 49 | }, 50 | }); 51 | -------------------------------------------------------------------------------- /.eslintrc: -------------------------------------------------------------------------------- 1 | { 2 | "root": true, 3 | "parser": "@typescript-eslint/parser", 4 | "env": { "node": true }, 5 | "plugins": [ 6 | "@typescript-eslint", 7 | "sonarjs" 8 | ], 9 | "extends": [ 10 | "eslint:recommended", 11 | "plugin:@typescript-eslint/eslint-recommended", 12 | "plugin:@typescript-eslint/recommended", 13 | "plugin:sonarjs/recommended" 14 | ], 15 | "parserOptions": { 16 | "sourceType": "module" 17 | }, 18 | "overrides": [ 19 | { 20 | "files": ["tests/**/*.{ts,tsx}"], 21 | "env": { "jest": true, "browser": true }, 22 | "globals": { "vi": "readonly" } 23 | } 24 | ], 25 | "rules": { 26 | "no-unused-vars": "off", 27 | "@typescript-eslint/no-unused-vars": ["error", { "args": "none" }], 28 | "@typescript-eslint/ban-ts-comment": "off", 29 | "no-prototype-builtins": "off", 30 | "@typescript-eslint/no-empty-function": "off", 31 | "@typescript-eslint/no-explicit-any": "off", 32 | "@typescript-eslint/no-inferrable-types": "off", 33 | "@typescript-eslint/no-non-null-assertion": "off", 34 | "@typescript-eslint/no-var-requires": "off", 35 | "@typescript-eslint/triple-slash-reference": "off", 36 | "complexity": ["error", 10], 37 | "sonarjs/cognitive-complexity": ["error", 10], 38 | "sonarjs/no-duplicate-string": "off", 39 | "sonarjs/no-collapsible-if": "off" 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /tests/LocalGPT.test.ts: -------------------------------------------------------------------------------- 1 | import { beforeEach, describe, expect, it, vi } 
from "vitest"; 2 | import LocalGPT from "../src/main"; 3 | import { App, PluginManifest } from "obsidian"; 4 | 5 | vi.mock("obsidian"); 6 | vi.mock("../src/spinnerPlugin", () => ({ 7 | spinnerPlugin: {}, 8 | })); 9 | vi.mock("../src/logger"); 10 | vi.mock("../src/ui/actionPalettePlugin", () => ({ 11 | actionPalettePlugin: [], 12 | showActionPalette: vi.fn(), 13 | hideActionPalette: vi.fn(), 14 | })); 15 | 16 | describe("LocalGPT", () => { 17 | let plugin: LocalGPT; 18 | 19 | beforeEach(() => { 20 | vi.clearAllMocks(); 21 | const app = { workspace: { updateOptions: vi.fn() } } as unknown as App; 22 | plugin = new LocalGPT(app, {} as PluginManifest); 23 | }); 24 | 25 | it("processText strips thinking tags and the selected text", () => { 26 | const selection = "{{SELECTION}}"; 27 | const result = plugin.processText( 28 | `internalFinal ${selection}`, 29 | selection, 30 | ); 31 | 32 | expect(result).toBe("\nFinal\n"); 33 | }); 34 | 35 | it("runFreeform forwards system prompt to executeAction", async () => { 36 | const executeAction = vi 37 | .spyOn(plugin as any, "executeAction") 38 | .mockResolvedValue(undefined); 39 | const editor = {} as any; 40 | 41 | await (plugin as any).runFreeform( 42 | editor, 43 | "user input", 44 | ["file.md"], 45 | "provider-1", 46 | 0.7, 47 | "system prompt", 48 | ); 49 | 50 | expect(executeAction).toHaveBeenCalledTimes(1); 51 | expect(executeAction).toHaveBeenCalledWith( 52 | expect.objectContaining({ 53 | prompt: "user input", 54 | system: "system prompt", 55 | replace: false, 56 | selectedFiles: ["file.md"], 57 | overrideProviderId: "provider-1", 58 | temperature: 0.7, 59 | }), 60 | editor, 61 | ); 62 | }); 63 | }); 64 | -------------------------------------------------------------------------------- /src/utils.ts: -------------------------------------------------------------------------------- 1 | import { 2 | SELECTION_KEYWORD, 3 | CONTEXT_KEYWORD, 4 | CONTEXT_CONDITION_START, 5 | CONTEXT_CONDITION_END, 6 | } from "./defaultSettings"; 

/**
 * Builds the final prompt sent to the model from the action's prompt
 * template, the editor selection, and the retrieved RAG context.
 *
 * Pipeline: merge selection -> inject context -> resolve conditional
 * context block ({{=CONTEXT_START=}} ... {{=CONTEXT_END=}} style markers).
 */
export function preparePrompt(
	prompt: string = "",
	selectedText: string,
	context: string,
) {
	const withSelection = mergeSelection(prompt, selectedText);
	const withContext = injectContext(withSelection, context);
	return resolveConditionalContext(withContext, context);
}

/**
 * Substitutes the selection placeholder, or appends the selection after
 * the prompt (blank-line separated) when no placeholder is present.
 */
function mergeSelection(prompt: string, selectedText: string): string {
	if (prompt.includes(SELECTION_KEYWORD)) {
		return prompt.replace(SELECTION_KEYWORD, selectedText || "");
	}

	return [prompt, selectedText].filter(Boolean).join("\n\n");
}

/**
 * Substitutes the context placeholder, or appends a "Context:" section
 * when context is non-blank and no placeholder is present.
 */
function injectContext(prompt: string, context: string): string {
	if (prompt.includes(CONTEXT_KEYWORD)) {
		return prompt.replace(CONTEXT_KEYWORD, context || "");
	}

	if (context.trim()) {
		return [prompt, "Context:\n" + context].filter(Boolean).join("\n\n");
	}

	return prompt;
}

/**
 * Resolves an optional conditional block delimited by
 * CONTEXT_CONDITION_START / CONTEXT_CONDITION_END: the block (and one
 * surrounding character on each side, normally the newlines) is kept
 * when context is non-blank and removed entirely otherwise.
 */
function resolveConditionalContext(prompt: string, context: string): string {
	const startIdx = prompt.indexOf(CONTEXT_CONDITION_START);
	const endIdx = prompt.indexOf(CONTEXT_CONDITION_END);

	if (startIdx !== -1 && endIdx !== -1 && startIdx < endIdx) {
		let contextBlock = prompt.substring(
			startIdx + CONTEXT_CONDITION_START.length + 1,
			endIdx,
		);
		if (!context.trim()) {
			contextBlock = "";
		}
		// Bug fix: the previous code computed `start = indexOf(...) - 1`
		// and guarded on `start !== -1`, so a condition marker at
		// position 0 of the prompt was never resolved and the raw
		// markers leaked into the model prompt. Clamp the prefix bound
		// instead of pre-subtracting.
		const prefixEnd = Math.max(startIdx - 1, 0);
		prompt =
			prompt.substring(0, prefixEnd) +
			contextBlock +
			prompt.substring(endIdx + CONTEXT_CONDITION_END.length + 1);
	}

	return prompt;
}

--------------------------------------------------------------------------------
/src/processors/pdf.ts:
--------------------------------------------------------------------------------

import { logger } from "../logger.js";
import * as pdfjs from "pdfjs-dist";

// @ts-ignore
import WorkerMessageHandler from
"./pdf.worker.js";

let isWorkerInitialized = false;

/** Lazily wires the inlined pdf.js worker into GlobalWorkerOptions (once per session). */
function initializeWorker(): void {
	if (!isWorkerInitialized) {
		pdfjs.GlobalWorkerOptions.workerPort = new WorkerMessageHandler();
		isWorkerInitialized = true;
	}
}

/**
 * Extracts plain text from a PDF document.
 *
 * All pages are fetched in parallel and joined with blank lines.
 *
 * @param arrayBuffer Raw PDF bytes (e.g. from `vault.readBinary`).
 * @returns The document's text content.
 * @throws Error when pdf.js fails to parse or read the document.
 */
export async function extractTextFromPDF(
	arrayBuffer: ArrayBuffer,
): Promise<string> {
	logger.time("Extracting text from PDF");

	try {
		initializeWorker();

		const pdf = await pdfjs.getDocument({ data: arrayBuffer }).promise;
		const numPages = pdf.numPages;

		const textContents = await Promise.all(
			Array.from({ length: numPages }, (_, i) => getPageText(pdf, i + 1)),
		);

		const fullText = textContents.join("\n\n");

		logger.table("Extracted text from PDF", {
			textLength: fullText.length,
		});
		logger.timeEnd("Extracting text from PDF");
		return fullText;
	} catch (error) {
		logger.error("Error extracting text from PDF", { error });
		// Bug fix: `error` is `unknown` — pdf.js can reject with
		// non-Error values, so reading `.message` directly could yield
		// "undefined" in the user-facing message.
		const message =
			error instanceof Error ? error.message : String(error);
		throw new Error(`Failed to extract text from PDF: ${message}`);
	}
}

/**
 * Concatenates one page's text items, inserting a newline whenever the
 * item's vertical position (transform[5]) changes — a heuristic
 * reconstruction of line breaks from the PDF text layer.
 */
async function getPageText(
	pdf: pdfjs.PDFDocumentProxy,
	pageNum: number,
): Promise<string> {
	const page = await pdf.getPage(pageNum);
	const content = await page.getTextContent();
	let lastY;
	const textItems = [];
	for (const item of content.items) {
		if ("str" in item) {
			// NOTE(review): `!lastY` also matches y === 0, so an item at
			// the page origin is treated as "same line" — presumed
			// harmless in practice, kept as-is.
			if (lastY === item.transform[5] || !lastY) {
				textItems.push(item.str);
			} else {
				textItems.push(`\n${item.str}`);
			}
			lastY = item.transform[5];
		}
	}
	return textItems.join("") + "\n\n";
}

--------------------------------------------------------------------------------
/esbuild.config.mjs:
--------------------------------------------------------------------------------

import esbuild from "esbuild";
import process from "process";
import builtins from "builtin-modules";
import inlineWorkerPlugin from
"esbuild-plugin-inline-worker"; 5 | import sveltePlugin from "esbuild-svelte"; 6 | import sveltePreprocess from "svelte-preprocess"; 7 | import { copyFilesPlugin } from './copy-files-plugin.mjs'; 8 | 9 | const banner = 10 | `/* 11 | THIS IS A GENERATED/BUNDLED FILE BY ESBUILD 12 | if you want to view the source, please visit the github repository of this plugin 13 | */ 14 | `; 15 | 16 | const prod = (process.argv[2] === "production"); 17 | 18 | const context = await esbuild.context({ 19 | banner: { 20 | js: banner, 21 | }, 22 | entryPoints: { 23 | 'main': 'src/main.ts', 24 | 'styles': 'styles.css' 25 | }, 26 | outdir: "dist", 27 | bundle: true, 28 | external: [ 29 | "obsidian", 30 | "electron", 31 | "@codemirror/autocomplete", 32 | "@codemirror/collab", 33 | "@codemirror/commands", 34 | "@codemirror/language", 35 | "@codemirror/lint", 36 | "@codemirror/search", 37 | "@codemirror/state", 38 | "@codemirror/view", 39 | "@lezer/common", 40 | "@lezer/highlight", 41 | "@lezer/lr", 42 | ...builtins 43 | ], 44 | format: "cjs", 45 | target: "es2018", 46 | logLevel: "info", 47 | sourcemap: prod ? false : "inline", 48 | treeShaking: true, 49 | define: { 50 | "process.env.NODE_ENV": prod ? 
'"production"' : '"development"' 51 | }, 52 | loader: { 53 | ".ts": "ts", 54 | ".css": "css" 55 | }, 56 | plugins: [ 57 | sveltePlugin({ 58 | preprocess: sveltePreprocess({ 59 | typescript: true 60 | }) 61 | }), 62 | inlineWorkerPlugin({ 63 | target: 'es2018', 64 | format: 'cjs', 65 | }), 66 | copyFilesPlugin([ 67 | { from: './manifest.json', to: './dist/manifest.json' } 68 | ]) 69 | ] 70 | }); 71 | 72 | if (prod) { 73 | await context.rebuild(); 74 | process.exit(0); 75 | } else { 76 | await context.watch(); 77 | } -------------------------------------------------------------------------------- /src/indexedDB.ts: -------------------------------------------------------------------------------- 1 | import { openDB, IDBPDatabase } from "idb"; 2 | 3 | interface ContentCacheItem { 4 | mtime: number; 5 | content: string; 6 | } 7 | 8 | class FileCache { 9 | private db: IDBPDatabase | null = null; 10 | private vaultId = ""; 11 | async init(vaultId: string) { 12 | this.vaultId = vaultId; 13 | const dbName = `LocalGPTCache/${this.vaultId}`; 14 | this.db = await openDB(dbName, 3, { 15 | upgrade(db, oldVersion, newVersion) { 16 | // Version 1: embeddings store (deprecated) 17 | if (oldVersion < 1) { 18 | // Create embeddings store for old versions, but it will be removed in version 3 19 | if (!db.objectStoreNames.contains("embeddings")) { 20 | db.createObjectStore("embeddings"); 21 | } 22 | } 23 | // Version 2: content store 24 | if (oldVersion < 2) { 25 | if (!db.objectStoreNames.contains("content")) { 26 | db.createObjectStore("content"); 27 | } 28 | } 29 | // Version 3: remove embeddings store as caching moved to AI providers 30 | if (oldVersion < 3) { 31 | if (db.objectStoreNames.contains("embeddings")) { 32 | db.deleteObjectStore("embeddings"); 33 | } 34 | } 35 | }, 36 | }); 37 | } 38 | 39 | async getContent(key: string): Promise { 40 | if (!this.db) throw new Error("Database not initialized"); 41 | return this.db.get("content", key); 42 | } 43 | 44 | async 
setContent(key: string, value: ContentCacheItem): Promise { 45 | if (!this.db) throw new Error("Database not initialized"); 46 | await this.db.put("content", value, key); 47 | } 48 | 49 | async clearContent(): Promise { 50 | if (!this.db) throw new Error("Database not initialized"); 51 | await this.db.clear("content"); 52 | } 53 | 54 | async clearAll(): Promise { 55 | if (!this.db) throw new Error("Database not initialized"); 56 | await this.db.clear("content"); 57 | } 58 | } 59 | 60 | export const fileCache = new FileCache(); 61 | -------------------------------------------------------------------------------- /src/ui/actionPaletteHistory.ts: -------------------------------------------------------------------------------- 1 | const PROMPT_HISTORY_LIMIT = 50; 2 | const HISTORY_STORAGE_KEY = "local-gpt-action-palette-history"; 3 | const promptHistory: string[] = loadHistoryFromStorage(); 4 | 5 | function loadHistoryFromStorage(): string[] { 6 | if (typeof localStorage === "undefined") return []; 7 | 8 | try { 9 | const raw = localStorage.getItem(HISTORY_STORAGE_KEY); 10 | if (!raw) return []; 11 | const parsed = JSON.parse(raw); 12 | if (!Array.isArray(parsed)) return []; 13 | return parsed 14 | .filter((item): item is string => typeof item === "string") 15 | .slice(-PROMPT_HISTORY_LIMIT); 16 | } catch (error) { 17 | console.error("Failed to read Action Palette history:", error); 18 | return []; 19 | } 20 | } 21 | 22 | function persistHistory() { 23 | if (typeof localStorage === "undefined") return; 24 | try { 25 | localStorage.setItem( 26 | HISTORY_STORAGE_KEY, 27 | JSON.stringify(promptHistory.slice(-PROMPT_HISTORY_LIMIT)), 28 | ); 29 | } catch (error) { 30 | console.error("Failed to save Action Palette history:", error); 31 | } 32 | } 33 | 34 | export function addToPromptHistory(entry: string) { 35 | const normalized = entry.trim(); 36 | if (!normalized) return; 37 | 38 | const lastNormalized = promptHistory[promptHistory.length - 1]?.trim(); 39 | if 
(lastNormalized === normalized) return; 40 | 41 | promptHistory.push(entry); 42 | if (promptHistory.length > PROMPT_HISTORY_LIMIT) { 43 | promptHistory.shift(); 44 | } 45 | persistHistory(); 46 | } 47 | 48 | export function getPromptHistoryEntry(index: number): string | undefined { 49 | return promptHistory[index]; 50 | } 51 | 52 | export function getPromptHistoryLength(): number { 53 | return promptHistory.length; 54 | } 55 | 56 | export function resetPromptHistory() { 57 | promptHistory.length = 0; 58 | if (typeof localStorage === "undefined") return; 59 | try { 60 | localStorage.removeItem(HISTORY_STORAGE_KEY); 61 | } catch (error) { 62 | console.error("Failed to reset Action Palette history:", error); 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /tests/i18n.test.ts: -------------------------------------------------------------------------------- 1 | import { beforeEach, describe, expect, it, vi } from "vitest"; 2 | 3 | vi.mock("../src/logger", () => ({ 4 | logger: { 5 | warn: vi.fn(), 6 | info: vi.fn(), 7 | debug: vi.fn(), 8 | error: vi.fn(), 9 | table: vi.fn(), 10 | time: vi.fn(), 11 | timeEnd: vi.fn(), 12 | separator: vi.fn(), 13 | setLogLevel: vi.fn(), 14 | }, 15 | })); 16 | 17 | import { I18n } from "../src/i18n"; 18 | import { logger } from "../src/logger"; 19 | import en from "../src/i18n/en.json"; 20 | import ru from "../src/i18n/ru.json"; 21 | 22 | beforeEach(() => { 23 | localStorage.clear(); 24 | vi.clearAllMocks(); 25 | }); 26 | 27 | describe("I18n", () => { 28 | it("falls back to english by default", () => { 29 | localStorage.removeItem("language"); 30 | 31 | expect(I18n.t("commands.actionPalette.placeholder")).toBe( 32 | en.commands.actionPalette.placeholder, 33 | ); 34 | }); 35 | 36 | it("uses selected language when available", () => { 37 | localStorage.setItem("language", "ru"); 38 | 39 | expect(I18n.t("commands.actionPalette.placeholder")).toBe( 40 | ru.commands.actionPalette.placeholder, 41 
| ); 42 | }); 43 | 44 | it("falls back to english for unsupported languages", () => { 45 | localStorage.setItem("language", "fr"); 46 | 47 | expect(I18n.t("commands.actionPalette.placeholder")).toBe( 48 | en.commands.actionPalette.placeholder, 49 | ); 50 | expect(logger.warn).not.toHaveBeenCalled(); 51 | }); 52 | 53 | it("warns and returns key when translation is missing", () => { 54 | localStorage.setItem("language", "ru"); 55 | const missingKey = "missing.translation.key"; 56 | 57 | const result = I18n.t(missingKey); 58 | 59 | expect(result).toBe(missingKey); 60 | expect(logger.warn).toHaveBeenCalledWith( 61 | `Translation missing: ${missingKey}`, 62 | ); 63 | }); 64 | 65 | it("replaces template params", () => { 66 | localStorage.setItem("language", "en"); 67 | 68 | expect( 69 | I18n.t("notices.errorGenerating", { message: "oops" }), 70 | ).toBe("Error while generating text: oops"); 71 | }); 72 | }); 73 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "obsidian-local-gpt", 3 | "version": "3.6.0", 4 | "description": "Local GPT assistance for maximum privacy and offline access", 5 | "main": "main.js", 6 | "scripts": { 7 | "dev": "node esbuild.config.mjs", 8 | "typecheck": "tsc --noEmit && svelte-check --tsconfig tsconfig.json", 9 | "build": "npm run typecheck && node esbuild.config.mjs production", 10 | "version": "node version-bump.mjs && git add manifest.json versions.json", 11 | "format": "prettier 'styles.css' 'src/**/*.{ts,json}' --write", 12 | "lint": "eslint --ext .ts src tests", 13 | "test": "vitest run --coverage", 14 | "check": "npm run lint && npm run typecheck && npm run test", 15 | "full-check": "npm run format && npm run build && npm run test", 16 | "prepare": "npx simple-git-hooks" 17 | }, 18 | "keywords": [ 19 | "obsidian", 20 | "ollama", 21 | "ai", 22 | "plugin", 23 | "llm", 24 | "gpt" 25 | ], 26 | 
"author": "Pavel Frankov", 27 | "license": "MIT", 28 | "devDependencies": { 29 | "@sveltejs/vite-plugin-svelte": "^3.1.2", 30 | "@testing-library/jest-dom": "^6.9.1", 31 | "@types/aria-query": "^5.0.4", 32 | "@types/node": "^18.19.130", 33 | "@typescript-eslint/eslint-plugin": "5.29.0", 34 | "@typescript-eslint/parser": "5.29.0", 35 | "@vitest/coverage-v8": "^2.1.9", 36 | "builtin-modules": "3.3.0", 37 | "esbuild": "0.17.3", 38 | "esbuild-plugin-inline-worker": "^0.1.1", 39 | "esbuild-svelte": "^0.8.0", 40 | "eslint-plugin-sonarjs": "^0.25.1", 41 | "jsdom": "^27.3.0", 42 | "obsidian": "latest", 43 | "prettier": "3.7.4", 44 | "simple-git-hooks": "^2.11.0", 45 | "svelte": "^4.2.20", 46 | "svelte-check": "^3.8.6", 47 | "svelte-preprocess": "^5.1.4", 48 | "ts-node": "^10.9.2", 49 | "tslib": "2.4.0", 50 | "typescript": "4.7.4", 51 | "vite": "^5.4.21", 52 | "vitest": "^2.1.9" 53 | }, 54 | "dependencies": { 55 | "@obsidian-ai-providers/sdk": "^1.5.2", 56 | "idb": "^8.0.3", 57 | "pdfjs-dist": "4.6.82", 58 | "sortablejs": "^1.15.6" 59 | }, 60 | "simple-git-hooks": { 61 | "pre-commit": "npm run format" 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /src/interfaces.ts: -------------------------------------------------------------------------------- 1 | export interface LocalGPTSettings { 2 | aiProviders: { 3 | main: string | null; 4 | embedding: string | null; 5 | vision: string | null; 6 | }; 7 | defaults: { 8 | creativity: string; 9 | /** 10 | * Preset that controls the overall limit for context chunks in Enhanced Actions (RAG). 
11 | * Values: 'local' | 'cloud' | 'advanced' | 'max' 12 | */ 13 | contextLimit?: string; 14 | }; 15 | actions: LocalGPTAction[]; 16 | _version: number; 17 | } 18 | 19 | export interface LocalGPTAction { 20 | name: string; 21 | prompt: string; 22 | temperature?: number; 23 | system?: string; 24 | replace?: boolean; 25 | } 26 | 27 | export type { 28 | IAIDocument, 29 | IAIProvidersRetrievalResult, 30 | } from "@obsidian-ai-providers/sdk"; 31 | 32 | export interface FileReference { 33 | path: string; 34 | basename: string; 35 | extension: string; 36 | } 37 | 38 | export interface CommandReference { 39 | name: string; 40 | description: string; 41 | } 42 | 43 | export interface ProviderReference { 44 | id: string; 45 | name: string; 46 | providerName: string; 47 | providerUrl?: string; 48 | } 49 | 50 | export interface ModelReference { 51 | id: string; 52 | name: string; 53 | } 54 | 55 | export interface CreativityReference { 56 | id: string; // "", "low", "medium", "high" 57 | name: string; // localized label from settings.creativity* 58 | } 59 | 60 | export interface SystemPromptReference { 61 | name: string; 62 | system: string; 63 | } 64 | 65 | export interface TextToken { 66 | type: "text" | "file" | "command"; 67 | content: string; 68 | start: number; 69 | end: number; 70 | filePath?: string; 71 | commandName?: string; 72 | } 73 | 74 | export interface ActionPaletteSubmitEvent { 75 | text: string; 76 | selectedFiles: string[]; 77 | systemPrompt?: string; 78 | } 79 | 80 | export type GetFilesCallback = () => FileReference[]; 81 | export type GetProvidersCallback = () => Promise; 82 | export type OnProviderChangeCallback = (providerId: string) => Promise; 83 | export type GetModelsCallback = ( 84 | providerId: string, 85 | ) => Promise; 86 | export type OnModelChangeCallback = (model: string) => Promise; 87 | export type OnCreativityChangeCallback = ( 88 | creativityKey: string, 89 | ) => Promise | void; 90 | export type GetSystemPromptsCallback = () => 
SystemPromptReference[]; 91 | -------------------------------------------------------------------------------- /src/i18n/zh.json: -------------------------------------------------------------------------------- 1 | { 2 | "commands": { 3 | "showContextMenu": "显示上下文菜单", 4 | "actionPalette": { 5 | "name": "动作面板", 6 | "placeholder": "您的提示... | Enter: 发送, Esc: 取消", 7 | "changeProvider": "切换 AI 提供商", 8 | "changeModel": "切换模型", 9 | "changeCreativity": "更改创造力", 10 | "changeSystemPrompt": "更改系统提示", 11 | "unknownModel": "未知模型" 12 | } 13 | }, 14 | "statusBar": { 15 | "enhancing": "✨ 增强中", 16 | "enhancingWithProgress": "✨ 增强中 {{percent}}%" 17 | }, 18 | "notices": { 19 | "errorGenerating": "生成文本时出错: {{message}}", 20 | "errorProcessingRag": "处理相关文档时出错: {{message}}。将继续使用原始文本。", 21 | "importantUpdate": "️🚨 重要提示!请更新 Local GPT 设置!", 22 | "newVersion": "⬆️ Local GPT: 新版本可用", 23 | "actionNameRequired": "请输入操作名称。", 24 | "actionNameExists": "名为\"{{name}}\"的操作已存在。", 25 | "actionRewritten": "已重写操作\"{{name}}\"", 26 | "actionAdded": "已添加操作\"{{name}}\"", 27 | "copied": "已复制" 28 | }, 29 | "settings": { 30 | "mainProvider": "主要AI提供商", 31 | "embeddingProvider": "嵌入AI提供商", 32 | "embeddingProviderDesc": "可选。用于✨增强操作。", 33 | "visionProvider": "视觉AI提供商", 34 | "visionProviderDesc": "可选。此选项用于图片。如果未设置,将使用主要的AI提供商。", 35 | "creativity": "创造力", 36 | "creativityNone": "⚪ 无", 37 | "creativityLow": "️💡 低", 38 | "creativityMedium": "🎨 中", 39 | "creativityHigh": "🚀 高", 40 | "actions": "操作", 41 | "quickAdd": "快速添加", 42 | "quickAddPlaceholder": "粘贴操作", 43 | "quickAddDesc": "您可以分享最佳的提示集,或从社区获取一个。
重要提示: 如果您已经有一个同名操作,它将被覆盖。", 44 | "addNewManually": "手动添加", 45 | "actionName": "操作名称", 46 | "actionNamePlaceholder": "总结选择", 47 | "systemPrompt": "系统提示", 48 | "systemPromptDesc": "可选", 49 | "systemPromptPlaceholder": "你是一个乐于助人的助手。", 50 | "prompt": "提示", 51 | "promptDesc": "如果您想自定义
您的结果提示,
请阅读提示模板", 52 | "replaceSelected": "替换选定的文本", 53 | "replaceSelectedDesc": "如果选中,突出显示的文本将被模型的响应替换。", 54 | "remove": "删除", 55 | "close": "关闭", 56 | "save": "保存", 57 | "actionsList": "操作列表", 58 | "changeOrder": "更改顺序", 59 | "done": "完成", 60 | "advancedSettings": "高级设置", 61 | "advancedSettingsDesc": "✨ 增强操作(RAG)、重置所有操作", 62 | "enhancedActions": "增强操作", 63 | "enhancedActionsLabel": "RAG 上下文", 64 | "enhancedActionsDesc": "更多上下文可提升强大模型的答案质量,但可能降低较弱模型的质量。更大的上下文也会消耗更多 Token,并提高每次请求成本(对付费模型而言)。", 65 | "contextLimitLocal": "本地模型", 66 | "contextLimitCloud": "云端模型", 67 | "contextLimitAdvanced": "顶级:GPT、Claude、Gemini", 68 | "contextLimitMax": "无限制(注意)", 69 | "dangerZone": "危险区域", 70 | "resetActions": "重置操作", 71 | "resetActionsDesc": "🚨 将所有操作重置为默认值。此操作无法撤销,并将删除所有您的自定义操作。", 72 | "reset": "重置", 73 | "confirmReset": "确认重置" 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /docs/prompt-templating.md: -------------------------------------------------------------------------------- 1 | # Prompt templating 2 | 3 | ## Selection 4 | By default, the selected text will be added to the end of the prompt. 5 | Let's say we have selected text `Some example text.` and prompt `You are an assistant helping a user write more content in a document based on a prompt.` 6 | 7 | The final prompt will be: 8 | ``` 9 | You are an assistant helping a user write more content in a document based on a prompt. 10 | 11 | Some example text. 12 | ``` 13 | ### Custom place for selection 14 | Use keyword `{{=SELECTION=}}` to insert selected text in different place: 15 | ``` 16 | {{=SELECTION=}} 17 | You are an assistant helping a user write more content in a document based on a prompt. 18 | ``` 19 | Translates to: 20 | ``` 21 | Some example text. 22 | You are an assistant helping a user write more content in a document based on a prompt. 
23 | ``` 24 | 25 | 26 | ## Enhanced Actions (Context, RAG) 27 | By default, the context will be added to the end of the prompt after the default selection position: 28 | ``` 29 | Selected text with [[Some meaningful document]]. 30 | 31 | Context: 32 | Some example context about the selected text from some meaningful document. 33 | ``` 34 | 35 | ### Custom place for context 36 | The keyword `{{=CONTEXT=}}` will be replaced with multiline string of context. 37 | ``` 38 | # Relevant context 39 | {{=CONTEXT=}} 40 | 41 | # Selected text 42 | {{=SELECTION=}} 43 | ``` 44 | 45 | Translates to: 46 | ``` 47 | # Relevant context 48 | Some example context about the selected text from some meaningful document. 49 | 50 | # Selected text 51 | Selected text with [[Some meaningful document]]. 52 | ``` 53 | 54 | ### Conditional context 55 | Usually you want to add context conditionally, use keywords `{{=CONTEXT_START=}}` and `{{=CONTEXT_END=}}` to wrap context. 56 | 57 | ``` 58 | # Task 59 | {{=SELECTION=}} 60 | {{=CONTEXT_START=}} 61 | 62 | # Context 63 | {{=CONTEXT=}} 64 | {{=CONTEXT_END=}} 65 | 66 | # Instructions 67 | Do something with the selected text. 68 | ``` 69 | 70 | 🔴 If context is not empty, the entire block will be added to the prompt. 71 | ``` 72 | # Task 73 | Selected text with [[Some meaningful document]]. 74 | 75 | # Context 76 | Some example context about the selected text from some meaningful document. 77 | 78 | # Instructions 79 | Do something with the selected text. 80 | ``` 81 | 82 | ⭕️ If context is empty, the entire block will not be added to the prompt. 83 | ``` 84 | # Task 85 | Selected text with [[Some meaningful document]]. 86 | 87 | # Instructions 88 | Do something with the selected text. 89 | ``` 90 | ### Caveats 91 | 92 | Remember that both the selection and context will be added to the end of the prompt by default if you not specify custom places for them. 93 | ``` 94 | # Task 95 | Some task. 
96 | 97 | # Instructions 98 | Do something with the selected text. 99 | ``` 100 | Translates to: 101 | ``` 102 | # Task 103 | Some task. 104 | 105 | # Instructions 106 | Do something with the selected text. 107 | 108 | Selected text with [[Some meaningful document]]. 109 | 110 | Context: 111 | Some example context about the selected text from some meaningful document. 112 | ``` 113 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Local GPT plugin for Obsidian 2 | 3 | ![demo](https://github.com/pfrankov/obsidian-local-gpt/assets/584632/724d4399-cb6c-4531-9f04-a1e5df2e3dad) 4 | _No speedup. MacBook Pro 13, M1, 16GB, Ollama, orca-mini._ 5 | 6 | The plugin allows you to open a context menu on selected text to pick an AI assistant action or open the Action Palette to run a one-time action. 7 | The most casual AI assistant for Obsidian. 8 | 9 | 10 | 11 | _Action Palette_ 12 | 13 | ## Features 14 | ### Works with images 15 | 16 | 17 | _No speedup. MacBook Pro 13, M1, 16GB, Ollama, bakllava._ 18 | 19 | ### Can use context from links, backlinks, and even PDF files (RAG) 20 | Enhanced Actions 21 |
22 | How to use (Ollama) 23 |

24 | 1. Install Embedding model: 25 |

26 |
    27 |
  • For English: ollama pull nomic-embed-text (fastest)
  • 28 |
  • For other languages: ollama pull bge-m3 (slower, but more accurate)
  • 29 |
30 |

31 | 2. Select Embedding provider in the plugin's settings and try to use the largest model with the largest context window. 32 |

33 |
34 | 35 | ### Default actions 36 | - Continue writing 37 | - Summarize text 38 | - Fix spelling and grammar 39 | - Find action items in text 40 | - General help (just use selected text as a prompt for any purpose) 41 | - New System Prompt to create actions for your needs 42 | 43 | You can also add your own, share the best actions, or get one [from the community](https://github.com/pfrankov/obsidian-local-gpt/discussions/2). 44 | 45 | Settings 46 | 47 | ### Supported languages 48 | - English 49 | - Chinese 50 | - German 51 | - Russian 52 | 53 | ## Installation 54 | ### 1. Install Plugin 55 | #### Obsidian plugin store (recommended) 56 | This plugin is available in the Obsidian community plugin store https://obsidian.md/plugins?id=local-gpt 57 | 58 | #### BRAT 59 | You can also install this plugin via [BRAT](https://obsidian.md/plugins?id=obsidian42-brat): `pfrankov/obsidian-local-gpt` 60 | 61 | ### 2. Install AI Providers Plugin 62 | You also need to install the AI Providers plugin from the plugin store to configure AI providers: https://obsidian.md/plugins?id=ai-providers 63 | 64 | ### 3. Configure AI Providers 65 | Follow the instructions in [AI Providers](https://github.com/pfrankov/obsidian-ai-providers#create-ai-provider) plugin. 66 | 67 | ### Configure Obsidian hotkeys 68 | 1. Open Obsidian Settings 69 | 2. Go to Hotkeys 70 | 3. Filter "Local" and you should see "Local GPT: Show context menu" 71 | 4. Click on `+` icon and press hotkey (e.g. `⌘ + M`) 72 | 5. Filter "Local" again and you should see "Local GPT: Action Palette" 73 | 6. Click on `+` icon and press hotkey (e.g. `⌘ + J`) 74 | 75 | ## My other Obsidian plugins 76 | - [Colored Tags](https://github.com/pfrankov/obsidian-colored-tags) that colorizes tags in distinguishable colors. 77 | - [Obsidian AI Providers](https://github.com/pfrankov/obsidian-ai-providers) is a hub for AI providers. 78 | 79 | ## Inspired by 80 | - [Obsidian Ollama](https://github.com/hinterdupfinger/obsidian-ollama). 
81 | -------------------------------------------------------------------------------- /tests/Utils.test.ts: -------------------------------------------------------------------------------- 1 | import { describe, expect, test } from "vitest"; 2 | import { preparePrompt } from '../src/utils'; 3 | import { SELECTION_KEYWORD, CONTEXT_KEYWORD, CONTEXT_CONDITION_START, CONTEXT_CONDITION_END } from '../src/defaultSettings'; 4 | 5 | describe('Utils', () => { 6 | test('preparePrompt with selection', () => { 7 | const prompt = `Process this: ${SELECTION_KEYWORD}`; 8 | const selectedText = 'Selected text'; 9 | const context = 'Some context'; 10 | 11 | const result = preparePrompt(prompt, selectedText, context); 12 | 13 | expect(result).toBe('Process this: Selected text\n\nContext:\nSome context'); 14 | }); 15 | 16 | test('preparePrompt replaces selection keyword with empty string', () => { 17 | const prompt = `Process this: ${SELECTION_KEYWORD}`; 18 | 19 | const result = preparePrompt(prompt, '', 'Context info'); 20 | 21 | expect(result).toBe('Process this: \n\nContext:\nContext info'); 22 | }); 23 | 24 | test('preparePrompt with context keyword', () => { 25 | const prompt = `Process this with context: ${CONTEXT_KEYWORD}`; 26 | const selectedText = 'Selected text'; 27 | const context = 'Some context'; 28 | 29 | const result = preparePrompt(prompt, selectedText, context); 30 | 31 | expect(result).toBe('Process this with context: Some context\n\nSelected text'); 32 | }); 33 | 34 | test('preparePrompt with empty prompt', () => { 35 | const result = preparePrompt('', 'Selected text', 'Some context'); 36 | expect(result).toBe('Selected text\n\nContext:\nSome context'); 37 | }); 38 | 39 | test('preparePrompt with empty selection and context', () => { 40 | const result = preparePrompt('Process this:', '', ''); 41 | expect(result).toBe('Process this:'); 42 | }); 43 | 44 | test('preparePrompt with context condition and non-empty context', () => { 45 | const prompt = `Before 
${CONTEXT_CONDITION_START}Context: ${CONTEXT_KEYWORD}${CONTEXT_CONDITION_END} After`; 46 | const result = preparePrompt(prompt, 'Selected text', 'Some context'); 47 | expect(result).toBe('BeforeContext: Some contextAfter\n\nSelected text'); 48 | }); 49 | 50 | test('preparePrompt with context condition and empty context', () => { 51 | const prompt = `Before ${CONTEXT_CONDITION_START}Context: ${CONTEXT_KEYWORD}${CONTEXT_CONDITION_END} After`; 52 | const result = preparePrompt(prompt, 'Selected text', ''); 53 | expect(result).toBe('BeforeAfter\n\nSelected text'); 54 | }); 55 | 56 | test('preparePrompt with multiple context conditions', () => { 57 | const prompt = `${CONTEXT_CONDITION_START}Start${CONTEXT_CONDITION_END} Middle ${CONTEXT_CONDITION_START}End${CONTEXT_CONDITION_END}`; 58 | const result = preparePrompt(prompt, 'Selected text', 'Some context'); 59 | expect(result).toBe(`${CONTEXT_CONDITION_START}Start${CONTEXT_CONDITION_END} Middle ${CONTEXT_CONDITION_START}End${CONTEXT_CONDITION_END}\n\nSelected text\n\nContext:\nSome context`); 60 | }); 61 | 62 | test('preparePrompt with mismatched context conditions', () => { 63 | const prompt = `${CONTEXT_CONDITION_START}Incomplete ${CONTEXT_CONDITION_START}Condition${CONTEXT_CONDITION_END}`; 64 | const result = preparePrompt(prompt, 'Selected text', 'Some context'); 65 | expect(result).toBe(`${CONTEXT_CONDITION_START}Incomplete ${CONTEXT_CONDITION_START}Condition${CONTEXT_CONDITION_END}\n\nSelected text\n\nContext:\nSome context`); 66 | }); 67 | 68 | test('preparePrompt with selection and context keywords in reverse order', () => { 69 | const prompt = `Context: ${CONTEXT_KEYWORD}\nSelection: ${SELECTION_KEYWORD}`; 70 | const result = preparePrompt(prompt, 'Selected text', 'Some context'); 71 | expect(result).toBe('Context: Some context\nSelection: Selected text'); 72 | }); 73 | }); 74 | -------------------------------------------------------------------------------- /src/i18n/en.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "commands": { 3 | "showContextMenu": "Show context menu", 4 | "actionPalette": { 5 | "name": "Action Palette", 6 | "placeholder": "Your prompt... | Enter: send, Esc: cancel", 7 | "changeProvider": "Change the AI provider", 8 | "changeModel": "Change the model", 9 | "changeCreativity": "Change creativity", 10 | "changeSystemPrompt": "Change system prompt", 11 | "unknownModel": "Unknown model" 12 | } 13 | }, 14 | "statusBar": { 15 | "enhancing": "✨ Enhancing", 16 | "enhancingWithProgress": "✨ Enhancing {{percent}}%" 17 | }, 18 | "notices": { 19 | "errorGenerating": "Error while generating text: {{message}}", 20 | "errorProcessingRag": "Error processing related documents: {{message}}. Continuing with original text.", 21 | "importantUpdate": "️🚨 IMPORTANT! Update Local GPT settings!", 22 | "newVersion": "⬆️ Local GPT: a new version is available", 23 | "actionNameRequired": "Please enter a name for the action.", 24 | "actionNameExists": "An action with the name \"{{name}}\" already exists.", 25 | "actionRewritten": "Rewritten \"{{name}}\" action", 26 | "actionAdded": "Added \"{{name}}\" action", 27 | "copied": "Copied" 28 | }, 29 | "settings": { 30 | "mainProvider": "Main AI Provider", 31 | "embeddingProvider": "Embedding AI Provider", 32 | "embeddingProviderDesc": "Optional. Used for ✨ Enhanced Actions.", 33 | "visionProvider": "Vision AI Provider", 34 | "visionProviderDesc": "Optional. This is used for images. If not set, the main AI provider will be used.", 35 | "creativity": "Creativity", 36 | "creativityNone": "⚪ None", 37 | "creativityLow": "️💡 Low", 38 | "creativityMedium": "🎨 Medium", 39 | "creativityHigh": "🚀 High", 40 | "actions": "Actions", 41 | "quickAdd": "Quick add", 42 | "quickAddPlaceholder": "Paste action", 43 | "quickAddDesc": "You can share the best sets prompts or get one from the community.
Important: if you already have an action with the same name it will be overwritten.", 44 | "addNewManually": "Add new manually", 45 | "actionName": "Action name", 46 | "actionNamePlaceholder": "Summarize selection", 47 | "systemPrompt": "System prompt", 48 | "systemPromptDesc": "Optional", 49 | "systemPromptPlaceholder": "You are a helpful assistant.", 50 | "prompt": "Prompt", 51 | "promptDesc": "Please read about
Prompt templating
if you want to customize
your resulting prompts", 52 | "replaceSelected": "Replace selected text", 53 | "replaceSelectedDesc": "If checked, the highlighted text will be replaced with a response from the model.", 54 | "remove": "Remove", 55 | "close": "Close", 56 | "save": "Save", 57 | "actionsList": "Actions list", 58 | "changeOrder": "Change order", 59 | "done": "Done", 60 | "advancedSettings": "Advanced settings", 61 | "advancedSettingsDesc": "✨ Enhanced Actions (RAG), Reset all actions", 62 | "enhancedActions": "Enhanced Actions", 63 | "enhancedActionsLabel": "RAG context", 64 | "enhancedActionsDesc": "More context can improve answer quality for powerful models, but may reduce quality for weaker ones. Larger context also consumes more tokens and increases request cost for paid models.", 65 | "contextLimitLocal": "Local models", 66 | "contextLimitCloud": "Cloud models", 67 | "contextLimitAdvanced": "Top: GPT, Claude, Gemini", 68 | "contextLimitMax": "No limits (danger)", 69 | "dangerZone": "Danger zone", 70 | "resetActions": "Reset actions", 71 | "resetActionsDesc": "🚨 Reset all actions to the default. 
This cannot be undone and will delete all your custom actions.", 72 | "reset": "Reset", 73 | "confirmReset": "Confirm reset" 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /tests/indexedDB.test.ts: -------------------------------------------------------------------------------- 1 | import { beforeEach, describe, expect, it, vi } from "vitest"; 2 | import { fileCache } from "../src/indexedDB"; 3 | 4 | // eslint-disable-next-line no-var -- var avoids TDZ when vi.mock is hoisted 5 | var openDBMock: ReturnType; 6 | 7 | vi.mock("idb", () => { 8 | const createDbStub = () => { 9 | const storeData = new Map>(); 10 | const storeNames = new Set(); 11 | const db = { 12 | objectStoreNames: { 13 | contains: (name: string) => storeNames.has(name), 14 | }, 15 | createObjectStore: vi.fn((name: string) => { 16 | storeNames.add(name); 17 | if (!storeData.has(name)) { 18 | storeData.set(name, new Map()); 19 | } 20 | }), 21 | deleteObjectStore: vi.fn((name: string) => { 22 | storeNames.delete(name); 23 | storeData.delete(name); 24 | }), 25 | get: vi.fn((store: string, key: string) => { 26 | return storeData.get(store)?.get(key); 27 | }), 28 | put: vi.fn((store: string, value: unknown, key: string) => { 29 | if (!storeData.has(store)) { 30 | storeData.set(store, new Map()); 31 | } 32 | storeData.get(store)!.set(key, value); 33 | }), 34 | clear: vi.fn((store: string) => { 35 | storeData.get(store)?.clear(); 36 | }), 37 | }; 38 | 39 | return { db, storeData, storeNames }; 40 | }; 41 | 42 | openDBMock = vi.fn( 43 | async ( 44 | _dbName: string, 45 | _version: number, 46 | options?: { 47 | upgrade?: (db: ReturnType["db"], oldVersion: number, newVersion?: number) => void; 48 | }, 49 | ) => { 50 | const stub = createDbStub(); 51 | options?.upgrade?.(stub.db as any, 0, 3); 52 | return stub.db as any; 53 | }, 54 | ); 55 | 56 | return { openDB: openDBMock }; 57 | }); 58 | 59 | beforeEach(() => { 60 | openDBMock?.mockClear(); 61 | (fileCache 
as any).db = null; 62 | (fileCache as any).vaultId = ""; 63 | }); 64 | 65 | describe("fileCache", () => { 66 | it("throws when database is not initialized", async () => { 67 | await expect(fileCache.getContent("missing")).rejects.toThrow( 68 | "Database not initialized", 69 | ); 70 | await expect( 71 | fileCache.setContent("missing", { mtime: 0, content: "" }), 72 | ).rejects.toThrow("Database not initialized"); 73 | await expect(fileCache.clearContent()).rejects.toThrow( 74 | "Database not initialized", 75 | ); 76 | await expect(fileCache.clearAll()).rejects.toThrow( 77 | "Database not initialized", 78 | ); 79 | }); 80 | 81 | it("initializes storage with migrations", async () => { 82 | await fileCache.init("vault-1"); 83 | 84 | expect(openDBMock).toHaveBeenCalledWith( 85 | "LocalGPTCache/vault-1", 86 | 3, 87 | expect.objectContaining({ upgrade: expect.any(Function) }), 88 | ); 89 | 90 | const db = await openDBMock.mock.results[0]!.value; 91 | expect(db.createObjectStore).toHaveBeenCalledWith("embeddings"); 92 | expect(db.createObjectStore).toHaveBeenCalledWith("content"); 93 | expect(db.deleteObjectStore).toHaveBeenCalledWith("embeddings"); 94 | }); 95 | 96 | it("stores, reads and clears cached content", async () => { 97 | await fileCache.init("vault-2"); 98 | await fileCache.setContent("a", { mtime: 1, content: "hello" }); 99 | await fileCache.setContent("b", { mtime: 2, content: "world" }); 100 | 101 | expect(await fileCache.getContent("a")).toEqual({ 102 | mtime: 1, 103 | content: "hello", 104 | }); 105 | 106 | await fileCache.clearContent(); 107 | expect(await fileCache.getContent("a")).toBeUndefined(); 108 | 109 | await fileCache.setContent("c", { mtime: 3, content: "!" 
}); 110 | await fileCache.clearAll(); 111 | expect(await fileCache.getContent("c")).toBeUndefined(); 112 | }); 113 | }); 114 | -------------------------------------------------------------------------------- /src/i18n/ru.json: -------------------------------------------------------------------------------- 1 | { 2 | "commands": { 3 | "showContextMenu": "Показать контекстное меню", 4 | "actionPalette": { 5 | "name": "Палитра действий", 6 | "placeholder": "Ваш промпт... | Enter: отправить, Esc: отмена", 7 | "changeProvider": "Сменить AI‑провайдера", 8 | "changeModel": "Сменить модель", 9 | "changeCreativity": "Сменить креативность", 10 | "changeSystemPrompt": "Сменить системную инструкцию", 11 | "unknownModel": "Неизвестная модель" 12 | } 13 | }, 14 | "statusBar": { 15 | "enhancing": "✨ Улучшение", 16 | "enhancingWithProgress": "✨ Улучшение {{percent}}%" 17 | }, 18 | "notices": { 19 | "errorGenerating": "Ошибка генерации текста: {{message}}", 20 | "errorProcessingRag": "Ошибка обработки связанных документов: {{message}}. Продолжаю с исходным текстом.", 21 | "importantUpdate": "️🚨 ВАЖНО! Обновите настройки Local GPT!", 22 | "newVersion": "⬆️ Local GPT: доступна новая версия", 23 | "actionNameRequired": "Пожалуйста, введите название для действия.", 24 | "actionNameExists": "Действие с названием «{{name}}» уже существует.", 25 | "actionRewritten": "Действие «{{name}}» перезаписано", 26 | "actionAdded": "Действие «{{name}}» добавлено", 27 | "copied": "Скопировано" 28 | }, 29 | "settings": { 30 | "mainProvider": "Основной AI-провайдер", 31 | "embeddingProvider": "Провайдер для эмбеддингов", 32 | "embeddingProviderDesc": "Опционально. Используется для ✨ Расширенных действий.", 33 | "visionProvider": "Провайдер для изображений", 34 | "visionProviderDesc": "Опционально. Используется для работы с изображениями. 
Если не выбран, будет использован основной AI-провайдер.", 35 | "creativity": "Креативность", 36 | "creativityNone": "⚪ Отсутствует", 37 | "creativityLow": "️💡 Низкая", 38 | "creativityMedium": "🎨 Средняя", 39 | "creativityHigh": "🚀 Высокая", 40 | "actions": "Действия", 41 | "quickAdd": "Быстрое добавление", 42 | "quickAddPlaceholder": "Вставьте готовое действие", 43 | "quickAddDesc": "Вы можете поделиться своими наборами промптов или найти новые в сообществе.
Важно: если у вас уже есть действие с таким же названием, оно будет перезаписано.", 44 | "addNewManually": "Добавить вручную", 45 | "actionName": "Название действия", 46 | "actionNamePlaceholder": "Кратко пересказать выделенное", 47 | "systemPrompt": "Системная инструкция", 48 | "systemPromptDesc": "Опционально", 49 | "systemPromptPlaceholder": "Вы — полезный ассистент.", 50 | "prompt": "Промпт", 51 | "promptDesc": "Узнайте больше о шаблонизации промптов, чтобы настроить их под себя.", 52 | "replaceSelected": "Заменять выделенный текст", 53 | "replaceSelectedDesc": "Если опция включена, выделенный текст будет заменён ответом от модели.", 54 | "remove": "Удалить", 55 | "close": "Закрыть", 56 | "save": "Сохранить", 57 | "actionsList": "Список действий", 58 | "changeOrder": "Изменить порядок", 59 | "done": "Готово", 60 | "advancedSettings": "Расширенные настройки", 61 | "advancedSettingsDesc": "✨ Расширенные действия (RAG), Сброс всех действий", 62 | "enhancedActions": "Расширенные действия", 63 | "enhancedActionsLabel": "RAG‑контекст", 64 | "enhancedActionsDesc": "Чем больше контекст — тем выше качество ответа у мощных моделей, но ниже качество у слабых. Большой контекст также расходует больше токенов и увеличивает стоимость запроса для платных моделей.", 65 | "contextLimitLocal": "Локальные модели", 66 | "contextLimitCloud": "Облачные модели", 67 | "contextLimitAdvanced": "Топ: GPT, Claude, Gemini", 68 | "contextLimitMax": "Без ограничений (опасно)", 69 | "dangerZone": "Опасная зона", 70 | "resetActions": "Сбросить действия", 71 | "resetActionsDesc": "🚨 Сбросить все действия до стандартных. 
Это действие необратимо и удалит все ваши кастомные действия.", 72 | "reset": "Сбросить", 73 | "confirmReset": "Подтвердить сброс" 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /src/i18n/de.json: -------------------------------------------------------------------------------- 1 | { 2 | "commands": { 3 | "showContextMenu": "Kontextmenü anzeigen", 4 | "actionPalette": { 5 | "name": "Aktionspalette", 6 | "placeholder": "Ihr Prompt... | Enter: senden, Esc: abbrechen", 7 | "changeProvider": "KI‑Provider wechseln", 8 | "changeModel": "Modell wechseln", 9 | "changeCreativity": "Kreativität ändern", 10 | "changeSystemPrompt": "System-Prompt wechseln", 11 | "unknownModel": "Unbekanntes Modell" 12 | } 13 | }, 14 | "statusBar": { 15 | "enhancing": "✨ Verbessern", 16 | "enhancingWithProgress": "✨ Verbessern {{percent}}%" 17 | }, 18 | "notices": { 19 | "errorGenerating": "Fehler bei der Texterzeugung: {{message}}", 20 | "errorProcessingRag": "Fehler bei der Verarbeitung verknüpfter Dokumente: {{message}}. Fahre mit dem Originaltext fort.", 21 | "importantUpdate": "️🚨 WICHTIG! Aktualisiere die Local GPT Einstellungen!", 22 | "newVersion": "⬆️ Local GPT: Eine neue Version ist verfügbar", 23 | "actionNameRequired": "Bitte gib einen Namen für die Aktion ein.", 24 | "actionNameExists": "Eine Aktion mit dem Namen \"{{name}}\" existiert bereits.", 25 | "actionRewritten": "Aktion \"{{name}}\" überschrieben", 26 | "actionAdded": "Aktion \"{{name}}\" hinzugefügt", 27 | "copied": "Kopiert" 28 | }, 29 | "settings": { 30 | "mainProvider": "Haupt-AI-Provider", 31 | "embeddingProvider": "Embedding AI Provider", 32 | "embeddingProviderDesc": "Optional. Wird für ✨ Erweiterte Aktionen.", 33 | "visionProvider": "Vision AI Provider", 34 | "visionProviderDesc": "Optional. Dies wird für Bilder verwendet. 
Wenn nicht festgelegt, wird der Haupt-AI-Provider verwendet.", 35 | "creativity": "Kreativität", 36 | "creativityNone": "⚪ Keine", 37 | "creativityLow": "️💡 Niedrig", 38 | "creativityMedium": "🎨 Mittel", 39 | "creativityHigh": "🚀 Hoch", 40 | "actions": "Aktionen", 41 | "quickAdd": "Schnell hinzufügen", 42 | "quickAddPlaceholder": "Aktion einfügen", 43 | "quickAddDesc": "Sie können die besten Sätze von Prompts teilen oder einen aus der Community erhalten.
Wichtig: Wenn Sie bereits eine Aktion mit demselben Namen haben, wird sie überschrieben.", 44 | "addNewManually": "Manuell hinzufügen", 45 | "actionName": "Aktionsname", 46 | "actionNamePlaceholder": "Auswahl zusammenfassen", 47 | "systemPrompt": "System-Prompt", 48 | "systemPromptDesc": "Optional", 49 | "systemPromptPlaceholder": "Du bist ein hilfreicher Assistent.", 50 | "prompt": "Prompt", 51 | "promptDesc": "Bitte lesen Sie über
Prompt-Vorlagen
, wenn Sie Ihre
resultierenden Prompts anpassen möchten", 52 | "replaceSelected": "Ausgewählten Text ersetzen", 53 | "replaceSelectedDesc": "Wenn diese Option aktiviert ist, wird der hervorgehobene Text durch eine Antwort des Modells ersetzt.", 54 | "remove": "Entfernen", 55 | "close": "Schließen", 56 | "save": "Speichern", 57 | "actionsList": "Aktionsliste", 58 | "changeOrder": "Reihenfolge ändern", 59 | "done": "Fertig", 60 | "advancedSettings": "Erweiterte Einstellungen", 61 | "advancedSettingsDesc": "✨ Erweiterte Aktionen (RAG), Alle Aktionen zurücksetzen", 62 | "enhancedActions": "Erweiterte Aktionen", 63 | "enhancedActionsLabel": "RAG‑Kontext", 64 | "enhancedActionsDesc": "Mehr Kontext kann die Antwortqualität bei leistungsstarken Modellen verbessern, bei schwächeren jedoch verschlechtern. Größerer Kontext verbraucht außerdem mehr Tokens und erhöht die Kosten pro Anfrage (bei kostenpflichtigen Modellen).", 65 | "contextLimitLocal": "Lokale Modelle", 66 | "contextLimitCloud": "Cloud‑Modelle", 67 | "contextLimitAdvanced": "Top: GPT, Claude, Gemini", 68 | "contextLimitMax": "Keine Limits (Achtung)", 69 | "dangerZone": "Gefahrenzone", 70 | "resetActions": "Aktionen zurücksetzen", 71 | "resetActionsDesc": "🚨 Setzt alle Aktionen auf die Standardeinstellungen zurück. 
Dies kann nicht rückgängig gemacht werden und löscht alle Ihre benutzerdefinierten Aktionen.", 72 | "reset": "Zurücksetzen", 73 | "confirmReset": "Zurücksetzen bestätigen" 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /src/defaultSettings.ts: -------------------------------------------------------------------------------- 1 | import { LocalGPTSettings } from "./interfaces"; 2 | 3 | export const DEFAULT_SETTINGS: LocalGPTSettings = { 4 | aiProviders: { 5 | main: null, 6 | embedding: null, 7 | vision: null, 8 | }, 9 | defaults: { 10 | creativity: "low", 11 | contextLimit: "local", 12 | }, 13 | actions: [ 14 | { 15 | name: "🪄 General help", 16 | prompt: "", 17 | system: "You are an assistant helping a user write more content in a document based on a prompt. Output in markdown format. Do not use links. Do not include literal content from the original document.", 18 | }, 19 | { 20 | name: "✍️ Continue writing", 21 | prompt: "Act as a professional editor with many years of experience as a writer. Carefully finalize the following text, add details, use facts and make sure that the meaning and original style are preserved. Purposely write in detail, with examples, so that your reader is comfortable, even if they don't understand the specifics. Don't use clericalisms, evaluations without proof with facts, passive voice. Use Markdown markup language for formatting. Answer only content and nothing else, no introductory words, only substance.", 22 | system: "You are an AI assistant that follows instruction extremely well. Help as much as you can.", 23 | }, 24 | { 25 | name: "🍭 Summarize", 26 | prompt: "Make a concise summary of the key points of the following text.", 27 | system: "You are an AI assistant that follows instruction extremely well. 
Help as much as you can.", 28 | }, 29 | { 30 | name: "📖 Fix spelling and grammar", 31 | prompt: "Proofread the below for spelling and grammar.", 32 | system: "You are an AI assistant that follows instruction extremely well. Help as much as you can.", 33 | replace: true, 34 | }, 35 | { 36 | name: "✅ Find action items", 37 | prompt: 'Act as an assistant helping find action items inside a document. An action item is an extracted task or to-do found inside of an unstructured document. Use Markdown checkbox format: each line starts with "- [ ] "', 38 | system: "You are an AI assistant that follows instruction extremely well. Help as much as you can.", 39 | }, 40 | { 41 | name: "🧠 New System Prompt", 42 | prompt: "", 43 | system: `You are a highly skilled AI prompt engineer with expertise in creating tailored prompts for a wide range of professional roles. You have a deep knowledge of how to craft prompts that effectively guide the language model to produce high-quality, contextually appropriate responses.\n\nYour task is to generate a custom system prompt for different roles based on user input. This involves understanding the specific requirements of each role, the context in which the prompt will be used, and the desired output format. You are skilled in structuring prompts that ensure clarity, relevance, and utility.\n\nCreate a custom system prompt for an LLM to assist users in generating contextually appropriate and highly effective responses for various roles. The prompt should provide clear instructions to the LLM on how to handle specific scenarios related to the role, including the tone and format of the response.\n\nStart by providing a role "You are..." and context as a summary of the situation or background information relevant to the prompt. Define the main objective, outlining what the LLM needs to accomplish.\n\nInclude instructions on the appropriate style and tone (e.g., formal, casual, technical, empathetic) based on the role and audience. 
Identify the target audience to tailor the LLM's output effectively. Specify the format of the response, whether it should be a narrative, bullet points, step-by-step guide, code, or another format. Avoid using headings or examples; the prompt should read as a continuous, cohesive set of instructions.\nANSWER PROMPT AND NOTHING ELSE!`, 44 | }, 45 | ], 46 | _version: 8, 47 | }; 48 | 49 | export const CREATIVITY: { [index: string]: any } = { 50 | "": { 51 | temperature: 0, 52 | }, 53 | low: { 54 | temperature: 0.2, 55 | }, 56 | medium: { 57 | temperature: 0.5, 58 | }, 59 | high: { 60 | temperature: 1, 61 | }, 62 | }; 63 | 64 | export const SELECTION_KEYWORD = "{{=SELECTION=}}"; 65 | export const CONTEXT_KEYWORD = "{{=CONTEXT=}}"; 66 | export const CONTEXT_CONDITION_START = "{{=CONTEXT_START=}}"; 67 | export const CONTEXT_CONDITION_END = "{{=CONTEXT_END=}}"; 68 | -------------------------------------------------------------------------------- /tests/actionPaletteHistory.test.ts: -------------------------------------------------------------------------------- 1 | import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; 2 | import { 3 | addToPromptHistory, 4 | getPromptHistoryEntry, 5 | getPromptHistoryLength, 6 | resetPromptHistory, 7 | } from "../src/ui/actionPaletteHistory"; 8 | 9 | const STORAGE_KEY = "local-gpt-action-palette-history"; 10 | const importFreshHistoryModule = async () => { 11 | vi.resetModules(); 12 | return import("../src/ui/actionPaletteHistory"); 13 | }; 14 | 15 | afterEach(() => { 16 | vi.restoreAllMocks(); 17 | }); 18 | 19 | beforeEach(() => { 20 | localStorage.clear(); 21 | resetPromptHistory(); 22 | }); 23 | 24 | describe("actionPaletteHistory", () => { 25 | it("persists normalized, non-duplicate entries", () => { 26 | addToPromptHistory(" first "); 27 | addToPromptHistory("first"); 28 | addToPromptHistory("second"); 29 | 30 | expect(getPromptHistoryLength()).toBe(2); 31 | expect(getPromptHistoryEntry(0)).toBe(" first "); 32 | 
expect(getPromptHistoryEntry(1)).toBe("second"); 33 | const stored = JSON.parse(localStorage.getItem(STORAGE_KEY) || "[]"); 34 | expect(stored).toEqual([" first ", "second"]); 35 | }); 36 | 37 | it("keeps only the newest 50 records", () => { 38 | for (let i = 0; i < 55; i += 1) { 39 | addToPromptHistory(`entry-${i}`); 40 | } 41 | 42 | expect(getPromptHistoryLength()).toBe(50); 43 | expect(getPromptHistoryEntry(0)).toBe("entry-5"); 44 | expect(getPromptHistoryEntry(49)).toBe("entry-54"); 45 | }); 46 | 47 | it("resets history and storage", () => { 48 | addToPromptHistory("temp"); 49 | resetPromptHistory(); 50 | 51 | expect(getPromptHistoryLength()).toBe(0); 52 | expect(localStorage.getItem(STORAGE_KEY)).toBeNull(); 53 | }); 54 | 55 | it("restores stored string history and filters non-string entries", async () => { 56 | localStorage.setItem(STORAGE_KEY, JSON.stringify(["first", 123, "second"])); 57 | const historyModule = await importFreshHistoryModule(); 58 | 59 | expect(historyModule.getPromptHistoryLength()).toBe(2); 60 | expect(historyModule.getPromptHistoryEntry(0)).toBe("first"); 61 | expect(historyModule.getPromptHistoryEntry(1)).toBe("second"); 62 | }); 63 | 64 | it("ignores non-array payloads in storage", async () => { 65 | localStorage.setItem(STORAGE_KEY, JSON.stringify({ foo: "bar" })); 66 | const historyModule = await importFreshHistoryModule(); 67 | 68 | expect(historyModule.getPromptHistoryLength()).toBe(0); 69 | }); 70 | 71 | it("returns empty history when storage is missing", async () => { 72 | const originalStorage = (globalThis as any).localStorage; 73 | // @ts-ignore 74 | delete (globalThis as any).localStorage; 75 | const historyModule = await importFreshHistoryModule(); 76 | 77 | expect(historyModule.getPromptHistoryLength()).toBe(0); 78 | 79 | (globalThis as any).localStorage = originalStorage; 80 | }); 81 | 82 | it("ignores malformed history payloads", async () => { 83 | const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => 
{}); 84 | localStorage.setItem(STORAGE_KEY, "{{broken json}}"); 85 | const historyModule = await importFreshHistoryModule(); 86 | 87 | expect(historyModule.getPromptHistoryLength()).toBe(0); 88 | expect(consoleSpy).toHaveBeenCalled(); 89 | 90 | consoleSpy.mockRestore(); 91 | }); 92 | 93 | it("continues when persisting history fails", async () => { 94 | const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {}); 95 | const setItemSpy = vi 96 | .spyOn(Storage.prototype, "setItem") 97 | .mockImplementation(() => { 98 | throw new Error("persist failed"); 99 | }); 100 | 101 | addToPromptHistory("failsave"); 102 | 103 | expect(setItemSpy).toHaveBeenCalled(); 104 | expect(consoleSpy).toHaveBeenCalled(); 105 | consoleSpy.mockRestore(); 106 | setItemSpy.mockRestore(); 107 | }); 108 | 109 | it("handles reset errors", async () => { 110 | const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {}); 111 | const removeSpy = vi 112 | .spyOn(Storage.prototype, "removeItem") 113 | .mockImplementation(() => { 114 | throw new Error("reset failed"); 115 | }); 116 | 117 | resetPromptHistory(); 118 | 119 | expect(removeSpy).toHaveBeenCalled(); 120 | expect(consoleSpy).toHaveBeenCalled(); 121 | consoleSpy.mockRestore(); 122 | removeSpy.mockRestore(); 123 | }); 124 | 125 | it("skips persistence when storage is unavailable", () => { 126 | const originalStorage = (globalThis as any).localStorage; 127 | // @ts-ignore 128 | delete (globalThis as any).localStorage; 129 | 130 | expect(() => addToPromptHistory("no-storage")).not.toThrow(); 131 | resetPromptHistory(); 132 | 133 | (globalThis as any).localStorage = originalStorage; 134 | }); 135 | 136 | it("clears history without storage present", () => { 137 | addToPromptHistory("temp"); 138 | const originalStorage = (globalThis as any).localStorage; 139 | // @ts-ignore 140 | delete (globalThis as any).localStorage; 141 | 142 | expect(() => resetPromptHistory()).not.toThrow(); 143 | 
expect(getPromptHistoryLength()).toBe(0); 144 | 145 | (globalThis as any).localStorage = originalStorage; 146 | }); 147 | }); 148 | -------------------------------------------------------------------------------- /src/logger.ts: -------------------------------------------------------------------------------- 1 | enum LogLevel { 2 | DEBUG, 3 | INFO, 4 | WARN, 5 | ERROR, 6 | SUCCESS, 7 | } 8 | 9 | class Logger { 10 | private static instance: Logger; 11 | private logLevel: LogLevel = LogLevel.DEBUG; 12 | private isDevMode: boolean; 13 | private timers: Map = 14 | new Map(); 15 | private colorIndex: number = 0; 16 | private colors: string[] = [ 17 | "#FFB3BA", 18 | "#BAFFC9", 19 | "#BAE1FF", 20 | "#FFFFBA", 21 | "#FFDFBA", 22 | "#E0BBE4", 23 | ]; 24 | 25 | private constructor() { 26 | this.isDevMode = process.env.NODE_ENV === "development"; 27 | } 28 | 29 | static getInstance(): Logger { 30 | if (!Logger.instance) { 31 | Logger.instance = new Logger(); 32 | } 33 | return Logger.instance; 34 | } 35 | 36 | setLogLevel(level: LogLevel) { 37 | this.logLevel = level; 38 | } 39 | 40 | private logWithEmoji(level: LogLevel, message: string, ...args: any[]) { 41 | if (this.isDevMode && level >= this.logLevel) { 42 | const emoji = this.getEmojiForLevel(level); 43 | if (args.length === 0) { 44 | console.log(`${emoji} %c${message}`, "font-weight: bold;"); 45 | } else { 46 | console.group(`${emoji} %c${message}`, "font-weight: bold;"); 47 | this.logArgs(args); 48 | console.groupEnd(); 49 | } 50 | } 51 | } 52 | 53 | private logArgs(args: any[]) { 54 | args.forEach((arg) => { 55 | console.log(arg); 56 | }); 57 | } 58 | 59 | private getEmojiForLevel(level: LogLevel): string { 60 | switch (level) { 61 | case LogLevel.DEBUG: 62 | return "🐛"; // Жук для отладки 63 | case LogLevel.INFO: 64 | return "ℹ️"; // Информация 65 | case LogLevel.WARN: 66 | return "⚠️"; // Предупреждение 67 | case LogLevel.ERROR: 68 | return "🚫"; // Ошибка 69 | case LogLevel.SUCCESS: 70 | return "✅"; // Успех 71 | 
default: 72 | return ""; 73 | } 74 | } 75 | 76 | debug(message: string, ...args: any[]) { 77 | this.logWithEmoji(LogLevel.DEBUG, message, ...args); 78 | } 79 | 80 | info(message: string, ...args: any[]) { 81 | this.logWithEmoji(LogLevel.INFO, message, ...args); 82 | } 83 | 84 | warn(message: string, ...args: any[]) { 85 | this.logWithEmoji(LogLevel.WARN, message, ...args); 86 | } 87 | 88 | error(message: string, ...args: any[]) { 89 | this.logWithEmoji(LogLevel.ERROR, message, ...args); 90 | } 91 | 92 | success(message: string, ...args: any[]) { 93 | this.logWithEmoji(LogLevel.SUCCESS, message, ...args); 94 | } 95 | 96 | table(message: string, ...args: any[]) { 97 | if (this.isDevMode && this.logLevel <= LogLevel.DEBUG) { 98 | console.group(`📊 %c${message}`, "font-weight: bold;"); 99 | this.logNestedGroups(args); 100 | console.groupEnd(); 101 | } 102 | } 103 | 104 | private logNestedGroups(args: any[]) { 105 | args.forEach((arg) => { 106 | if (typeof arg === "object" && arg !== null) { 107 | this.logObjectAsGroups(arg); 108 | } else { 109 | console.log(arg); 110 | } 111 | }); 112 | } 113 | 114 | private logObjectAsGroups(obj: object) { 115 | Object.entries(obj).forEach(([key, value]) => { 116 | if (typeof value === "object" && value !== null) { 117 | console.group(`${key}:`); 118 | this.logObjectAsGroups(value); 119 | console.groupEnd(); 120 | } else { 121 | console.log(`${key}: ${value}`); 122 | } 123 | }); 124 | } 125 | 126 | time(label: string) { 127 | if (this.isDevMode && this.logLevel <= LogLevel.DEBUG) { 128 | const color = this.getNextColor(); 129 | this.timers.set(label, { startTime: performance.now(), color }); 130 | console.log( 131 | `⏱️ %c${label}: timer started`, 132 | `color: black; font-weight: bold; background-color: ${color}; padding: 2px 5px; border-radius: 3px;`, 133 | ); 134 | } 135 | } 136 | 137 | timeEnd(label: string) { 138 | if (this.isDevMode && this.logLevel <= LogLevel.DEBUG) { 139 | const timerData = this.timers.get(label); 140 | if 
(timerData) { 141 | const duration = performance.now() - timerData.startTime; 142 | console.log( 143 | `⏱️ %c${label}: ${duration.toFixed(2)}ms`, 144 | `color: black; font-weight: bold; background-color: ${timerData.color}; padding: 2px 5px; border-radius: 3px;`, 145 | ); 146 | this.timers.delete(label); 147 | } else { 148 | console.warn(`Timer '${label}' does not exist`); 149 | } 150 | } 151 | } 152 | 153 | private getNextColor(): string { 154 | const color = this.colors[this.colorIndex]; 155 | this.colorIndex = (this.colorIndex + 1) % this.colors.length; 156 | return color; 157 | } 158 | 159 | // Добавляем новый метод для создания разделителя 160 | separator(message: string = "") { 161 | if (this.isDevMode) { 162 | const lineLength = 20; 163 | const line = "━".repeat(lineLength); 164 | const paddedMessage = message ? ` ${message} ` : ""; 165 | const leftPadding = Math.floor( 166 | (lineLength - paddedMessage.length) / 2, 167 | ); 168 | const rightPadding = 169 | lineLength - paddedMessage.length - leftPadding; 170 | 171 | const separatorLine = message 172 | ? 
line.slice(0, leftPadding) + 173 | paddedMessage + 174 | line.slice(lineLength - rightPadding) 175 | : line; 176 | 177 | console.log( 178 | "\n%c" + separatorLine, 179 | "color: #FF4500; font-weight: bold; font-size: 1.2em;", 180 | ); 181 | } 182 | } 183 | } 184 | 185 | export const logger = Logger.getInstance(); 186 | -------------------------------------------------------------------------------- /tests/ActionPalette.vitest.ts: -------------------------------------------------------------------------------- 1 | /// 2 | 3 | import { describe, test, expect, vi, afterEach } from "vitest"; 4 | import { tick } from "svelte"; 5 | import ActionPalette from "../src/ui/ActionPalette.svelte"; 6 | import { 7 | addToPromptHistory, 8 | resetPromptHistory, 9 | } from "../src/ui/actionPaletteHistory"; 10 | import { I18n } from "../src/i18n"; 11 | 12 | const setCaretToEnd = (element: HTMLElement) => { 13 | const range = document.createRange(); 14 | range.selectNodeContents(element); 15 | range.collapse(false); 16 | const selection = window.getSelection(); 17 | selection?.removeAllRanges(); 18 | selection?.addRange(range); 19 | }; 20 | 21 | const setCaretAtIndex = (element: HTMLElement, index: number) => { 22 | const range = document.createRange(); 23 | const textNode = element.firstChild; 24 | if (textNode && textNode.nodeType === Node.TEXT_NODE) { 25 | const clampedIndex = Math.max( 26 | 0, 27 | Math.min(index, textNode.textContent?.length ?? 
0), 28 | ); 29 | range.setStart(textNode, clampedIndex); 30 | range.setEnd(textNode, clampedIndex); 31 | const selection = window.getSelection(); 32 | selection?.removeAllRanges(); 33 | selection?.addRange(range); 34 | } 35 | }; 36 | 37 | const requireElement = ( 38 | container: ParentNode, 39 | selector: string, 40 | ) => { 41 | const el = container.querySelector(selector); 42 | if (!el) { 43 | throw new Error(`Element not found: ${selector}`); 44 | } 45 | return el as T; 46 | }; 47 | 48 | const createComponent = (props: Record = {}) => { 49 | const target = document.createElement("div"); 50 | document.body.appendChild(target); 51 | // Cast to any to align with the testing runtime signature 52 | const component = new (ActionPalette as any)({ target, props }); 53 | return { target, component }; 54 | }; 55 | 56 | const typeIntoPalette = async (textbox: HTMLDivElement, text: string) => { 57 | textbox.textContent = text; 58 | setCaretToEnd(textbox); 59 | textbox.dispatchEvent( 60 | new InputEvent("input", { 61 | bubbles: true, 62 | data: text.slice(-1), 63 | inputType: "insertText", 64 | }), 65 | ); 66 | await tick(); 67 | }; 68 | 69 | afterEach(() => { 70 | document.body.innerHTML = ""; 71 | localStorage.clear(); 72 | resetPromptHistory(); 73 | }); 74 | 75 | describe("ActionPalette component", () => { 76 | test("renders placeholder from i18n", () => { 77 | const { target, component } = createComponent(); 78 | const textbox = requireElement( 79 | target, 80 | ".local-gpt-action-palette", 81 | ); 82 | 83 | expect(textbox.dataset.placeholder).toBe( 84 | I18n.t("commands.actionPalette.placeholder"), 85 | ); 86 | component.$destroy(); 87 | }); 88 | 89 | test("selects a system prompt via command dropdown and submits it", async () => { 90 | const { target, component } = createComponent({ 91 | getSystemPrompts: () => [{ name: "Preset", system: "You are kind" }], 92 | }); 93 | const submitSpy = vi.fn(); 94 | component.$on("submit", (event) => submitSpy(event.detail)); 95 | 96 
| const textbox = requireElement( 97 | target, 98 | ".local-gpt-action-palette", 99 | ); 100 | textbox.focus(); 101 | setCaretToEnd(textbox); 102 | textbox.textContent = "/"; 103 | textbox.dispatchEvent( 104 | new InputEvent("input", { bubbles: true, data: "/", inputType: "insertText" }), 105 | ); 106 | await tick(); 107 | 108 | const commandItems = Array.from( 109 | target.querySelectorAll(".local-gpt-command-name"), 110 | ); 111 | const systemCommand = commandItems.find((el) => 112 | el.textContent?.trim().includes("/system"), 113 | ) as HTMLElement; 114 | systemCommand?.dispatchEvent(new MouseEvent("click", { bubbles: true })); 115 | await tick(); 116 | 117 | const systemItems = Array.from( 118 | target.querySelectorAll(".local-gpt-system-name"), 119 | ); 120 | const presetItem = systemItems.find( 121 | (el) => el.textContent?.trim() === "Preset", 122 | ) as HTMLElement; 123 | presetItem?.dispatchEvent(new MouseEvent("click", { bubbles: true })); 124 | await tick(); 125 | 126 | setCaretToEnd(textbox); 127 | textbox.dispatchEvent( 128 | new KeyboardEvent("keydown", { key: "Enter", bubbles: true }), 129 | ); 130 | 131 | expect(submitSpy).toHaveBeenCalledWith( 132 | expect.objectContaining({ systemPrompt: "You are kind" }), 133 | ); 134 | component.$destroy(); 135 | }); 136 | 137 | test("filters system prompts by name only", async () => { 138 | const { target, component } = createComponent({ 139 | getSystemPrompts: () => [ 140 | { name: "Continue writing", system: "Continue the text" }, 141 | { name: "Find action items", system: "Find actions in text" }, 142 | ], 143 | }); 144 | 145 | const textbox = requireElement( 146 | target, 147 | ".local-gpt-action-palette", 148 | ); 149 | textbox.focus(); 150 | await typeIntoPalette(textbox, "/system cont"); 151 | 152 | const systemItems = Array.from( 153 | target.querySelectorAll(".local-gpt-system-name"), 154 | ) 155 | .map((el) => el.textContent?.trim()) 156 | .filter(Boolean) as string[]; 157 | 158 | 
expect(systemItems).toEqual(["Continue writing"]); 159 | component.$destroy(); 160 | }); 161 | 162 | test("Shift+Enter inserts newline instead of submitting", async () => { 163 | const { target, component } = createComponent(); 164 | const submitSpy = vi.fn(); 165 | component.$on("submit", submitSpy); 166 | 167 | const textbox = requireElement( 168 | target, 169 | ".local-gpt-action-palette", 170 | ); 171 | textbox.focus(); 172 | textbox.textContent = "Hello"; 173 | setCaretToEnd(textbox); 174 | 175 | textbox.dispatchEvent( 176 | new KeyboardEvent("keydown", { 177 | key: "Enter", 178 | shiftKey: true, 179 | bubbles: true, 180 | }), 181 | ); 182 | 183 | // Simulate browser inserting a newline when default is not prevented 184 | textbox.textContent = "Hello\n"; 185 | textbox.dispatchEvent( 186 | new InputEvent("input", { 187 | bubbles: true, 188 | data: "\n", 189 | inputType: "insertLineBreak", 190 | }), 191 | ); 192 | await tick(); 193 | 194 | expect(submitSpy).not.toHaveBeenCalled(); 195 | expect(textbox.textContent?.includes("\n")).toBe(true); 196 | component.$destroy(); 197 | }); 198 | 199 | test("ArrowUp uses history only from the first line", async () => { 200 | addToPromptHistory("history-entry"); 201 | const { target, component } = createComponent(); 202 | const textbox = requireElement( 203 | target, 204 | ".local-gpt-action-palette", 205 | ); 206 | 207 | await typeIntoPalette(textbox, "line1\nline2"); 208 | setCaretAtIndex(textbox, 7); // position on second line 209 | 210 | textbox.dispatchEvent( 211 | new KeyboardEvent("keydown", { key: "ArrowUp", bubbles: true }), 212 | ); 213 | await tick(); 214 | 215 | expect(textbox.textContent).toBe("line1\nline2"); 216 | component.$destroy(); 217 | }); 218 | 219 | test("ArrowDown uses history only from the last line", async () => { 220 | addToPromptHistory("draft"); 221 | addToPromptHistory("history\nentry"); 222 | const { target, component } = createComponent(); 223 | const textbox = requireElement( 224 | target, 
225 | ".local-gpt-action-palette", 226 | ); 227 | 228 | textbox.focus(); 229 | setCaretToEnd(textbox); 230 | // Move to last history entry 231 | textbox.dispatchEvent( 232 | new KeyboardEvent("keydown", { key: "ArrowUp", bubbles: true }), 233 | ); 234 | await tick(); 235 | 236 | // Place cursor on the first line (not last line) 237 | setCaretAtIndex(textbox, 2); 238 | textbox.dispatchEvent( 239 | new KeyboardEvent("keydown", { key: "ArrowDown", bubbles: true }), 240 | ); 241 | await tick(); 242 | expect(textbox.textContent).toBe("history\nentry"); 243 | 244 | // Now place cursor at the end (last line) and allow forward history 245 | setCaretToEnd(textbox); 246 | textbox.dispatchEvent( 247 | new KeyboardEvent("keydown", { key: "ArrowDown", bubbles: true }), 248 | ); 249 | await tick(); 250 | expect(textbox.textContent).toBe(""); 251 | component.$destroy(); 252 | }); 253 | }); 254 | -------------------------------------------------------------------------------- /src/spinnerPlugin.ts: -------------------------------------------------------------------------------- 1 | import { RangeSetBuilder, EditorState } from "@codemirror/state"; 2 | import { 3 | Decoration, 4 | DecorationSet, 5 | EditorView, 6 | PluginValue, 7 | ViewPlugin, 8 | ViewUpdate, 9 | WidgetType, 10 | } from "@codemirror/view"; 11 | 12 | class LoaderWidget extends WidgetType { 13 | static readonly element: HTMLSpanElement = document.createElement("span"); 14 | 15 | static { 16 | this.element.addClasses(["local-gpt-loading", "local-gpt-dots"]); 17 | } 18 | 19 | toDOM(view: EditorView): HTMLElement { 20 | return LoaderWidget.element.cloneNode(true) as HTMLElement; 21 | } 22 | } 23 | 24 | class ThinkingWidget extends WidgetType { 25 | private static createDOMStructure(): HTMLElement { 26 | const container = document.createElement("div"); 27 | container.addClass("local-gpt-thinking-container"); 28 | 29 | // Add a line break element 30 | container.appendChild(document.createElement("br")); 31 | 32 | const 
textElement = document.createElement("span"); 33 | textElement.addClass("local-gpt-thinking"); 34 | textElement.textContent = "Thinking"; 35 | textElement.setAttribute("data-text", "Thinking"); 36 | 37 | container.appendChild(textElement); 38 | return container; 39 | } 40 | 41 | toDOM(view: EditorView): HTMLElement { 42 | return ThinkingWidget.createDOMStructure(); 43 | } 44 | } 45 | 46 | class ContentWidget extends WidgetType { 47 | private dom: HTMLElement | null = null; 48 | 49 | constructor(private text: string) { 50 | super(); 51 | } 52 | 53 | eq(other: ContentWidget) { 54 | return other.text === this.text; 55 | } 56 | 57 | updateText(newText: string) { 58 | if (this.dom && this.text !== newText) { 59 | const addedText = newText.slice(this.text.length); 60 | 61 | this.dom.textContent = newText.slice(0, -addedText.length); 62 | let lastSpan = this.dom.querySelector("span:last-child"); 63 | if (!lastSpan) { 64 | lastSpan = document.createElement("span"); 65 | this.dom.appendChild(lastSpan); 66 | } 67 | lastSpan.textContent = addedText; 68 | 69 | this.text = newText; 70 | } 71 | } 72 | 73 | toDOM(view: EditorView): HTMLElement { 74 | if (!this.dom) { 75 | this.dom = document.createElement("div"); 76 | this.dom.addClass("local-gpt-content"); 77 | this.updateText(this.text); 78 | } 79 | return this.dom; 80 | } 81 | } 82 | 83 | /** 84 | * Processed result of handling text with thinking tags 85 | */ 86 | interface ProcessedThinkingResult { 87 | // Whether we're in thinking mode 88 | isThinking: boolean; 89 | 90 | // The text to display (without thinking content) 91 | displayText: string; 92 | } 93 | 94 | export class SpinnerPlugin implements PluginValue { 95 | decorations: DecorationSet; 96 | private positions: Map< 97 | number, 98 | { isEndOfLine: boolean; widget: WidgetType; isThinking: boolean } 99 | >; 100 | 101 | constructor(private editorView: EditorView) { 102 | this.positions = new Map(); 103 | this.decorations = Decoration.none; 104 | } 105 | 106 | /** 107 | 
* Process text with potential tags and update UI accordingly 108 | * 109 | * @param text Raw text that may include tags 110 | * @param processFunc Optional function to process the display text 111 | * @param position Optional position to update specific spinner 112 | * @returns void 113 | */ 114 | processText( 115 | text: string, 116 | processFunc?: (text: string) => string, 117 | position?: number, 118 | ) { 119 | const result = this.processThinkingTags(text); 120 | 121 | // Update thinking state 122 | this.showThinking(result.isThinking, position); 123 | 124 | // Only update visible content if there's content to show 125 | if (result.displayText.trim()) { 126 | const displayText = processFunc 127 | ? processFunc(result.displayText) 128 | : result.displayText; 129 | this.updateContent(displayText, position); 130 | } 131 | } 132 | 133 | /** 134 | * Process text with potential tags 135 | * 136 | * @param text Raw text that may contain tags 137 | * @returns Object with parsed thinking state and display text 138 | */ 139 | private processThinkingTags(text: string): ProcessedThinkingResult { 140 | // Simple case - no thinking tags at all 141 | if (!text.startsWith("")) { 142 | return { 143 | isThinking: false, 144 | displayText: text, 145 | }; 146 | } 147 | 148 | // Check if we have a complete thinking tag 149 | const thinkingMatch = text.match( 150 | /^([\s\S]*?)(<\/think>\s*([\s\S]*))?$/, 151 | ); 152 | 153 | if (!thinkingMatch) { 154 | return { 155 | isThinking: true, 156 | displayText: "", // No display text while in thinking mode 157 | }; 158 | } 159 | 160 | // If we have a closing tag, extract content after it 161 | if (thinkingMatch[2]) { 162 | const afterThinkTag = thinkingMatch[3] || ""; 163 | return { 164 | isThinking: false, 165 | displayText: afterThinkTag, 166 | }; 167 | } 168 | 169 | // Open thinking tag without a closing tag 170 | return { 171 | isThinking: true, 172 | displayText: "", // No display text while in thinking mode 173 | }; 174 | } 175 | 176 
| show(position: number): () => void { 177 | const isEndOfLine = this.isPositionAtEndOfLine( 178 | this.editorView.state, 179 | position, 180 | ); 181 | this.positions.set(position, { 182 | isEndOfLine, 183 | widget: new LoaderWidget(), 184 | isThinking: false, 185 | }); 186 | this.updateDecorations(); 187 | return () => this.hide(position); 188 | } 189 | 190 | hide(position: number) { 191 | this.positions.delete(position); 192 | this.updateDecorations(); 193 | } 194 | 195 | showThinking(enabled: boolean, position?: number) { 196 | let updated = false; 197 | 198 | const updatePosition = (data: { 199 | widget: WidgetType; 200 | isThinking: boolean; 201 | }) => { 202 | if (enabled && !data.isThinking) { 203 | data.widget = new ThinkingWidget(); 204 | data.isThinking = true; 205 | updated = true; 206 | } else if (!enabled && data.isThinking) { 207 | data.widget = new LoaderWidget(); 208 | data.isThinking = false; 209 | updated = true; 210 | } 211 | }; 212 | 213 | if (position !== undefined) { 214 | const data = this.positions.get(position); 215 | if (data) updatePosition(data); 216 | } else { 217 | this.positions.forEach(updatePosition); 218 | } 219 | 220 | if (updated) { 221 | this.updateDecorations(); 222 | } 223 | } 224 | 225 | updateContent(text: string, position?: number) { 226 | let updated = false; 227 | const updatePosition = (data: { 228 | widget: WidgetType; 229 | isThinking: boolean; 230 | }) => { 231 | // Don't update content while in thinking mode 232 | if (data.isThinking) return; 233 | 234 | if (data.widget instanceof LoaderWidget) { 235 | data.widget = new ContentWidget(text); 236 | updated = true; 237 | } else if (data.widget instanceof ContentWidget) { 238 | data.widget.updateText(text); 239 | updated = true; 240 | } 241 | }; 242 | 243 | if (position !== undefined) { 244 | const data = this.positions.get(position); 245 | if (data) updatePosition(data); 246 | } else { 247 | this.positions.forEach(updatePosition); 248 | } 249 | 250 | if (updated) { 251 
| this.updateDecorations(); 252 | } 253 | } 254 | 255 | update(update: ViewUpdate) { 256 | if (update.docChanged || update.viewportChanged) { 257 | this.updateDecorations(); 258 | } 259 | } 260 | 261 | private updateDecorations() { 262 | const builder = new RangeSetBuilder(); 263 | this.positions.forEach((data, position) => { 264 | builder.add( 265 | position, 266 | position, 267 | Decoration.widget({ 268 | widget: data.widget, 269 | side: data.isEndOfLine ? 1 : -1, 270 | }), 271 | ); 272 | }); 273 | this.decorations = builder.finish(); 274 | this.editorView.requestMeasure(); 275 | } 276 | 277 | private isPositionAtEndOfLine( 278 | state: EditorState, 279 | position: number, 280 | ): boolean { 281 | return position === state.doc.lineAt(position).to; 282 | } 283 | } 284 | 285 | export const spinnerPlugin = ViewPlugin.fromClass(SpinnerPlugin, { 286 | decorations: (v) => v.decorations, 287 | }); 288 | -------------------------------------------------------------------------------- /src/ui/actionPalettePlugin.ts: -------------------------------------------------------------------------------- 1 | import { 2 | EditorSelection, 3 | RangeSetBuilder, 4 | StateEffect, 5 | StateField, 6 | } from "@codemirror/state"; 7 | import { 8 | Decoration, 9 | DecorationSet, 10 | EditorView, 11 | WidgetType, 12 | } from "@codemirror/view"; 13 | import { I18n } from "../i18n"; 14 | import ActionPalette from "./ActionPalette.svelte"; 15 | 16 | export interface ActionPaletteOptions { 17 | onSubmit: ( 18 | text: string, 19 | selectedFiles?: string[], 20 | systemPrompt?: string, 21 | ) => void; 22 | onCancel?: () => void; 23 | placeholder?: string; 24 | /** 25 | * Optional label showing currently selected provider/model 26 | */ 27 | modelLabel?: string; // kept for backward compat mapping, passed to providerLabel 28 | /** 29 | * Function to get available files for selection 30 | */ 31 | getFiles?: () => { path: string; basename: string; extension: string }[]; 32 | /** 33 | * Function to get 
available providers for selection 34 | */ 35 | getProviders?: () => Promise< 36 | { 37 | id: string; 38 | name: string; 39 | providerName: string; 40 | providerUrl?: string; 41 | }[] 42 | >; 43 | /** 44 | * Function to handle provider change 45 | */ 46 | onProviderChange?: (providerId: string) => Promise; 47 | /** 48 | * Currently selected provider id 49 | */ 50 | providerId?: string; 51 | /** 52 | * Function to get models for current provider 53 | */ 54 | getModels?: (providerId: string) => Promise<{ id: string; name: string }[]>; 55 | /** 56 | * Function to handle model change 57 | */ 58 | onModelChange?: (model: string) => Promise; 59 | /** 60 | * Function to handle creativity change (palette-only) 61 | * Accepts a key: "", "low", "medium", "high" 62 | */ 63 | onCreativityChange?: (creativityKey: string) => Promise | void; 64 | /** 65 | * Function to get available system prompts 66 | */ 67 | getSystemPrompts?: () => { name: string; system: string }[]; 68 | } 69 | 70 | class SvelteActionPaletteWidget extends WidgetType { 71 | private container: HTMLElement | null = null; 72 | private app: ActionPalette | null = null; 73 | 74 | constructor(private options: ActionPaletteOptions) { 75 | super(); 76 | } 77 | 78 | toDOM(view: EditorView): HTMLElement { 79 | this.container = document.createElement("div"); 80 | this.container.addClass("local-gpt-action-palette-container"); 81 | const mountTarget = document.createElement("div"); 82 | this.container.appendChild(mountTarget); 83 | 84 | this.app = new ActionPalette({ 85 | target: mountTarget, 86 | props: { 87 | placeholder: 88 | this.options.placeholder || 89 | I18n.t("commands.actionPalette.placeholder"), 90 | providerLabel: this.options.modelLabel || "", 91 | providerId: this.options.providerId, 92 | getFiles: this.options.getFiles, 93 | getProviders: this.options.getProviders, 94 | onProviderChange: this.options.onProviderChange, 95 | getModels: this.options.getModels, 96 | onModelChange: this.options.onModelChange, 97 | 
onCreativityChange: this.options.onCreativityChange, 98 | getSystemPrompts: this.options.getSystemPrompts, 99 | }, 100 | }); 101 | 102 | this.app.$on( 103 | "submit", 104 | ( 105 | e: CustomEvent<{ 106 | text: string; 107 | selectedFiles: string[]; 108 | systemPrompt?: string; 109 | }>, 110 | ) => { 111 | this.options.onSubmit?.( 112 | e.detail.text, 113 | e.detail.selectedFiles, 114 | e.detail.systemPrompt, 115 | ); 116 | }, 117 | ); 118 | this.app.$on("cancel", () => { 119 | this.options.onCancel?.(); 120 | }); 121 | 122 | return this.container; 123 | } 124 | 125 | destroy(dom: HTMLElement): void { 126 | this.app?.$destroy(); 127 | this.app = null; 128 | this.container = null; 129 | } 130 | } 131 | 132 | type SelectionRange = { from: number; to: number }; 133 | 134 | const ShowActionPaletteEffect = StateEffect.define<{ 135 | pos: number; 136 | options: ActionPaletteOptions; 137 | fakeSelections: SelectionRange[] | null; 138 | previousSelectionRanges: SelectionRange[] | null; 139 | previousCursor: number | null; 140 | }>(); 141 | 142 | const HideActionPaletteEffect = StateEffect.define(); 143 | 144 | interface SelectionSnapshot { 145 | fakeSelections: SelectionRange[] | null; 146 | previousSelectionRanges: SelectionRange[] | null; 147 | previousCursor: number | null; 148 | } 149 | 150 | interface ActionPaletteState extends SelectionSnapshot { 151 | deco: DecorationSet; 152 | pos: number | null; 153 | } 154 | 155 | function captureSelectionSnapshot(view: EditorView): SelectionSnapshot { 156 | const rangesAll = view.state.selection.ranges.map((r) => ({ 157 | from: r.from, 158 | to: r.to, 159 | })); 160 | const nonEmpty = rangesAll.filter((r) => r.from !== r.to); 161 | return { 162 | fakeSelections: nonEmpty.length ? nonEmpty : null, 163 | previousSelectionRanges: rangesAll.length ? 
rangesAll : null, 164 | previousCursor: view.state.selection.main.head, 165 | }; 166 | } 167 | 168 | function mapRanges( 169 | ranges: SelectionRange[] | null, 170 | changes: import("@codemirror/state").ChangeDesc, 171 | ): SelectionRange[] | null { 172 | return ranges 173 | ? ranges.map((r) => ({ 174 | from: changes.mapPos(r.from), 175 | to: changes.mapPos(r.to), 176 | })) 177 | : null; 178 | } 179 | 180 | function buildDecorations( 181 | pos: number, 182 | options: ActionPaletteOptions, 183 | fakeSelections: SelectionRange[] | null, 184 | ): DecorationSet { 185 | const builder = new RangeSetBuilder(); 186 | const widget = new SvelteActionPaletteWidget(options); 187 | builder.add(pos, pos, Decoration.widget({ widget, side: -1, block: true })); 188 | if (fakeSelections) { 189 | for (const r of fakeSelections) { 190 | builder.add( 191 | r.from, 192 | r.to, 193 | Decoration.mark({ class: "local-gpt-fake-selection" }), 194 | ); 195 | } 196 | } 197 | return builder.finish(); 198 | } 199 | 200 | const actionPaletteStateField = StateField.define({ 201 | create() { 202 | return { 203 | deco: Decoration.none, 204 | pos: null, 205 | fakeSelections: null, 206 | previousSelectionRanges: null, 207 | previousCursor: null, 208 | }; 209 | }, 210 | update(value, tr) { 211 | let { 212 | deco, 213 | pos, 214 | fakeSelections, 215 | previousSelectionRanges, 216 | previousCursor, 217 | } = value; 218 | 219 | if (tr.docChanged) { 220 | deco = deco.map(tr.changes); 221 | if (pos !== null) pos = tr.changes.mapPos(pos); 222 | fakeSelections = mapRanges(fakeSelections, tr.changes); 223 | previousSelectionRanges = mapRanges( 224 | previousSelectionRanges, 225 | tr.changes, 226 | ); 227 | if (previousCursor !== null) 228 | previousCursor = tr.changes.mapPos(previousCursor); 229 | } 230 | 231 | for (const e of tr.effects) { 232 | if (e.is(ShowActionPaletteEffect)) { 233 | pos = e.value.pos; 234 | fakeSelections = e.value.fakeSelections; 235 | previousSelectionRanges = 
e.value.previousSelectionRanges; 236 | previousCursor = e.value.previousCursor; 237 | deco = buildDecorations(pos, e.value.options, fakeSelections); 238 | } else if (e.is(HideActionPaletteEffect)) { 239 | pos = null; 240 | fakeSelections = null; 241 | previousSelectionRanges = null; 242 | previousCursor = null; 243 | deco = Decoration.none; 244 | } 245 | } 246 | 247 | return { 248 | deco, 249 | pos, 250 | fakeSelections, 251 | previousSelectionRanges, 252 | previousCursor, 253 | }; 254 | }, 255 | provide: (f) => EditorView.decorations.from(f, (v) => v.deco), 256 | }); 257 | 258 | export const actionPalettePlugin = [actionPaletteStateField]; 259 | 260 | export function showActionPalette( 261 | view: EditorView, 262 | pos: number, 263 | options: ActionPaletteOptions, 264 | ) { 265 | // Capture current selection ranges and cursor before showing 266 | const { fakeSelections, previousSelectionRanges, previousCursor } = 267 | captureSelectionSnapshot(view); 268 | 269 | view.dispatch({ 270 | effects: ShowActionPaletteEffect.of({ 271 | pos, 272 | options, 273 | fakeSelections, 274 | previousSelectionRanges, 275 | previousCursor, 276 | }), 277 | }); 278 | } 279 | 280 | export function hideActionPalette(view: EditorView) { 281 | // Restore previous selection/caret (mapped across edits). Read from field. 
282 | const state = view.state.field(actionPaletteStateField, false); 283 | if (state) { 284 | if ( 285 | state.previousSelectionRanges && 286 | state.previousSelectionRanges.length 287 | ) { 288 | const selection = EditorSelection.create( 289 | state.previousSelectionRanges.map((r) => 290 | EditorSelection.range(r.from, r.to), 291 | ), 292 | ); 293 | view.dispatch({ selection }); 294 | view.focus(); 295 | } else if (state.previousCursor !== null) { 296 | view.dispatch({ selection: { anchor: state.previousCursor } }); 297 | view.focus(); 298 | } 299 | } 300 | 301 | view.dispatch({ effects: HideActionPaletteEffect.of(null) }); 302 | } 303 | -------------------------------------------------------------------------------- /src/rag.ts: -------------------------------------------------------------------------------- 1 | import { TFile, Vault, MetadataCache } from "obsidian"; 2 | import { IAIDocument, IAIProvidersRetrievalResult } from "./interfaces"; 3 | import { logger } from "./logger"; 4 | import { extractTextFromPDF } from "./processors/pdf"; 5 | import { fileCache } from "./indexedDB"; 6 | 7 | const MAX_DEPTH = 10; 8 | 9 | export interface ProcessingContext { 10 | vault: Vault; 11 | metadataCache: MetadataCache; 12 | activeFile: TFile; 13 | } 14 | 15 | export async function startProcessing( 16 | linkedFiles: TFile[], 17 | vault: Vault, 18 | metadataCache: MetadataCache, 19 | activeFile: TFile, 20 | updateCompletedSteps?: (steps: number) => void, 21 | ): Promise> { 22 | logger.info("Starting RAG processing"); 23 | const processedDocs = new Map(); 24 | const context: ProcessingContext = { vault, metadataCache, activeFile }; 25 | 26 | await Promise.all( 27 | linkedFiles.map(async (file) => { 28 | await processDocumentForRAG(file, context, processedDocs, 0, false); 29 | updateCompletedSteps?.(1); 30 | }), 31 | ); 32 | 33 | return processedDocs; 34 | } 35 | 36 | export async function getFileContent( 37 | file: TFile, 38 | vault: Vault, 39 | ): Promise { 40 | if 
(file.extension === "pdf") { 41 | const cachedContent = await fileCache.getContent(file.path); 42 | if (cachedContent?.mtime === file.stat.mtime) { 43 | return cachedContent.content; 44 | } 45 | 46 | const arrayBuffer = await vault.readBinary(file); 47 | const pdfContent = await extractTextFromPDF(arrayBuffer); 48 | await fileCache.setContent(file.path, { 49 | mtime: file.stat.mtime, 50 | content: pdfContent, 51 | }); 52 | return pdfContent; 53 | } 54 | 55 | return vault.cachedRead(file); 56 | } 57 | 58 | export async function processDocumentForRAG( 59 | file: TFile, 60 | context: ProcessingContext, 61 | processedDocs: Map, 62 | depth: number, 63 | isBacklink: boolean, 64 | ): Promise> { 65 | if ( 66 | depth > MAX_DEPTH || 67 | processedDocs.has(file.path) || 68 | file.path === context.activeFile.path 69 | ) { 70 | return processedDocs; 71 | } 72 | 73 | try { 74 | const content = await getFileContent(file, context.vault); 75 | processedDocs.set(file.path, { 76 | content: content, 77 | meta: { 78 | source: file.path, 79 | basename: file.basename, 80 | stat: file.stat, 81 | depth, 82 | isBacklink, 83 | }, 84 | }); 85 | 86 | if (file.extension === "md" && !isBacklink) { 87 | const linkedFiles = getLinkedFiles( 88 | content, 89 | context.vault, 90 | context.metadataCache, 91 | file.path, 92 | ); 93 | const backlinkFiles = getBacklinkFiles( 94 | file, 95 | context, 96 | processedDocs, 97 | ); 98 | 99 | await Promise.all([ 100 | ...linkedFiles.map((linkedFile) => 101 | processDocumentForRAG( 102 | linkedFile, 103 | context, 104 | processedDocs, 105 | depth + 1, 106 | false, 107 | ), 108 | ), 109 | ...backlinkFiles.map((backlinkFile) => 110 | processDocumentForRAG( 111 | backlinkFile, 112 | context, 113 | processedDocs, 114 | depth, 115 | true, 116 | ), 117 | ), 118 | ]); 119 | } 120 | } catch (error) { 121 | console.error(`Error processing document ${file.path}:`, error); 122 | } 123 | 124 | return processedDocs; 125 | } 126 | 127 | export function getLinkedFiles( 128 | 
content: string, 129 | vault: Vault, 130 | metadataCache: MetadataCache, 131 | currentFilePath: string, 132 | ): TFile[] { 133 | const linkRegex = /\[\[([^\]|#]+)(?:#[^\]|]+)?(?:\|[^\]]+)?\]\]/g; 134 | 135 | return Array.from(content.matchAll(linkRegex), (match) => match[1]) 136 | .map((linkText) => { 137 | const linkPath = metadataCache.getFirstLinkpathDest( 138 | linkText, 139 | currentFilePath, 140 | ); 141 | return linkPath ? vault.getAbstractFileByPath(linkPath.path) : null; 142 | }) 143 | .filter( 144 | (file): file is TFile => 145 | file instanceof TFile && 146 | (file.extension === "md" || file.extension === "pdf"), 147 | ); 148 | } 149 | 150 | export function getBacklinkFiles( 151 | file: TFile, 152 | context: ProcessingContext, 153 | processedDocs: Map, 154 | ): TFile[] { 155 | const resolvedLinks = context.metadataCache.resolvedLinks || {}; 156 | const backlinks: TFile[] = []; 157 | 158 | for (const [sourcePath, links] of Object.entries(resolvedLinks)) { 159 | if (processedDocs.has(sourcePath) || !links?.[file.path]) { 160 | continue; 161 | } 162 | const backlinkFile = context.vault.getAbstractFileByPath( 163 | sourcePath, 164 | ) as TFile | null; 165 | if (backlinkFile?.extension === "md") { 166 | backlinks.push(backlinkFile); 167 | } 168 | } 169 | 170 | return backlinks; 171 | } 172 | 173 | export async function searchDocuments( 174 | query: string, 175 | documents: IAIDocument[], 176 | aiProviders: any, 177 | embeddingProvider: any, 178 | abortController: AbortController, 179 | updateCompletedSteps: (steps: number) => void, 180 | addTotalProgressSteps: (steps: number) => void, 181 | contextLimit: number, 182 | ): Promise { 183 | if (abortController?.signal.aborted) return ""; 184 | 185 | try { 186 | let lastProcessedChunks = 0; 187 | let initialized = false; 188 | 189 | logger.info("Passed contextLimit for context", contextLimit); 190 | 191 | const results = await aiProviders.retrieve({ 192 | query, 193 | documents, 194 | embeddingProvider, 195 | 
onProgress: (progress: any) => { 196 | if (abortController?.signal.aborted) return; 197 | // Initialize dynamic steps based on total chunks when first progress event arrives 198 | if (!initialized) { 199 | initialized = true; 200 | // Allocate steps for each chunk 201 | addTotalProgressSteps?.(progress.totalChunks || 0); 202 | } 203 | const processed = progress.processedChunks?.length || 0; 204 | if (processed > lastProcessedChunks) { 205 | updateCompletedSteps(processed - lastProcessedChunks); 206 | lastProcessedChunks = processed; 207 | } 208 | }, 209 | abortController, 210 | }); 211 | // Fallback: if no progress events fired but we got results, mimic one step for backward compatibility 212 | if (!initialized && results?.length) { 213 | updateCompletedSteps(1); 214 | } 215 | return formatResults(results, contextLimit); 216 | } catch (error) { 217 | if (!abortController?.signal.aborted) { 218 | console.error("Error in searchDocuments:", error); 219 | } 220 | return ""; 221 | } 222 | } 223 | 224 | function formatResults( 225 | results: IAIProvidersRetrievalResult[], 226 | contextLimit: number, 227 | ): string { 228 | if (!results?.length) return ""; 229 | 230 | const groupedResults = groupResultsByBasename(results); 231 | const sortedGroups = sortResultGroups(groupedResults); 232 | const { text, length } = formatGroupedResults(sortedGroups, contextLimit); 233 | 234 | logger.info("Total length of context", length); 235 | 236 | return text; 237 | } 238 | 239 | function groupResultsByBasename( 240 | results: IAIProvidersRetrievalResult[], 241 | ): Map { 242 | return results.reduce((map, result) => { 243 | const basename = result.document.meta?.basename; 244 | const existing = map.get(basename) || []; 245 | existing.push(result); 246 | map.set(basename, existing); 247 | return map; 248 | }, new Map()); 249 | } 250 | 251 | function sortResultGroups( 252 | groupedResults: Map, 253 | ): Array<[string, IAIProvidersRetrievalResult[]]> { 254 | return 
Array.from(groupedResults.entries()).sort( 255 | (a, b) => 256 | (b[1][0]?.document.meta?.stat?.ctime || 0) - 257 | (a[1][0]?.document.meta?.stat?.ctime || 0), 258 | ); 259 | } 260 | 261 | function formatGroupedResults( 262 | groups: Array<[string, IAIProvidersRetrievalResult[]]>, 263 | contextLimit: number, 264 | ): { text: string; length: number } { 265 | let formattedResults = ""; 266 | let totalLength = 0; 267 | 268 | for (const [basename, groupResults] of groups) { 269 | if (totalLength >= contextLimit) break; 270 | 271 | formattedResults += `[[${basename}]]\n`; 272 | const { text, length } = formatSingleGroup( 273 | groupResults, 274 | contextLimit, 275 | totalLength, 276 | ); 277 | formattedResults += text; 278 | totalLength += length; 279 | } 280 | 281 | const trimmed = formattedResults.trim(); 282 | return { text: trimmed, length: trimmed.length }; 283 | } 284 | 285 | function formatSingleGroup( 286 | groupResults: IAIProvidersRetrievalResult[], 287 | contextLimit: number, 288 | currentLength: number, 289 | ): { text: string; length: number } { 290 | let groupText = ""; 291 | let addedLength = 0; 292 | const sortedResults = [...groupResults].sort((a, b) => b.score - a.score); 293 | 294 | for (const result of sortedResults) { 295 | const content = result.content.trim(); 296 | const projectedLength = 297 | currentLength + addedLength + content.length + 2; 298 | if (!content || projectedLength >= contextLimit) { 299 | continue; 300 | } 301 | groupText += `${content}\n\n`; 302 | addedLength += content.length + 2; 303 | } 304 | 305 | return { text: groupText, length: addedLength }; 306 | } 307 | -------------------------------------------------------------------------------- /styles.css: -------------------------------------------------------------------------------- 1 | @import "@obsidian-ai-providers/sdk/styles.css"; 2 | .local-gpt-settings-separator { 3 | margin: 1em 0; 4 | height: 1em; 5 | } 6 | 7 | /* Advanced settings styling (match AI Providers 
look-and-feel) */ 8 | .local-gpt-advanced-toggle { 9 | margin-top: 1.5em; 10 | border: 0; 11 | } 12 | 13 | .local-gpt-advanced-group { 14 | margin-top: 0.75em; 15 | padding: 1em; 16 | border: 1px solid var(--background-modifier-border); 17 | border-radius: var(--radius-m); 18 | } 19 | 20 | .local-gpt-advanced-group > h4 { 21 | margin: 0 0 0.75em; 22 | font-weight: 700; 23 | } 24 | 25 | .local-gpt-advanced-group .setting-item { 26 | border-top: 1px solid var(--background-modifier-border); 27 | margin-top: 0.5em; 28 | padding-top: 0.5em; 29 | } 30 | 31 | .local-gpt-content { 32 | color: var(--text-faint); 33 | display: block; 34 | white-space: pre-wrap; 35 | word-break: break-word; 36 | } 37 | 38 | .local-gpt-content > span:last-child { 39 | display: inline; 40 | background-image: linear-gradient( 41 | to right, 42 | var(--text-faint) 0%, 43 | var(--interactive-accent) 100% 44 | ); 45 | background-clip: text; 46 | -webkit-text-fill-color: transparent; 47 | box-decoration-break: slice; 48 | } 49 | 50 | .local-gpt-loading { 51 | display: inline-block; 52 | overflow: hidden; 53 | height: 1.3em; 54 | line-height: 1.5em; 55 | vertical-align: text-bottom; 56 | margin: -0.3em 0.3em 0; 57 | } 58 | 59 | .local-gpt-thinking-container { 60 | display: block; 61 | opacity: 0; 62 | animation: local-gpt-fadeIn 0.1s ease-out forwards; 63 | } 64 | 65 | @keyframes local-gpt-fadeIn { 66 | from { 67 | opacity: 0; 68 | } 69 | to { 70 | opacity: 1; 71 | } 72 | } 73 | 74 | .local-gpt-thinking { 75 | position: relative; 76 | display: inline-block; 77 | color: var(--interactive-accent); 78 | font-style: italic; 79 | animation: local-gpt-pulse 1.7s ease-in-out infinite; 80 | } 81 | 82 | .local-gpt-thinking::before { 83 | content: attr(data-text); 84 | position: absolute; 85 | top: 0; 86 | left: 0; 87 | right: 0; 88 | bottom: 0; 89 | color: var(--interactive-accent-hover); 90 | display: inline-block; 91 | white-space: nowrap; 92 | filter: blur(2px); 93 | mask-image: linear-gradient( 94 | to 
right, 95 | transparent 30%, 96 | black 50%, 97 | transparent 70% 98 | ); 99 | mask-size: 200% 100%; 100 | mask-position: -50% 0; 101 | animation: local-gpt-maskMove 1.7s linear infinite reverse; 102 | mix-blend-mode: color-dodge; 103 | } 104 | 105 | .local-gpt-thinking::after { 106 | content: ""; 107 | display: inline-block; 108 | width: 1em; 109 | text-align: left; 110 | animation: local-gpt-dots 1.7s infinite; 111 | opacity: 1; 112 | } 113 | 114 | /* Inline input container in editor */ 115 | .cm-editor .local-gpt-action-palette-container { 116 | position: relative; 117 | display: block; 118 | /* Ensure palette ignores any surrounding list/text indentation */ 119 | padding: 0; 120 | text-indent: 0; 121 | } 122 | 123 | /* Override Obsidian CM6 container resets for our block widget */ 124 | .markdown-source-view.mod-cm6 125 | .cm-content 126 | > .local-gpt-action-palette-container[contenteditable="false"] { 127 | /* Allow elements to paint outside container (badge, shadows) */ 128 | contain: none !important; 129 | overflow: visible; 130 | /* Restore margins wiped by .cm-content > * { margin: 0 !important } */ 131 | margin: 0.3rem -10px 1.5rem !important; 132 | } 133 | 134 | .cm-editor .local-gpt-action-palette-shell { 135 | display: inline-flex; 136 | width: 100%; 137 | padding: var(--size-2-1) 0; 138 | position: relative; 139 | z-index: var(--layer-popover); 140 | } 141 | 142 | .cm-editor .local-gpt-action-palette { 143 | width: 100%; 144 | box-sizing: border-box; 145 | padding: var(--size-4-2) var(--size-4-2); 146 | border-radius: var(--radius-m); 147 | border: 2px solid var(--background-modifier-border); 148 | background: var(--background-primary); 149 | color: var(--text-normal); 150 | font: inherit; 151 | outline: none; 152 | box-shadow: var(--shadow-s); 153 | transition: 154 | border-color var(--anim-duration-moderate) ease, 155 | box-shadow var(--anim-duration-moderate) ease; 156 | min-height: var(--line-height-normal); 157 | line-height: 
var(--line-height-normal); 158 | z-index: var(--layer-popover); 159 | } 160 | 161 | /* Contenteditable placeholder */ 162 | .cm-editor .local-gpt-action-palette[contenteditable="true"]:empty::before { 163 | content: attr(data-placeholder); 164 | color: var(--text-faint); 165 | pointer-events: none; 166 | } 167 | 168 | /* File mention styling */ 169 | .cm-editor .local-gpt-action-palette .file-mention { 170 | background: var(--interactive-accent); 171 | color: var(--text-on-accent); 172 | padding: var(--size-2-1) var(--size-4-2); 173 | border-radius: var(--radius-s); 174 | cursor: pointer; 175 | user-select: none; 176 | transition: background-color var(--anim-duration-fast) ease; 177 | margin: 0 var(--size-2-1); 178 | display: inline-block; 179 | } 180 | 181 | .cm-editor .local-gpt-action-palette .file-mention:hover { 182 | background: var(--interactive-accent-hover); 183 | } 184 | 185 | .cm-editor .local-gpt-action-palette .file-mention:active { 186 | background: var(--interactive-accent-hover); 187 | transform: translateY(var(--size-2-1)); 188 | } 189 | 190 | /* Generic dropdown container/items (shared by file/command/provider) */ 191 | .local-gpt-dropdown { 192 | position: absolute; 193 | top: 100%; 194 | left: 0; 195 | right: 0; 196 | margin-top: calc(var(--line-height-normal) * -1em); 197 | padding-top: calc(var(--line-height-normal) * 1em); 198 | background: var(--background-primary); 199 | border: var(--border-width) solid var(--background-modifier-border); 200 | border-radius: 0 0 var(--radius-l) var(--radius-l); 201 | box-shadow: var(--shadow-l); 202 | max-height: 240px; 203 | overflow-y: auto; 204 | scroll-behavior: smooth; 205 | z-index: 1; 206 | } 207 | 208 | .local-gpt-dropdown-item { 209 | display: grid; 210 | padding: var(--size-4-2) var(--size-4-3); 211 | cursor: pointer; 212 | border-bottom: var(--border-width) solid 213 | var(--background-modifier-border-hover); 214 | transition: background-color var(--anim-duration-fast) ease; 215 | font-size: 
var(--font-ui-small); 216 | line-height: var(--line-height-tight); 217 | } 218 | 219 | .local-gpt-dropdown-item:last-child { 220 | border-bottom: none; 221 | } 222 | 223 | .local-gpt-dropdown-item:hover, 224 | .local-gpt-dropdown-item.local-gpt-selected { 225 | background: var(--background-modifier-hover); 226 | } 227 | 228 | .local-gpt-dropdown-item.local-gpt-selected { 229 | background: var(--interactive-accent); 230 | color: var(--text-on-accent); 231 | } 232 | 233 | .local-gpt-file-name { 234 | display: block; 235 | font-weight: var(--font-ui-medium); 236 | margin-bottom: var(--size-2-1); 237 | } 238 | 239 | .local-gpt-file-path { 240 | display: block; 241 | font-size: var(--font-ui-smaller); 242 | color: var(--text-faint); 243 | white-space: nowrap; 244 | overflow: hidden; 245 | text-overflow: ellipsis; 246 | } 247 | 248 | .local-gpt-dropdown-item.local-gpt-selected .local-gpt-file-path { 249 | color: var(--text-on-accent); 250 | opacity: 0.8; 251 | } 252 | 253 | .local-gpt-command-name { 254 | display: block; 255 | font-weight: var(--font-ui-medium); 256 | margin-bottom: var(--size-2-1); 257 | } 258 | 259 | .local-gpt-command-description { 260 | display: block; 261 | font-size: var(--font-ui-smaller); 262 | color: var(--text-faint); 263 | white-space: nowrap; 264 | overflow: hidden; 265 | text-overflow: ellipsis; 266 | } 267 | 268 | .local-gpt-dropdown-item.local-gpt-selected .local-gpt-command-description { 269 | color: var(--text-on-accent); 270 | opacity: 0.8; 271 | } 272 | 273 | /* Command mention styling */ 274 | .cm-editor .local-gpt-action-palette .command-mention { 275 | background: var(--color-accent); 276 | color: var(--text-on-accent); 277 | padding: var(--size-2-1) var(--size-4-2); 278 | border-radius: var(--radius-s); 279 | cursor: pointer; 280 | user-select: none; 281 | transition: background-color var(--anim-duration-fast) ease; 282 | margin: 0 var(--size-2-1); 283 | display: inline-block; 284 | } 285 | 286 | .cm-editor 
.local-gpt-action-palette .command-mention:hover { 287 | background: var(--color-accent-hover); 288 | } 289 | 290 | .cm-editor .local-gpt-action-palette .command-mention:active { 291 | background: var(--color-accent-hover); 292 | transform: translateY(var(--size-2-1)); 293 | } 294 | 295 | .cm-editor .local-gpt-action-palette:focus { 296 | border-color: var(--interactive-accent); 297 | box-shadow: 298 | 0 0 0 2px var(--interactive-accent), 299 | var(--shadow-s); 300 | } 301 | 302 | /* Fake selection while inline input is open - match native selection color */ 303 | .cm-editor .local-gpt-fake-selection { 304 | /* Visually extend selection beyond line box without affecting layout */ 305 | box-shadow: 306 | inset 0 0 0 9999px var(--text-selection), 307 | 0 0 0 calc((var(--line-height-normal) - 1) / 4 * 1em) 308 | var(--text-selection); 309 | 310 | -webkit-box-decoration-break: clone; 311 | box-decoration-break: clone; 312 | } 313 | 314 | @keyframes local-gpt-dots { 315 | 0% { 316 | content: ""; 317 | opacity: 1; 318 | } 319 | 59% { 320 | content: ""; 321 | opacity: 1; 322 | } 323 | 60% { 324 | content: "."; 325 | opacity: 1; 326 | } 327 | 70% { 328 | content: ".."; 329 | opacity: 1; 330 | } 331 | 80% { 332 | content: "..."; 333 | opacity: 1; 334 | } 335 | 100% { 336 | opacity: 0.5; 337 | content: "..."; 338 | } 339 | } 340 | 341 | @keyframes local-gpt-pulse { 342 | 0% { 343 | opacity: 0.8; 344 | } 345 | 50% { 346 | opacity: 1; 347 | } 348 | 100% { 349 | opacity: 0.8; 350 | } 351 | } 352 | 353 | .local-gpt-loading::after { 354 | display: inline-table; 355 | white-space: pre; 356 | text-align: left; 357 | } 358 | 359 | .local-gpt-loading.local-gpt-dots::after { 360 | content: "⠋\A⠙\A⠹\A⠸\A⠼\A⠴\A⠦\A⠧\A⠇\A⠏"; 361 | animation: local-gpt-spin10 1s steps(10) infinite; 362 | } 363 | 364 | .local-gpt-status { 365 | position: relative; 366 | } 367 | 368 | .local-gpt-status::before { 369 | content: attr(data-text); 370 | background-image: linear-gradient( 371 | to right, 372 | 
var(--status-bar-text-color) 30%, 373 | var(--interactive-accent-hover) 50%, 374 | var(--status-bar-text-color) 70% 375 | ); 376 | background-clip: text; 377 | -webkit-text-fill-color: transparent; 378 | box-decoration-break: slice; 379 | 380 | display: inline-block; 381 | overflow: hidden; 382 | white-space: nowrap; 383 | animation: 384 | local-gpt-expandText 0.3s ease-out forwards, 385 | local-gpt-gradientMove 1.7s linear infinite; 386 | background-size: 200% 100%; 387 | background-position: -50% 0; 388 | } 389 | 390 | .local-gpt-status::after { 391 | content: attr(data-text); 392 | position: absolute; 393 | padding: 10px; 394 | margin-left: -10px; 395 | color: var(--interactive-accent-hover); 396 | display: inline-block; 397 | white-space: nowrap; 398 | filter: blur(1px); 399 | mask-image: linear-gradient( 400 | to right, 401 | transparent 30%, 402 | black 50%, 403 | transparent 70% 404 | ); 405 | mask-size: 200% 100%; 406 | mask-position: -50% 0; 407 | animation: local-gpt-maskMove 1.7s linear infinite; 408 | mix-blend-mode: color-dodge; 409 | } 410 | 411 | @keyframes local-gpt-spin10 { 412 | to { 413 | transform: translateY(-15em); 414 | } 415 | } 416 | 417 | @keyframes local-gpt-expandText { 418 | from { 419 | max-width: 0; 420 | } 421 | to { 422 | max-width: 100%; 423 | } 424 | } 425 | 426 | @keyframes local-gpt-gradientMove { 427 | 0% { 428 | background-position: -50% 0; 429 | } 430 | 100% { 431 | background-position: 150% 0; 432 | } 433 | } 434 | 435 | @keyframes local-gpt-maskMove { 436 | 0% { 437 | mask-position: -50% 0; 438 | } 439 | 100% { 440 | mask-position: 150% 0; 441 | } 442 | } 443 | 444 | /* Provider dropdown: removed — use .local-gpt-dropdown */ 445 | 446 | .local-gpt-provider-name { 447 | display: block; 448 | font-weight: var(--font-ui-medium); 449 | margin-bottom: var(--size-2-1); 450 | } 451 | 452 | .local-gpt-provider-url { 453 | display: block; 454 | font-size: var(--font-ui-smaller); 455 | color: var(--text-faint); 456 | white-space: 
nowrap; 457 | max-width: 50%; 458 | overflow: hidden; 459 | text-overflow: ellipsis; 460 | } 461 | 462 | .local-gpt-dropdown-item.local-gpt-selected .local-gpt-provider-url { 463 | color: var(--text-on-accent); 464 | opacity: 0.8; 465 | } 466 | 467 | .cm-editor .local-gpt-provider-badge { 468 | position: absolute; 469 | left: 0; 470 | right: 0; 471 | bottom: calc(var(--line-height-normal) * -1em); 472 | padding: var(--size-2-1) var(--size-4-2); 473 | border-radius: var(--radius-m); 474 | color: var(--text-muted); 475 | background: var(--background-secondary); 476 | border: var(--border-width) solid var(--background-modifier-border); 477 | font-size: var(--font-ui-medium); 478 | font-weight: var(--font-light); 479 | pointer-events: none; 480 | z-index: 0; 481 | display: grid; 482 | align-items: end; 483 | justify-items: end; 484 | height: 3em; 485 | } 486 | 487 | .cm-editor .local-gpt-provider-badge-label { 488 | justify-self: end; 489 | align-self: end; 490 | font-size: var(--font-smallest); 491 | line-height: var(--line-height-normal); 492 | color: var(--text-faint); 493 | text-shadow: 494 | var(--background-primary) 0 0 1px, 495 | var(--background-primary) 0 0 1px; 496 | transition: 497 | color 250ms ease-in-out, 498 | text-shadow 250ms ease-in-out; 499 | transition-delay: 0s; 500 | } 501 | 502 | .cm-editor .local-gpt-provider-badge-label.local-gpt-badge-highlight { 503 | color: var(--text-accent); 504 | text-shadow: 505 | 0 0 1px var(--background-primary), 506 | 0 0 2px var(--background-primary), 507 | 0 0 4px var(--color-accent-2); 508 | transition-delay: 0.5s; 509 | } 510 | 511 | .local-gpt-provider-header { 512 | display: flex; 513 | align-items: baseline; 514 | justify-content: space-between; 515 | gap: var(--size-2-2); 516 | margin-bottom: var(--size-2-1); 517 | } 518 | 519 | .local-gpt-provider-model { 520 | display: block; 521 | font-size: var(--font-ui-smaller); 522 | color: var(--text-faint); 523 | white-space: nowrap; 524 | overflow: hidden; 525 | 
text-overflow: ellipsis; 526 | } 527 | 528 | .local-gpt-dropdown-item.local-gpt-selected .local-gpt-provider-model { 529 | color: var(--text-on-accent); 530 | opacity: 0.9; 531 | } 532 | 533 | /* Actions list drag handle and SortableJS states */ 534 | .local-gpt-action-row { 535 | display: flex; 536 | align-items: center; 537 | cursor: grab; 538 | } 539 | 540 | .local-gpt-drag-handle { 541 | display: flex; 542 | align-items: center; 543 | margin-inline-end: var(--size-4-2); 544 | color: var(--text-faint); 545 | } 546 | 547 | .local-gpt-drag-handle:hover { 548 | color: var(--text-muted); 549 | } 550 | 551 | .local-gpt-sortable-chosen, 552 | .local-gpt-sortable-drag { 553 | background: var(--background-primary); 554 | border-radius: var(--radius-m); 555 | cursor: grabbing; 556 | } 557 | 558 | .local-gpt-sortable-ghost { 559 | opacity: 0; 560 | } 561 | 562 | /* Settings list: ensure first item keeps top padding while dragging */ 563 | .local-gpt-actions-container .setting-item:first-child { 564 | padding-top: var(--size-4-2); 565 | } 566 | 567 | /* Override system spacing for first child inside setting-item when it is our drag handle */ 568 | .setting-item > .local-gpt-drag-handle { 569 | margin-inline-end: var(--size-4-2); 570 | } 571 | 572 | /* Drop animation: subtle squish with a soft hover-tinted highlight */ 573 | .local-gpt-drop-animate { 574 | position: relative; 575 | transform-origin: center; 576 | animation: local-gpt-goo-in var(--anim-duration-moderate) ease-out both; 577 | will-change: transform; 578 | } 579 | 580 | .local-gpt-drop-animate::before { 581 | content: ""; 582 | position: absolute; 583 | inset: 0; 584 | border-radius: inherit; 585 | background: var(--background-modifier-hover); 586 | opacity: 0; 587 | pointer-events: none; 588 | animation: local-gpt-goo-glow var(--anim-duration-moderate) ease-out both; 589 | } 590 | 591 | @keyframes local-gpt-goo-in { 592 | 0% { 593 | transform: translateY(-6px) scaleY(0.96); 594 | } 595 | 35% { 596 | 
transform: translateY(2px) scaleY(1.04); 597 | } 598 | 60% { 599 | transform: translateY(-2px) scaleY(0.99); 600 | } 601 | 100% { 602 | transform: translateY(0) scaleY(1); 603 | } 604 | } 605 | 606 | /* While dragging: look slightly larger and pressed-in, without distorting horizontally */ 607 | .local-gpt-sortable-drag { 608 | position: relative; 609 | transform-origin: center; 610 | box-shadow: var(--shadow-m); 611 | z-index: var(--layer-popover); 612 | will-change: transform; 613 | } 614 | 615 | .local-gpt-sortable-drag::before { 616 | content: ""; 617 | position: absolute; 618 | inset: 0; 619 | border-radius: inherit; 620 | background: var(--background-modifier-hover); 621 | opacity: 0.25; 622 | pointer-events: none; 623 | } 624 | 625 | /* Neighbor nudges (pure transforms: no layout shift) */ 626 | .local-gpt-drop-neighbor-prev { 627 | animation: local-gpt-goo-nudge-prev var(--anim-duration-fast) ease-out both; 628 | will-change: transform; 629 | } 630 | 631 | .local-gpt-drop-neighbor-next { 632 | animation: local-gpt-goo-nudge-next var(--anim-duration-fast) ease-out both; 633 | will-change: transform; 634 | } 635 | 636 | @keyframes local-gpt-goo-nudge-prev { 637 | 0% { 638 | transform: translateY(0); 639 | } 640 | 30% { 641 | transform: translateY(-3px); 642 | } 643 | 100% { 644 | transform: translateY(0); 645 | } 646 | } 647 | 648 | @keyframes local-gpt-goo-nudge-next { 649 | 0% { 650 | transform: translateY(0); 651 | } 652 | 30% { 653 | transform: translateY(3px); 654 | } 655 | 100% { 656 | transform: translateY(0); 657 | } 658 | } 659 | 660 | @keyframes local-gpt-goo-glow { 661 | 0% { 662 | opacity: 0.35; 663 | } 664 | 100% { 665 | opacity: 0; 666 | } 667 | } 668 | -------------------------------------------------------------------------------- /src/LocalGPTSettingTab.ts: -------------------------------------------------------------------------------- 1 | import { App, Notice, PluginSettingTab, Setting, setIcon } from "obsidian"; 2 | import { 
DEFAULT_SETTINGS } from "defaultSettings"; 3 | import LocalGPT from "./main"; 4 | import { LocalGPTAction } from "./interfaces"; 5 | import { waitForAI } from "@obsidian-ai-providers/sdk"; 6 | import { I18n } from "./i18n"; 7 | import Sortable from "sortablejs"; 8 | 9 | const SEPARATOR = "✂️"; 10 | 11 | function escapeTitle(title?: string) { 12 | if (!title) { 13 | return ""; 14 | } 15 | 16 | return title 17 | .replace(/&/g, "&") 18 | .replace(//g, ">") 20 | .replace(/"/g, """) 21 | .replace(/'/g, "'"); 22 | } 23 | 24 | export class LocalGPTSettingTab extends PluginSettingTab { 25 | plugin: LocalGPT; 26 | editEnabled = false; 27 | editExistingAction?: LocalGPTAction; 28 | modelsOptions: Record = {}; 29 | changingOrder = false; 30 | // Controls visibility of the Advanced settings section 31 | private isAdvancedMode = false; 32 | // Guard to require a second click before destructive reset 33 | private isConfirmingReset = false; 34 | 35 | constructor(app: App, plugin: LocalGPT) { 36 | super(app, plugin); 37 | this.plugin = plugin; 38 | } 39 | 40 | async display(): Promise { 41 | const { containerEl } = this; 42 | 43 | containerEl.empty(); 44 | 45 | try { 46 | const aiProvidersWaiter = await waitForAI(); 47 | const aiProvidersResponse = await aiProvidersWaiter.promise; 48 | 49 | const providers = aiProvidersResponse.providers.reduce( 50 | ( 51 | acc: Record, 52 | provider: { id: string; name: string; model?: string }, 53 | ) => ({ 54 | ...acc, 55 | [provider.id]: provider.model 56 | ? 
[provider.name, provider.model].join(" ~ ") 57 | : provider.name, 58 | }), 59 | { 60 | "": "", 61 | }, 62 | ); 63 | 64 | new Setting(containerEl) 65 | .setHeading() 66 | .setName(I18n.t("settings.mainProvider")) 67 | .setClass("ai-providers-select") 68 | .addDropdown((dropdown) => 69 | dropdown 70 | .addOptions(providers) 71 | .setValue(String(this.plugin.settings.aiProviders.main)) 72 | .onChange(async (value) => { 73 | this.plugin.settings.aiProviders.main = value; 74 | // Also update Action Palette override to follow new default 75 | this.plugin.actionPaletteProviderId = value; 76 | await this.plugin.saveSettings(); 77 | await this.display(); 78 | }), 79 | ); 80 | 81 | new Setting(containerEl) 82 | .setName(I18n.t("settings.embeddingProvider")) 83 | .setDesc(I18n.t("settings.embeddingProviderDesc")) 84 | .setClass("ai-providers-select") 85 | .addDropdown((dropdown) => 86 | dropdown 87 | .addOptions(providers) 88 | .setValue( 89 | String(this.plugin.settings.aiProviders.embedding), 90 | ) 91 | .onChange(async (value) => { 92 | this.plugin.settings.aiProviders.embedding = value; 93 | await this.plugin.saveSettings(); 94 | await this.display(); 95 | }), 96 | ); 97 | 98 | new Setting(containerEl) 99 | .setName(I18n.t("settings.visionProvider")) 100 | .setClass("ai-providers-select") 101 | .setDesc(I18n.t("settings.visionProviderDesc")) 102 | .addDropdown((dropdown) => 103 | dropdown 104 | .addOptions(providers) 105 | .setValue( 106 | String(this.plugin.settings.aiProviders.vision), 107 | ) 108 | .onChange(async (value) => { 109 | this.plugin.settings.aiProviders.vision = value; 110 | await this.plugin.saveSettings(); 111 | await this.display(); 112 | }), 113 | ); 114 | 115 | new Setting(containerEl) 116 | .setName(I18n.t("settings.creativity")) 117 | .setDesc("") 118 | .addDropdown((dropdown) => { 119 | dropdown 120 | .addOption("", I18n.t("settings.creativityNone")) 121 | .addOptions({ 122 | low: I18n.t("settings.creativityLow"), 123 | medium: 
I18n.t("settings.creativityMedium"), 124 | high: I18n.t("settings.creativityHigh"), 125 | }) 126 | .setValue( 127 | String(this.plugin.settings.defaults.creativity) || 128 | "", 129 | ) 130 | .onChange(async (value) => { 131 | this.plugin.settings.defaults.creativity = value; 132 | await this.plugin.saveSettings(); 133 | await this.display(); 134 | }); 135 | }); 136 | } catch (error) { 137 | console.error(error); 138 | } 139 | 140 | const editingAction: LocalGPTAction = this.editExistingAction || { 141 | name: "", 142 | prompt: "", 143 | temperature: undefined, 144 | system: "", 145 | replace: false, 146 | }; 147 | 148 | const sharingActionsMapping = { 149 | name: "Name: ", 150 | system: "System: ", 151 | prompt: "Prompt: ", 152 | replace: "Replace: ", 153 | model: "Model: ", 154 | }; 155 | 156 | containerEl.createEl("div", { cls: "local-gpt-settings-separator" }); 157 | 158 | containerEl.createEl("h3", { text: I18n.t("settings.actions") }); 159 | 160 | if (!this.editEnabled) { 161 | const quickAdd = new Setting(containerEl) 162 | .setName(I18n.t("settings.quickAdd")) 163 | .setDesc("") 164 | .addText((text) => { 165 | text.inputEl.style.minWidth = "100%"; 166 | text.setPlaceholder(I18n.t("settings.quickAddPlaceholder")); 167 | text.onChange(async (value) => { 168 | const quickAddAction: LocalGPTAction = value 169 | .split(SEPARATOR) 170 | .map((part) => part.trim()) 171 | .reduce((acc, part) => { 172 | const foundMatchKey = Object.keys( 173 | sharingActionsMapping, 174 | ).find((key) => { 175 | return part.startsWith( 176 | sharingActionsMapping[ 177 | key as keyof typeof sharingActionsMapping 178 | ], 179 | ); 180 | }); 181 | 182 | if (foundMatchKey) { 183 | // @ts-ignore 184 | acc[foundMatchKey] = part.substring( 185 | sharingActionsMapping[ 186 | foundMatchKey as keyof typeof sharingActionsMapping 187 | ].length, 188 | part.length, 189 | ); 190 | } 191 | 192 | return acc; 193 | }, {} as LocalGPTAction); 194 | 195 | if (quickAddAction.name) { 196 | await 
this.addNewAction(quickAddAction); 197 | text.setValue(""); 198 | this.display(); 199 | } 200 | }); 201 | }); 202 | 203 | quickAdd.descEl.innerHTML = I18n.t("settings.quickAddDesc"); 204 | 205 | new Setting(containerEl) 206 | .setName(I18n.t("settings.addNewManually")) 207 | .addButton((button) => 208 | button.setIcon("plus").onClick(async () => { 209 | this.editEnabled = true; 210 | this.editExistingAction = undefined; 211 | this.display(); 212 | }), 213 | ); 214 | } else { 215 | new Setting(containerEl) 216 | .setName(I18n.t("settings.actionName")) 217 | .addText((text) => { 218 | editingAction?.name && text.setValue(editingAction.name); 219 | text.inputEl.style.minWidth = "100%"; 220 | text.setPlaceholder( 221 | I18n.t("settings.actionNamePlaceholder"), 222 | ); 223 | text.onChange(async (value) => { 224 | editingAction.name = value; 225 | }); 226 | }); 227 | 228 | new Setting(containerEl) 229 | .setName(I18n.t("settings.systemPrompt")) 230 | .setDesc(I18n.t("settings.systemPromptDesc")) 231 | .addTextArea((text) => { 232 | editingAction?.system && 233 | text.setValue(editingAction.system); 234 | text.inputEl.style.minWidth = "100%"; 235 | text.inputEl.style.minHeight = "6em"; 236 | text.inputEl.style.resize = "vertical"; 237 | text.setPlaceholder( 238 | I18n.t("settings.systemPromptPlaceholder"), 239 | ); 240 | text.onChange(async (value) => { 241 | editingAction.system = value; 242 | }); 243 | }); 244 | 245 | const promptSetting = new Setting(containerEl) 246 | .setName(I18n.t("settings.prompt")) 247 | .setDesc("") 248 | .addTextArea((text) => { 249 | editingAction?.prompt && 250 | text.setValue(editingAction.prompt); 251 | text.inputEl.style.minWidth = "100%"; 252 | text.inputEl.style.minHeight = "6em"; 253 | text.inputEl.style.resize = "vertical"; 254 | text.setPlaceholder(""); 255 | text.onChange(async (value) => { 256 | editingAction.prompt = value; 257 | }); 258 | }); 259 | 260 | promptSetting.descEl.innerHTML = I18n.t("settings.promptDesc"); 261 | 262 | 
new Setting(containerEl) 263 | .setName(I18n.t("settings.replaceSelected")) 264 | .setDesc(I18n.t("settings.replaceSelectedDesc")) 265 | .addToggle((component) => { 266 | editingAction?.replace && 267 | component.setValue(editingAction.replace); 268 | component.onChange(async (value) => { 269 | editingAction.replace = value; 270 | }); 271 | }); 272 | 273 | const actionButtonsRow = new Setting(containerEl).setName(""); 274 | 275 | if (this.editExistingAction) { 276 | actionButtonsRow.addButton((button) => { 277 | button.buttonEl.style.marginRight = "2em"; 278 | button 279 | .setButtonText(I18n.t("settings.remove")) 280 | .onClick(async () => { 281 | if (!button.buttonEl.hasClass("mod-warning")) { 282 | button.setClass("mod-warning"); 283 | return; 284 | } 285 | 286 | this.plugin.settings.actions = 287 | this.plugin.settings.actions.filter( 288 | (innerAction) => 289 | innerAction !== editingAction, 290 | ); 291 | await this.plugin.saveSettings(); 292 | this.editExistingAction = undefined; 293 | this.editEnabled = false; 294 | this.display(); 295 | }); 296 | }); 297 | } 298 | 299 | actionButtonsRow 300 | .addButton((button) => { 301 | button 302 | .setButtonText(I18n.t("settings.close")) 303 | .onClick(async () => { 304 | this.editEnabled = false; 305 | this.editExistingAction = undefined; 306 | this.display(); 307 | }); 308 | }) 309 | .addButton((button) => 310 | button 311 | .setCta() 312 | .setButtonText(I18n.t("settings.save")) 313 | .onClick(async () => { 314 | if (!editingAction.name) { 315 | new Notice( 316 | I18n.t("notices.actionNameRequired"), 317 | ); 318 | return; 319 | } 320 | 321 | if (!this.editExistingAction) { 322 | if ( 323 | this.plugin.settings.actions.find( 324 | (action) => 325 | action.name === editingAction.name, 326 | ) 327 | ) { 328 | new Notice( 329 | I18n.t("notices.actionNameExists", { 330 | name: editingAction.name, 331 | }), 332 | ); 333 | return; 334 | } 335 | 336 | await this.addNewAction(editingAction); 337 | } else { 338 | if ( 339 
| this.plugin.settings.actions.filter( 340 | (action) => 341 | action.name === editingAction.name, 342 | ).length > 1 343 | ) { 344 | new Notice( 345 | I18n.t("notices.actionNameExists", { 346 | name: editingAction.name, 347 | }), 348 | ); 349 | return; 350 | } 351 | 352 | const index = 353 | this.plugin.settings.actions.findIndex( 354 | (innerAction) => 355 | innerAction === editingAction, 356 | ); 357 | 358 | this.plugin.settings.actions[index] = 359 | editingAction; 360 | } 361 | 362 | await this.plugin.saveSettings(); 363 | 364 | this.editEnabled = false; 365 | this.editExistingAction = undefined; 366 | this.display(); 367 | }), 368 | ); 369 | } 370 | 371 | containerEl.createEl("h4", { text: I18n.t("settings.actionsList") }); 372 | 373 | const actionsContainer = containerEl.createDiv( 374 | "local-gpt-actions-container", 375 | ); 376 | 377 | this.plugin.settings.actions.forEach((action, actionIndex) => { 378 | const sharingString = [ 379 | action.name && `${sharingActionsMapping.name}${action.name}`, 380 | action.system && 381 | `${sharingActionsMapping.system}${action.system}`, 382 | action.prompt && 383 | `${sharingActionsMapping.prompt}${action.prompt}`, 384 | action.replace && 385 | `${sharingActionsMapping.replace}${action.replace}`, 386 | ] 387 | .filter(Boolean) 388 | .join(` ${SEPARATOR}\n`); 389 | 390 | if (!this.changingOrder) { 391 | const actionRow = new Setting(actionsContainer) 392 | .setName(action.name) 393 | .setDesc("") 394 | .addButton((button) => 395 | button.setIcon("copy").onClick(async () => { 396 | navigator.clipboard.writeText(sharingString); 397 | new Notice(I18n.t("notices.copied")); 398 | }), 399 | ) 400 | .addButton((button) => 401 | button.setButtonText("Edit").onClick(async () => { 402 | this.editEnabled = true; 403 | this.editExistingAction = 404 | this.plugin.settings.actions.find( 405 | (innerAction) => 406 | innerAction.name == action.name, 407 | ); 408 | this.display(); 409 | }), 410 | ); 411 | 412 | const systemTitle = 
escapeTitle(action.system); 413 | 414 | const promptTitle = escapeTitle(action.prompt); 415 | 416 | actionRow.descEl.innerHTML = [ 417 | action.system && 418 | `
419 | ${sharingActionsMapping.system}${action.system}
`, 420 | action.prompt && 421 | `
422 | ${sharingActionsMapping.prompt}${action.prompt} 423 |
`, 424 | ] 425 | .filter(Boolean) 426 | .join("
\n"); 427 | } else { 428 | const actionRow = new Setting(actionsContainer) 429 | .setName(action.name) 430 | .setDesc(""); 431 | 432 | actionRow.settingEl.addClass("local-gpt-action-row"); 433 | const handle = actionRow.settingEl.createDiv( 434 | "local-gpt-drag-handle", 435 | ); 436 | setIcon(handle, "grip-vertical"); 437 | actionRow.settingEl.prepend(handle); 438 | } 439 | }); 440 | 441 | if (this.changingOrder) { 442 | // Manual edge auto-scroll helpers 443 | const getScrollableParent = (el: HTMLElement): HTMLElement => { 444 | let node: HTMLElement | null = el.parentElement; 445 | while (node) { 446 | const style = getComputedStyle(node); 447 | const overflowY = style.overflowY; 448 | if ( 449 | node.scrollHeight > node.clientHeight && 450 | (overflowY === "auto" || overflowY === "scroll") 451 | ) { 452 | return node; 453 | } 454 | node = node.parentElement; 455 | } 456 | return (document.scrollingElement || 457 | document.documentElement) as HTMLElement; 458 | }; 459 | 460 | let autoScrollFrame: number | null = null; 461 | let autoScrollDelta = 0; 462 | let scrollEl: HTMLElement | null = null; 463 | 464 | const stepScroll = () => { 465 | if (!scrollEl) return; 466 | if (autoScrollDelta !== 0) { 467 | scrollEl.scrollTop += autoScrollDelta; 468 | autoScrollFrame = requestAnimationFrame(stepScroll); 469 | } else { 470 | autoScrollFrame = null; 471 | } 472 | }; 473 | 474 | const handleEdgeScroll = (evt: any) => { 475 | if (!scrollEl) return; 476 | const clientY = evt?.clientY ?? evt?.touches?.[0]?.clientY ?? 
0; 477 | const rect = scrollEl.getBoundingClientRect(); 478 | const threshold = 48; // px from top/bottom edge 479 | const maxStep = 18; // px per frame 480 | 481 | if (clientY < rect.top + threshold) { 482 | const dist = rect.top + threshold - clientY; 483 | autoScrollDelta = -Math.min(maxStep, Math.ceil(dist / 4)); 484 | } else if (clientY > rect.bottom - threshold) { 485 | const dist = clientY - (rect.bottom - threshold); 486 | autoScrollDelta = Math.min(maxStep, Math.ceil(dist / 4)); 487 | } else { 488 | autoScrollDelta = 0; 489 | } 490 | 491 | if (autoScrollDelta !== 0 && autoScrollFrame === null) { 492 | autoScrollFrame = requestAnimationFrame(stepScroll); 493 | } 494 | }; 495 | 496 | const addEdgeScrollListeners = () => { 497 | if (!scrollEl) return; 498 | scrollEl.addEventListener("dragover", handleEdgeScroll); 499 | scrollEl.addEventListener("pointermove", handleEdgeScroll, { 500 | passive: true, 501 | }); 502 | scrollEl.addEventListener("touchmove", handleEdgeScroll, { 503 | passive: true, 504 | }); 505 | }; 506 | 507 | const removeEdgeScrollListeners = () => { 508 | if (!scrollEl) return; 509 | scrollEl.removeEventListener("dragover", handleEdgeScroll); 510 | scrollEl.removeEventListener( 511 | "pointermove", 512 | handleEdgeScroll as any, 513 | ); 514 | scrollEl.removeEventListener( 515 | "touchmove", 516 | handleEdgeScroll as any, 517 | ); 518 | }; 519 | 520 | Sortable.create(actionsContainer, { 521 | animation: 150, 522 | // Allow dragging by the whole item (not just the handle) 523 | draggable: ".setting-item", 524 | // We provide manual edge autoscroll for reliability in Obsidian's settings modal 525 | ghostClass: "local-gpt-sortable-ghost", 526 | chosenClass: "local-gpt-sortable-chosen", 527 | dragClass: "local-gpt-sortable-drag", 528 | onStart: (evt: any) => { 529 | // Prepare autoscroll on drag start 530 | scrollEl = getScrollableParent(actionsContainer); 531 | addEdgeScrollListeners(); 532 | }, 533 | onEnd: async (evt: any) => { 534 | // Cleanup 
autoscroll 535 | removeEdgeScrollListeners(); 536 | if (autoScrollFrame !== null) { 537 | cancelAnimationFrame(autoScrollFrame); 538 | autoScrollFrame = null; 539 | } 540 | autoScrollDelta = 0; 541 | scrollEl = null; 542 | // Add a transient class to play a drop animation 543 | const droppedEl: HTMLElement | undefined = evt?.item; 544 | if (droppedEl) { 545 | droppedEl.classList.add("local-gpt-drop-animate"); 546 | droppedEl.addEventListener( 547 | "animationend", 548 | () => 549 | droppedEl.classList.remove( 550 | "local-gpt-drop-animate", 551 | ), 552 | { once: true }, 553 | ); 554 | 555 | // Nudge immediate siblings without affecting layout 556 | const prevEl = 557 | droppedEl.previousElementSibling as HTMLElement | null; 558 | const nextEl = 559 | droppedEl.nextElementSibling as HTMLElement | null; 560 | if ( 561 | prevEl && 562 | prevEl.classList.contains("setting-item") 563 | ) { 564 | prevEl.classList.add( 565 | "local-gpt-drop-neighbor-prev", 566 | ); 567 | prevEl.addEventListener( 568 | "animationend", 569 | () => 570 | prevEl.classList.remove( 571 | "local-gpt-drop-neighbor-prev", 572 | ), 573 | { once: true }, 574 | ); 575 | } 576 | if ( 577 | nextEl && 578 | nextEl.classList.contains("setting-item") 579 | ) { 580 | nextEl.classList.add( 581 | "local-gpt-drop-neighbor-next", 582 | ); 583 | nextEl.addEventListener( 584 | "animationend", 585 | () => 586 | nextEl.classList.remove( 587 | "local-gpt-drop-neighbor-next", 588 | ), 589 | { once: true }, 590 | ); 591 | } 592 | } 593 | if ( 594 | evt.oldIndex !== undefined && 595 | evt.newIndex !== undefined && 596 | evt.oldIndex !== evt.newIndex 597 | ) { 598 | const [moved] = this.plugin.settings.actions.splice( 599 | evt.oldIndex, 600 | 1, 601 | ); 602 | this.plugin.settings.actions.splice( 603 | evt.newIndex, 604 | 0, 605 | moved, 606 | ); 607 | await this.plugin.saveSettings(); 608 | } 609 | }, 610 | }); 611 | } 612 | 613 | if (this.plugin.settings.actions.length) { 614 | new 
Setting(containerEl).setName("").addButton((button) => { 615 | this.changingOrder && button.setCta(); 616 | button 617 | .setButtonText( 618 | this.changingOrder 619 | ? I18n.t("settings.done") 620 | : I18n.t("settings.changeOrder"), 621 | ) 622 | .onClick(async () => { 623 | this.changingOrder = !this.changingOrder; 624 | this.display(); 625 | }); 626 | }); 627 | } 628 | 629 | // Advanced settings toggle (similar to AI Providers "For developers") 630 | new Setting(containerEl) 631 | .setHeading() 632 | .setName(I18n.t("settings.advancedSettings")) 633 | .setDesc(I18n.t("settings.advancedSettingsDesc")) 634 | .setClass("local-gpt-advanced-toggle") 635 | .addToggle((toggle) => 636 | toggle.setValue(this.isAdvancedMode).onChange((value) => { 637 | this.isAdvancedMode = value; 638 | this.display(); 639 | }), 640 | ); 641 | 642 | if (this.isAdvancedMode) { 643 | // Group: ✨ Enhanced Actions (RAG) — styled container 644 | const enhancedSection = containerEl.createDiv( 645 | "local-gpt-advanced-group", 646 | ); 647 | enhancedSection.createEl("h4", { 648 | text: I18n.t("settings.enhancedActions"), 649 | }); 650 | new Setting(enhancedSection) 651 | .setName(I18n.t("settings.enhancedActionsLabel")) 652 | .setDesc(I18n.t("settings.enhancedActionsDesc")) 653 | .setClass("ai-providers-select") 654 | .addDropdown((dropdown) => { 655 | // Preset options with non-numeric labels 656 | dropdown 657 | .addOptions({ 658 | local: I18n.t("settings.contextLimitLocal"), 659 | cloud: I18n.t("settings.contextLimitCloud"), 660 | advanced: I18n.t("settings.contextLimitAdvanced"), 661 | max: I18n.t("settings.contextLimitMax"), 662 | }) 663 | .setValue( 664 | String( 665 | this.plugin.settings.defaults.contextLimit || 666 | "local", 667 | ), 668 | ) 669 | .onChange(async (value) => { 670 | this.plugin.settings.defaults.contextLimit = value; 671 | await this.plugin.saveSettings(); 672 | }); 673 | }); 674 | 675 | // Group: Danger zone — reset all actions (moved here as-is) in a styled container 
676 | const dangerSection = containerEl.createDiv( 677 | "local-gpt-advanced-group", 678 | ); 679 | dangerSection.createEl("h4", { 680 | text: I18n.t("settings.dangerZone"), 681 | }); 682 | new Setting(dangerSection) 683 | .setName(I18n.t("settings.resetActions")) 684 | .setDesc(I18n.t("settings.resetActionsDesc")) 685 | .addButton((button) => 686 | button 687 | .setClass("mod-warning") 688 | .setButtonText(I18n.t("settings.reset")) 689 | .onClick(async () => { 690 | if (!this.isConfirmingReset) { 691 | this.isConfirmingReset = true; 692 | button.setButtonText( 693 | I18n.t("settings.confirmReset"), 694 | ); 695 | return; 696 | } 697 | 698 | button.setDisabled(true); 699 | button.buttonEl.setAttribute("disabled", "true"); 700 | button.buttonEl.classList.remove("mod-warning"); 701 | this.plugin.settings.actions = 702 | DEFAULT_SETTINGS.actions; 703 | await this.plugin.saveSettings(); 704 | this.isConfirmingReset = false; 705 | this.display(); 706 | }), 707 | ); 708 | } 709 | } 710 | 711 | async addNewAction(editingAction: LocalGPTAction) { 712 | const alreadyExistingActionIndex = 713 | this.plugin.settings.actions.findIndex( 714 | (action) => action.name === editingAction.name, 715 | ); 716 | 717 | if (alreadyExistingActionIndex >= 0) { 718 | this.plugin.settings.actions[alreadyExistingActionIndex] = 719 | editingAction; 720 | new Notice( 721 | I18n.t("notices.actionRewritten", { name: editingAction.name }), 722 | ); 723 | } else { 724 | this.plugin.settings.actions = [ 725 | editingAction, 726 | ...this.plugin.settings.actions, 727 | ]; 728 | new Notice( 729 | I18n.t("notices.actionAdded", { name: editingAction.name }), 730 | ); 731 | } 732 | await this.plugin.saveSettings(); 733 | } 734 | } 735 | -------------------------------------------------------------------------------- /tests/RAG.test.ts: -------------------------------------------------------------------------------- 1 | import { beforeEach, describe, expect, it, vi } from "vitest"; 2 | import { 3 | 
startProcessing, 4 | getLinkedFiles, 5 | getFileContent, 6 | processDocumentForRAG, 7 | ProcessingContext, 8 | searchDocuments 9 | } from '../src/rag'; 10 | import { IAIDocument, IAIProvidersRetrievalResult } from '../src/interfaces'; 11 | import { extractTextFromPDF } from '../src/processors/pdf'; 12 | import { fileCache } from '../src/indexedDB'; 13 | import { TFile, Vault, MetadataCache } from 'obsidian'; 14 | import * as ragModule from '../src/rag'; 15 | 16 | vi.mock('obsidian'); 17 | vi.mock('../src/processors/pdf'); 18 | vi.mock('../src/indexedDB'); 19 | vi.mock('../src/logger'); 20 | vi.mock('pdfjs-dist', () => ({ 21 | getDocument: vi.fn(), 22 | GlobalWorkerOptions: { 23 | workerPort: null 24 | } 25 | })); 26 | 27 | // Mock AI Providers SDK types and methods 28 | const mockAIProviders = { 29 | retrieve: vi.fn() 30 | }; 31 | 32 | const mockEmbeddingProvider = { 33 | id: 'test-embedding-provider', 34 | name: 'Test Embedding Provider' 35 | }; 36 | 37 | describe('RAG Functions', () => { 38 | beforeEach(() => { 39 | vi.clearAllMocks(); 40 | mockAIProviders.retrieve.mockReset(); 41 | }); 42 | 43 | describe('getFileContent', () => { 44 | it('should read MD files using vault.cachedRead', async () => { 45 | const mockFile = { extension: 'md' } as TFile; 46 | const mockVault = { cachedRead: vi.fn().mockResolvedValue('Markdown content') } as unknown as Vault; 47 | 48 | const content = await getFileContent(mockFile, mockVault); 49 | 50 | expect(content).toBe('Markdown content'); 51 | expect(mockVault.cachedRead).toHaveBeenCalledWith(mockFile); 52 | }); 53 | 54 | it('should extract text from PDF files', async () => { 55 | const mockFile = { extension: 'pdf', path: 'test.pdf', stat: { mtime: 1000 } } as TFile; 56 | const mockVault = { readBinary: vi.fn().mockResolvedValue(new ArrayBuffer(8)) } as unknown as Vault; 57 | (extractTextFromPDF as vi.Mock).mockResolvedValue('PDF content'); 58 | (fileCache.getContent as vi.Mock).mockResolvedValue(null); 59 | 60 | const content = 
await getFileContent(mockFile, mockVault); 61 | 62 | expect(content).toBe('PDF content'); 63 | expect(mockVault.readBinary).toHaveBeenCalledWith(mockFile); 64 | expect(extractTextFromPDF).toHaveBeenCalledWith(expect.any(ArrayBuffer)); 65 | expect(fileCache.setContent).toHaveBeenCalledWith('test.pdf', { 66 | mtime: 1000, 67 | content: 'PDF content' 68 | }); 69 | }); 70 | 71 | it('should use cached PDF content when available and up to date', async () => { 72 | const mockFile = { extension: 'pdf', path: 'test.pdf', stat: { mtime: 1000 } } as TFile; 73 | const mockVault = { readBinary: vi.fn() } as unknown as Vault; 74 | (fileCache.getContent as vi.Mock).mockResolvedValue({ 75 | mtime: 1000, 76 | content: 'Cached PDF content' 77 | }); 78 | 79 | const content = await getFileContent(mockFile, mockVault); 80 | 81 | expect(content).toBe('Cached PDF content'); 82 | expect(mockVault.readBinary).not.toHaveBeenCalled(); 83 | expect(extractTextFromPDF).not.toHaveBeenCalled(); 84 | }); 85 | 86 | it('should handle default case for MD files', async () => { 87 | const mockFile = { extension: 'txt' } as TFile; 88 | const mockVault = { cachedRead: vi.fn().mockResolvedValue('Text content') } as unknown as Vault; 89 | 90 | const content = await getFileContent(mockFile, mockVault); 91 | 92 | expect(content).toBe('Text content'); 93 | expect(mockVault.cachedRead).toHaveBeenCalledWith(mockFile); 94 | }); 95 | }); 96 | 97 | describe('getLinkedFiles', () => { 98 | it('should extract linked files from content', () => { 99 | const content = '[[File1.md]] and [[File2.pdf]] and [[File3.txt]]'; 100 | 101 | // Create mock TFile instances using the new TFile class 102 | const mockFile1 = new TFile(); 103 | mockFile1.path = 'File1.md'; 104 | mockFile1.extension = 'md'; 105 | 106 | const mockFile2 = new TFile(); 107 | mockFile2.path = 'File2.pdf'; 108 | mockFile2.extension = 'pdf'; 109 | 110 | const mockFile3 = new TFile(); 111 | mockFile3.path = 'File3.txt'; 112 | mockFile3.extension = 'txt'; 113 | 
114 | const mockVault = { 115 | getAbstractFileByPath: vi.fn() 116 | .mockImplementation((path: string) => { 117 | if (path === 'File1.md') return mockFile1; 118 | if (path === 'File2.pdf') return mockFile2; 119 | if (path === 'File3.txt') return mockFile3; 120 | return null; 121 | }) 122 | } as unknown as Vault; 123 | 124 | const mockMetadataCache = { 125 | getFirstLinkpathDest: vi.fn() 126 | .mockImplementation((linkText: string) => { 127 | if (linkText === 'File1.md') return { path: 'File1.md' }; 128 | if (linkText === 'File2.pdf') return { path: 'File2.pdf' }; 129 | if (linkText === 'File3.txt') return { path: 'File3.txt' }; 130 | return null; 131 | }) 132 | } as unknown as MetadataCache; 133 | const currentFilePath = 'current.md'; 134 | 135 | const linkedFiles = getLinkedFiles(content, mockVault, mockMetadataCache, currentFilePath); 136 | 137 | expect(linkedFiles).toHaveLength(2); // Only md and pdf files 138 | expect(linkedFiles[0].extension).toBe('md'); 139 | expect(linkedFiles[1].extension).toBe('pdf'); 140 | }); 141 | 142 | it('should handle files with unsupported extensions', () => { 143 | const content = '[[Unsupported.txt]]'; 144 | const mockVault = { 145 | getAbstractFileByPath: vi.fn().mockReturnValue({ path: 'Unsupported.txt', extension: 'txt' }) 146 | } as unknown as Vault; 147 | const mockMetadataCache = { 148 | getFirstLinkpathDest: vi.fn().mockReturnValue({ path: 'Unsupported.txt' }), 149 | } as unknown as MetadataCache; 150 | const currentFilePath = 'current.md'; 151 | 152 | const linkedFiles = getLinkedFiles(content, mockVault, mockMetadataCache, currentFilePath); 153 | 154 | expect(linkedFiles).toHaveLength(0); 155 | }); 156 | 157 | it('should handle links with sections and aliases', () => { 158 | const content = '[[File1.md#section|alias]]'; 159 | 160 | const mockFile1 = new TFile(); 161 | mockFile1.path = 'File1.md'; 162 | mockFile1.extension = 'md'; 163 | 164 | const mockVault = { 165 | getAbstractFileByPath: 
vi.fn().mockImplementation((path: string) => { 166 | if (path === 'File1.md') return mockFile1; 167 | return null; 168 | }) 169 | } as unknown as Vault; 170 | const mockMetadataCache = { 171 | getFirstLinkpathDest: vi.fn().mockImplementation((linkText: string) => { 172 | if (linkText === 'File1.md') return { path: 'File1.md' }; 173 | return null; 174 | }) 175 | } as unknown as MetadataCache; 176 | const currentFilePath = 'current.md'; 177 | 178 | const linkedFiles = getLinkedFiles(content, mockVault, mockMetadataCache, currentFilePath); 179 | 180 | expect(linkedFiles).toHaveLength(1); 181 | expect(mockMetadataCache.getFirstLinkpathDest).toHaveBeenCalledWith('File1.md', currentFilePath); 182 | }); 183 | 184 | it('ignores unresolved links', () => { 185 | const content = '[[Missing.md]]'; 186 | const mockVault = { 187 | getAbstractFileByPath: vi.fn(), 188 | } as unknown as Vault; 189 | const mockMetadataCache = { 190 | getFirstLinkpathDest: vi.fn().mockReturnValue(null), 191 | } as unknown as MetadataCache; 192 | 193 | const linkedFiles = getLinkedFiles(content, mockVault, mockMetadataCache, 'current.md'); 194 | 195 | expect(linkedFiles).toHaveLength(0); 196 | expect(mockVault.getAbstractFileByPath).not.toHaveBeenCalled(); 197 | }); 198 | }); 199 | 200 | 201 | describe('startProcessing', () => { 202 | it('should process linked files and return a map of documents', async () => { 203 | const mockLinkedFiles = [ 204 | { path: 'file1.md', extension: 'md', basename: 'file1', stat: { ctime: 1000 } } as TFile, 205 | { path: 'file2.md', extension: 'md', basename: 'file2', stat: { ctime: 2000 } } as TFile, 206 | ]; 207 | const mockVault = { cachedRead: vi.fn().mockResolvedValue('Mock content') } as unknown as Vault; 208 | const mockMetadataCache = new MetadataCache(); 209 | const mockActiveFile = { path: 'active.md' } as TFile; 210 | 211 | vi.spyOn(ragModule, 'getLinkedFiles').mockReturnValue([]); 212 | vi.spyOn(ragModule, 'getBacklinkFiles').mockReturnValue([]); 213 | 214 | 
const result = await startProcessing(mockLinkedFiles, mockVault, mockMetadataCache, mockActiveFile); 215 | 216 | expect(result.size).toBe(2); 217 | expect(result.get('file1.md')).toBeDefined(); 218 | expect(result.get('file2.md')).toBeDefined(); 219 | }); 220 | }); 221 | 222 | describe('processDocumentForRAG', () => { 223 | it('should not process files beyond MAX_DEPTH', async () => { 224 | const mockFile = { path: 'deep.md', extension: 'md' } as TFile; 225 | const mockContext: ProcessingContext = { 226 | vault: new Vault(), 227 | metadataCache: new MetadataCache(), 228 | activeFile: { path: 'active.md' } as TFile, 229 | }; 230 | const processedDocs = new Map(); 231 | 232 | const result = await processDocumentForRAG(mockFile, mockContext, processedDocs, 11, false); 233 | 234 | expect(result.size).toBe(0); 235 | }); 236 | 237 | it('should not process the active file', async () => { 238 | const mockFile = { path: 'active.md', extension: 'md' } as TFile; 239 | const mockContext: ProcessingContext = { 240 | vault: new Vault(), 241 | metadataCache: new MetadataCache(), 242 | activeFile: { path: 'active.md' } as TFile, 243 | }; 244 | const processedDocs = new Map(); 245 | 246 | const result = await processDocumentForRAG(mockFile, mockContext, processedDocs, 0, false); 247 | 248 | expect(result.size).toBe(0); 249 | }); 250 | 251 | it('should not process already processed files', async () => { 252 | const mockFile = { path: 'processed.md', extension: 'md' } as TFile; 253 | const mockContext: ProcessingContext = { 254 | vault: new Vault(), 255 | metadataCache: new MetadataCache(), 256 | activeFile: { path: 'active.md' } as TFile, 257 | }; 258 | const processedDocs = new Map(); 259 | processedDocs.set('processed.md', { 260 | content: 'Already processed', 261 | meta: { 262 | source: 'processed.md', 263 | basename: 'processed', 264 | stat: { ctime: 1000 }, 265 | depth: 0, 266 | isBacklink: false 267 | } 268 | }); 269 | 270 | const result = await processDocumentForRAG(mockFile, 
mockContext, processedDocs, 0, false); 271 | 272 | expect(result.size).toBe(1); // Still has the original document 273 | }); 274 | 275 | it('handles file processing errors gracefully', async () => { 276 | const mockFile = { path: 'error.md', extension: 'md' } as TFile; 277 | const mockContext: ProcessingContext = { 278 | vault: new Vault(), 279 | metadataCache: new MetadataCache(), 280 | activeFile: { path: 'active.md' } as TFile, 281 | }; 282 | const processedDocs = new Map(); 283 | const getFileContentSpy = vi 284 | .spyOn(ragModule, 'getFileContent') 285 | .mockRejectedValue(new Error('boom')); 286 | const consoleSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); 287 | 288 | const result = await processDocumentForRAG( 289 | mockFile, 290 | mockContext, 291 | processedDocs, 292 | 0, 293 | false, 294 | ); 295 | 296 | expect(result.size).toBe(0); 297 | expect(consoleSpy).toHaveBeenCalled(); 298 | getFileContentSpy.mockRestore(); 299 | consoleSpy.mockRestore(); 300 | }); 301 | 302 | it('processes linked and backlink files recursively', async () => { 303 | const rootFile = new TFile(); 304 | rootFile.path = 'root.md'; 305 | rootFile.extension = 'md'; 306 | rootFile.basename = 'root'; 307 | rootFile.stat = { ctime: 0 } as any; 308 | 309 | const linkedFile = new TFile(); 310 | linkedFile.path = 'linked.md'; 311 | linkedFile.extension = 'md'; 312 | linkedFile.basename = 'linked'; 313 | linkedFile.stat = { ctime: 1 } as any; 314 | 315 | const backlinkFile = new TFile(); 316 | backlinkFile.path = 'back.md'; 317 | backlinkFile.extension = 'md'; 318 | backlinkFile.basename = 'back'; 319 | backlinkFile.stat = { ctime: 2 } as any; 320 | const processedDocs = new Map(); 321 | const mockContext: ProcessingContext = { 322 | vault: { 323 | cachedRead: vi.fn().mockResolvedValue('[[linked.md]]'), 324 | readBinary: vi.fn(), 325 | getAbstractFileByPath: vi.fn().mockImplementation((path: string) => { 326 | if (path === 'linked.md') return linkedFile; 327 | if (path === 
'back.md') return backlinkFile; 328 | return null; 329 | }), 330 | } as unknown as Vault, 331 | metadataCache: { 332 | getFirstLinkpathDest: vi 333 | .fn() 334 | .mockImplementation((linkText: string) => 335 | linkText === 'linked.md' ? { path: 'linked.md' } : null, 336 | ), 337 | resolvedLinks: { 338 | 'back.md': { 'root.md': 1 }, 339 | }, 340 | } as unknown as MetadataCache, 341 | activeFile: { path: 'active.md' } as TFile, 342 | }; 343 | 344 | const result = await processDocumentForRAG( 345 | rootFile, 346 | mockContext, 347 | processedDocs, 348 | 0, 349 | false, 350 | ); 351 | 352 | expect(result.size).toBe(3); 353 | expect(result.get('linked.md')?.meta.isBacklink).toBe(false); 354 | expect(result.get('back.md')?.meta.isBacklink).toBe(true); 355 | }); 356 | }); 357 | 358 | 359 | describe('searchDocuments', () => { 360 | it('should call aiProviders.retrieve and format results', async () => { 361 | const query = 'test query'; 362 | const documents: IAIDocument[] = [ 363 | { 364 | content: 'Test content 1', 365 | meta: { basename: 'file1', stat: { ctime: 1000 } } 366 | }, 367 | { 368 | content: 'Test content 2', 369 | meta: { basename: 'file2', stat: { ctime: 2000 } } 370 | } 371 | ]; 372 | 373 | const mockResults: IAIProvidersRetrievalResult[] = [ 374 | { 375 | content: 'Relevant content 1', 376 | score: 0.9, 377 | document: documents[0] 378 | }, 379 | { 380 | content: 'Relevant content 2', 381 | score: 0.7, 382 | document: documents[1] 383 | } 384 | ]; 385 | 386 | mockAIProviders.retrieve.mockResolvedValue(mockResults); 387 | const mockUpdateCompletedSteps = vi.fn(); 388 | const abortController = new AbortController(); 389 | 390 | const result = await searchDocuments( 391 | query, 392 | documents, 393 | mockAIProviders, 394 | mockEmbeddingProvider, 395 | abortController, 396 | mockUpdateCompletedSteps, 397 | vi.fn(), 398 | 10000 399 | ); 400 | 401 | expect(mockAIProviders.retrieve).toHaveBeenCalledWith(expect.objectContaining({ 402 | query, 403 | documents, 404 
| embeddingProvider: mockEmbeddingProvider 405 | })); 406 | expect(mockUpdateCompletedSteps).toHaveBeenCalledWith(1); 407 | expect(result).toContain('[[file2]]'); 408 | expect(result).toContain('[[file1]]'); 409 | expect(result).toContain('Relevant content 1'); 410 | expect(result).toContain('Relevant content 2'); 411 | }); 412 | 413 | it('should return empty string when aborted', async () => { 414 | const abortController = new AbortController(); 415 | abortController.abort(); 416 | 417 | const result = await searchDocuments( 418 | 'query', 419 | [], 420 | mockAIProviders, 421 | mockEmbeddingProvider, 422 | abortController, 423 | vi.fn(), 424 | vi.fn(), 425 | 10000 426 | ); 427 | 428 | expect(result).toBe(''); 429 | expect(mockAIProviders.retrieve).not.toHaveBeenCalled(); 430 | }); 431 | 432 | it('should handle errors gracefully', async () => { 433 | mockAIProviders.retrieve.mockRejectedValue(new Error('Retrieval failed')); 434 | const abortController = new AbortController(); 435 | const consoleSpy = vi.spyOn(console, 'error').mockImplementation(); 436 | 437 | const result = await searchDocuments( 438 | 'query', 439 | [], 440 | mockAIProviders, 441 | mockEmbeddingProvider, 442 | abortController, 443 | vi.fn(), 444 | vi.fn(), 445 | 10000 446 | ); 447 | 448 | expect(result).toBe(''); 449 | expect(consoleSpy).toHaveBeenCalledWith('Error in searchDocuments:', expect.any(Error)); 450 | consoleSpy.mockRestore(); 451 | }); 452 | 453 | it('should return empty string when aborted during error', async () => { 454 | const abortController = new AbortController(); 455 | mockAIProviders.retrieve.mockImplementation(() => { 456 | abortController.abort(); 457 | return Promise.reject(new Error('Aborted')); 458 | }); 459 | 460 | const result = await searchDocuments( 461 | 'query', 462 | [], 463 | mockAIProviders, 464 | mockEmbeddingProvider, 465 | abortController, 466 | vi.fn(), 467 | vi.fn(), 468 | 10000 469 | ); 470 | 471 | expect(result).toBe(''); 472 | }); 473 | }); 474 | 475 | 
describe('formatResults (internal function)', () => { 476 | // Since formatResults is not exported, we test it through searchDocuments 477 | it('should format results with proper grouping and sorting', async () => { 478 | const documents: IAIDocument[] = [ 479 | { 480 | content: 'Content from file1', 481 | meta: { basename: 'file1', stat: { ctime: 3000 } } 482 | }, 483 | { 484 | content: 'Content from file2', 485 | meta: { basename: 'file2', stat: { ctime: 1000 } } 486 | } 487 | ]; 488 | 489 | const mockResults: IAIProvidersRetrievalResult[] = [ 490 | { 491 | content: 'High score content from file1', 492 | score: 0.9, 493 | document: documents[0] 494 | }, 495 | { 496 | content: 'Low score content from file1', 497 | score: 0.3, 498 | document: documents[0] 499 | }, 500 | { 501 | content: 'Medium score content from file2', 502 | score: 0.6, 503 | document: documents[1] 504 | } 505 | ]; 506 | 507 | mockAIProviders.retrieve.mockResolvedValue(mockResults); 508 | 509 | const result = await searchDocuments( 510 | 'query', 511 | documents, 512 | mockAIProviders, 513 | mockEmbeddingProvider, 514 | new AbortController(), 515 | vi.fn(), 516 | vi.fn(), 517 | 10000 518 | ); 519 | 520 | // Should be sorted by file creation time (newer first) 521 | const file1Index = result.indexOf('[[file1]]'); 522 | const file2Index = result.indexOf('[[file2]]'); 523 | expect(file1Index).toBeLessThan(file2Index); 524 | 525 | // Within each file, should be sorted by score (higher first) 526 | const highScoreIndex = result.indexOf('High score content from file1'); 527 | const lowScoreIndex = result.indexOf('Low score content from file1'); 528 | expect(highScoreIndex).toBeLessThan(lowScoreIndex); 529 | }); 530 | 531 | describe('Context limit presets', () => { 532 | const presetCases = [ 533 | { preset: undefined, expectedChunks: 1, label: 'default (no preset)' }, 534 | { preset: 'local', expectedChunks: 1, label: 'local' }, 535 | { preset: 'cloud', expectedChunks: 6, label: 'cloud' }, 536 | { 
preset: 'advanced', expectedChunks: 19, label: 'advanced' }, 537 | { preset: 'max', expectedChunks: 25, label: 'max' }, 538 | ]; 539 | 540 | const makeResults = (doc: IAIDocument) => 541 | Array.from({ length: 25 }, () => ({ 542 | content: 'A'.repeat(5000), 543 | score: 0.5, 544 | document: doc, 545 | })) as unknown as IAIProvidersRetrievalResult[]; 546 | 547 | for (const { preset, expectedChunks, label } of presetCases) { 548 | it(`should respect preset: ${label}`, async () => { 549 | const map: Record = { 550 | local: 10000, 551 | cloud: 32000, 552 | advanced: 100000, 553 | max: 3000000, 554 | }; 555 | const limit = preset ? map[preset] : 10000; 556 | 557 | const doc: IAIDocument = { 558 | content: 'irrelevant', 559 | meta: { basename: 'fileX', stat: { ctime: 1 } }, 560 | }; 561 | 562 | const mocked = makeResults(doc); 563 | mockAIProviders.retrieve.mockResolvedValue(mocked); 564 | 565 | const result = await searchDocuments( 566 | 'query', 567 | [doc], 568 | mockAIProviders, 569 | mockEmbeddingProvider, 570 | new AbortController(), 571 | vi.fn(), 572 | vi.fn(), 573 | limit, 574 | ); 575 | 576 | // Count A's equals number of included chunks * 5000 577 | const aCount = (result.match(/A/g) || []).length; 578 | expect(aCount).toBe(expectedChunks * 5000); 579 | 580 | }); 581 | } 582 | }); 583 | 584 | it('should handle empty results', async () => { 585 | mockAIProviders.retrieve.mockResolvedValue([]); 586 | 587 | const result = await searchDocuments( 588 | 'query', 589 | [], 590 | mockAIProviders, 591 | mockEmbeddingProvider, 592 | new AbortController(), 593 | vi.fn(), 594 | vi.fn(), 595 | 10000 596 | ); 597 | 598 | expect(result).toBe(''); 599 | }); 600 | 601 | it('should respect context limit', async () => { 602 | const longContent = 'A'.repeat(5000); 603 | const documents: IAIDocument[] = [ 604 | { 605 | content: longContent, 606 | meta: { basename: 'file1', stat: { ctime: 1000 } } 607 | } 608 | ]; 609 | 610 | const mockResults: IAIProvidersRetrievalResult[] = [ 611 
| { 612 | content: longContent, 613 | score: 0.9, 614 | document: documents[0] 615 | }, 616 | { 617 | content: longContent, 618 | score: 0.8, 619 | document: documents[0] 620 | } 621 | ]; 622 | 623 | mockAIProviders.retrieve.mockResolvedValue(mockResults); 624 | 625 | const result = await searchDocuments( 626 | 'query', 627 | documents, 628 | mockAIProviders, 629 | mockEmbeddingProvider, 630 | new AbortController(), 631 | vi.fn(), 632 | vi.fn(), 633 | 10000 634 | ); 635 | 636 | // Should not exceed reasonable length due to context limit 637 | expect(result.length).toBeLessThan(15000); // Some buffer for formatting 638 | }); 639 | 640 | it('handles zero context limit with missing timestamps', async () => { 641 | const documents: IAIDocument[] = [ 642 | { content: 'Doc body', meta: { basename: 'first', stat: {} } }, 643 | { content: 'Other doc', meta: { basename: 'second', stat: {} } }, 644 | ]; 645 | 646 | mockAIProviders.retrieve.mockResolvedValue([ 647 | { content: 'One', score: 0.9, document: documents[0] }, 648 | { content: 'Two', score: 0.8, document: documents[1] }, 649 | ] as unknown as IAIProvidersRetrievalResult[]); 650 | 651 | const result = await searchDocuments( 652 | 'query', 653 | documents, 654 | mockAIProviders, 655 | mockEmbeddingProvider, 656 | new AbortController(), 657 | vi.fn(), 658 | vi.fn(), 659 | 0, 660 | ); 661 | 662 | expect(result).toBe(''); 663 | }); 664 | 665 | it('tracks progress updates during searchDocuments', async () => { 666 | const documents: IAIDocument[] = [ 667 | { 668 | content: 'Doc body', 669 | meta: { basename: 'file-track', stat: { ctime: 1 } }, 670 | }, 671 | ]; 672 | const mockUpdate = vi.fn(); 673 | const mockAddTotal = vi.fn(); 674 | 675 | mockAIProviders.retrieve.mockImplementation(async ({ onProgress }) => { 676 | onProgress({ totalChunks: 2, processedChunks: [1] }); 677 | onProgress({ totalChunks: 2, processedChunks: [1, 2] }); 678 | return [ 679 | { 680 | content: 'Snippet', 681 | score: 0.9, 682 | document: 
documents[0], 683 | }, 684 | ]; 685 | }); 686 | 687 | const result = await searchDocuments( 688 | 'query', 689 | documents, 690 | mockAIProviders, 691 | mockEmbeddingProvider, 692 | new AbortController(), 693 | mockUpdate, 694 | mockAddTotal, 695 | 10000, 696 | ); 697 | 698 | expect(mockAddTotal).toHaveBeenCalledWith(2); 699 | expect(mockUpdate).toHaveBeenCalledWith(1); 700 | expect(result).toContain('[[file-track]]'); 701 | }); 702 | 703 | it('stops processing progress updates after abort', async () => { 704 | const documents: IAIDocument[] = [ 705 | { 706 | content: 'Doc body', 707 | meta: { basename: 'file-track', stat: { ctime: 1 } }, 708 | }, 709 | ]; 710 | const mockUpdate = vi.fn(); 711 | const abortController = new AbortController(); 712 | 713 | mockAIProviders.retrieve.mockImplementation(async ({ onProgress }) => { 714 | onProgress({ totalChunks: 2, processedChunks: [1] }); 715 | onProgress({ totalChunks: 2, processedChunks: [1] }); 716 | abortController.abort(); 717 | onProgress({ totalChunks: 2, processedChunks: [1, 2] }); 718 | return [ 719 | { 720 | content: 'Snippet', 721 | score: 0.9, 722 | document: documents[0], 723 | }, 724 | ]; 725 | }); 726 | 727 | const result = await searchDocuments( 728 | 'query', 729 | documents, 730 | mockAIProviders, 731 | mockEmbeddingProvider, 732 | abortController, 733 | mockUpdate, 734 | undefined as any, 735 | 10000, 736 | ); 737 | 738 | expect(mockUpdate).toHaveBeenCalledTimes(1); 739 | expect(result).toContain('[[file-track]]'); 740 | }); 741 | 742 | it('handles progress events without totals or new chunks', async () => { 743 | const documents: IAIDocument[] = [ 744 | { content: 'Doc body', meta: { basename: 'file-zero', stat: {} } }, 745 | ]; 746 | const mockUpdate = vi.fn(); 747 | const mockAddTotal = vi.fn(); 748 | 749 | mockAIProviders.retrieve.mockImplementation(async ({ onProgress }) => { 750 | onProgress({ processedChunks: [] }); 751 | return [ 752 | { 753 | content: 'Snippet', 754 | score: 0.9, 755 | 
document: documents[0], 756 | }, 757 | ]; 758 | }); 759 | 760 | const result = await searchDocuments( 761 | 'query', 762 | documents, 763 | mockAIProviders, 764 | mockEmbeddingProvider, 765 | new AbortController(), 766 | mockUpdate, 767 | mockAddTotal, 768 | 10000, 769 | ); 770 | 771 | expect(mockAddTotal).toHaveBeenCalledWith(0); 772 | expect(mockUpdate).not.toHaveBeenCalled(); 773 | expect(result).toContain('[[file-zero]]'); 774 | }); 775 | }); 776 | 777 | describe('getBacklinkFiles', () => { 778 | beforeEach(() => { 779 | vi.restoreAllMocks(); 780 | vi.clearAllMocks(); 781 | vi.resetModules(); 782 | }); 783 | 784 | it('should find backlink files from resolved links', () => { 785 | const mockFile = new TFile(); 786 | mockFile.path = 'target.md'; 787 | mockFile.extension = 'md'; 788 | 789 | const mockBacklink1 = new TFile(); 790 | mockBacklink1.path = 'backlink1.md'; 791 | mockBacklink1.extension = 'md'; 792 | const mockBacklink2 = new TFile(); 793 | mockBacklink2.path = 'backlink2.md'; 794 | mockBacklink2.extension = 'md'; 795 | 796 | const getAbstractFileByPath = vi.fn().mockImplementation((path: string) => { 797 | if (path === 'backlink1.md') return mockBacklink1; 798 | if (path === 'backlink2.md') return mockBacklink2; 799 | return null; 800 | }); 801 | const mockContext = { 802 | vault: { 803 | getAbstractFileByPath, 804 | } as unknown as Vault, 805 | metadataCache: Object.assign(new MetadataCache(), { 806 | resolvedLinks: { 807 | 'backlink1.md': { 'target.md': 1 }, 808 | 'backlink2.md': { 'target.md': 1 }, 809 | 'other.md': { 'different.md': 1 } 810 | }, 811 | }), 812 | activeFile: { path: 'active.md' } as TFile 813 | } as ProcessingContext; 814 | 815 | const processedDocs = new Map(); 816 | 817 | const backlinkFiles = ragModule.getBacklinkFiles( 818 | mockFile, 819 | mockContext, 820 | processedDocs, 821 | ); 822 | 823 | expect(backlinkFiles.map((f: any) => f.path)).toEqual([ 824 | 'backlink1.md', 825 | 'backlink2.md', 826 | ]); 827 | 
expect(getAbstractFileByPath).toHaveBeenCalledTimes(2); 828 | }); 829 | 830 | it('should exclude already processed documents', () => { 831 | const mockFile = new TFile(); 832 | mockFile.path = 'target.md'; 833 | mockFile.extension = 'md'; 834 | 835 | const mockBacklink1 = new TFile(); 836 | mockBacklink1.path = 'backlink1.md'; 837 | mockBacklink1.extension = 'md'; 838 | 839 | const getAbstractFileByPath = vi.fn().mockImplementation((path: string) => { 840 | if (path === 'backlink1.md') return mockBacklink1; 841 | return null; 842 | }); 843 | const mockContext = { 844 | vault: { 845 | getAbstractFileByPath, 846 | } as unknown as Vault, 847 | metadataCache: Object.assign(new MetadataCache(), { 848 | resolvedLinks: { 849 | 'backlink1.md': { 'target.md': 1 }, 850 | 'backlink2.md': { 'target.md': 1 } 851 | }, 852 | }), 853 | activeFile: { path: 'active.md' } as TFile 854 | } as ProcessingContext; 855 | 856 | const processedDocs = new Map(); 857 | processedDocs.set('backlink2.md', { 858 | content: 'Already processed', 859 | meta: { 860 | source: 'backlink2.md', 861 | basename: 'backlink2', 862 | stat: { ctime: 1000 }, 863 | depth: 0, 864 | isBacklink: false 865 | } 866 | }); 867 | 868 | const backlinkFiles = ragModule.getBacklinkFiles( 869 | mockFile, 870 | mockContext, 871 | processedDocs, 872 | ); 873 | 874 | expect(backlinkFiles.map((f: any) => f.path)).toEqual([ 875 | 'backlink1.md', 876 | ]); 877 | expect(getAbstractFileByPath).toHaveBeenCalledTimes(1); 878 | }); 879 | 880 | describe('Progress tracking', () => { 881 | it('should call updateCompletedSteps for each processed file in startProcessing', async () => { 882 | const mockLinkedFiles = [ 883 | { path: 'file1.md', extension: 'md', basename: 'file1', stat: { ctime: 1000 } } as TFile, 884 | { path: 'file2.md', extension: 'md', basename: 'file2', stat: { ctime: 2000 } } as TFile, 885 | { path: 'file3.md', extension: 'md', basename: 'file3', stat: { ctime: 3000 } } as TFile, 886 | ]; 887 | const mockVault = { 
cachedRead: vi.fn().mockResolvedValue('Mock content') } as unknown as Vault; 888 | const mockMetadataCache = new MetadataCache(); 889 | const mockActiveFile = { path: 'active.md' } as TFile; 890 | const mockUpdateCompletedSteps = vi.fn(); 891 | 892 | vi.spyOn(ragModule, 'getLinkedFiles').mockReturnValue([]); 893 | vi.spyOn(ragModule, 'getBacklinkFiles').mockReturnValue([]); 894 | 895 | await startProcessing(mockLinkedFiles, mockVault, mockMetadataCache, mockActiveFile, mockUpdateCompletedSteps); 896 | 897 | expect(mockUpdateCompletedSteps).toHaveBeenCalledTimes(3); 898 | expect(mockUpdateCompletedSteps).toHaveBeenCalledWith(1); 899 | }); 900 | 901 | it('should work without updateCompletedSteps callback in startProcessing', async () => { 902 | const mockLinkedFiles = [ 903 | { path: 'file1.md', extension: 'md', basename: 'file1', stat: { ctime: 1000 } } as TFile, 904 | ]; 905 | const mockVault = { cachedRead: vi.fn().mockResolvedValue('Mock content') } as unknown as Vault; 906 | const mockMetadataCache = new MetadataCache(); 907 | const mockActiveFile = { path: 'active.md' } as TFile; 908 | 909 | vi.spyOn(ragModule, 'getLinkedFiles').mockReturnValue([]); 910 | vi.spyOn(ragModule, 'getBacklinkFiles').mockReturnValue([]); 911 | 912 | // Should not throw when callback is not provided 913 | const result = await startProcessing(mockLinkedFiles, mockVault, mockMetadataCache, mockActiveFile); 914 | 915 | expect(result.size).toBe(1); 916 | }); 917 | 918 | it('should call updateCompletedSteps once in searchDocuments', async () => { 919 | const query = 'test query'; 920 | const documents: IAIDocument[] = [ 921 | { 922 | content: 'Test content', 923 | meta: { basename: 'file1', stat: { ctime: 1000 } } 924 | } 925 | ]; 926 | 927 | const mockResults: IAIProvidersRetrievalResult[] = [ 928 | { 929 | content: 'Relevant content', 930 | score: 0.9, 931 | document: documents[0] 932 | } 933 | ]; 934 | 935 | mockAIProviders.retrieve.mockResolvedValue(mockResults); 936 | const 
mockUpdateCompletedSteps = vi.fn(); 937 | const abortController = new AbortController(); 938 | 939 | await searchDocuments( 940 | query, 941 | documents, 942 | mockAIProviders, 943 | mockEmbeddingProvider, 944 | abortController, 945 | mockUpdateCompletedSteps, 946 | vi.fn(), 947 | 10000 948 | ); 949 | 950 | expect(mockUpdateCompletedSteps).toHaveBeenCalledTimes(1); 951 | expect(mockUpdateCompletedSteps).toHaveBeenCalledWith(1); 952 | }); 953 | 954 | it('should not call updateCompletedSteps when searchDocuments is aborted', async () => { 955 | const abortController = new AbortController(); 956 | abortController.abort(); 957 | const mockUpdateCompletedSteps = vi.fn(); 958 | 959 | const result = await searchDocuments( 960 | 'query', 961 | [], 962 | mockAIProviders, 963 | mockEmbeddingProvider, 964 | abortController, 965 | mockUpdateCompletedSteps, 966 | vi.fn(), 967 | 10000 968 | ); 969 | 970 | expect(result).toBe(''); 971 | expect(mockUpdateCompletedSteps).not.toHaveBeenCalled(); 972 | }); 973 | 974 | it('should not call updateCompletedSteps when searchDocuments encounters error', async () => { 975 | mockAIProviders.retrieve.mockRejectedValue(new Error('Retrieval failed')); 976 | const abortController = new AbortController(); 977 | const mockUpdateCompletedSteps = vi.fn(); 978 | const consoleSpy = vi.spyOn(console, 'error').mockImplementation(); 979 | 980 | const result = await searchDocuments( 981 | 'query', 982 | [], 983 | mockAIProviders, 984 | mockEmbeddingProvider, 985 | abortController, 986 | mockUpdateCompletedSteps, 987 | vi.fn(), 988 | 10000 989 | ); 990 | 991 | expect(result).toBe(''); 992 | expect(mockUpdateCompletedSteps).not.toHaveBeenCalled(); 993 | consoleSpy.mockRestore(); 994 | }); 995 | }); 996 | 997 | it('should return empty array when no backlinks exist', () => { 998 | const mockFile = new TFile(); 999 | mockFile.path = 'target.md'; 1000 | mockFile.extension = 'md'; 1001 | 1002 | const mockActiveFile = new TFile(); 1003 | mockActiveFile.path = 
'active.md'; 1004 | mockActiveFile.extension = 'md'; 1005 | 1006 | const mockContext: ProcessingContext = { 1007 | vault: { 1008 | getAbstractFileByPath: vi.fn().mockReturnValue(null) 1009 | } as unknown as Vault, 1010 | metadataCache: { 1011 | resolvedLinks: { 1012 | 'other.md': { 'different.md': 1 } 1013 | } 1014 | } as unknown as MetadataCache, 1015 | activeFile: mockActiveFile 1016 | }; 1017 | const processedDocs = new Map(); 1018 | 1019 | const backlinkFiles = ragModule.getBacklinkFiles(mockFile, mockContext, processedDocs); 1020 | 1021 | expect(backlinkFiles).toHaveLength(0); 1022 | }); 1023 | }); 1024 | }); 1025 | -------------------------------------------------------------------------------- /src/main.ts: -------------------------------------------------------------------------------- 1 | import { Editor, Menu, Notice, Plugin, requestUrl, TFile } from "obsidian"; 2 | import { LocalGPTSettingTab } from "./LocalGPTSettingTab"; 3 | import { CREATIVITY, DEFAULT_SETTINGS } from "defaultSettings"; 4 | import { spinnerPlugin } from "./spinnerPlugin"; 5 | import { 6 | actionPalettePlugin, 7 | showActionPalette, 8 | hideActionPalette, 9 | } from "./ui/actionPalettePlugin"; 10 | import { IAIDocument, LocalGPTAction, LocalGPTSettings } from "./interfaces"; 11 | 12 | import { getLinkedFiles, startProcessing, searchDocuments } from "./rag"; 13 | import { logger } from "./logger"; 14 | import { I18n } from "./i18n"; 15 | import { fileCache } from "./indexedDB"; 16 | import { 17 | initAI, 18 | waitForAI, 19 | IAIProvider, 20 | IAIProvidersService, 21 | } from "@obsidian-ai-providers/sdk"; 22 | import { preparePrompt } from "./utils"; 23 | 24 | /** 25 | * Remove all thinking tags and their content from text 26 | * Used for final output processing 27 | * 28 | * @param text Text that may contain thinking tags 29 | * @returns Clean text without thinking tags and their content 30 | */ 31 | function removeThinkingTags(text: string): string { 32 | return 
text.replace(/^[\s\S]*?<\/think>\s*/, ""); 33 | } 34 | 35 | const MIN_BASE_SPEED = 0.02 / 16; 36 | const MAX_BASE_SPEED = 3 / 16; 37 | 38 | export default class LocalGPT extends Plugin { 39 | settings: LocalGPTSettings; 40 | actionPaletteProviderId: string | null = null; 41 | actionPaletteModel: string | null = null; 42 | actionPaletteModelProviderId: string | null = null; 43 | actionPaletteCreativityKey: string | null = null; // "", "low", "medium", "high" 44 | abortControllers: AbortController[] = []; 45 | updatingInterval: number; 46 | private statusBarItem: HTMLElement; 47 | private currentPercentage = 0; 48 | private targetPercentage = 0; 49 | private frameId: number | null = null; 50 | private lastFrameTime: number | null = null; 51 | private displayedPercentage = 0; // fractional internal value 52 | private baseSpeed = 0; // percent per ms (smoothed) 53 | private lastTargetUpdateTime: number | null = null; 54 | private progressFinished = false; // controls when we can show 100% 55 | private totalProgressSteps = 0; 56 | private completedProgressSteps = 0; 57 | 58 | async onload() { 59 | initAI(this.app, this, async () => { 60 | await this.loadSettings(); 61 | this.addSettingTab(new LocalGPTSettingTab(this.app, this)); 62 | this.reload(); 63 | this.app.workspace.onLayoutReady(async () => { 64 | // @ts-ignore 65 | await fileCache.init(this.app.appId); 66 | 67 | window.setTimeout(() => { 68 | this.checkUpdates(); 69 | }, 5000); 70 | }); 71 | this.registerEditorExtension(spinnerPlugin); 72 | this.registerEditorExtension(actionPalettePlugin); 73 | this.initializeStatusBar(); 74 | }); 75 | } 76 | 77 | private initializeStatusBar() { 78 | this.statusBarItem = this.addStatusBarItem(); 79 | this.statusBarItem.addClass("local-gpt-status"); 80 | this.statusBarItem.hide(); 81 | } 82 | 83 | processText(text: string, selectedText: string) { 84 | if (!text.trim()) { 85 | return ""; 86 | } 87 | 88 | // Remove ... 
tags and their content from the final output 89 | const cleanText = removeThinkingTags(text).trim(); 90 | 91 | return ["\n", cleanText.replace(selectedText, "").trim(), "\n"].join( 92 | "", 93 | ); 94 | } 95 | 96 | private addCommands() { 97 | this.addCommand({ 98 | id: "context-menu", 99 | name: I18n.t("commands.showContextMenu"), 100 | editorCallback: (editor: Editor) => { 101 | // @ts-expect-error, not typed 102 | const editorView = editor.cm; 103 | 104 | const cursorPositionFrom = editor.getCursor("from"); 105 | const cursorPositionTo = editor.getCursor("to"); 106 | 107 | const contextMenu = new Menu(); 108 | 109 | this.settings.actions.forEach((action) => { 110 | contextMenu.addItem((item) => { 111 | item.setTitle(action.name).onClick( 112 | this.runAction.bind(this, action, editor), 113 | ); 114 | }); 115 | }); 116 | 117 | const fromRect = editorView.coordsAtPos( 118 | editor.posToOffset(cursorPositionFrom), 119 | ); 120 | const toRect = editorView.coordsAtPos( 121 | editor.posToOffset(cursorPositionTo), 122 | ); 123 | contextMenu.showAtPosition({ 124 | x: fromRect.left, 125 | y: toRect.top + (editorView.defaultLineHeight || 0), 126 | }); 127 | }, 128 | }); 129 | 130 | this.settings.actions.forEach((action, index) => { 131 | this.addCommand({ 132 | id: `quick-access-${index + 1}`, 133 | name: `${index + 1} | ${action.name}`, 134 | editorCallback: (editor: Editor) => { 135 | this.runAction(action, editor); 136 | }, 137 | }); 138 | }); 139 | 140 | this.addCommand({ 141 | id: "local-gpt-action-palette", 142 | name: I18n.t("commands.actionPalette.name"), 143 | editorCallback: async (editor: Editor) => { 144 | // @ts-expect-error, not typed 145 | const editorView = editor.cm; 146 | const cursorPositionFrom = editor.getCursor("from"); 147 | const insertPos = editor.posToOffset({ 148 | line: cursorPositionFrom.line, 149 | ch: 0, 150 | }); 151 | 152 | let modelLabel = ""; 153 | let currentProviderId: string | undefined; 154 | try { 155 | const aiRequestWaiter = await 
waitForAI();
				const aiProviders: IAIProvidersService =
					await aiRequestWaiter.promise;
				// Resolve the provider the palette badge should describe:
				// a palette-local override wins over the global main provider.
				const selectedProviderId =
					this.actionPaletteProviderId ||
					this.settings.aiProviders.main;
				const provider = aiProviders.providers.find(
					(p: IAIProvider) => p.id === selectedProviderId,
				);
				if (provider) {
					currentProviderId = provider.id;
					// A palette-local model override only applies while it
					// targets the currently selected provider.
					const modelToShow =
						this.actionPaletteModelProviderId === provider.id
							? this.actionPaletteModel || provider.model
							: provider.model;
					// Compose creativity label for badge
					const creativityKey =
						this.actionPaletteCreativityKey ??
						this.settings.defaults.creativity ??
						"";
					const creativityLabelMap: Record = {
						"": I18n.t("settings.creativityNone"),
						low: I18n.t("settings.creativityLow"),
						medium: I18n.t("settings.creativityMedium"),
						high: I18n.t("settings.creativityHigh"),
					};
					const creativityLabel =
						creativityLabelMap[creativityKey] || "";

					// "Provider · model · creativity", skipping empty parts.
					modelLabel = [
						provider.name,
						modelToShow,
						creativityLabel,
					]
						.filter(Boolean)
						.join(" · ");
				}
			} catch (e) {
				// Badge info is cosmetic; show the palette even if it fails.
				void e;
			}

			// All callbacks below are palette-local: they never persist
			// anything into plugin settings.
			showActionPalette(editorView, insertPos, {
				onSubmit: (
					text: string,
					selectedFiles: string[] = [],
					systemPrompt?: string,
				) => {
					const overrideProviderId =
						this.actionPaletteProviderId ||
						this.settings.aiProviders.main;
					// Palette-only creativity override
					const creativityKey =
						this.actionPaletteCreativityKey ??
						this.settings.defaults.creativity ??
						"";
					const temperatureOverride = (CREATIVITY as any)[
						creativityKey
					]?.temperature as number | undefined;

					// NOTE(review): rejections from runFreeform are not
					// handled here — presumably surfaced via Notices inside
					// executeAction; confirm the selectProvider throw path.
					this.runFreeform(
						editor,
						text,
						selectedFiles,
						overrideProviderId,
						temperatureOverride,
						systemPrompt,
					).finally(() => {});

					hideActionPalette(editorView);
					this.app.workspace.updateOptions();
				},
				onCancel: () => {
					hideActionPalette(editorView);
					this.app.workspace.updateOptions();
				},
				placeholder: I18n.t("commands.actionPalette.placeholder"),
				modelLabel: modelLabel,
				providerId: currentProviderId,
				// Markdown files plus PDFs, offered as attachable context.
				getFiles: () => {
					return this.app.vault
						.getMarkdownFiles()
						.concat(
							this.app.vault
								.getFiles()
								.filter((f) => f.extension === "pdf"),
						)
						.map((file) => ({
							path: file.path,
							basename: file.basename,
							extension: file.extension,
						}));
				},
				// Providers that have a model configured, for the picker.
				getProviders: async () => {
					try {
						const aiRequestWaiter = await waitForAI();
						const aiProviders: IAIProvidersService =
							await aiRequestWaiter.promise;

						return aiProviders.providers
							.filter((p) => Boolean(p.model))
							.map((p) => ({
								id: p.id,
								name:
									p.model ||
									I18n.t(
										"commands.actionPalette.unknownModel",
									),
								providerName: p.name,
								providerUrl:
									(p as unknown as { url?: string })
										.url || "",
							}));
					} catch (error) {
						console.error("Error fetching models:", error);
						return [];
					}
				},
				// Models for one provider; prefers the cached list and only
				// fetches from the provider when none is cached.
				getModels: async (providerId: string) => {
					try {
						const aiRequestWaiter = await waitForAI();
						const aiProviders: IAIProvidersService =
							await aiRequestWaiter.promise;
						const provider = aiProviders.providers.find(
							(p: IAIProvider) => p.id === providerId,
						);
						if (!provider) return [];
						const models =
							provider.availableModels ||
							(await aiProviders.fetchModels(provider));
						return models.map((m) => ({ id: m, name: m }));
					} catch (error) {
						console.error("Error fetching models:", error);
						return [];
					}
				},
				onProviderChange: async (providerId: string) => {
					// Only override Action Palette provider, keep settings unchanged
					this.actionPaletteProviderId = providerId;
					// A provider switch invalidates any model override.
					this.actionPaletteModel = null;
					this.actionPaletteModelProviderId = null;
				},
				onModelChange: async (model: string) => {
					// Remember which provider the model override belongs to.
					const providerId =
						this.actionPaletteProviderId ||
						this.settings.aiProviders.main;
					this.actionPaletteModel = model;
					this.actionPaletteModelProviderId = providerId;
				},
				onCreativityChange: async (creativityKey: string) => {
					// Only override Action Palette creativity, keep settings unchanged
					this.actionPaletteCreativityKey = creativityKey;
				},
				// Configured actions that carry a system prompt, reusable
				// as system-prompt presets in the palette.
				getSystemPrompts: () => {
					return this.settings.actions
						.filter((action) => action.system)
						.map((action) => ({
							name: action.name,
							system: action.system!,
						}));
				},
			});
			this.app.workspace.updateOptions();
		},
	});
}

/**
 * Runs a free-form prompt typed into the Action Palette.
 * Inserts below the selection (never replaces); provider/temperature/system
 * prompt are palette-local overrides.
 */
private async runFreeform(
	editor: Editor,
	userInput: string,
	selectedFiles: string[] = [],
	overrideProviderId?: string | null,
	customTemperature?: number,
	systemPrompt?: string,
) {
	return this.executeAction(
		{
			prompt: userInput,
			system: systemPrompt,
			replace: false,
			selectedFiles,
			overrideProviderId: overrideProviderId || undefined,
			temperature: customTemperature,
		},
		editor,
	);
}

/** Runs one of the configured actions against the current editor. */
async runAction(action: LocalGPTAction, editor: Editor) {
	return this.executeAction(
		{
			prompt: action.prompt,
			system: action.system,
			replace: !!action.replace,
			// Action-specific temperature wins over the creativity preset.
			temperature:
				action.temperature ||
				CREATIVITY[this.settings.defaults.creativity].temperature,
		},
		editor,
	);
}

/**
 * Core pipeline for every generation: capture the selection, show the
 * spinner, extract inline images, gather RAG context, pick a provider,
 * stream the completion, and write the result back into the editor.
 */
private async executeAction(
	params: {
		prompt: string;
		system?: string;
		replace?: boolean;
		temperature?: number;
		selectedFiles?: string[];
		overrideProviderId?: string | null;
	},
	editor: Editor,
) {
	const {
		editorView,
		cursorPositionFrom,
		cursorPositionTo,
		cursorOffsetTo,
		selectedTextRef,
	} = this.extractSelectionContext(editor);
	const { abortController, hideSpinner, onUpdate } =
		this.createExecutionContext(
			editorView,
			cursorOffsetTo,
			selectedTextRef,
		);

	// Inline image embeds are stripped from the prompt and sent as base64.
	const { cleanedText, imagesInBase64 } =
		await this.extractImagesFromSelection(selectedTextRef.value);
	selectedTextRef.value = cleanedText;

	// NOTE(review): time/timeEnd back-to-back measures nothing — presumably
	// leftover from when embedding work happened here; confirm before removing.
	logger.time("Processing Embeddings");
	logger.timeEnd("Processing Embeddings");
	logger.debug("Selected text", cleanedText);

	const aiRequestWaiter = await waitForAI();
	const aiProviders: IAIProvidersService = await aiRequestWaiter.promise;

	const embeddingProvider = aiProviders.providers.find(
		(provider: IAIProvider) =>
			provider.id === this.settings.aiProviders.embedding,
	);

	// RAG context from linked/attached files ("" when nothing applies).
	const context = await this.enhanceWithContext(
		cleanedText,
		aiProviders,
		embeddingProvider,
		abortController,
		params.selectedFiles,
	);

	const provider = this.selectProvider(
		aiProviders,
		imagesInBase64.length > 0,
		params.overrideProviderId,
	);
	const adjustedProvider = this.overrideProviderModel(provider, params);

	let fullText = "";
	try {
		fullText = await this.executeProviderRequest(
			aiProviders,
			adjustedProvider,
			params,
			cleanedText,
			context,
			imagesInBase64,
			abortController,
			onUpdate,
		);
	} finally {
		// Spinner must disappear on success, error, and abort alike.
		hideSpinner && hideSpinner();
		this.app.workspace.updateOptions();
	}

	if (abortController.signal.aborted) {
		return;
	}

	const finalText = removeThinkingTags(fullText).trim();
	this.applyTextResult(
		editor,
		params.replace,
		finalText,
		selectedTextRef.value,
		cursorPositionFrom,
		cursorPositionTo,
	);
}

/**
 * Snapshots the editor state the pipeline needs: the CM view, the selected
 * text (whole document when nothing is selected), and cursor positions.
 */
private extractSelectionContext(editor: Editor) {
	// @ts-expect-error, not typed
	const editorView = editor.cm;
	const selection = editor.getSelection();
	// Ref object so later stages can rewrite the text in place.
	const selectedTextRef = { value: selection || editor.getValue() };
	const cursorPositionFrom = editor.getCursor("from");
	const cursorPositionTo = editor.getCursor("to");
	const cursorOffsetTo = editor.posToOffset(cursorPositionTo);

	return {
		editorView,
		cursorPositionFrom,
		cursorPositionTo,
		cursorOffsetTo,
		selectedTextRef,
	};
}

/**
 * Sets up per-request machinery: an AbortController (registered so Escape
 * can cancel it), the inline spinner, and the streaming-update callback.
 */
private createExecutionContext(
	editorView: any,
	cursorOffsetTo: number,
	selectedTextRef: { value: string },
) {
	const abortController = new AbortController();
	this.abortControllers.push(abortController);

	const spinner = editorView.plugin(spinnerPlugin) || undefined;
	const hideSpinner = spinner?.show(cursorOffsetTo);
	this.app.workspace.updateOptions();

	// Abort must also clear the spinner (the finally block may not run yet).
	abortController.signal.addEventListener("abort", () => {
		hideSpinner && hideSpinner();
		this.app.workspace.updateOptions();
	});

	const onUpdate = (updatedString: string) => {
		if (!spinner) return;
		spinner.processText(updatedString, (text: string) =>
			this.processText(text, selectedTextRef.value),
		);
		this.app.workspace.updateOptions();
	};

	return { abortController, hideSpinner, onUpdate };
}

/**
 * Pulls ![[image]] embeds out of the selection; returns the text without
 * them plus the images encoded as base64 data URLs.
 */
private async extractImagesFromSelection(
	selectedText: string,
): Promise<{
cleanedText: string; imagesInBase64: string[] }> { 493 | const regexp = /!\[\[(.+?\.(?:png|jpe?g))]]/gi; 494 | const fileNames = Array.from( 495 | selectedText.matchAll(regexp), 496 | (match) => match[1], 497 | ); 498 | 499 | const cleanedText = selectedText.replace(regexp, ""); 500 | const imagesInBase64 = 501 | ( 502 | await Promise.all( 503 | fileNames.map((fileName) => 504 | this.readImageAsDataUrl(fileName), 505 | ), 506 | ) 507 | ).filter(Boolean) || []; 508 | 509 | return { cleanedText, imagesInBase64 }; 510 | } 511 | 512 | private async readImageAsDataUrl(fileName: string): Promise { 513 | const filePath = this.app.metadataCache.getFirstLinkpathDest( 514 | fileName, 515 | // @ts-ignore 516 | this.app.workspace.getActiveFile().path, 517 | ); 518 | 519 | if (!filePath) { 520 | return ""; 521 | } 522 | 523 | return this.app.vault.adapter 524 | .readBinary(filePath.path) 525 | .then((buffer) => { 526 | const extension = filePath.extension.toLowerCase(); 527 | const mimeType = extension === "jpg" ? "jpeg" : extension; 528 | const blob = new Blob([buffer], { 529 | type: `image/${mimeType}`, 530 | }); 531 | return new Promise((resolve) => { 532 | const reader = new FileReader(); 533 | reader.onloadend = () => resolve(reader.result as string); 534 | reader.readAsDataURL(blob); 535 | }); 536 | }); 537 | } 538 | 539 | private selectProvider( 540 | aiProviders: IAIProvidersService, 541 | hasImages: boolean, 542 | overrideProviderId?: string | null, 543 | ): IAIProvider { 544 | const visionCandidate = hasImages 545 | ? 
aiProviders.providers.find( 546 | (p: IAIProvider) => 547 | p.id === this.settings.aiProviders.vision, 548 | ) 549 | : undefined; 550 | const preferredProviderId = 551 | overrideProviderId || this.settings.aiProviders.main; 552 | const fallback = aiProviders.providers.find( 553 | (p) => p.id === preferredProviderId, 554 | ); 555 | 556 | const provider = visionCandidate || fallback; 557 | if (!provider) { 558 | throw new Error("No AI provider found"); 559 | } 560 | return provider; 561 | } 562 | 563 | private overrideProviderModel( 564 | provider: IAIProvider, 565 | params: { 566 | overrideProviderId?: string | null; 567 | }, 568 | ): IAIProvider { 569 | if ( 570 | this.actionPaletteModel && 571 | params.overrideProviderId && 572 | this.actionPaletteModelProviderId === params.overrideProviderId 573 | ) { 574 | return { ...provider, model: this.actionPaletteModel }; 575 | } 576 | return provider; 577 | } 578 | 579 | private async executeProviderRequest( 580 | aiProviders: IAIProvidersService, 581 | provider: IAIProvider, 582 | params: { prompt: string; system?: string; temperature?: number }, 583 | selectedText: string, 584 | context: string, 585 | imagesInBase64: string[], 586 | abortController: AbortController, 587 | onUpdate: (updatedString: string) => void, 588 | ): Promise { 589 | try { 590 | return await aiProviders.execute({ 591 | provider, 592 | prompt: preparePrompt(params.prompt, selectedText, context), 593 | images: imagesInBase64, 594 | systemPrompt: params.system, 595 | options: { 596 | temperature: 597 | params.temperature ?? 
598 | CREATIVITY[this.settings.defaults.creativity] 599 | .temperature, 600 | }, 601 | onProgress: (_chunk: string, accumulatedText: string) => { 602 | onUpdate(accumulatedText); 603 | }, 604 | abortController, 605 | }); 606 | } catch (error) { 607 | if (!abortController.signal.aborted) { 608 | new Notice( 609 | I18n.t("notices.errorGenerating", { 610 | message: (error as any).message, 611 | }), 612 | ); 613 | } 614 | logger.separator(); 615 | return ""; 616 | } 617 | } 618 | 619 | private applyTextResult( 620 | editor: Editor, 621 | replaceSelection: boolean | undefined, 622 | finalText: string, 623 | selectedText: string, 624 | cursorPositionFrom: any, 625 | cursorPositionTo: any, 626 | ) { 627 | if (replaceSelection) { 628 | editor.replaceRange( 629 | finalText, 630 | cursorPositionFrom, 631 | cursorPositionTo, 632 | ); 633 | return; 634 | } 635 | const isLastLine = editor.lastLine() === cursorPositionTo.line; 636 | const text = this.processText(finalText, selectedText); 637 | editor.replaceRange(isLastLine ? 
"\n" + text : text, { 638 | ch: 0, 639 | line: cursorPositionTo.line + 1, 640 | }); 641 | } 642 | 643 | async enhanceWithContext( 644 | selectedText: string, 645 | aiProviders: IAIProvidersService, 646 | aiProvider: IAIProvider | undefined, 647 | abortController: AbortController, 648 | selectedFiles?: string[], 649 | ): Promise { 650 | const activeFile = this.app.workspace.getActiveFile(); 651 | if (!activeFile || !aiProvider || abortController?.signal.aborted) { 652 | return ""; 653 | } 654 | 655 | const allLinkedFiles = this.collectLinkedFilesForContext( 656 | selectedText, 657 | selectedFiles, 658 | activeFile.path, 659 | ); 660 | if (allLinkedFiles.length === 0) { 661 | return ""; 662 | } 663 | 664 | try { 665 | this.initializeProgress(); 666 | 667 | const processedDocs = await startProcessing( 668 | allLinkedFiles, 669 | this.app.vault, 670 | this.app.metadataCache, 671 | activeFile, 672 | this.updateCompletedSteps.bind(this), 673 | ); 674 | 675 | if (this.shouldAbortProcessing(processedDocs, abortController)) { 676 | return this.finishContextProcessing(""); 677 | } 678 | 679 | const retrieveDocuments = Array.from(processedDocs.values()); 680 | 681 | if (abortController?.signal.aborted) { 682 | return this.finishContextProcessing(""); 683 | } 684 | 685 | const contextLimit = this.resolveContextLimit(); 686 | 687 | const relevantContext = await searchDocuments( 688 | selectedText, 689 | retrieveDocuments, 690 | aiProviders, 691 | aiProvider, 692 | abortController, 693 | this.updateCompletedSteps.bind(this), 694 | this.addTotalProgressSteps.bind(this), 695 | contextLimit, 696 | ); 697 | 698 | return this.finishContextProcessing(relevantContext.trim() || ""); 699 | } catch (error) { 700 | return this.handleContextError(error, abortController); 701 | } 702 | } 703 | 704 | private collectLinkedFilesForContext( 705 | selectedText: string, 706 | selectedFiles: string[] | undefined, 707 | activeFilePath: string, 708 | ): TFile[] { 709 | const linkedFiles = 
getLinkedFiles( 710 | selectedText, 711 | this.app.vault, 712 | this.app.metadataCache, 713 | activeFilePath, 714 | ); 715 | 716 | const additionalFiles = 717 | selectedFiles 718 | ?.map((filePath) => 719 | this.app.vault.getAbstractFileByPath(filePath), 720 | ) 721 | .filter( 722 | (file): file is TFile => 723 | file !== null && 724 | file instanceof TFile && 725 | (file.extension === "md" || file.extension === "pdf"), 726 | ) || []; 727 | 728 | return [...linkedFiles, ...additionalFiles]; 729 | } 730 | 731 | private shouldAbortProcessing( 732 | processedDocs: Map, 733 | abortController: AbortController, 734 | ): boolean { 735 | return processedDocs.size === 0 || abortController?.signal.aborted; 736 | } 737 | 738 | private resolveContextLimit(): number { 739 | const preset = this.settings?.defaults?.contextLimit as 740 | | "local" 741 | | "cloud" 742 | | "advanced" 743 | | "max"; 744 | const map: Record = { 745 | local: 10_000, 746 | cloud: 32_000, 747 | advanced: 100_000, 748 | max: 3_000_000, 749 | }; 750 | return map[preset]; 751 | } 752 | 753 | private finishContextProcessing(result: string): string { 754 | this.hideStatusBar(); 755 | return result; 756 | } 757 | 758 | private handleContextError( 759 | error: unknown, 760 | abortController: AbortController, 761 | ): string { 762 | this.hideStatusBar(); 763 | if (!abortController?.signal.aborted) { 764 | console.error("Error processing RAG:", error); 765 | new Notice( 766 | I18n.t("notices.errorProcessingRag", { 767 | message: (error as any).message, 768 | }), 769 | ); 770 | } 771 | return ""; 772 | } 773 | 774 | onunload() { 775 | document.removeEventListener("keydown", this.escapeHandler); 776 | window.clearInterval(this.updatingInterval); 777 | if (this.frameId !== null) { 778 | cancelAnimationFrame(this.frameId); 779 | } 780 | } 781 | 782 | async loadSettings() { 783 | const loadedData: LocalGPTSettings | undefined = await this.loadData(); 784 | const { settings, changed } = await 
this.migrateSettings(loadedData); 785 | 786 | this.settings = Object.assign({}, DEFAULT_SETTINGS, settings); 787 | 788 | if (changed) { 789 | await this.saveData(this.settings); 790 | } 791 | } 792 | 793 | // Legacy provider defaults used by older settings migrations 794 | private readonly legacyDefaultProviders = { 795 | ollama: { 796 | url: "http://localhost:11434", 797 | defaultModel: "gemma2", 798 | embeddingModel: "", 799 | type: "ollama", 800 | }, 801 | ollama_fallback: { 802 | url: "http://localhost:11434", 803 | defaultModel: "gemma2", 804 | embeddingModel: "", 805 | type: "ollama", 806 | }, 807 | openaiCompatible: { 808 | url: "http://localhost:8080/v1", 809 | apiKey: "", 810 | embeddingModel: "", 811 | type: "openaiCompatible", 812 | }, 813 | openaiCompatible_fallback: { 814 | url: "http://localhost:8080/v1", 815 | apiKey: "", 816 | embeddingModel: "", 817 | type: "openaiCompatible", 818 | }, 819 | } as const; 820 | 821 | private async migrateSettings( 822 | loadedData?: LocalGPTSettings, 823 | ): Promise<{ settings?: LocalGPTSettings; changed: boolean }> { 824 | if (!loadedData) { 825 | return { settings: loadedData, changed: false }; 826 | } 827 | 828 | let changed = false; 829 | changed = this.migrateToVersion2(loadedData) || changed; 830 | changed = this.migrateToVersion3(loadedData) || changed; 831 | changed = this.migrateToVersion4(loadedData) || changed; 832 | changed = this.migrateToVersion5(loadedData) || changed; 833 | changed = this.migrateToVersion6(loadedData) || changed; 834 | changed = (await this.migrateToVersion7(loadedData)) || changed; 835 | changed = this.migrateToVersion8(loadedData) || changed; 836 | 837 | return { settings: loadedData, changed }; 838 | } 839 | 840 | private migrateToVersion2(settings: LocalGPTSettings): boolean { 841 | if (settings._version && settings._version >= 1) { 842 | return false; 843 | } 844 | 845 | const providers: Record = JSON.parse( 846 | JSON.stringify(this.legacyDefaultProviders), 847 | ); 848 | 849 | 
(settings as any).providers = providers; 850 | (settings as any).providers.ollama.ollamaUrl = ( 851 | settings as any 852 | ).ollamaUrl; 853 | delete (settings as any).ollamaUrl; 854 | (settings as any).providers.ollama.defaultModel = ( 855 | settings as any 856 | ).defaultModel; 857 | delete (settings as any).defaultModel; 858 | (settings as any).providers.openaiCompatible && 859 | ((settings as any).providers.openaiCompatible.apiKey = ""); 860 | 861 | settings._version = 2; 862 | return true; 863 | } 864 | 865 | private migrateToVersion3(settings: LocalGPTSettings): boolean { 866 | if (settings._version && settings._version >= 3) { 867 | return false; 868 | } 869 | (settings as any).defaultProvider = 870 | (settings as any).selectedProvider || "ollama"; 871 | delete (settings as any).selectedProvider; 872 | 873 | const providers = (settings as any).providers; 874 | if (providers) { 875 | Object.keys(providers).forEach((key) => { 876 | providers[key].type = key; 877 | }); 878 | } 879 | 880 | settings._version = 3; 881 | return true; 882 | } 883 | 884 | private migrateToVersion4(settings: LocalGPTSettings): boolean { 885 | if (settings._version && settings._version >= 4) { 886 | return false; 887 | } 888 | 889 | (settings as any).defaults = { 890 | provider: (settings as any).defaultProvider || "ollama", 891 | fallbackProvider: (settings as any).fallbackProvider || "", 892 | creativity: "low", 893 | }; 894 | delete (settings as any).defaultProvider; 895 | delete (settings as any).fallbackProvider; 896 | 897 | settings._version = 4; 898 | return true; 899 | } 900 | 901 | private migrateToVersion5(settings: LocalGPTSettings): boolean { 902 | if (settings._version && settings._version >= 5) { 903 | return false; 904 | } 905 | 906 | const providers = (settings as any).providers; 907 | if (providers) { 908 | Object.keys(this.legacyDefaultProviders).forEach((provider) => { 909 | if (providers[provider]) { 910 | providers[provider].embeddingModel = ( 911 | 
this.legacyDefaultProviders as any 912 | )[provider].embeddingModel; 913 | } 914 | }); 915 | } 916 | 917 | settings._version = 5; 918 | setTimeout(() => { 919 | new Notice( 920 | `🎉 LocalGPT can finally use\ncontext from links!\nCheck the Settings!`, 921 | 0, 922 | ); 923 | }, 10000); 924 | return true; 925 | } 926 | 927 | private migrateToVersion6(settings: LocalGPTSettings): boolean { 928 | if (settings._version && settings._version >= 6) { 929 | return false; 930 | } 931 | 932 | const providers = (settings as any).providers; 933 | if (providers) { 934 | Object.keys(this.legacyDefaultProviders).forEach((provider) => { 935 | if (providers[provider]?.type === "ollama") { 936 | providers[provider].url = providers[provider].ollamaUrl; 937 | delete providers[provider].ollamaUrl; 938 | } 939 | if (providers[provider]?.type === "openaiCompatible") { 940 | providers[provider].url = 941 | providers[provider].url.replace(/\/+$/i, "") + "/v1"; 942 | } 943 | }); 944 | } 945 | 946 | settings._version = 6; 947 | return true; 948 | } 949 | 950 | private async migrateToVersion7( 951 | settings: LocalGPTSettings, 952 | ): Promise { 953 | if (settings._version && settings._version >= 7) { 954 | return false; 955 | } 956 | 957 | new Notice(I18n.t("notices.importantUpdate"), 0); 958 | const aiRequestWaiter = await waitForAI(); 959 | const aiProviders = await aiRequestWaiter.promise; 960 | 961 | settings.aiProviders = { 962 | main: null, 963 | embedding: null, 964 | vision: null, 965 | }; 966 | 967 | const oldProviders = (settings as any).providers; 968 | const oldDefaults = (settings as any).defaults; 969 | 970 | if (oldProviders && oldDefaults?.provider) { 971 | await this.migrateLegacyProviderConfig( 972 | settings, 973 | aiProviders, 974 | oldProviders, 975 | oldDefaults, 976 | ); 977 | } 978 | 979 | delete (settings as any).defaults; 980 | delete (settings as any).providers; 981 | 982 | settings._version = 7; 983 | return true; 984 | } 985 | 986 | private async 
migrateLegacyProviderConfig( 987 | settings: LocalGPTSettings, 988 | aiProviders: any, 989 | oldProviders: Record, 990 | oldDefaults: Record, 991 | ) { 992 | const provider = oldDefaults.provider; 993 | const typesMap: { [key: string]: string } = { 994 | ollama: "ollama", 995 | openaiCompatible: "openai", 996 | }; 997 | 998 | const providerConfig = oldProviders[provider]; 999 | if (!providerConfig) { 1000 | return; 1001 | } 1002 | const type = typesMap[providerConfig.type]; 1003 | await this.createMigratedProvider( 1004 | settings, 1005 | aiProviders, 1006 | provider, 1007 | providerConfig, 1008 | type, 1009 | "main", 1010 | providerConfig.defaultModel, 1011 | ); 1012 | await this.createMigratedProvider( 1013 | settings, 1014 | aiProviders, 1015 | provider, 1016 | providerConfig, 1017 | type, 1018 | "embedding", 1019 | providerConfig.embeddingModel, 1020 | ); 1021 | } 1022 | 1023 | private async createMigratedProvider( 1024 | settings: LocalGPTSettings, 1025 | aiProviders: any, 1026 | provider: string, 1027 | providerConfig: any, 1028 | type: string, 1029 | targetKey: "main" | "embedding", 1030 | model?: string, 1031 | ) { 1032 | if (!model) { 1033 | return; 1034 | } 1035 | let adjustedModel = model; 1036 | if (type === "ollama" && !adjustedModel.endsWith(":latest")) { 1037 | adjustedModel = `${adjustedModel}:latest`; 1038 | } 1039 | const id = `id-${Date.now().toString()}`; 1040 | const newProvider = await (aiProviders as any).migrateProvider({ 1041 | id, 1042 | name: 1043 | targetKey === "main" 1044 | ? 
`Local GPT ${provider}` 1045 | : `Local GPT ${provider} embeddings`, 1046 | apiKey: providerConfig.apiKey, 1047 | url: providerConfig.url, 1048 | type, 1049 | model: adjustedModel, 1050 | }); 1051 | 1052 | if (newProvider) { 1053 | settings.aiProviders[targetKey] = newProvider.id; 1054 | } 1055 | } 1056 | 1057 | private migrateToVersion8(settings: LocalGPTSettings): boolean { 1058 | if (settings._version && settings._version >= 8) { 1059 | return false; 1060 | } 1061 | 1062 | (settings as any).defaults = (settings as any).defaults || {}; 1063 | (settings as any).defaults.contextLimit = 1064 | (settings as any).defaults.contextLimit || "local"; 1065 | 1066 | settings._version = 8; 1067 | return true; 1068 | } 1069 | 1070 | async checkUpdates() { 1071 | try { 1072 | const { json: response } = await requestUrl({ 1073 | url: "https://api.github.com/repos/pfrankov/obsidian-local-gpt/releases/latest", 1074 | method: "GET", 1075 | headers: { 1076 | "Content-Type": "application/json", 1077 | }, 1078 | contentType: "application/json", 1079 | }); 1080 | 1081 | if (response.tag_name !== this.manifest.version) { 1082 | new Notice(I18n.t("notices.newVersion")); 1083 | } 1084 | } catch (error) { 1085 | console.error("Error checking for updates:", error); 1086 | } 1087 | } 1088 | 1089 | escapeHandler = (event: KeyboardEvent) => { 1090 | if (event.key === "Escape") { 1091 | this.abortControllers.forEach( 1092 | (abortControllers: AbortController) => { 1093 | abortControllers.abort(); 1094 | }, 1095 | ); 1096 | this.abortControllers = []; 1097 | } 1098 | }; 1099 | 1100 | reload() { 1101 | this.onunload(); 1102 | this.addCommands(); 1103 | this.abortControllers = []; 1104 | this.updatingInterval = window.setInterval( 1105 | this.checkUpdates.bind(this), 1106 | 10800000, 1107 | ); // every 3 hours 1108 | document.addEventListener("keydown", this.escapeHandler); 1109 | } 1110 | 1111 | async saveSettings() { 1112 | await this.saveData(this.settings); 1113 | this.reload(); 1114 | } 1115 
| 1116 | private initializeProgress() { 1117 | this.totalProgressSteps = 0; 1118 | this.completedProgressSteps = 0; 1119 | this.currentPercentage = 0; 1120 | this.targetPercentage = 0; 1121 | this.displayedPercentage = 0; 1122 | this.baseSpeed = 0; 1123 | this.lastTargetUpdateTime = null; 1124 | this.lastFrameTime = null; 1125 | this.progressFinished = false; 1126 | this.stopAnimation(); 1127 | this.statusBarItem.show(); 1128 | this.updateStatusBar(); 1129 | } 1130 | 1131 | private addTotalProgressSteps(steps: number) { 1132 | this.totalProgressSteps += steps; 1133 | this.updateProgressBar(); 1134 | } 1135 | 1136 | private updateCompletedSteps(steps: number) { 1137 | this.completedProgressSteps += steps; 1138 | // Maintain invariant: total >= completed (dynamic totals may appear late) 1139 | if (this.completedProgressSteps > this.totalProgressSteps) { 1140 | this.totalProgressSteps = this.completedProgressSteps; 1141 | } 1142 | this.updateProgressBar(); 1143 | } 1144 | 1145 | private updateProgressBar() { 1146 | const newTarget = this.calculateTargetPercentage(); 1147 | if (newTarget === this.targetPercentage) { 1148 | return; 1149 | } 1150 | const now = performance.now(); 1151 | this.baseSpeed = this.calculateBaseSpeed(newTarget, now); 1152 | this.targetPercentage = newTarget; 1153 | this.lastTargetUpdateTime = now; 1154 | this.ensureAnimationLoop(); 1155 | } 1156 | 1157 | private calculateTargetPercentage(): number { 1158 | if (this.totalProgressSteps <= 0) { 1159 | return 0; 1160 | } 1161 | const ratio = Math.min( 1162 | this.completedProgressSteps / this.totalProgressSteps, 1163 | 1, 1164 | ); 1165 | return Math.floor(ratio * 100); 1166 | } 1167 | 1168 | private calculateBaseSpeed(newTarget: number, now: number): number { 1169 | if (this.lastTargetUpdateTime === null) { 1170 | return this.baseSpeed; 1171 | } 1172 | const dt = now - this.lastTargetUpdateTime; 1173 | const diff = newTarget - this.targetPercentage; 1174 | if (dt <= 0 || diff <= 0) { 1175 | return 
this.baseSpeed; 1176 | } 1177 | const instantaneous = diff / dt; 1178 | const blended = 1179 | this.baseSpeed === 0 1180 | ? instantaneous 1181 | : this.baseSpeed * 0.75 + instantaneous * 0.25; 1182 | 1183 | return Math.min(MAX_BASE_SPEED, Math.max(MIN_BASE_SPEED, blended)); 1184 | } 1185 | 1186 | private ensureAnimationLoop() { 1187 | if (this.frameId !== null) { 1188 | return; 1189 | } 1190 | this.lastFrameTime = null; 1191 | this.frameId = requestAnimationFrame(this.animationLoop); 1192 | } 1193 | 1194 | private updateStatusBar() { 1195 | const shown = this.progressFinished 1196 | ? this.currentPercentage 1197 | : Math.min(this.currentPercentage, 99); 1198 | this.statusBarItem.setAttr( 1199 | "data-text", 1200 | shown 1201 | ? I18n.t("statusBar.enhancingWithProgress", { 1202 | percent: String(shown), 1203 | }) 1204 | : I18n.t("statusBar.enhancing"), 1205 | ); 1206 | this.statusBarItem.setText(` `); 1207 | } 1208 | 1209 | private animationLoop = (time: number) => { 1210 | if (this.lastFrameTime === null) { 1211 | this.lastFrameTime = time; 1212 | } 1213 | const delta = time - this.lastFrameTime; 1214 | this.lastFrameTime = time; 1215 | const target = this.targetPercentage; 1216 | if (delta > 0 && this.displayedPercentage < target) { 1217 | let speed = this.baseSpeed; 1218 | if (speed === 0) { 1219 | // Initial guess: reach target in ~400ms 1220 | speed = (target - this.displayedPercentage) / 400; 1221 | } 1222 | this.displayedPercentage = Math.min( 1223 | target, 1224 | this.displayedPercentage + speed * delta, 1225 | ); 1226 | const rounded = Math.floor(this.displayedPercentage); 1227 | if (rounded !== this.currentPercentage) { 1228 | this.currentPercentage = rounded; 1229 | this.updateStatusBar(); 1230 | } 1231 | } 1232 | if (this.displayedPercentage >= target) { 1233 | this.displayedPercentage = target; 1234 | this.currentPercentage = target; 1235 | this.updateStatusBar(); 1236 | } 1237 | if ( 1238 | this.currentPercentage < this.targetPercentage || 1239 | 
this.displayedPercentage < this.targetPercentage 1240 | ) { 1241 | this.frameId = requestAnimationFrame(this.animationLoop); 1242 | return; 1243 | } 1244 | this.stopAnimation(); 1245 | }; 1246 | 1247 | private stopAnimation() { 1248 | if (this.frameId !== null) { 1249 | cancelAnimationFrame(this.frameId); 1250 | } 1251 | this.frameId = null; 1252 | this.lastFrameTime = null; 1253 | } 1254 | 1255 | private hideStatusBar() { 1256 | this.statusBarItem.hide(); 1257 | this.totalProgressSteps = 0; 1258 | this.completedProgressSteps = 0; 1259 | this.currentPercentage = 0; 1260 | this.targetPercentage = 0; 1261 | this.displayedPercentage = 0; 1262 | this.baseSpeed = 0; 1263 | this.lastTargetUpdateTime = null; 1264 | this.lastFrameTime = null; 1265 | this.progressFinished = false; 1266 | this.stopAnimation(); 1267 | } 1268 | 1269 | private markProgressFinished() { 1270 | if (this.progressFinished) { 1271 | return; 1272 | } 1273 | this.progressFinished = true; 1274 | this.currentPercentage = 100; 1275 | this.displayedPercentage = 100; 1276 | this.targetPercentage = 100; 1277 | this.updateStatusBar(); 1278 | } 1279 | } 1280 | --------------------------------------------------------------------------------