├── .editorconfig ├── .eslintignore ├── .eslintrc.json ├── .github ├── ISSUE_TEMPLATE.md ├── PULL_REQUEST_TEMPLATE.md └── workflows │ └── ci.yaml ├── .gitignore ├── .prettierrc.json ├── LICENSE ├── README.md ├── esbuild.config.mjs ├── jest.config.js ├── manifest.json ├── package.json ├── src ├── api │ ├── clients │ │ ├── gemini.ts │ │ ├── ollama.ts │ │ ├── openai-compatible.ts │ │ ├── openai.ts │ │ └── openrouter.ts │ ├── index.ts │ ├── prompts │ │ ├── chat │ │ │ ├── example1 │ │ │ │ ├── assistant.md │ │ │ │ └── user.md │ │ │ ├── example2 │ │ │ │ ├── assistant.md │ │ │ │ └── user.md │ │ │ ├── index.ts │ │ │ └── system.txt │ │ ├── completions │ │ │ ├── block-quote │ │ │ │ ├── example1 │ │ │ │ │ ├── assistant.txt │ │ │ │ │ └── user.md │ │ │ │ ├── index.ts │ │ │ │ └── system.txt │ │ │ ├── code-block │ │ │ │ ├── example1 │ │ │ │ │ ├── assistant.txt │ │ │ │ │ └── user.md │ │ │ │ ├── example2 │ │ │ │ │ ├── assistant.txt │ │ │ │ │ └── user.md │ │ │ │ ├── example3 │ │ │ │ │ ├── assistant.txt │ │ │ │ │ └── user.md │ │ │ │ ├── index.ts │ │ │ │ └── system.txt │ │ │ ├── heading │ │ │ │ ├── example1 │ │ │ │ │ ├── assistant.txt │ │ │ │ │ └── user.md │ │ │ │ ├── example2 │ │ │ │ │ ├── assistant.txt │ │ │ │ │ └── user.md │ │ │ │ ├── index.ts │ │ │ │ └── system.txt │ │ │ ├── list-item │ │ │ │ ├── example1 │ │ │ │ │ ├── assistant.txt │ │ │ │ │ └── user.md │ │ │ │ ├── index.ts │ │ │ │ └── system.txt │ │ │ ├── math-block │ │ │ │ ├── example1 │ │ │ │ │ ├── assistant.txt │ │ │ │ │ └── user.md │ │ │ │ ├── example2 │ │ │ │ │ ├── assistant.txt │ │ │ │ │ └── user.md │ │ │ │ ├── index.ts │ │ │ │ └── system.txt │ │ │ └── paragraph │ │ │ │ ├── example1 │ │ │ │ ├── assistant.txt │ │ │ │ └── user.md │ │ │ │ ├── index.ts │ │ │ │ └── system.txt │ │ ├── context.ts │ │ ├── generator.ts │ │ └── index.ts │ ├── providers │ │ ├── costs.ts │ │ ├── index.ts │ │ ├── models.ts │ │ ├── ollama.json │ │ ├── openai.json │ │ └── openrouter.json │ └── proxies │ │ ├── ignored-filter.ts │ │ ├── memory-cache.ts │ │ 
└── usage-monitor.ts ├── chat │ ├── App.tsx │ ├── components │ │ ├── ChatBox.tsx │ │ └── ChatItem.tsx │ └── view.tsx ├── editor │ ├── extension.ts │ ├── keymap.ts │ ├── listener.ts │ ├── state.ts │ └── view.ts ├── icons │ └── bot-off.svg ├── index.d.ts ├── main.ts ├── scripts │ └── scrape.ts ├── settings │ ├── index.ts │ ├── migrators │ │ ├── 1.1.0-1.2.0.test.ts │ │ ├── 1.1.0-1.2.0.ts │ │ ├── 1.2.0-1.2.5.test.ts │ │ ├── 1.2.0-1.2.5.ts │ │ └── index.ts │ ├── runner.ts │ ├── utils.ts │ └── versions │ │ ├── 1.1.0 │ │ ├── api │ │ │ └── openai.ts │ │ └── index.ts │ │ ├── 1.2.0 │ │ ├── api │ │ │ ├── index.ts │ │ │ └── providers │ │ │ │ ├── index.ts │ │ │ │ ├── models.ts │ │ │ │ ├── ollama.json │ │ │ │ ├── openai.json │ │ │ │ └── openrouter.json │ │ └── index.ts │ │ └── 1.2.5 │ │ ├── api │ │ ├── index.ts │ │ └── providers │ │ │ ├── index.ts │ │ │ ├── models.ts │ │ │ ├── ollama.json │ │ │ ├── openai.json │ │ │ └── openrouter.json │ │ └── index.ts └── utils.ts ├── styles.css ├── tsconfig.json ├── version-bump.mjs ├── versions.json └── yarn.lock /.editorconfig: -------------------------------------------------------------------------------- 1 | # top-most EditorConfig file 2 | root = true 3 | 4 | [*] 5 | charset = utf-8 6 | end_of_line = lf 7 | insert_final_newline = true 8 | indent_style = tab 9 | indent_size = 4 10 | tab_width = 4 11 | -------------------------------------------------------------------------------- /.eslintignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | 3 | main.js 4 | -------------------------------------------------------------------------------- /.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "root": true, 3 | "parser": "@typescript-eslint/parser", 4 | "env": { "node": true }, 5 | "plugins": [ 6 | "@typescript-eslint" 7 | ], 8 | "extends": [ 9 | "eslint:recommended", 10 | "plugin:@typescript-eslint/eslint-recommended", 11 | 
"plugin:@typescript-eslint/recommended" 12 | ], 13 | "parserOptions": { 14 | "sourceType": "module" 15 | }, 16 | "rules": { 17 | "no-unused-vars": "off", 18 | "no-constant-condition": "off", 19 | "@typescript-eslint/no-non-null-assertion": "off", 20 | "@typescript-eslint/no-unused-vars": ["error", { "args": "none" }], 21 | "@typescript-eslint/ban-ts-comment": "off", 22 | "no-prototype-builtins": "off", 23 | "@typescript-eslint/no-empty-function": "off" 24 | } 25 | } -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## Expected Behavior 2 | 3 | ## Actual Behavior 4 | 5 | ## Steps to Reproduce the Problem 6 | 7 | 1. 8 | 1. 9 | 1. 10 | 11 | ## Specifications 12 | 13 | - Version: 14 | - Platform: 15 | - Subsystem: 16 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | Fixes # 2 | 3 | ## Proposed Changes 4 | 5 | - 6 | - 7 | - 8 | -------------------------------------------------------------------------------- /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | pull_request: 5 | push: 6 | 7 | permissions: 8 | contents: read 9 | packages: read 10 | 11 | jobs: 12 | typecheck: 13 | name: Typecheck 14 | runs-on: ubuntu-latest 15 | 16 | steps: 17 | - name: Checkout 18 | id: checkout 19 | uses: actions/checkout@v4 20 | 21 | - name: Setup Node.js 22 | id: setup-node 23 | uses: actions/setup-node@v4 24 | with: 25 | node-version: 18 26 | 27 | - name: Install dependencies 28 | id: install 29 | run: yarn install 30 | 31 | - name: Typecheck 32 | id: typecheck 33 | run: yarn typecheck 34 | 35 | format: 36 | name: Format 37 | runs-on: ubuntu-latest 38 | 39 | steps: 40 | - name: Checkout 41 | id: checkout 42 | 
uses: actions/checkout@v4 43 | 44 | - name: Setup Node.js 45 | id: setup-node 46 | uses: actions/setup-node@v4 47 | with: 48 | node-version: 18 49 | 50 | - name: Install dependencies 51 | id: install 52 | run: yarn install 53 | 54 | - name: Run formatter 55 | id: format 56 | run: yarn format 57 | 58 | lint: 59 | name: Lint 60 | runs-on: ubuntu-latest 61 | 62 | steps: 63 | - name: Checkout 64 | id: checkout 65 | uses: actions/checkout@v4 66 | 67 | - name: Setup Node.js 68 | id: setup-node 69 | uses: actions/setup-node@v4 70 | with: 71 | node-version: 18 72 | 73 | - name: Install dependencies 74 | id: install 75 | run: yarn install 76 | 77 | - name: Run linter 78 | id: lint 79 | run: yarn lint 80 | 81 | test: 82 | name: Test 83 | runs-on: ubuntu-latest 84 | 85 | steps: 86 | - name: Checkout 87 | id: checkout 88 | uses: actions/checkout@v4 89 | 90 | - name: Setup Node.js 91 | id: setup-node 92 | uses: actions/setup-node@v4 93 | with: 94 | node-version: 18 95 | 96 | - name: Install dependencies 97 | id: install 98 | run: yarn install 99 | 100 | - name: Run tests 101 | id: test 102 | run: yarn test 103 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # vscode 2 | .vscode 3 | 4 | # Intellij 5 | *.iml 6 | .idea 7 | 8 | # npm 9 | node_modules 10 | 11 | # Don't include the compiled main.js file in the repo. 12 | # They should be uploaded to GitHub releases instead. 
13 | main.js 14 | 15 | # Exclude sourcemaps 16 | *.map 17 | 18 | # obsidian 19 | data.json 20 | 21 | # Exclude macOS Finder (System Explorer) View States 22 | .DS_Store 23 | -------------------------------------------------------------------------------- /.prettierrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "trailingComma": "all", 3 | "tabWidth": 2, 4 | "semi": true, 5 | "singleQuote": true 6 | } 7 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Taichi Maeda 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 🤖 Markpilot: AI-powered inline completions and chat view for Obsidian 2 | 3 | ![workflow](https://github.com/taichimaeda/markpilot/actions/workflows/ci.yaml/badge.svg) 4 | ![semver](https://img.shields.io/badge/semver-1.2.2-blue) 5 | 6 | Markpilot is an Obsidian plugin that offers _inline completions_ features and _chat view_ in the sidebar. It aims to provide a similar experience to [GitHub Copilot](https://github.com/features/copilot) in Obsidian. 7 | 8 | Currently the plugin supports models provided by OpenAI API, OpenRouter API and **local models** by Ollama. We are planning to support more providers in the future, such as Gemini Pro API. 9 | 10 | There are plugins that provide similar features, such as [Obsidian Companion](https://github.com/rizerphe/obsidian-companion) and [Obsidian Copilot Autocompletion](https://github.com/j0rd1smit/obsidian-copilot-auto-completion) for AI-powered auto-completions, and [Obsidian Copilot](https://github.com/logancyang/obsidian-copilot) for chat UI. 11 | 12 | However, Markpilot is designed to be a _GitHub Copilot-flavored_ alternative that provides _both features_ in one plugin, with more sophisticated UI/UX, including: 13 | 14 | - Context-aware inline completions. 15 | - Detects the context of a Markdown content, and uses an optimised system prompt for each. 16 | - e.g. List, heading, code block 17 | - Detects the language of a Markdown code block, and enforces the model to use the same language. 18 | - e.g. Python, JavaScript 19 | - Advanced prompting techniques (Beta) 20 | - Context-aware system prompts. 21 | - Context-aware few-shot examples to guide the model to generate more accurate completions. 22 | - Carefully-designed user experience. 23 | - Force completions before waiting time by hitting `Tab` twice. 
24 | - Reject completions by hitting `Esc` key. 25 | - Send a chat message by hitting `Enter`, add a new line by hitting `Shift + Enter`. 26 | - Usage limit feature to manage costs. 27 | - Fast in-memory caching to save costs. 28 | - Disable inline completions features by filename and tags. 29 | 30 | Markpilot also comes with a bar chart visualization of usage similar to [OpenAI API Platform](https://platform.openai.com/usage), and the fact that Markpilot offers both features in one plugin makes it a more convenient choice for users who want to manage their API usage in one place. 31 | 32 | Markpilot's chat view UI is heavily inspired by [GitHub Copilot for VSCode](https://code.visualstudio.com/docs/copilot/overview), and the CodeMirror extension by [codemirror-copilot](https://github.com/asadm/codemirror-copilot). Also I took inspirations from [Obsidian Copilot Autocompletion](https://github.com/j0rd1smit/obsidian-copilot-auto-completion) to implement the few-shot prompts feature. 33 | 34 | ## Demo 35 | 36 | ### Inline Completions 37 | 38 | [Inline Completions Demo](https://github.com/taichimaeda/markpilot/assets/28210288/5659c12b-22d2-4427-ad98-c4376c7718d8) 39 | 40 | ### Chat View 41 | 42 | [Chat View Demo](https://github.com/taichimaeda/markpilot/assets/28210288/a4ba56a9-9672-4560-a4a4-829a3cfeceed) 43 | 44 | ## Getting Started 45 | 46 | Markpilot currently supports OpenAI API, OpenRouter API and Ollama as providers for inline completions and chat view. 47 | 48 | ### Using OpenAI API 49 | 50 | First, you need to obtain the API key from [OpenAI API](https://platform.openai.com/docs/guides/authentication). 51 | 52 | 1. Install the plugin from the Obsidian community plugins. 53 | 2. Navigate to the plugin settings: 54 | 1. Under **Providers** > **OpenAI API Key**, enter your OpenAI API key. 55 | 2. Under **Inline completions** > **Provider**, select **OpenAI**. 56 | 3. 
Under **Inline completions** > **Model**, select the model you want to use (Recommended: `gpt-3.5-turbo`). 57 | 4. Repeat the same steps for the chat view settings under **Chat view**. 58 | 3. You're all set! Enjoy using Markpilot. 59 | 60 | ### Using OpenRouter API 61 | 62 | First, you need to obtain the API key from [OpenRouter API](https://openrouter.ai/keys). 63 | 64 | 1. Install the plugin from the Obsidian community plugins. 65 | 2. Navigate to the plugin settings: 66 | 1. Under **Providers** > **OpenRouter API Key**, enter your OpenRouter API key. 67 | 2. Under **Inline completions** > **Provider**, select **OpenRouter**. 68 | 3. Under **Inline completions** > **Model**, select the model you want to use (Recommended: `gpt-3.5-turbo`). 69 | 4. Repeat the same steps for the chat view settings under **Chat view**. 70 | 3. You're all set! Enjoy using Markpilot. 71 | 72 | ### Using Ollama (MacOS, Linux, Windows - Preview) 73 | 74 | First, download [Ollama](https://ollama.com/download) and follow the instructions to install it. 75 | 76 | Now you need to pull the local model of your choice from Ollama (Recommended: `llama2`). 77 | 78 | ```console 79 | $ ollama pull --model llama2 80 | ``` 81 | 82 | This will take some time. Once the model is downloaded, you can start the Ollama server: 83 | 84 | ```console 85 | $ ollama serve 86 | ``` 87 | 88 | If you are on MacOS, the server should start automatically when you login. 89 | If you are on Linux, you may need to configure the startup service manually: [Ollama on Linux](https://github.com/ollama/ollama/blob/main/docs/linux.md) 90 | 91 | Now you can install Markpilot and set it up to use Ollama: 92 | 93 | 1. Install the plugin from the Obsidian community plugins. 94 | 2. Navigate to the plugin settings: 95 | 1. Under **Providers**, click **Test Ollama Connection** and see if the Ollama server is running correctly. 96 | 2. Under **Inline completions** > **Provider**, select **Ollama**. 97 | 3. 
Under **Inline completions** > **Model**, select the model you want to use (Recommended: `llama2`). 98 | - Make sure to only select the same model you pulled from Ollama. 99 | 4. Repeat the same steps for the chat view settings under **Chat view**. 100 | 3. You're all set! Enjoy using Markpilot. 101 | 102 | ## Caveats 103 | 104 | If you use the OpenAI API or OpenRouter API, this plugin will send your content to the OpenAI API to generate completions. 105 | 106 | You should be cautious about sending sensitive information to the API, and be aware of the costs associated with using the API. The plugin provides a usage limit feature to help you manage your costs, but it is your responsibility to monitor your usage and costs. 107 | 108 | ## Features 109 | 110 | - Providers 111 | - Support for OpenAI API, OpenRouter API and **local** models available on Ollama. 112 | - Providers and models are customisable independently for inline completions and chat view. 113 | - Inline completions 114 | - Context-aware system prompts. 115 | - Context-aware few-shot examples to guide the model to generate more accurate completions. 116 | - Chat view 117 | - Open chat view from the sidebar. 118 | - Clear chat history from the command palette. 119 | - Stop chat response by clicking the stop button. 120 | - Caching 121 | - In-memory cache to save costs (will be cleared when Obsidian restarts). 122 | - Filtering 123 | - Disable inline completions features by filename (glob) and tags (regex). 124 | - Usage 125 | - Set a monthly usage limit to automatically disable features when the limit is reached. 126 | - Monitor costs in a graph from the settings tab. 127 | 128 | ## Frequently Asked Questions 129 | 130 | ### I can't accept completions by hitting `Tab`. 131 | 132 | Currently some extensions like [Obsidian Outliner](https://github.com/vslinko/obsidian-outliner) use the `Tab` key for their own purposes, which will conflict with Markpilot's completions. 
133 | 134 | Although I could not find documentation on this, it seems like the keybindings are loaded in the order of when the plugins got enabled, so you can try disabling the conflicting plugin and enabling it again to prioritise Markpilot's keybindings. 135 | -------------------------------------------------------------------------------- /esbuild.config.mjs: -------------------------------------------------------------------------------- 1 | import builtins from "builtin-modules"; 2 | import esbuild from "esbuild"; 3 | import process from "process"; 4 | 5 | const banner = `/* 6 | THIS IS A GENERATED/BUNDLED FILE BY ESBUILD 7 | if you want to view the source, please visit the github repository of this plugin 8 | */ 9 | `; 10 | 11 | const prod = process.argv[2] === "production"; 12 | 13 | const context = await esbuild.context({ 14 | banner: { 15 | js: banner, 16 | }, 17 | entryPoints: ["src/main.ts"], 18 | bundle: true, 19 | external: [ 20 | "obsidian", 21 | "electron", 22 | "@codemirror/autocomplete", 23 | "@codemirror/collab", 24 | "@codemirror/commands", 25 | "@codemirror/language", 26 | "@codemirror/lint", 27 | "@codemirror/search", 28 | "@codemirror/state", 29 | "@codemirror/view", 30 | "@lezer/common", 31 | "@lezer/highlight", 32 | "@lezer/lr", 33 | ...builtins, 34 | ], 35 | platform: "node", 36 | format: "cjs", 37 | target: "es2018", 38 | logLevel: "info", 39 | sourcemap: prod ? 
false : "inline", 40 | treeShaking: true, 41 | outfile: "main.js", 42 | loader: { 43 | ".txt": "text", 44 | ".md": "text", 45 | ".svg": "text", // For custom icons, 46 | }, 47 | }); 48 | 49 | if (prod) { 50 | await context.rebuild(); 51 | process.exit(0); 52 | } else { 53 | await context.watch(); 54 | } 55 | -------------------------------------------------------------------------------- /jest.config.js: -------------------------------------------------------------------------------- 1 | /** @type {import('ts-jest').JestConfigWithTsJest} */ 2 | module.exports = { 3 | preset: 'ts-jest', 4 | testEnvironment: 'node', 5 | roots: [''], 6 | modulePaths: [''], 7 | moduleDirectories: ['node_modules'], 8 | }; 9 | -------------------------------------------------------------------------------- /manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "markpilot", 3 | "name": "Markpilot", 4 | "version": "1.2.5", 5 | "minAppVersion": "1.5.7", 6 | "description": "Inline completions and chat view powered by OpenAI", 7 | "author": "Taichi Maeda", 8 | "authorUrl": "https://taichimaeda.github.io", 9 | "isDesktopOnly": true 10 | } -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "markpilot", 3 | "version": "1.2.5", 4 | "description": "Inline completions and chat view powered by OpenAI", 5 | "main": "main.js", 6 | "scripts": { 7 | "dev": "node esbuild.config.mjs", 8 | "build": "tsc -noEmit -skipLibCheck && node esbuild.config.mjs production", 9 | "version": "node version-bump.mjs && git add manifest.json versions.json", 10 | "typecheck": "tsc --noEmit", 11 | "format": "prettier src --check", 12 | "format:fix": "prettier src --write", 13 | "lint": "eslint src --max-warnings 0", 14 | "lint:fix": "eslint src --max-warnings 0 --fix", 15 | "test": "jest", 16 | "scrape": "ts-node 
src/scripts/scrape.ts" 17 | }, 18 | "keywords": [], 19 | "author": "", 20 | "license": "MIT", 21 | "devDependencies": { 22 | "@types/jest": "^29.5.12", 23 | "@types/node": "^16.11.6", 24 | "@types/react": "^18.2.73", 25 | "@types/react-dom": "^18.2.22", 26 | "@typescript-eslint/eslint-plugin": "5.29.0", 27 | "@typescript-eslint/parser": "5.29.0", 28 | "axios": "^1.6.8", 29 | "builtin-modules": "3.3.0", 30 | "cheerio": "^1.0.0-rc.12", 31 | "commander": "^12.0.0", 32 | "esbuild": "0.17.3", 33 | "eslint": "^8.57.0", 34 | "prettier": "^3.2.5", 35 | "jest": "^29.7.0", 36 | "obsidian": "latest", 37 | "ts-jest": "^29.1.2", 38 | "ts-node": "^10.9.2", 39 | "tslib": "2.4.0", 40 | "typescript": "4.7.4" 41 | }, 42 | "dependencies": { 43 | "@codemirror/state": "^6.4.1", 44 | "@codemirror/view": "^6.26.0", 45 | "chart.js": "^4.4.2", 46 | "js-tiktoken": "^1.0.10", 47 | "lucide-react": "^0.363.0", 48 | "minimatch": "^9.0.4", 49 | "openai": "^4.30.0", 50 | "react": "^18.2.0", 51 | "react-dom": "^18.2.0", 52 | "react-markdown": "^9.0.1", 53 | "rehype-katex": "^7.0.0", 54 | "remark-math": "^6.0.0" 55 | } 56 | } -------------------------------------------------------------------------------- /src/api/clients/gemini.ts: -------------------------------------------------------------------------------- 1 | import { APIClient, ChatMessage } from '..'; 2 | 3 | // TODO: 4 | // Implement API client for Gemini. 
5 | 6 | export class GeminiAPIClient implements APIClient { 7 | fetchChat(messages: ChatMessage[]): AsyncGenerator { 8 | throw new Error('Method not implemented.'); 9 | } 10 | fetchCompletions( 11 | prefix: string, 12 | suffix: string, 13 | ): Promise { 14 | throw new Error('Method not implemented.'); 15 | } 16 | testConnection(): Promise { 17 | throw new Error('Method not implemented.'); 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /src/api/clients/ollama.ts: -------------------------------------------------------------------------------- 1 | import { Notice } from 'obsidian'; 2 | import OpenAI from 'openai'; 3 | import Markpilot from 'src/main'; 4 | import { validateURL } from 'src/utils'; 5 | import { APIClient } from '..'; 6 | import { PromptGenerator } from '../prompts/generator'; 7 | import { Provider } from '../providers'; 8 | import { CostsTracker } from '../providers/costs'; 9 | import { OpenAICompatibleAPIClient } from './openai-compatible'; 10 | 11 | export class OllamaAPIClient 12 | extends OpenAICompatibleAPIClient 13 | implements APIClient 14 | { 15 | constructor( 16 | generator: PromptGenerator, 17 | tracker: CostsTracker, 18 | plugin: Markpilot, 19 | ) { 20 | super(generator, tracker, plugin); 21 | } 22 | 23 | get provider(): Provider { 24 | return 'ollama'; 25 | } 26 | 27 | get openai(): OpenAI | undefined { 28 | const { settings } = this.plugin; 29 | 30 | const apiUrl = settings.providers.ollama.apiUrl; 31 | if (apiUrl === undefined) { 32 | new Notice('Ollama API URL is not set.'); 33 | return; 34 | } 35 | if (!validateURL(apiUrl)) { 36 | new Notice('Ollama API URL is invalid.'); 37 | return; 38 | } 39 | 40 | return new OpenAI({ 41 | baseURL: apiUrl, 42 | apiKey: 'ollama', // Required but ignored. 
43 | dangerouslyAllowBrowser: true, 44 | }); 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /src/api/clients/openai-compatible.ts: -------------------------------------------------------------------------------- 1 | import { getEncoding } from 'js-tiktoken'; 2 | import { Notice } from 'obsidian'; 3 | import OpenAI from 'openai'; 4 | import Markpilot from 'src/main'; 5 | import { APIClient, ChatMessage } from '..'; 6 | import { PromptGenerator } from '../prompts/generator'; 7 | import { Provider } from '../providers'; 8 | import { CostsTracker } from '../providers/costs'; 9 | import { DEFAULT_MODELS } from '../providers/models'; 10 | 11 | export abstract class OpenAICompatibleAPIClient implements APIClient { 12 | constructor( 13 | protected generator: PromptGenerator, 14 | protected tracker: CostsTracker, 15 | protected plugin: Markpilot, 16 | ) {} 17 | 18 | abstract get provider(): Provider; 19 | 20 | abstract get openai(): OpenAI | undefined; 21 | 22 | async *fetchChat(messages: ChatMessage[]) { 23 | if (this.openai === undefined) { 24 | return; 25 | } 26 | 27 | const { settings } = this.plugin; 28 | try { 29 | const prompt = this.generator.generateChatPrompt(messages); 30 | const modelTag = settings.chat.modelTag ?? ''; 31 | const stream = await this.openai.chat.completions.create({ 32 | messages: prompt, 33 | model: settings.chat.model + (modelTag !== '' ? `:${modelTag}` : ''), 34 | max_tokens: settings.chat.maxTokens, 35 | temperature: settings.chat.temperature, 36 | top_p: 1, 37 | n: 1, 38 | stream: true, 39 | }); 40 | 41 | const contents = []; 42 | for await (const chunk of stream) { 43 | const content = chunk.choices[0].delta.content ?? ''; 44 | contents.push(content); 45 | yield content; 46 | } 47 | 48 | // Update usage cost estimates. 
49 | const enc = getEncoding('gpt2'); // Assume GPT-2 encoding 50 | const inputMessage = messages 51 | .map((message) => message.content) 52 | .join('\n'); 53 | const outputMessage = contents.join(''); 54 | const inputTokens = enc.encode(inputMessage).length; 55 | const outputTokens = enc.encode(outputMessage).length; 56 | await this.tracker.add( 57 | settings.chat.provider, 58 | settings.chat.model, 59 | inputTokens, 60 | outputTokens, 61 | ); 62 | } catch (error) { 63 | console.error(error); 64 | new Notice( 65 | 'Failed to fetch chat completions. Make sure your API key or API URL is correct.', 66 | ); 67 | } 68 | } 69 | 70 | async fetchCompletions(prefix: string, suffix: string) { 71 | if (this.openai === undefined) { 72 | return; 73 | } 74 | 75 | const { settings } = this.plugin; 76 | try { 77 | const prompt = this.generator.generateCompletionsPrompt(prefix, suffix); 78 | const modelTag = settings.completions.modelTag ?? ''; 79 | const completions = await this.openai.chat.completions.create({ 80 | messages: prompt, 81 | model: 82 | settings.completions.model + (modelTag !== '' ? `:${modelTag}` : ''), 83 | max_tokens: settings.completions.maxTokens, 84 | temperature: settings.completions.temperature, 85 | top_p: 1, 86 | n: 1, 87 | stop: ['\n\n\n'], 88 | }); 89 | 90 | // Update usage cost estimates. 91 | const inputTokens = completions.usage?.prompt_tokens ?? 0; 92 | const outputTokens = completions.usage?.completion_tokens ?? 0; 93 | await this.tracker.add( 94 | settings.completions.provider, 95 | settings.completions.model, 96 | inputTokens, 97 | outputTokens, 98 | ); 99 | 100 | const content = completions.choices[0].message.content; 101 | if (content === null) { 102 | return; 103 | } 104 | return this.generator.parseResponse(content); 105 | } catch (error) { 106 | console.error(error); 107 | new Notice( 108 | 'Failed to fetch completions. 
Make sure your API key or API URL is correct.', 109 | ); 110 | } 111 | } 112 | 113 | async testConnection() { 114 | if (this.openai === undefined) { 115 | return false; 116 | } 117 | 118 | try { 119 | const response = await this.openai.chat.completions.create({ 120 | messages: [ 121 | { 122 | role: 'user', 123 | content: 'Say this is a test', 124 | }, 125 | ], 126 | model: DEFAULT_MODELS[this.provider], 127 | max_tokens: 1, 128 | temperature: 0, 129 | top_p: 1, 130 | n: 1, 131 | }); 132 | 133 | return response.choices[0].message.content !== ''; 134 | } catch (error) { 135 | console.error(error); 136 | return false; 137 | } 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /src/api/clients/openai.ts: -------------------------------------------------------------------------------- 1 | import { Notice } from 'obsidian'; 2 | import OpenAI from 'openai'; 3 | import Markpilot from 'src/main'; 4 | import { APIClient } from '..'; 5 | import { PromptGenerator } from '../prompts/generator'; 6 | import { Provider } from '../providers'; 7 | import { CostsTracker } from '../providers/costs'; 8 | import { OpenAICompatibleAPIClient } from './openai-compatible'; 9 | 10 | export class OpenAIAPIClient 11 | extends OpenAICompatibleAPIClient 12 | implements APIClient 13 | { 14 | constructor( 15 | generator: PromptGenerator, 16 | tracker: CostsTracker, 17 | plugin: Markpilot, 18 | ) { 19 | super(generator, tracker, plugin); 20 | } 21 | 22 | get provider(): Provider { 23 | return 'openai'; 24 | } 25 | 26 | get openai(): OpenAI | undefined { 27 | const { settings } = this.plugin; 28 | 29 | const apiKey = settings.providers.openai.apiKey; 30 | if (apiKey === undefined) { 31 | new Notice('OpenAI API key is not set.'); 32 | return; 33 | } 34 | if (!apiKey.startsWith('sk-')) { 35 | new Notice('OpenAI API key is invalid.'); 36 | return; 37 | } 38 | 39 | return new OpenAI({ 40 | apiKey, 41 | dangerouslyAllowBrowser: true, 42 | }); 43 | } 44 
| } 45 | -------------------------------------------------------------------------------- /src/api/clients/openrouter.ts: -------------------------------------------------------------------------------- 1 | import { Notice } from 'obsidian'; 2 | import OpenAI from 'openai'; 3 | import Markpilot from 'src/main'; 4 | import { APIClient } from '..'; 5 | import { PromptGenerator } from '../prompts/generator'; 6 | import { Provider } from '../providers'; 7 | import { CostsTracker } from '../providers/costs'; 8 | import { OpenAICompatibleAPIClient } from './openai-compatible'; 9 | 10 | export class OpenRouterAPIClient 11 | extends OpenAICompatibleAPIClient 12 | implements APIClient 13 | { 14 | constructor( 15 | generator: PromptGenerator, 16 | tracker: CostsTracker, 17 | plugin: Markpilot, 18 | ) { 19 | super(generator, tracker, plugin); 20 | } 21 | 22 | get provider(): Provider { 23 | return 'openrouter'; 24 | } 25 | 26 | get openai(): OpenAI | undefined { 27 | const { settings } = this.plugin; 28 | 29 | const apiKey = settings.providers.openrouter.apiKey; 30 | if (apiKey === undefined) { 31 | new Notice('OpenRouter API key is not set.'); 32 | return; 33 | } 34 | if (!apiKey.startsWith('sk-or-')) { 35 | new Notice('OpenRouter API key is invalid.'); 36 | return; 37 | } 38 | 39 | return new OpenAI({ 40 | apiKey, 41 | baseURL: 'https://openrouter.ai/api/v1', 42 | dangerouslyAllowBrowser: true, 43 | }); 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /src/api/index.ts: -------------------------------------------------------------------------------- 1 | export interface APIClient { 2 | fetchChat(messages: ChatMessage[]): AsyncGenerator; 3 | fetchCompletions(prefix: string, suffix: string): Promise; 4 | testConnection(): Promise; 5 | } 6 | 7 | export type ChatRole = 'system' | 'assistant' | 'user'; 8 | 9 | export interface ChatMessage { 10 | role: ChatRole; 11 | content: string; 12 | } 13 | 14 | export interface ChatHistory 
{ 15 | messages: ChatMessage[]; 16 | response: string; 17 | } 18 | -------------------------------------------------------------------------------- /src/api/prompts/chat/example1/assistant.md: -------------------------------------------------------------------------------- 1 | Sure! Here is a Python implementation of Kadane's algorithm: 2 | 3 | ```python 4 | def kadane_algorithm(arr): 5 | max_sum = float('-inf') 6 | current_sum = 0 7 | 8 | for num in arr: 9 | current_sum = max(num, current_sum + num) 10 | max_sum = max(max_sum, current_sum) 11 | 12 | return max_sum 13 | 14 | # Example usage 15 | arr = [-2, 1, -3, 4, -1, 2, 1, -5, 4] 16 | print(kadane_algorithm(arr)) # Output: 6 17 | ``` 18 | 19 | This implementation finds the maximum sum of a contiguous subarray within the given array arr using Kadane's algorithm. 20 | -------------------------------------------------------------------------------- /src/api/prompts/chat/example1/user.md: -------------------------------------------------------------------------------- 1 | Show me a Python implementation of Kadane's algorithm. 2 | -------------------------------------------------------------------------------- /src/api/prompts/chat/example2/assistant.md: -------------------------------------------------------------------------------- 1 | The Navier-Stokes equation is given by: 2 | 3 | $$ 4 | \rho \left( \frac{\partial \textbf{u}}{\partial t} + \textbf{u} \cdot \nabla \textbf{u} \right) = - \nabla p + \mu \nabla^2 \textbf{u} + \textbf{f} 5 | $$ 6 | 7 | where: 8 | 9 | - $\rho$ is the fluid density, 10 | - $\textbf{u}$ is the fluid velocity vector, 11 | - $t$ is time, 12 | - $p$ is the pressure, 13 | - $\mu$ is the dynamic viscosity, 14 | - $\nabla$ is the gradient operator, 15 | - $\nabla^2$ is the Laplacian operator, and 16 | - $\textbf{f}$ is the external force per unit volume. 
17 | -------------------------------------------------------------------------------- /src/api/prompts/chat/example2/user.md: -------------------------------------------------------------------------------- 1 | Show me the Navier-Stokes equation using LaTeX. 2 | -------------------------------------------------------------------------------- /src/api/prompts/chat/index.ts: -------------------------------------------------------------------------------- 1 | import { FewShotPrompt } from '..'; 2 | import example1Assistant from './example1/assistant.md'; 3 | import example1User from './example1/user.md'; 4 | import example2Assistant from './example2/assistant.md'; 5 | import example2User from './example2/user.md'; 6 | import system from './system.txt'; 7 | 8 | export const CHAT_PROMPT: FewShotPrompt = { 9 | system, 10 | examples: [ 11 | { 12 | user: example1User, 13 | assistant: example1Assistant, 14 | }, 15 | { 16 | user: example2User, 17 | assistant: example2Assistant, 18 | }, 19 | ], 20 | }; 21 | -------------------------------------------------------------------------------- /src/api/prompts/chat/system.txt: -------------------------------------------------------------------------------- 1 | Answer the user's question. 2 | Code blocks must be formatted using triple backticks (```), and the language name must be specified. 3 | Math blocks must be formatted using double dollar signs ($$). 4 | Inline math must be formatted using single dollar signs ($). 5 | -------------------------------------------------------------------------------- /src/api/prompts/completions/block-quote/example1/assistant.txt: -------------------------------------------------------------------------------- 1 | 2 | English 3 | 4 | The quote is from Adam Smith's "The Wealth Of Nations". It is often used to describe the concept of the invisible hand in economics. 5 | 6 | led by an invisible hand to promote an end which was no part of his intention. 
7 | > 8 | > The Wealth Of Nations, Book IV, Chapter V -------------------------------------------------------------------------------- /src/api/prompts/completions/block-quote/example1/user.md: -------------------------------------------------------------------------------- 1 | # Adam Smith's Invisible Hand 2 | 3 | Adam Smith, in his seminal work "The Wealth of Nations," coined the term "invisible hand" to describe the self-regulating nature of markets. 4 | 5 | > Every individual... neither intends to promote the public interest, nor knows how much he is promoting it... he intends only his own security; and by directing that industry in such a manner as its produce may be of the greatest value, he intends only his own gain, and he is in this, as in many other cases, 6 | -------------------------------------------------------------------------------- /src/api/prompts/completions/block-quote/index.ts: -------------------------------------------------------------------------------- 1 | import { FewShotPrompt } from '../..'; 2 | import example1Assistant from './example1/assistant.txt'; 3 | import example1User from './example1/user.md'; 4 | import system from './system.txt'; 5 | 6 | export const BLOCK_QUOTE_PROMPT: FewShotPrompt = { 7 | system, 8 | examples: [ 9 | { 10 | user: example1User, 11 | assistant: example1Assistant, 12 | }, 13 | ], 14 | }; 15 | -------------------------------------------------------------------------------- /src/api/prompts/completions/block-quote/system.txt: -------------------------------------------------------------------------------- 1 | Complete the most suitable text at the location of the . 2 | The is located within a Markdown block quote. 3 | Your answer must complete this quote in a way that fits the context of the surrounding text. 4 | Your answer must be written in the same language as the surrounding text. 5 | Your answer must not overlap with any text adjacent to the . 
6 | Your answer must have the following format: 7 | 8 | Here, you write the language of your response e.g. English, Chinese, TypeScript, Python. 9 | 10 | Here, you reason about the answer, using the 80/20 rule for clarity and conciseness. 11 | 12 | Here, you write the text that should be inserted at the location of the . -------------------------------------------------------------------------------- /src/api/prompts/completions/code-block/example1/assistant.txt: -------------------------------------------------------------------------------- 1 | 2 | C 3 | 4 | This paragraph explains the strcmp function in C, which is used for comparing two strings. 5 | 6 | strcmp -------------------------------------------------------------------------------- /src/api/prompts/completions/code-block/example1/user.md: -------------------------------------------------------------------------------- 1 | # String Comparison 2 | 3 | In C, `` function compares two strings and returns an integer value based on the comparison. 4 | The function compares the two strings character by character until it finds a difference or reaches the end of one of the strings. 5 | -------------------------------------------------------------------------------- /src/api/prompts/completions/code-block/example2/assistant.txt: -------------------------------------------------------------------------------- 1 | 2 | JavaScript 3 | 4 | This JavaScript function should sort an array of numbers in ascending order using the bubble sort algorithm. 5 | The bubble sort algorithm works by comparing each element in the array with the element next to it, and swapping them if they are in the wrong order. 6 | In this specific case the code should use 2 spaces for indentation. 
7 | 8 | for (let i = 0; i < array.length; i++) { 9 | for (let j = 0; j < array.length; j++) { 10 | if (array[j] > array[j + 1]) { 11 | [array[j], array[j + 1]] = [array[j + 1], array[j]]; 12 | } 13 | } 14 | } -------------------------------------------------------------------------------- /src/api/prompts/completions/code-block/example2/user.md: -------------------------------------------------------------------------------- 1 | # Bubble Sort 2 | 3 | Bubble sort is a simple sorting algorithm that repeatedly steps through the list, compares adjacent elements and swaps them if they are in the wrong order. 4 | 5 | ```js 6 | function bubbleSort(array) { 7 | 8 | return array; 9 | } 10 | ``` 11 | -------------------------------------------------------------------------------- /src/api/prompts/completions/code-block/example3/assistant.txt: -------------------------------------------------------------------------------- 1 | 2 | Python 3 | 4 | This Python function should take an integer as input and determine if it is a prime number. 5 | In this specific case the code should use 4 spaces for indentation. 
6 | 7 | n: int -------------------------------------------------------------------------------- /src/api/prompts/completions/code-block/example3/user.md: -------------------------------------------------------------------------------- 1 | ```python 2 | def is_prime() -> int: 3 | if n < 2: 4 | return False 5 | i = 2 6 | while i * i <= n: 7 | if n % i == 0: 8 | return False 9 | i += 1 10 | return True 11 | ``` 12 | -------------------------------------------------------------------------------- /src/api/prompts/completions/code-block/index.ts: -------------------------------------------------------------------------------- 1 | import { FewShotPrompt } from '../..'; 2 | import example1Assistant from './example1/assistant.txt'; 3 | import example1User from './example1/user.md'; 4 | import example2Assistant from './example2/assistant.txt'; 5 | import example2User from './example2/user.md'; 6 | import example3Assistant from './example3/assistant.txt'; 7 | import example3User from './example3/user.md'; 8 | import system from './system.txt'; 9 | 10 | export const CODE_BLOCK_PROMPT: FewShotPrompt = { 11 | system, 12 | examples: [ 13 | { 14 | user: example1User, 15 | assistant: example1Assistant, 16 | }, 17 | { 18 | user: example2User, 19 | assistant: example2Assistant, 20 | }, 21 | { 22 | user: example3User, 23 | assistant: example3Assistant, 24 | }, 25 | ], 26 | }; 27 | -------------------------------------------------------------------------------- /src/api/prompts/completions/code-block/system.txt: -------------------------------------------------------------------------------- 1 | Complete the most suitable text at the location of the . 2 | The is located within a Markdown codeblock, written in the language {{LANGUAGE}}. 3 | Your answer must complete this code block in the language {{LANGUAGE}}. 4 | Your answer must not complete any text outside this code block. 5 | Your answer must not overlap with any text adjacent to the . 
6 | Your answer must have the following format: 7 | 8 | Here, you write the language of your response e.g. English, Chinese, TypeScript, Python. 9 | 10 | Here, you reason about the answer, using the 80/20 rule for clarity and conciseness. 11 | 12 | Here, you write the text that should be inserted at the location of the . -------------------------------------------------------------------------------- /src/api/prompts/completions/heading/example1/assistant.txt: -------------------------------------------------------------------------------- 1 | 2 | English 3 | 4 | This is a list of the plays written by William Shakespeare, so the missing title "William Shakespeare" should be inserted and nothing after it. 5 | 6 | William Shakespeare -------------------------------------------------------------------------------- /src/api/prompts/completions/heading/example1/user.md: -------------------------------------------------------------------------------- 1 | # 2 | 3 | ## Plays 4 | 5 | ### The Tragedy of Macbeth 6 | 7 | The Tragedy of Macbeth is a play by William Shakespeare. It is believed to have been written between 1603 and 1607. 8 | 9 | ### Romeo and Juliet 10 | 11 | Romeo and Juliet is a play by William Shakespeare. It is one of his most famous works and is often considered one of the greatest love stories ever told. 12 | -------------------------------------------------------------------------------- /src/api/prompts/completions/heading/example2/assistant.txt: -------------------------------------------------------------------------------- 1 | 2 | English 3 | 4 | This paragraph summarizes the main points of the First Punic War. The title starts with "First" and ends with the year "264-241 BC", so the missing word "Punic War" should be inserted. 
5 | 6 | Punic War -------------------------------------------------------------------------------- /src/api/prompts/completions/heading/example2/user.md: -------------------------------------------------------------------------------- 1 | # Punic Wars 2 | 3 | ## First <>, 264-241 BC 4 | 5 | The First Punic War was fought between Rome and Carthage from 264 to 241 BCE. It was the first of three major wars fought between the two powers. The war was fought over control of Sicily, a large island located to the south of Italy. The war ended with a Roman victory and the signing of a peace treaty that saw Carthage cede control of Sicily to Rome. 6 | -------------------------------------------------------------------------------- /src/api/prompts/completions/heading/index.ts: -------------------------------------------------------------------------------- 1 | import { FewShotPrompt } from '../..'; 2 | import example1Assistant from './example1/assistant.txt'; 3 | import example1User from './example1/user.md'; 4 | import example2Assistant from './example2/assistant.txt'; 5 | import example2User from './example2/user.md'; 6 | import system from './system.txt'; 7 | 8 | export const HEADING_PROMPT: FewShotPrompt = { 9 | system, 10 | examples: [ 11 | { 12 | user: example1User, 13 | assistant: example1Assistant, 14 | }, 15 | { 16 | user: example2User, 17 | assistant: example2Assistant, 18 | }, 19 | ], 20 | }; 21 | -------------------------------------------------------------------------------- /src/api/prompts/completions/heading/system.txt: -------------------------------------------------------------------------------- 1 | Complete the most suitable text at the location of the . 2 | The is located within a Markdown heading. 3 | Your answer must complete the title for this heading that fits the context of the surrounding text. 4 | Your answer must be written in the same language as the surrounding text. 5 | Your answer must not overlap with any text adjacent to the . 
6 | Your answer must have the following format: 7 | 8 | Here, you write the language of your response e.g. English, Chinese, TypeScript, Python. 9 | 10 | Here, you reason about the answer, using the 80/20 rule for clarity and conciseness. 11 | 12 | Here, you write the text that should be inserted at the location of the . -------------------------------------------------------------------------------- /src/api/prompts/completions/list-item/example1/assistant.txt: -------------------------------------------------------------------------------- 1 | 2 | English 3 | 4 | This is a list of the ACID principles in database management. The "Consistency" and "Isolation" principles are missing and so must be inserted in a consistent format. 5 | 6 | Consistency: Transactions maintain database validity by transitioning it between consistent states. 7 | - Isolation: Transactions execute independently, preventing interference between concurrent operations. -------------------------------------------------------------------------------- /src/api/prompts/completions/list-item/example1/user.md: -------------------------------------------------------------------------------- 1 | # The ACID Principle 2 | 3 | The ACID principle in database management ensures transaction reliability: 4 | 5 | - Atomicity: Transactions are all-or-nothing, guaranteeing data integrity. 6 | - 7 | - Durability: Committed transactions persist even through system failures. 
8 | -------------------------------------------------------------------------------- /src/api/prompts/completions/list-item/index.ts: -------------------------------------------------------------------------------- 1 | import { FewShotPrompt } from '../..'; 2 | import example1Assistant from './example1/assistant.txt'; 3 | import example1User from './example1/user.md'; 4 | import system from './system.txt'; 5 | 6 | export const LIST_ITEM_PROMPT: FewShotPrompt = { 7 | system, 8 | examples: [ 9 | { 10 | user: example1User, 11 | assistant: example1Assistant, 12 | }, 13 | ], 14 | }; 15 | -------------------------------------------------------------------------------- /src/api/prompts/completions/list-item/system.txt: -------------------------------------------------------------------------------- 1 | Complete the most suitable text at the location of the . 2 | The is located within a Markdown list item. 3 | Your answer must complete one or multiple list items for this list that fits the context of the surrounding text. 4 | Your answer must not complete any text that is not part of this list. 5 | Your answer must be written in the same language as the surrounding text. 6 | Your answer must not overlap with any text adjacent to the . 7 | Your answer must have the following format: 8 | 9 | Here, you write the language of your response e.g. English, Chinese, TypeScript, Python. 10 | 11 | Here, you reason about the answer, using the 80/20 rule for clarity and conciseness. 12 | 13 | Here, you write the text that should be inserted at the location of the . -------------------------------------------------------------------------------- /src/api/prompts/completions/math-block/example1/assistant.txt: -------------------------------------------------------------------------------- 1 | 2 | LaTeX 3 | 4 | This is an example of the contrapositive of a statement. 5 | In this case, the original statement is "If P, then Q." 
The contrapositive is "If not Q, then not P.", which is written in LaTeX as $\neg Q\implies\neg P$. 6 | 7 | \neg Q\implies\neg P -------------------------------------------------------------------------------- /src/api/prompts/completions/math-block/example1/user.md: -------------------------------------------------------------------------------- 1 | # Contraposition 2 | 3 | The contraposition of a statement is the statement formed by negating both the hypothesis and conclusion of the original statement and then interchanging them. 4 | 5 | For instance, the contraposition of a statement $P\implies Q$, is "". 6 | -------------------------------------------------------------------------------- /src/api/prompts/completions/math-block/example2/assistant.txt: -------------------------------------------------------------------------------- 1 | 2 | LaTeX 3 | 4 | This is the formula for the standard deviation of a sample. It should be the LaTeX formula representing the square root of the average of the squared differences between each data point and the mean. 5 | 6 | \sqrt{\frac{\sum_{i=1}^{n}(x_i - \mu)^2}{n}} -------------------------------------------------------------------------------- /src/api/prompts/completions/math-block/example2/user.md: -------------------------------------------------------------------------------- 1 | # Standard Deviation 2 | 3 | The standard deviation is given by the formula: 4 | 5 | $$ 6 | \sigma = 7 | $$ 8 | 9 | where $\mu$ is the mean of the observations, $n$ is the number of observations and $x_i$ is the value of the $i$-th observation. 
10 | -------------------------------------------------------------------------------- /src/api/prompts/completions/math-block/index.ts: -------------------------------------------------------------------------------- 1 | import { FewShotPrompt } from '../..'; 2 | import example1Assistant from './example1/assistant.txt'; 3 | import example1User from './example1/user.md'; 4 | import example2Assistant from './example2/assistant.txt'; 5 | import example2User from './example2/user.md'; 6 | import system from './system.txt'; 7 | 8 | export const MATH_BLOCK_PROMPT: FewShotPrompt = { 9 | system, 10 | examples: [ 11 | { 12 | user: example1User, 13 | assistant: example1Assistant, 14 | }, 15 | { 16 | user: example2User, 17 | assistant: example2Assistant, 18 | }, 19 | ], 20 | }; 21 | -------------------------------------------------------------------------------- /src/api/prompts/completions/math-block/system.txt: -------------------------------------------------------------------------------- 1 | Complete the most suitable text at the location of the . 2 | The is located within a Markdown math block. 3 | Your answer must only contain LaTeX code that captures the math discussed in the surrounding text. 4 | Your answer must not contain any text that is not part of the LaTeX code. 5 | Your answer must be written in the same language as the surrounding text. 6 | Your answer must not overlap with any text adjacent to the . 7 | Your answer must have the following format: 8 | 9 | Here, you write the language of your response e.g. English, Chinese, TypeScript, Python. 10 | 11 | Here, you reason about the answer, using the 80/20 rule for clarity and conciseness. 12 | 13 | Here, you write the text that should be inserted at the location of the . 
-------------------------------------------------------------------------------- /src/api/prompts/completions/paragraph/example1/assistant.txt: -------------------------------------------------------------------------------- 1 | 2 | English 3 | 4 | This paragraph explains about the pigeonhole principle. If there are more pigeons than pigeonholes, then at least one pigeonhole must contain at least two pigeons 5 | 6 | at least one pigeonhole must contain at least two pigeons -------------------------------------------------------------------------------- /src/api/prompts/completions/paragraph/example1/user.md: -------------------------------------------------------------------------------- 1 | # Pigeonhole Principle 2 | 3 | The pigeonhole principle states that if you have more pigeons than pigeonholes, then . This principle is often used in combinatorics to prove that a certain number of objects must be placed in a certain number of containers. 4 | -------------------------------------------------------------------------------- /src/api/prompts/completions/paragraph/index.ts: -------------------------------------------------------------------------------- 1 | import { FewShotPrompt } from '../..'; 2 | import example1Assistant from './example1/assistant.txt'; 3 | import example1User from './example1/user.md'; 4 | import system from './system.txt'; 5 | 6 | export const PARAGRAPH_PROMPT: FewShotPrompt = { 7 | system, 8 | examples: [ 9 | { 10 | user: example1User, 11 | assistant: example1Assistant, 12 | }, 13 | ], 14 | }; 15 | -------------------------------------------------------------------------------- /src/api/prompts/completions/paragraph/system.txt: -------------------------------------------------------------------------------- 1 | Complete the most suitable text at the location of the . 2 | The is located within a Markdown paragraph. 3 | Your answer must complete one or multiple sentences to this paragraph that fit the surrounding text. 
4 | Your answer must be written in the same language as the surrounding text. 5 | Your answer must not overlap with any text adjacent to the . 6 | Your answer must have the following format: 7 | 8 | Here, you write the language of your response e.g. English, Chinese, TypeScript, Python. 9 | 10 | Here, you reason about the answer, using the 80/20 rule for clarity and conciseness. 11 | 12 | Here, you write the text that should be inserted at the location of the . -------------------------------------------------------------------------------- /src/api/prompts/context.ts: -------------------------------------------------------------------------------- 1 | // NOTE: 2 | // This context detection module is heavily inspired by `j0rd1smit/obsidian-copilot-auto-completion`: 3 | // https://github.com/j0rd1smit/obsidian-copilot-auto-completion/blob/32912133b3eea43b8bfca94258ce2ca55445b2ce/src/context_detection.ts 4 | 5 | // NOTE: 6 | // Unicode character \uFFFF is not a valid character 7 | // so we use it to represent the cursor position, assuming the user does not intentionally copy and paste it. 8 | const CURSOR_CHAR = '\uFFFF'; 9 | 10 | const HEADER_REGEX = /^#+\s.*\uFFFF.*$/gm; 11 | const UNORDERED_LIST_REGEX = /^\s*(-|\*)\s.*\uFFFF.*$/gm; 12 | const TASK_LIST_REGEX = /^\s*(-|[0-9]+\.) 
+\[.\]\s.*\uFFFF.*$/gm; 13 | const BLOCK_QUOTES_REGEX = /^\s*>.*\uFFFF.*$/gm; 14 | const NUMBERED_LIST_REGEX = /^\s*\d+\.\s.*\uFFFF.*$/gm; 15 | const MATH_BLOCK_REGEX = /\$\$[\s\S]*?\$\$/g; 16 | const INLINE_MATH_BLOCK_REGEX = /\$[\s\S]*?\$/g; 17 | const CODE_BLOCK_REGEX = /```(?.*)[\s\S]*?```/g; 18 | const INLINE_CODE_BLOCK_REGEX = /`.*`/g; 19 | 20 | export const CONTEXTS = [ 21 | 'heading', 22 | 'paragraph', 23 | 'list-item', 24 | 'block-quote', 25 | 'math-block', 26 | 'code-block', 27 | ] as const; 28 | 29 | export const CONTEXTS_NAMES: Record = { 30 | heading: 'Heading', 31 | paragraph: 'Paragraph', 32 | 'list-item': 'List Item', 33 | 'block-quote': 'Block Quote', 34 | 'math-block': 'Math Block', 35 | 'code-block': 'Code Block', 36 | }; 37 | 38 | export type Context = (typeof CONTEXTS)[number]; 39 | 40 | export function getContext(prefix: string, suffix: string): Context { 41 | const text = prefix + CURSOR_CHAR + suffix; 42 | if (HEADER_REGEX.test(text)) { 43 | return 'heading'; 44 | } 45 | if (BLOCK_QUOTES_REGEX.test(text)) { 46 | return 'block-quote'; 47 | } 48 | if ( 49 | NUMBERED_LIST_REGEX.test(text) || 50 | UNORDERED_LIST_REGEX.test(text) || 51 | TASK_LIST_REGEX.test(text) 52 | ) { 53 | return 'list-item'; 54 | } 55 | if ( 56 | isCursorInBlock(text, MATH_BLOCK_REGEX) || 57 | isCursorInBlock(text, INLINE_MATH_BLOCK_REGEX) 58 | ) { 59 | return 'math-block'; 60 | } 61 | if ( 62 | isCursorInBlock(text, CODE_BLOCK_REGEX) || 63 | isCursorInBlock(text, INLINE_CODE_BLOCK_REGEX) 64 | ) { 65 | return 'code-block'; 66 | } 67 | 68 | return 'paragraph'; 69 | } 70 | 71 | export function getLanguage(prefix: string, suffix: string): string { 72 | const text = prefix + CURSOR_CHAR + suffix; 73 | if (!isCursorInBlock(text, CODE_BLOCK_REGEX)) { 74 | throw new Error('Cursor is not in a code block'); 75 | } 76 | 77 | const match = text.match(CODE_BLOCK_REGEX); 78 | const language = match?.groups?.language ?? 
'plaintext'; 79 | return `${language}code-block`; 80 | } 81 | 82 | function isCursorInBlock(text: string, regex: RegExp): boolean { 83 | const blocks = text.match(regex) as string[] | null; 84 | if (blocks === null) { 85 | return false; 86 | } 87 | return blocks.some((block) => block.includes(CURSOR_CHAR)); 88 | } 89 | -------------------------------------------------------------------------------- /src/api/prompts/generator.ts: -------------------------------------------------------------------------------- 1 | import Markpilot from 'src/main'; 2 | import { FewShotPrompt } from '.'; 3 | import { ChatMessage } from '..'; 4 | import { CHAT_PROMPT } from './chat'; 5 | import { BLOCK_QUOTE_PROMPT } from './completions/block-quote'; 6 | import { CODE_BLOCK_PROMPT } from './completions/code-block'; 7 | import { HEADING_PROMPT } from './completions/heading'; 8 | import { LIST_ITEM_PROMPT } from './completions/list-item'; 9 | import { MATH_BLOCK_PROMPT } from './completions/math-block'; 10 | import { PARAGRAPH_PROMPT } from './completions/paragraph'; 11 | import { Context, getContext, getLanguage } from './context'; 12 | 13 | const COMPLETIONS_PROMPTS: Record = { 14 | heading: HEADING_PROMPT, 15 | paragraph: PARAGRAPH_PROMPT, 16 | 'list-item': LIST_ITEM_PROMPT, 17 | 'block-quote': BLOCK_QUOTE_PROMPT, 18 | 'math-block': MATH_BLOCK_PROMPT, 19 | 'code-block': CODE_BLOCK_PROMPT, 20 | }; 21 | 22 | export class PromptGenerator { 23 | constructor(private plugin: Markpilot) {} 24 | 25 | generateChatPrompt(messages: ChatMessage[]) { 26 | const prompt = CHAT_PROMPT; 27 | const system = prompt.system; 28 | 29 | return [ 30 | { 31 | role: 'system', 32 | content: system, 33 | }, 34 | ...this.makeChatExamples(), 35 | ...messages, 36 | ] as ChatMessage[]; 37 | } 38 | 39 | generateCompletionsPrompt(prefix: string, suffix: string) { 40 | const { settings } = this.plugin; 41 | 42 | const context = getContext(prefix, suffix); 43 | const prompt = COMPLETIONS_PROMPTS[context]; 44 | const 
system = 45 | context === 'code-block' 46 | ? prompt.system.replace('{{LANGUAGE}}', getLanguage(prefix, suffix)!) 47 | : prompt.system; 48 | 49 | const windowSize = settings.completions.windowSize; 50 | const truncatedPrefix = prefix.slice( 51 | prefix.length - windowSize / 2, 52 | prefix.length, 53 | ); 54 | const truncatedSuffix = suffix.slice(0, windowSize / 2); 55 | 56 | return [ 57 | { 58 | role: 'system', 59 | content: system, 60 | }, 61 | ...this.makeCompletionsExamples(prefix, suffix), 62 | { 63 | role: 'user', 64 | content: `${truncatedPrefix}${truncatedSuffix}`, 65 | }, 66 | ] as ChatMessage[]; 67 | } 68 | 69 | makeChatExamples() { 70 | const { settings } = this.plugin; 71 | 72 | if (!settings.chat.fewShot) { 73 | return []; 74 | } 75 | 76 | const prompt = CHAT_PROMPT; 77 | return prompt.examples.flatMap((example) => [ 78 | { 79 | role: 'user', 80 | content: example.user, 81 | }, 82 | { 83 | role: 'assistant', 84 | content: example.assistant, 85 | }, 86 | ]); 87 | } 88 | 89 | makeCompletionsExamples(prefix: string, suffix: string) { 90 | const { settings } = this.plugin; 91 | 92 | if (!settings.completions.fewShot) { 93 | return []; 94 | } 95 | 96 | const context = getContext(prefix, suffix); 97 | const prompt = COMPLETIONS_PROMPTS[context]; 98 | return prompt.examples.flatMap((example) => [ 99 | { 100 | role: 'user', 101 | content: example.user, 102 | }, 103 | { 104 | role: 'assistant', 105 | content: example.assistant, 106 | }, 107 | ]); 108 | } 109 | 110 | parseResponse(content: string) { 111 | const lines = content.split('\n'); 112 | return lines.slice(lines.indexOf('') + 1).join('\n'); 113 | } 114 | } 115 | -------------------------------------------------------------------------------- /src/api/prompts/index.ts: -------------------------------------------------------------------------------- 1 | export interface FewShotPrompt { 2 | system: string; 3 | examples: FewShotExample[]; 4 | } 5 | 6 | export interface FewShotExample { 7 | user: string; 8 | 
assistant: string; 9 | } 10 | -------------------------------------------------------------------------------- /src/api/providers/costs.ts: -------------------------------------------------------------------------------- 1 | import Markpilot from 'src/main'; 2 | import { getThisMonthAsString, getTodayAsString } from 'src/utils'; 3 | import { Provider } from '.'; 4 | import { Model } from './models'; 5 | import OllamaModelsJSON from './ollama.json'; 6 | import OpenAIModelsJSON from './openai.json'; 7 | import OpenRouterModelsJSON from './openrouter.json'; 8 | 9 | // eslint-disable-next-line @typescript-eslint/no-explicit-any 10 | const ModelsJSON: Record = { 11 | ollama: OllamaModelsJSON, 12 | openrouter: OpenRouterModelsJSON, 13 | openai: OpenAIModelsJSON, 14 | }; 15 | 16 | export class CostsTracker { 17 | constructor(private plugin: Markpilot) {} 18 | 19 | async add( 20 | provider: Provider, 21 | model: Model | Model, 22 | inputTokens: number, 23 | outputTokens: number, 24 | ) { 25 | const { settings } = this.plugin; 26 | 27 | const today = getTodayAsString(); 28 | const thisMonth = getThisMonthAsString(); 29 | if (settings.usage.dailyCosts[today] === undefined) { 30 | settings.usage.dailyCosts[today] = 0; 31 | } 32 | 33 | const cost = 34 | (inputTokens * ModelsJSON[provider][model].inputCost + 35 | outputTokens * ModelsJSON[provider][model].outputCost) / 36 | // Costs are stored in per 1M token. 37 | 1_000_000; 38 | 39 | settings.usage.dailyCosts[today] += cost; 40 | settings.usage.monthlyCosts[thisMonth] += cost; 41 | 42 | // TODO: 43 | // Only save settings before unload. 
44 | await this.plugin.saveSettings(); 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /src/api/providers/index.ts: -------------------------------------------------------------------------------- 1 | export type Provider = (typeof PROVIDERS)[number]; 2 | 3 | export const PROVIDERS = ['openai', 'openrouter', 'ollama'] as const; 4 | 5 | export const PROVIDERS_NAMES: Record = { 6 | openai: 'OpenAI', 7 | openrouter: 'OpenRouter', 8 | ollama: 'Ollama', 9 | }; 10 | 11 | export const DEFAULT_PROVIDER = 'openai' as Provider; 12 | -------------------------------------------------------------------------------- /src/api/providers/models.ts: -------------------------------------------------------------------------------- 1 | import { getObjectKeys } from 'src/utils'; 2 | import { Provider } from '.'; 3 | import OllamaModelsJSON from './ollama.json'; 4 | import OpenAIModelsJSON from './openai.json'; 5 | import OpenRouterModelsJSON from './openrouter.json'; 6 | 7 | export type OpenAIModel = keyof typeof OpenAIModelsJSON; 8 | export type OpenRouterModel = keyof typeof OpenRouterModelsJSON; 9 | export type OllamaModel = keyof typeof OllamaModelsJSON; 10 | 11 | export type Model = OpenAIModel | OpenRouterModel | OllamaModel; 12 | 13 | export const OPENAI_MODELS = getObjectKeys(OpenAIModelsJSON); 14 | export const OPENROUTER_MODELS = getObjectKeys(OpenRouterModelsJSON); 15 | export const OLLAMA_MODELS = getObjectKeys(OllamaModelsJSON); 16 | 17 | export const MODELS = { 18 | openai: OPENAI_MODELS, 19 | openrouter: OPENROUTER_MODELS, 20 | ollama: OLLAMA_MODELS, 21 | }; 22 | 23 | export const DEFAULT_MODELS: Record = { 24 | openai: 'gpt-3.5-turbo', 25 | openrouter: 'openai/gpt-3.5-turbo', 26 | ollama: 'llama2', 27 | }; 28 | -------------------------------------------------------------------------------- /src/api/providers/ollama.json: -------------------------------------------------------------------------------- 1 | { 2 | 
"llama3": { 3 | "inputCost": 0, 4 | "outputCost": 0 5 | }, 6 | "codestral": { 7 | "inputCost": 0, 8 | "outputCost": 0 9 | }, 10 | "phi3": { 11 | "inputCost": 0, 12 | "outputCost": 0 13 | }, 14 | "aya": { 15 | "inputCost": 0, 16 | "outputCost": 0 17 | }, 18 | "mistral": { 19 | "inputCost": 0, 20 | "outputCost": 0 21 | }, 22 | "gemma": { 23 | "inputCost": 0, 24 | "outputCost": 0 25 | }, 26 | "mixtral": { 27 | "inputCost": 0, 28 | "outputCost": 0 29 | }, 30 | "llama2": { 31 | "inputCost": 0, 32 | "outputCost": 0 33 | }, 34 | "codegemma": { 35 | "inputCost": 0, 36 | "outputCost": 0 37 | }, 38 | "command-r": { 39 | "inputCost": 0, 40 | "outputCost": 0 41 | }, 42 | "command-r-plus": { 43 | "inputCost": 0, 44 | "outputCost": 0 45 | }, 46 | "llava": { 47 | "inputCost": 0, 48 | "outputCost": 0 49 | }, 50 | "qwen": { 51 | "inputCost": 0, 52 | "outputCost": 0 53 | }, 54 | "codellama": { 55 | "inputCost": 0, 56 | "outputCost": 0 57 | }, 58 | "dolphin-mixtral": { 59 | "inputCost": 0, 60 | "outputCost": 0 61 | }, 62 | "llama2-uncensored": { 63 | "inputCost": 0, 64 | "outputCost": 0 65 | }, 66 | "deepseek-coder": { 67 | "inputCost": 0, 68 | "outputCost": 0 69 | }, 70 | "nomic-embed-text": { 71 | "inputCost": 0, 72 | "outputCost": 0 73 | }, 74 | "mistral-openorca": { 75 | "inputCost": 0, 76 | "outputCost": 0 77 | }, 78 | "dolphin-mistral": { 79 | "inputCost": 0, 80 | "outputCost": 0 81 | }, 82 | "phi": { 83 | "inputCost": 0, 84 | "outputCost": 0 85 | }, 86 | "orca-mini": { 87 | "inputCost": 0, 88 | "outputCost": 0 89 | }, 90 | "zephyr": { 91 | "inputCost": 0, 92 | "outputCost": 0 93 | }, 94 | "nous-hermes2": { 95 | "inputCost": 0, 96 | "outputCost": 0 97 | }, 98 | "starcoder2": { 99 | "inputCost": 0, 100 | "outputCost": 0 101 | }, 102 | "llama2-chinese": { 103 | "inputCost": 0, 104 | "outputCost": 0 105 | }, 106 | "vicuna": { 107 | "inputCost": 0, 108 | "outputCost": 0 109 | }, 110 | "wizard-vicuna-uncensored": { 111 | "inputCost": 0, 112 | "outputCost": 0 113 | }, 114 | 
"dolphin-llama3": { 115 | "inputCost": 0, 116 | "outputCost": 0 117 | }, 118 | "yi": { 119 | "inputCost": 0, 120 | "outputCost": 0 121 | }, 122 | "mxbai-embed-large": { 123 | "inputCost": 0, 124 | "outputCost": 0 125 | }, 126 | "wizardlm2": { 127 | "inputCost": 0, 128 | "outputCost": 0 129 | }, 130 | "tinyllama": { 131 | "inputCost": 0, 132 | "outputCost": 0 133 | }, 134 | "starcoder": { 135 | "inputCost": 0, 136 | "outputCost": 0 137 | }, 138 | "openhermes": { 139 | "inputCost": 0, 140 | "outputCost": 0 141 | }, 142 | "openchat": { 143 | "inputCost": 0, 144 | "outputCost": 0 145 | }, 146 | "tinydolphin": { 147 | "inputCost": 0, 148 | "outputCost": 0 149 | }, 150 | "wizardcoder": { 151 | "inputCost": 0, 152 | "outputCost": 0 153 | }, 154 | "stable-code": { 155 | "inputCost": 0, 156 | "outputCost": 0 157 | }, 158 | "neural-chat": { 159 | "inputCost": 0, 160 | "outputCost": 0 161 | }, 162 | "wizard-math": { 163 | "inputCost": 0, 164 | "outputCost": 0 165 | }, 166 | "phind-codellama": { 167 | "inputCost": 0, 168 | "outputCost": 0 169 | }, 170 | "starling-lm": { 171 | "inputCost": 0, 172 | "outputCost": 0 173 | }, 174 | "stablelm2": { 175 | "inputCost": 0, 176 | "outputCost": 0 177 | }, 178 | "dolphincoder": { 179 | "inputCost": 0, 180 | "outputCost": 0 181 | }, 182 | "nous-hermes": { 183 | "inputCost": 0, 184 | "outputCost": 0 185 | }, 186 | "falcon": { 187 | "inputCost": 0, 188 | "outputCost": 0 189 | }, 190 | "codeqwen": { 191 | "inputCost": 0, 192 | "outputCost": 0 193 | }, 194 | "sqlcoder": { 195 | "inputCost": 0, 196 | "outputCost": 0 197 | }, 198 | "orca2": { 199 | "inputCost": 0, 200 | "outputCost": 0 201 | }, 202 | "dolphin-phi": { 203 | "inputCost": 0, 204 | "outputCost": 0 205 | }, 206 | "solar": { 207 | "inputCost": 0, 208 | "outputCost": 0 209 | }, 210 | "deepseek-llm": { 211 | "inputCost": 0, 212 | "outputCost": 0 213 | }, 214 | "yarn-llama2": { 215 | "inputCost": 0, 216 | "outputCost": 0 217 | }, 218 | "llama3-gradient": { 219 | 
"inputCost": 0, 220 | "outputCost": 0 221 | }, 222 | "xwinlm": { 223 | "inputCost": 0, 224 | "outputCost": 0 225 | }, 226 | "all-minilm": { 227 | "inputCost": 0, 228 | "outputCost": 0 229 | }, 230 | "samantha-mistral": { 231 | "inputCost": 0, 232 | "outputCost": 0 233 | }, 234 | "bakllava": { 235 | "inputCost": 0, 236 | "outputCost": 0 237 | }, 238 | "wizardlm": { 239 | "inputCost": 0, 240 | "outputCost": 0 241 | }, 242 | "stable-beluga": { 243 | "inputCost": 0, 244 | "outputCost": 0 245 | }, 246 | "medllama2": { 247 | "inputCost": 0, 248 | "outputCost": 0 249 | }, 250 | "wizardlm-uncensored": { 251 | "inputCost": 0, 252 | "outputCost": 0 253 | }, 254 | "nous-hermes2-mixtral": { 255 | "inputCost": 0, 256 | "outputCost": 0 257 | }, 258 | "llama3-chatqa": { 259 | "inputCost": 0, 260 | "outputCost": 0 261 | }, 262 | "yarn-mistral": { 263 | "inputCost": 0, 264 | "outputCost": 0 265 | }, 266 | "codeup": { 267 | "inputCost": 0, 268 | "outputCost": 0 269 | }, 270 | "llama-pro": { 271 | "inputCost": 0, 272 | "outputCost": 0 273 | }, 274 | "everythinglm": { 275 | "inputCost": 0, 276 | "outputCost": 0 277 | }, 278 | "meditron": { 279 | "inputCost": 0, 280 | "outputCost": 0 281 | }, 282 | "llava-llama3": { 283 | "inputCost": 0, 284 | "outputCost": 0 285 | }, 286 | "nexusraven": { 287 | "inputCost": 0, 288 | "outputCost": 0 289 | }, 290 | "stablelm-zephyr": { 291 | "inputCost": 0, 292 | "outputCost": 0 293 | }, 294 | "magicoder": { 295 | "inputCost": 0, 296 | "outputCost": 0 297 | }, 298 | "codebooga": { 299 | "inputCost": 0, 300 | "outputCost": 0 301 | }, 302 | "snowflake-arctic-embed": { 303 | "inputCost": 0, 304 | "outputCost": 0 305 | }, 306 | "mistrallite": { 307 | "inputCost": 0, 308 | "outputCost": 0 309 | }, 310 | "moondream": { 311 | "inputCost": 0, 312 | "outputCost": 0 313 | }, 314 | "wizard-vicuna": { 315 | "inputCost": 0, 316 | "outputCost": 0 317 | }, 318 | "duckdb-nsql": { 319 | "inputCost": 0, 320 | "outputCost": 0 321 | }, 322 | "goliath": { 323 | "inputCost": 
0, 324 | "outputCost": 0 325 | }, 326 | "open-orca-platypus2": { 327 | "inputCost": 0, 328 | "outputCost": 0 329 | }, 330 | "megadolphin": { 331 | "inputCost": 0, 332 | "outputCost": 0 333 | }, 334 | "notux": { 335 | "inputCost": 0, 336 | "outputCost": 0 337 | }, 338 | "llava-phi3": { 339 | "inputCost": 0, 340 | "outputCost": 0 341 | }, 342 | "notus": { 343 | "inputCost": 0, 344 | "outputCost": 0 345 | }, 346 | "dbrx": { 347 | "inputCost": 0, 348 | "outputCost": 0 349 | }, 350 | "granite-code": { 351 | "inputCost": 0, 352 | "outputCost": 0 353 | }, 354 | "falcon2": { 355 | "inputCost": 0, 356 | "outputCost": 0 357 | }, 358 | "alfred": { 359 | "inputCost": 0, 360 | "outputCost": 0 361 | }, 362 | "deepseek-v2": { 363 | "inputCost": 0, 364 | "outputCost": 0 365 | } 366 | } 367 | -------------------------------------------------------------------------------- /src/api/providers/openai.json: -------------------------------------------------------------------------------- 1 | { 2 | "gpt-4-turbo": { 3 | "inputCost": 10, 4 | "outputCost": 30 5 | }, 6 | "gpt-4": { 7 | "inputCost": 30, 8 | "outputCost": 60 9 | }, 10 | "gpt-4-32k": { 11 | "inputCost": 60, 12 | "outputCost": 120 13 | }, 14 | "gpt-3.5-turbo": { 15 | "inputCost": 0.5, 16 | "outputCost": 1.5 17 | }, 18 | "gpt-3.5-turbo-instruct": { 19 | "inputCost": 1.5, 20 | "outputCost": 2 21 | }, 22 | "gpt-4o": { 23 | "inputCost": 5, 24 | "outputCost": 15 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /src/api/providers/openrouter.json: -------------------------------------------------------------------------------- 1 | { 2 | "": { 3 | "inputCost": null, 4 | "outputCost": null 5 | }, 6 | "mistralai/mistral-7b-instruct": { 7 | "inputCost": 0.07, 8 | "outputCost": 0.07 9 | }, 10 | "mistralai/mistral-7b-instruct-v0.3": { 11 | "inputCost": 0.07, 12 | "outputCost": 0.07 13 | }, 14 | "nousresearch/hermes-2-pro-llama-3-8b": { 15 | "inputCost": 0.15, 16 | "outputCost": 0.15 17 | }, 
18 | "microsoft/phi-3-mini-128k-instruct:free": { 19 | "inputCost": 0, 20 | "outputCost": 0 21 | }, 22 | "microsoft/phi-3-mini-128k-instruct": { 23 | "inputCost": 0.1, 24 | "outputCost": 0.1 25 | }, 26 | "microsoft/phi-3-medium-128k-instruct:free": { 27 | "inputCost": 0, 28 | "outputCost": 0 29 | }, 30 | "microsoft/phi-3-medium-128k-instruct": { 31 | "inputCost": 1, 32 | "outputCost": 1 33 | }, 34 | "neversleep/llama-3-lumimaid-70b": { 35 | "inputCost": 3.375, 36 | "outputCost": 4.5 37 | }, 38 | "google/gemini-flash-1.5": { 39 | "inputCost": 0.25, 40 | "outputCost": 0.75 41 | }, 42 | "perplexity/llama-3-sonar-small-32k-chat": { 43 | "inputCost": 0.2, 44 | "outputCost": 0.2 45 | }, 46 | "perplexity/llama-3-sonar-small-32k-online": { 47 | "inputCost": 0.2, 48 | "outputCost": 0.2 49 | }, 50 | "perplexity/llama-3-sonar-large-32k-chat": { 51 | "inputCost": 1, 52 | "outputCost": 1 53 | }, 54 | "perplexity/llama-3-sonar-large-32k-online": { 55 | "inputCost": 1, 56 | "outputCost": 1 57 | }, 58 | "deepseek/deepseek-chat": { 59 | "inputCost": 0.14, 60 | "outputCost": 0.28 61 | }, 62 | "deepseek/deepseek-coder": { 63 | "inputCost": 0.14, 64 | "outputCost": 0.28 65 | }, 66 | "meta-llama/llama-3-8b": { 67 | "inputCost": 0.18, 68 | "outputCost": 0.18 69 | }, 70 | "meta-llama/llama-3-70b": { 71 | "inputCost": 0.81, 72 | "outputCost": 0.81 73 | }, 74 | "openai/gpt-4o": { 75 | "inputCost": 5, 76 | "outputCost": 15 77 | }, 78 | "openai/gpt-4o-2024-05-13": { 79 | "inputCost": 5, 80 | "outputCost": 15 81 | }, 82 | "meta-llama/llama-guard-2-8b": { 83 | "inputCost": 0.2, 84 | "outputCost": 0.2 85 | }, 86 | "liuhaotian/llava-yi-34b": { 87 | "inputCost": 0.9, 88 | "outputCost": 0.9 89 | }, 90 | "allenai/olmo-7b-instruct": { 91 | "inputCost": 0.18, 92 | "outputCost": 0.18 93 | }, 94 | "qwen/qwen-110b-chat": { 95 | "inputCost": 1.62, 96 | "outputCost": 1.62 97 | }, 98 | "qwen/qwen-14b-chat": { 99 | "inputCost": 0.27, 100 | "outputCost": 0.27 101 | }, 102 | "qwen/qwen-7b-chat": { 103 | 
"inputCost": 0.18, 104 | "outputCost": 0.18 105 | }, 106 | "qwen/qwen-4b-chat": { 107 | "inputCost": 0.09, 108 | "outputCost": 0.09 109 | }, 110 | "qwen/qwen-72b-chat": { 111 | "inputCost": 0.9, 112 | "outputCost": 0.9 113 | }, 114 | "qwen/qwen-32b-chat": { 115 | "inputCost": 0.75, 116 | "outputCost": 0.75 117 | }, 118 | "meta-llama/llama-3-8b-instruct:free": { 119 | "inputCost": 0, 120 | "outputCost": 0 121 | }, 122 | "neversleep/llama-3-lumimaid-8b": { 123 | "inputCost": 0.2006, 124 | "outputCost": 1.125 125 | }, 126 | "neversleep/llama-3-lumimaid-8b:extended": { 127 | "inputCost": 0.2006, 128 | "outputCost": 1.125 129 | }, 130 | "snowflake/snowflake-arctic-instruct": { 131 | "inputCost": 2.16, 132 | "outputCost": 2.16 133 | }, 134 | "fireworks/firellava-13b": { 135 | "inputCost": 0.2, 136 | "outputCost": 0.2 137 | }, 138 | "lynn/soliloquy-l3": { 139 | "inputCost": 0.05, 140 | "outputCost": 0.05 141 | }, 142 | "sao10k/fimbulvetr-11b-v2": { 143 | "inputCost": 0.375, 144 | "outputCost": 1.5 145 | }, 146 | "meta-llama/llama-3-8b-instruct:extended": { 147 | "inputCost": 0.2006, 148 | "outputCost": 1.125 149 | }, 150 | "meta-llama/llama-3-8b-instruct:nitro": { 151 | "inputCost": 0.2, 152 | "outputCost": 0.2 153 | }, 154 | "meta-llama/llama-3-70b-instruct:nitro": { 155 | "inputCost": 0.9, 156 | "outputCost": 0.9 157 | }, 158 | "meta-llama/llama-3-8b-instruct": { 159 | "inputCost": 0.07, 160 | "outputCost": 0.07 161 | }, 162 | "meta-llama/llama-3-70b-instruct": { 163 | "inputCost": 0.59, 164 | "outputCost": 0.79 165 | }, 166 | "mistralai/mixtral-8x22b-instruct": { 167 | "inputCost": 0.65, 168 | "outputCost": 0.65 169 | }, 170 | "microsoft/wizardlm-2-8x22b": { 171 | "inputCost": 0.65, 172 | "outputCost": 0.65 173 | }, 174 | "microsoft/wizardlm-2-7b": { 175 | "inputCost": 0.07, 176 | "outputCost": 0.07 177 | }, 178 | "undi95/toppy-m-7b:nitro": { 179 | "inputCost": 0.07, 180 | "outputCost": 0.07 181 | }, 182 | "mistralai/mixtral-8x22b": { 183 | "inputCost": 0.9, 184 | 
"outputCost": 0.9 185 | }, 186 | "openai/gpt-4-turbo": { 187 | "inputCost": 10, 188 | "outputCost": 30 189 | }, 190 | "google/gemini-pro-1.5": { 191 | "inputCost": 2.5, 192 | "outputCost": 7.5 193 | }, 194 | "cohere/command-r-plus": { 195 | "inputCost": 3, 196 | "outputCost": 15 197 | }, 198 | "databricks/dbrx-instruct": { 199 | "inputCost": 1.08, 200 | "outputCost": 1.08 201 | }, 202 | "sophosympatheia/midnight-rose-70b": { 203 | "inputCost": 9, 204 | "outputCost": 9 205 | }, 206 | "cohere/command": { 207 | "inputCost": 1, 208 | "outputCost": 2 209 | }, 210 | "cohere/command-r": { 211 | "inputCost": 0.5, 212 | "outputCost": 1.5 213 | }, 214 | "anthropic/claude-3-haiku": { 215 | "inputCost": 0.25, 216 | "outputCost": 1.25 217 | }, 218 | "anthropic/claude-3-haiku:beta": { 219 | "inputCost": 0.25, 220 | "outputCost": 1.25 221 | }, 222 | "google/gemma-7b-it:nitro": { 223 | "inputCost": 0.2, 224 | "outputCost": 0.2 225 | }, 226 | "mistralai/mistral-7b-instruct:nitro": { 227 | "inputCost": 0.07, 228 | "outputCost": 0.07 229 | }, 230 | "mistralai/mixtral-8x7b-instruct:nitro": { 231 | "inputCost": 0.54, 232 | "outputCost": 0.54 233 | }, 234 | "meta-llama/llama-2-70b-chat:nitro": { 235 | "inputCost": 0.9, 236 | "outputCost": 0.9 237 | }, 238 | "gryphe/mythomax-l2-13b:nitro": { 239 | "inputCost": 0.2, 240 | "outputCost": 0.2 241 | }, 242 | "anthropic/claude-3-opus": { 243 | "inputCost": 15, 244 | "outputCost": 75 245 | }, 246 | "anthropic/claude-3-sonnet": { 247 | "inputCost": 3, 248 | "outputCost": 15 249 | }, 250 | "anthropic/claude-3-opus:beta": { 251 | "inputCost": 15, 252 | "outputCost": 75 253 | }, 254 | "anthropic/claude-3-sonnet:beta": { 255 | "inputCost": 3, 256 | "outputCost": 15 257 | }, 258 | "mistralai/mistral-large": { 259 | "inputCost": 8, 260 | "outputCost": 24 261 | }, 262 | "google/gemma-7b-it:free": { 263 | "inputCost": 0, 264 | "outputCost": 0 265 | }, 266 | "google/gemma-7b-it": { 267 | "inputCost": 0.07, 268 | "outputCost": 0.07 269 | }, 270 | 
"nousresearch/nous-hermes-2-mistral-7b-dpo": { 271 | "inputCost": 0.18, 272 | "outputCost": 0.18 273 | }, 274 | "anthropic/claude-2:beta": { 275 | "inputCost": 8, 276 | "outputCost": 24 277 | }, 278 | "anthropic/claude-2.0:beta": { 279 | "inputCost": 8, 280 | "outputCost": 24 281 | }, 282 | "anthropic/claude-2.1:beta": { 283 | "inputCost": 8, 284 | "outputCost": 24 285 | }, 286 | "anthropic/claude-instant-1:beta": { 287 | "inputCost": 0.8, 288 | "outputCost": 2.4 289 | }, 290 | "openai/gpt-3.5-turbo-0125": { 291 | "inputCost": 0.5, 292 | "outputCost": 1.5 293 | }, 294 | "codellama/codellama-70b-instruct": { 295 | "inputCost": 0.81, 296 | "outputCost": 0.81 297 | }, 298 | "recursal/eagle-7b": { 299 | "inputCost": 0, 300 | "outputCost": 0 301 | }, 302 | "openai/gpt-4-turbo-preview": { 303 | "inputCost": 10, 304 | "outputCost": 30 305 | }, 306 | "undi95/remm-slerp-l2-13b:extended": { 307 | "inputCost": 1.125, 308 | "outputCost": 1.125 309 | }, 310 | "nousresearch/nous-hermes-2-mixtral-8x7b-sft": { 311 | "inputCost": 0.54, 312 | "outputCost": 0.54 313 | }, 314 | "nousresearch/nous-hermes-2-mixtral-8x7b-dpo": { 315 | "inputCost": 0.27, 316 | "outputCost": 0.27 317 | }, 318 | "mistralai/mistral-tiny": { 319 | "inputCost": 0.25, 320 | "outputCost": 0.25 321 | }, 322 | "mistralai/mistral-small": { 323 | "inputCost": 2, 324 | "outputCost": 6 325 | }, 326 | "mistralai/mistral-medium": { 327 | "inputCost": 2.7, 328 | "outputCost": 8.1 329 | }, 330 | "austism/chronos-hermes-13b": { 331 | "inputCost": 0.13, 332 | "outputCost": 0.13 333 | }, 334 | "nousresearch/nous-hermes-yi-34b": { 335 | "inputCost": 0.72, 336 | "outputCost": 0.72 337 | }, 338 | "neversleep/noromaid-mixtral-8x7b-instruct": { 339 | "inputCost": 8, 340 | "outputCost": 8 341 | }, 342 | "mistralai/mistral-7b-instruct-v0.2": { 343 | "inputCost": 0.07, 344 | "outputCost": 0.07 345 | }, 346 | "cognitivecomputations/dolphin-mixtral-8x7b": { 347 | "inputCost": 0.5, 348 | "outputCost": 0.5 349 | }, 350 | 
"google/gemini-pro": { 351 | "inputCost": 0.125, 352 | "outputCost": 0.375 353 | }, 354 | "google/gemini-pro-vision": { 355 | "inputCost": 0.125, 356 | "outputCost": 0.375 357 | }, 358 | "mistralai/mixtral-8x7b": { 359 | "inputCost": 0.54, 360 | "outputCost": 0.54 361 | }, 362 | "mistralai/mixtral-8x7b-instruct": { 363 | "inputCost": 0.24, 364 | "outputCost": 0.24 365 | }, 366 | "rwkv/rwkv-5-world-3b": { 367 | "inputCost": 0, 368 | "outputCost": 0 369 | }, 370 | "recursal/rwkv-5-3b-ai-town": { 371 | "inputCost": 0, 372 | "outputCost": 0 373 | }, 374 | "togethercomputer/stripedhyena-nous-7b": { 375 | "inputCost": 0.18, 376 | "outputCost": 0.18 377 | }, 378 | "togethercomputer/stripedhyena-hessian-7b": { 379 | "inputCost": 0.18, 380 | "outputCost": 0.18 381 | }, 382 | "koboldai/psyfighter-13b-2": { 383 | "inputCost": 1, 384 | "outputCost": 1 385 | }, 386 | "01-ai/yi-34b-chat": { 387 | "inputCost": 0.72, 388 | "outputCost": 0.72 389 | }, 390 | "01-ai/yi-34b": { 391 | "inputCost": 0.72, 392 | "outputCost": 0.72 393 | }, 394 | "01-ai/yi-6b": { 395 | "inputCost": 0.126, 396 | "outputCost": 0.126 397 | }, 398 | "nousresearch/nous-hermes-2-vision-7b": { 399 | "inputCost": 10, 400 | "outputCost": 10 401 | }, 402 | "nousresearch/nous-capybara-7b:free": { 403 | "inputCost": 0, 404 | "outputCost": 0 405 | }, 406 | "nousresearch/nous-capybara-7b": { 407 | "inputCost": 0.18, 408 | "outputCost": 0.18 409 | }, 410 | "openchat/openchat-7b:free": { 411 | "inputCost": 0, 412 | "outputCost": 0 413 | }, 414 | "openchat/openchat-7b": { 415 | "inputCost": 0.07, 416 | "outputCost": 0.07 417 | }, 418 | "gryphe/mythomist-7b:free": { 419 | "inputCost": 0, 420 | "outputCost": 0 421 | }, 422 | "neversleep/noromaid-20b": { 423 | "inputCost": 1.5, 424 | "outputCost": 2.25 425 | }, 426 | "gryphe/mythomist-7b": { 427 | "inputCost": 0.375, 428 | "outputCost": 0.375 429 | }, 430 | "intel/neural-chat-7b": { 431 | "inputCost": 5, 432 | "outputCost": 5 433 | }, 434 | "anthropic/claude-2": { 435 | 
"inputCost": 8, 436 | "outputCost": 24 437 | }, 438 | "anthropic/claude-2.1": { 439 | "inputCost": 8, 440 | "outputCost": 24 441 | }, 442 | "teknium/openhermes-2.5-mistral-7b": { 443 | "inputCost": 0.17, 444 | "outputCost": 0.17 445 | }, 446 | "liuhaotian/llava-13b": { 447 | "inputCost": 10, 448 | "outputCost": 10 449 | }, 450 | "nousresearch/nous-capybara-34b": { 451 | "inputCost": 0.9, 452 | "outputCost": 0.9 453 | }, 454 | "openai/gpt-4-vision-preview": { 455 | "inputCost": 10, 456 | "outputCost": 30 457 | }, 458 | "lizpreciatior/lzlv-70b-fp16-hf": { 459 | "inputCost": 0.59, 460 | "outputCost": 0.79 461 | }, 462 | "undi95/toppy-m-7b:free": { 463 | "inputCost": 0, 464 | "outputCost": 0 465 | }, 466 | "alpindale/goliath-120b": { 467 | "inputCost": 9.375, 468 | "outputCost": 9.375 469 | }, 470 | "undi95/toppy-m-7b": { 471 | "inputCost": 0.07, 472 | "outputCost": 0.07 473 | }, 474 | "openrouter/auto": { 475 | "inputCost": null, 476 | "outputCost": null 477 | }, 478 | "huggingfaceh4/zephyr-7b-beta:free": { 479 | "inputCost": 0, 480 | "outputCost": 0 481 | }, 482 | "google/palm-2-chat-bison-32k": { 483 | "inputCost": 0.25, 484 | "outputCost": 0.5 485 | }, 486 | "google/palm-2-codechat-bison-32k": { 487 | "inputCost": 0.25, 488 | "outputCost": 0.5 489 | }, 490 | "teknium/openhermes-2-mistral-7b": { 491 | "inputCost": 0.18, 492 | "outputCost": 0.18 493 | }, 494 | "open-orca/mistral-7b-openorca": { 495 | "inputCost": 0.18, 496 | "outputCost": 0.18 497 | }, 498 | "jondurbin/airoboros-l2-70b": { 499 | "inputCost": 0.7, 500 | "outputCost": 0.9 501 | }, 502 | "gryphe/mythomax-l2-13b:extended": { 503 | "inputCost": 1.125, 504 | "outputCost": 1.125 505 | }, 506 | "xwin-lm/xwin-lm-70b": { 507 | "inputCost": 3.75, 508 | "outputCost": 3.75 509 | }, 510 | "mistralai/mistral-7b-instruct:free": { 511 | "inputCost": 0, 512 | "outputCost": 0 513 | }, 514 | "openai/gpt-3.5-turbo-instruct": { 515 | "inputCost": 1.5, 516 | "outputCost": 2 517 | }, 518 | 
"mistralai/mistral-7b-instruct-v0.1": { 519 | "inputCost": 0.2, 520 | "outputCost": 0.2 521 | }, 522 | "pygmalionai/mythalion-13b": { 523 | "inputCost": 1.125, 524 | "outputCost": 1.125 525 | }, 526 | "openai/gpt-3.5-turbo-16k": { 527 | "inputCost": 3, 528 | "outputCost": 4 529 | }, 530 | "openai/gpt-4-32k": { 531 | "inputCost": 60, 532 | "outputCost": 120 533 | }, 534 | "meta-llama/codellama-34b-instruct": { 535 | "inputCost": 0.72, 536 | "outputCost": 0.72 537 | }, 538 | "phind/phind-codellama-34b": { 539 | "inputCost": 0.72, 540 | "outputCost": 0.72 541 | }, 542 | "nousresearch/nous-hermes-llama2-13b": { 543 | "inputCost": 0.18, 544 | "outputCost": 0.18 545 | }, 546 | "mancer/weaver": { 547 | "inputCost": 1.875, 548 | "outputCost": 2.25 549 | }, 550 | "anthropic/claude-2.0": { 551 | "inputCost": 8, 552 | "outputCost": 24 553 | }, 554 | "anthropic/claude-instant-1": { 555 | "inputCost": 0.8, 556 | "outputCost": 2.4 557 | }, 558 | "undi95/remm-slerp-l2-13b": { 559 | "inputCost": 0.27, 560 | "outputCost": 0.27 561 | }, 562 | "google/palm-2-chat-bison": { 563 | "inputCost": 0.25, 564 | "outputCost": 0.5 565 | }, 566 | "google/palm-2-codechat-bison": { 567 | "inputCost": 0.25, 568 | "outputCost": 0.5 569 | }, 570 | "gryphe/mythomax-l2-13b": { 571 | "inputCost": 0.13, 572 | "outputCost": 0.13 573 | }, 574 | "meta-llama/llama-2-13b-chat": { 575 | "inputCost": 0.13, 576 | "outputCost": 0.13 577 | }, 578 | "meta-llama/llama-2-70b-chat": { 579 | "inputCost": 0.64, 580 | "outputCost": 0.8 581 | }, 582 | "openai/gpt-3.5-turbo": { 583 | "inputCost": 0.5, 584 | "outputCost": 1.5 585 | }, 586 | "openai/gpt-4": { 587 | "inputCost": 30, 588 | "outputCost": 60 589 | } 590 | } 591 | -------------------------------------------------------------------------------- /src/api/proxies/ignored-filter.ts: -------------------------------------------------------------------------------- 1 | import { minimatch } from 'minimatch'; 2 | import { MarkdownView } from 'obsidian'; 3 | import 
Markpilot from 'src/main'; 4 | import { APIClient, ChatMessage } from '..'; 5 | 6 | export class IgnoredFilter implements APIClient { 7 | constructor( 8 | private client: APIClient, 9 | private plugin: Markpilot, 10 | ) {} 11 | 12 | fetchChat(messages: ChatMessage[]) { 13 | // No filter for chats. 14 | return this.client.fetchChat(messages); 15 | } 16 | 17 | async fetchCompletions(prefix: string, suffix: string) { 18 | const { plugin } = this; 19 | const { settings } = plugin; 20 | 21 | const view = plugin.app.workspace.getActiveViewOfType(MarkdownView); 22 | const file = view?.file; 23 | const content = view?.editor.getValue(); 24 | 25 | const isIgnoredFile = settings.completions.ignoredFiles.some( 26 | (filePattern) => 27 | file?.path && 28 | filePattern.trim() !== '' && 29 | minimatch(file?.path, filePattern), 30 | ); 31 | const hasIgnoredTags = settings.completions.ignoredTags.some( 32 | (tagRegex) => 33 | content && 34 | tagRegex.trim() !== '' && 35 | new RegExp(tagRegex, 'gm').test(content), 36 | ); 37 | if (isIgnoredFile || hasIgnoredTags) { 38 | return; 39 | } 40 | 41 | return this.client.fetchCompletions(prefix, suffix); 42 | } 43 | 44 | testConnection() { 45 | return this.client.testConnection(); 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/api/proxies/memory-cache.ts: -------------------------------------------------------------------------------- 1 | import { createHash } from 'crypto'; 2 | import Markpilot from 'src/main'; 3 | import { APIClient, ChatMessage } from '..'; 4 | 5 | export class MemoryCacheProxy implements APIClient { 6 | private store: Map = new Map(); 7 | 8 | constructor( 9 | private client: APIClient, 10 | private plugin: Markpilot, 11 | ) {} 12 | 13 | fetchChat(messages: ChatMessage[]) { 14 | // No caching for chats. 
15 | return this.client.fetchChat(messages); 16 | } 17 | 18 | async fetchCompletions(prefix: string, suffix: string) { 19 | const { settings } = this.plugin; 20 | 21 | if (!settings.cache.enabled) { 22 | const completions = await this.client.fetchCompletions(prefix, suffix); 23 | return completions; 24 | } 25 | 26 | // Use half the window size 27 | // because some characters may have overflowed due to extra whitespaces. 28 | const windowSize = settings.completions.windowSize / 2; 29 | const truncatedPrefix = prefix.slice( 30 | prefix.length - windowSize / 2, 31 | prefix.length, 32 | ); 33 | const truncatedSuffix = suffix.slice(0, windowSize / 2); 34 | 35 | // Extra whitespaces should not affect the completions. 36 | // We remove them after truncating the prefix and suffix for efficiency. 37 | const compactPrefix = truncatedPrefix.replace(/\s\s+/g, ' '); 38 | const compactSuffix = truncatedSuffix.replace(/\s\s+/g, ' '); 39 | 40 | const hash = createHash('sha256') 41 | .update(`${compactPrefix} ${compactSuffix} `, 'utf8') 42 | .digest('hex'); 43 | 44 | if (await this.store.has(hash)) { 45 | const cache = await this.store.get(hash); 46 | return cache; 47 | } 48 | 49 | const completions = await this.client.fetchCompletions(prefix, suffix); 50 | if (completions === undefined) { 51 | return undefined; 52 | } 53 | await this.store.set(hash, completions); 54 | return completions; 55 | } 56 | 57 | testConnection() { 58 | return this.client.testConnection(); 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /src/api/proxies/usage-monitor.ts: -------------------------------------------------------------------------------- 1 | import { Notice } from 'obsidian'; 2 | import Markpilot from 'src/main'; 3 | import { getThisMonthAsString } from 'src/utils'; 4 | import { APIClient, ChatMessage } from '..'; 5 | 6 | export class UsageMonitorProxy implements APIClient { 7 | constructor( 8 | private client: APIClient, 9 | private plugin: 
Markpilot, 10 | ) {} 11 | 12 | hasReachedLimit() { 13 | const { settings } = this.plugin; 14 | 15 | const thisMonth = getThisMonthAsString(); 16 | return ( 17 | settings.usage.monthlyCosts[thisMonth] >= settings.usage.monthlyLimit 18 | ); 19 | } 20 | 21 | async *fetchChat(messages: ChatMessage[]) { 22 | if (this.hasReachedLimit()) { 23 | new Notice( 24 | 'Monthly usage limit reached. Please increase the limit to keep on using inline completions.', 25 | ); 26 | return; 27 | } 28 | 29 | yield* this.client.fetchChat(messages); 30 | } 31 | 32 | async fetchCompletions(prefix: string, suffix: string) { 33 | if (this.hasReachedLimit()) { 34 | new Notice( 35 | 'Monthly usage limit reached. Please increase the limit to keep on using chat view.', 36 | ); 37 | return; 38 | } 39 | 40 | return await this.client.fetchCompletions(prefix, suffix); 41 | } 42 | 43 | testConnection() { 44 | return this.client.testConnection(); 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /src/chat/App.tsx: -------------------------------------------------------------------------------- 1 | import { useEffect, useLayoutEffect, useRef, useState } from 'react'; 2 | import { ChatHistory, ChatRole } from 'src/api'; 3 | import Markpilot from 'src/main'; 4 | import { ChatInput } from './components/ChatBox'; 5 | import { ChatItem } from './components/ChatItem'; 6 | import { ChatFetcher, ChatView } from './view'; 7 | 8 | const SYSTEM_PROMPT = ` 9 | Welcome, I'm your Markpilot and I'm here to help you get things done faster. You can also start an inline chat session. 10 | 11 | I'm powered by AI, so surprises and mistakes are possible. Make sure to verify any generated code or suggestions, and share feedback so that we can learn and improve. Check out the Markpilot documentation to learn more. 
12 | `; 13 | 14 | const defaultHistory: ChatHistory = { 15 | messages: [{ role: 'system', content: SYSTEM_PROMPT }], 16 | response: '', 17 | }; 18 | 19 | export function App({ 20 | view, 21 | fetcher, 22 | cancel, 23 | plugin, 24 | }: { 25 | view: ChatView; 26 | fetcher: ChatFetcher; 27 | cancel: () => void; 28 | plugin: Markpilot; 29 | }) { 30 | const { settings } = plugin; 31 | 32 | const [turn, setTurn] = useState('user'); 33 | const [history, setHistory] = useState( 34 | settings.chat.history.messages.length > 1 35 | ? settings.chat.history 36 | : defaultHistory, 37 | ); 38 | 39 | const inputRef = useRef(null); 40 | const bottomRef = useRef(null); 41 | 42 | // Expose the method to clear history to the view 43 | // so that the plugin command can call it. 44 | useEffect(() => { 45 | view.clear = () => setHistory(defaultHistory); 46 | }, []); 47 | 48 | // Scroll to the bottom when chat history changes. 49 | useLayoutEffect(() => { 50 | bottomRef?.current?.scrollIntoView(); 51 | }, [history]); 52 | 53 | // Save chat history to settings when it changes. 54 | // There may be a better way to store chat history, but this works for now. 55 | useEffect(() => { 56 | settings.chat.history = history; 57 | // TODO: 58 | // Only save settings before unload. 59 | plugin.saveSettings(); 60 | }, [history]); 61 | 62 | useEffect(() => { 63 | if (turn === 'assistant') { 64 | (async () => { 65 | // Ignores the first message which is the system prompt. 
66 | const messages = history.messages.slice(1); 67 | for await (const chunk of fetcher(messages)) { 68 | setHistory((history) => ({ 69 | ...history, 70 | response: history.response + chunk, 71 | })); 72 | } 73 | 74 | setHistory((history) => ({ 75 | messages: [ 76 | ...history.messages, 77 | { role: 'assistant', content: history.response }, 78 | ], 79 | response: '', 80 | })); 81 | setTurn('user'); 82 | })(); 83 | } else if (turn === 'user') { 84 | inputRef.current?.focus(); 85 | } 86 | }, [turn]); 87 | 88 | function submit(content: string) { 89 | setHistory({ 90 | ...history, 91 | messages: [...history.messages, { role: 'user', content }], 92 | }); 93 | setTurn('assistant'); 94 | } 95 | 96 | return ( 97 |
98 |
99 | {history.messages.map((message, index) => ( 100 | 101 | ))} 102 | {turn === 'assistant' && ( 103 | 107 | )} 108 |
109 |
110 |
111 | 112 |
113 |
114 | ); 115 | } 116 | -------------------------------------------------------------------------------- /src/chat/components/ChatBox.tsx: -------------------------------------------------------------------------------- 1 | import { CircleStop, SendHorizontal } from 'lucide-react'; 2 | import { forwardRef, useState } from 'react'; 3 | import { ChatRole } from 'src/api'; 4 | 5 | export const ChatInput = forwardRef< 6 | HTMLTextAreaElement, 7 | { 8 | turn: ChatRole; 9 | cancel: () => void; 10 | submit: (text: string) => void; 11 | } 12 | >(function ({ turn, cancel, submit }, ref) { 13 | const [value, setValue] = useState(''); 14 | 15 | const numLines = value.split('\n').length; 16 | const numRows = Math.min(10, numLines); 17 | 18 | return ( 19 |
20 |