├── .editorconfig
├── .eslintignore
├── .eslintrc
├── .github
│   └── workflows
│       ├── claude.yml
│       └── release.yml
├── .gitignore
├── .npmrc
├── CLAUDE.md
├── EXPECTED_BEHAVIOR.md
├── LICENSE
├── README.md
├── TROUBLESHOOTING.md
├── esbuild.config.mjs
├── main.css
├── main.ts
├── manifest.json
├── package-lock.json
├── package.json
├── removetags.sh
├── src
│   ├── autoTagger.ts
│   ├── backlinkGenerator.ts
│   ├── ollamaEmbeddings.ts
│   ├── openAIEmbeddings.ts
│   ├── rag.ts
│   ├── ragChatModal.ts
│   └── updateNoticeModal.ts
├── styles.css
├── test_stream.py
├── tsconfig.json
├── updatetags.sh
├── version-bump.mjs
└── versions.json
/.editorconfig:
--------------------------------------------------------------------------------
1 | # top-most EditorConfig file
2 | root = true
3 |
4 | [*]
5 | charset = utf-8
6 | end_of_line = lf
7 | insert_final_newline = true
8 | indent_style = tab
9 | indent_size = 4
10 | tab_width = 4
11 |
--------------------------------------------------------------------------------
/.eslintignore:
--------------------------------------------------------------------------------
1 | node_modules/
2 |
3 | main.js
4 |
--------------------------------------------------------------------------------
/.eslintrc:
--------------------------------------------------------------------------------
1 | {
2 | "root": true,
3 | "parser": "@typescript-eslint/parser",
4 | "env": { "node": true },
5 | "plugins": [
6 | "@typescript-eslint"
7 | ],
8 | "extends": [
9 | "eslint:recommended",
10 | "plugin:@typescript-eslint/eslint-recommended",
11 | "plugin:@typescript-eslint/recommended"
12 | ],
13 | "parserOptions": {
14 | "sourceType": "module"
15 | },
16 | "rules": {
17 | "no-unused-vars": "off",
18 | "@typescript-eslint/no-unused-vars": ["error", { "args": "none" }],
19 | "@typescript-eslint/ban-ts-comment": "off",
20 | "no-prototype-builtins": "off",
21 | "@typescript-eslint/no-empty-function": "off"
22 | }
23 | }
--------------------------------------------------------------------------------
/.github/workflows/claude.yml:
--------------------------------------------------------------------------------
1 | name: Claude Code
2 |
3 | on:
4 | issue_comment:
5 | types: [created]
6 | pull_request_review_comment:
7 | types: [created]
8 | issues:
9 | types: [opened, assigned]
10 | pull_request_review:
11 | types: [submitted]
12 |
13 | jobs:
14 | claude:
15 | if: |
16 | (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
17 | (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
18 | (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
19 | (github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
20 | runs-on: ubuntu-latest
21 | permissions:
22 | contents: read
23 | pull-requests: read
24 | issues: read
25 | id-token: write
26 | steps:
27 | - name: Checkout repository
28 | uses: actions/checkout@v4
29 | with:
30 | fetch-depth: 1
31 |
32 | - name: Run Claude Code
33 | id: claude
34 | uses: anthropics/claude-code-action@beta
35 | with:
36 | anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
37 |
38 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Release Obsidian plugin
2 |
3 | on:
4 | push:
5 | tags:
6 | - "*"
7 |
8 | jobs:
9 | build:
10 | runs-on: ubuntu-latest
11 |
12 | steps:
13 | - uses: actions/checkout@v3
14 |
15 | - name: Use Node.js
16 | uses: actions/setup-node@v3
17 | with:
18 | node-version: "18.x"
19 |
20 | - name: Build plugin
21 | run: |
22 | npm install
23 | npm run build
24 |
25 | - name: Create release
26 | env:
27 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
28 | run: |
29 | tag="${GITHUB_REF#refs/tags/}"
30 |
31 | gh release create "$tag" \
32 | --title="$tag" \
33 | --draft \
34 | main.js manifest.json styles.css
35 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # vscode
2 | .vscode
3 |
4 | # Intellij
5 | *.iml
6 | .idea
7 |
8 | # npm
9 | node_modules
10 |
11 | # Don't include the compiled main.js file in the repo.
12 | # They should be uploaded to GitHub releases instead.
13 | main.js
14 |
15 | # Exclude sourcemaps
16 | *.map
17 |
18 | # obsidian
19 | data.json
20 |
21 | # Exclude macOS Finder (System Explorer) View States
22 | .DS_Store
23 |
--------------------------------------------------------------------------------
/.npmrc:
--------------------------------------------------------------------------------
1 | tag-version-prefix=""
--------------------------------------------------------------------------------
/CLAUDE.md:
--------------------------------------------------------------------------------
1 | # Obsidian Local LLM Helper Plugin
2 |
3 | ## Overview
4 | A privacy-focused Obsidian plugin that integrates local LLM servers (Ollama, LM Studio) to enhance note-taking capabilities without sending data to external services.
5 |
6 | ## Key Features
7 | - **Local LLM Integration**: Works with OpenAI-compatible servers for offline functionality
8 | - **Text Processing Commands**: Summarization, tone adjustment, action item generation, custom prompts
9 | - **RAG Chat Interface**: Interactive chat with indexed notes using embeddings
10 | - **Backlink Generation**: Automatically generate relevant backlinks between notes
11 | - **Auto-Tagging**: Generate contextual tags for notes
12 | - **Web/News Search Integration**: Search capabilities with Brave API support
13 |
14 | ## Recent Refactoring (v2.1.2)
15 | The codebase underwent significant refactoring to improve organization:
16 | - All feature modules moved to `src/` directory
17 | - Better separation of concerns with dedicated files for each feature
18 | - Added support for both Ollama and OpenAI embeddings
19 |
20 | ## Main Components
21 | - **main.ts**: Core plugin class, settings management, command registration
22 | - **src/rag.ts**: RAGManager for document indexing and retrieval
23 | - **src/ragChatModal.ts**: Chat interface with RAG capabilities
24 | - **src/autoTagger.ts**: Automatic tag generation for notes
25 | - **src/backlinkGenerator.ts**: Intelligent backlink suggestions
26 | - **src/ollamaEmbeddings.ts**: Ollama embedding provider
27 | - **src/openAIEmbeddings.ts**: OpenAI-compatible embedding provider
28 | - **src/updateNoticeModal.ts**: Version update notifications
29 |
30 | ## Technical Details
31 | - Supports multiple personas for tailored AI responses
32 | - Configurable streaming output and response formatting
33 | - Maintains conversation history (up to 3 prompts; see the sketch below)
34 | - Uses vector embeddings for semantic search in notes
35 | - Temperature and max token controls for response generation
36 |
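For illustration, a minimal sketch of the bounded conversation history (the `ConversationEntry` shape mirrors `main.ts`; the helper function name is hypothetical):

```typescript
// Sketch only: pushWithLimit is an illustrative helper, not the plugin's exact code.
interface ConversationEntry {
	prompt: string;
	response: string;
}

function pushWithLimit(
	history: ConversationEntry[],
	entry: ConversationEntry,
	maxConvHistory: number // 0-3 in the plugin settings
): ConversationEntry[] {
	const next = [...history, entry];
	// Keep only the most recent exchanges; 0 disables history entirely.
	return maxConvHistory > 0 ? next.slice(-maxConvHistory) : [];
}
```
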
37 | ## Release Process
38 |
39 | ### Prerequisites
40 | 1. Ensure all changes are tested locally
41 | 2. Check for security vulnerabilities: `npm audit`
42 | 3. Fix any vulnerabilities: `npm audit fix`
43 |
44 | ### Release Steps
45 | 1. **Decide Version Number**: Follow semantic versioning (major.minor.patch)
46 | - Major: Breaking changes
47 | - Minor: New features (e.g., 2.1.2 → 2.2.0)
48 | - Patch: Bug fixes only
49 |
50 | 2. **Update Files** (in this order):
51 | - `manifest.json`: Update version number
52 | - `src/updateNoticeModal.ts`: Update changelog text with new features
53 | - `updatetags.sh`: Update version in git tag command
54 | - `README.md`: Add release notes for new version
55 |
56 | 3. **Build**: Run `npm run build` to generate production files
57 |
58 | 4. **Commit Changes**:
59 | ```bash
60 | git add manifest.json src/updateNoticeModal.ts updatetags.sh README.md package-lock.json
61 | git commit -m "chore: prepare release vX.X.X"
62 | ```
63 |
64 | 5. **Create Tag and Release**:
65 | ```bash
66 | ./updatetags.sh # This creates tag and pushes to trigger GitHub Actions
67 | ```
68 |
69 | ### GitHub Actions
70 | The `.github/workflows/release.yml` automatically:
71 | - Builds the plugin when a tag is pushed
72 | - Creates a draft release with `main.js`, `manifest.json`, and `styles.css`
73 | - The draft release must then be published manually on GitHub
74 |
75 | ### Important Notes
76 | - The `version-bump.mjs` script is for `npm version` command (not used in current process)
77 | - `versions.json` tracks Obsidian compatibility but isn't updated in releases
78 | - Always ensure `manifest.json` version matches the git tag
79 | - The build process generates `main.js` from TypeScript sources
--------------------------------------------------------------------------------
/EXPECTED_BEHAVIOR.md:
--------------------------------------------------------------------------------
1 | # Expected Behavior After Plugin Reload
2 |
3 | ## 🔄 When You Restart/Reload Obsidian
4 |
5 | ### 1. **Console Messages** (Developer Tools > Console)
6 | You should see these messages in order:
7 | ```
8 | 🔌 LLM Helper: Plugin loading...
9 | 📂 LLM Helper: Loading plugin settings...
10 | 💾 LLM Helper: Raw saved data: [object with your settings]
11 | ✅ LLM Helper: Final settings after merge: {provider: "ollama", server: "http://localhost:11434", ...}
12 | 🧠 LLM Helper: Initializing RAGManager...
13 | 🔄 RAGManager: Starting initialization...
14 | 📁 RAGManager: Plugin data path: .../data.json
15 | 📂 RAGManager: Loading embeddings from persistent storage...
16 | 📊 RAGManager: Raw data check: {dataExists: true, hasEmbeddings: X, hasIndexedFiles: Y, ...}
17 | 🔄 RAGManager: Reconstructing vector store with X documents...
18 | ✅ RAGManager: Successfully loaded X embeddings from disk
19 | 📁 RAGManager: Y files were previously indexed
20 | 🗂️ RAGManager: Files: file1.md, file2.md, file3.md...
21 | 🕒 RAGManager: Last indexed: [date/time]
22 | ✅ RAGManager initialized with persistent storage
23 | 📊 Settings: Updated indexed files count to Y
24 | ```
25 |
26 | ### 2. **User Notifications**
27 | You should see this notification appear:
28 | ```
29 | 📚 Loaded X embeddings from Y files (Z KB)
30 | ```
31 |
32 | ### 3. **Settings Panel**
33 | When you open Settings → Local LLM Helper:
34 | - **Indexed Files Count**: Should show the actual number (not 0)
35 | - The count should change from "Loading..." to the real number
36 | - **Run Diagnostics** button should show detailed info
37 |
38 | ## 🚨 **If Something is Wrong**
39 |
40 | ### Settings Reset to Default
41 | If you see:
42 | ```
43 | 💾 LLM Helper: Raw saved data: null
44 | ```
45 | This means your plugin settings aren't persisting. Possible causes:
46 | 1. Plugin was moved or reinstalled
47 | 2. Obsidian permissions issue
48 | 3. Plugin data directory changed
49 |
50 | ### No Embeddings Loaded
51 | If you see:
52 | ```
53 | 🆕 RAGManager: No saved embeddings found, starting fresh
54 | ```
55 | This is normal if:
56 | 1. First time using the plugin
57 | 2. You haven't indexed any notes yet
58 | 3. Settings changed (provider/model/server), causing a rebuild
59 |
60 | ### Settings Changed - Rebuild Required
61 | If you see:
62 | ```
63 | ⚙️ RAGManager: Settings changed, embeddings will be rebuilt on next index
64 | Current vs Saved: {current: {...}, saved: {...}}
65 | ```
66 | This means you changed your embedding configuration, so embeddings need to be rebuilt.
67 |
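The check can be thought of as comparing the current embedding configuration against the one saved alongside the embeddings. A sketch, with field and function names that are assumptions rather than the plugin's exact code:

```typescript
// Sketch only: names are illustrative assumptions, not the plugin's exact implementation.
interface EmbeddingConfig {
	provider: string; // e.g., "ollama"
	server: string;   // e.g., "http://localhost:11434"
	model: string;    // e.g., "mxbai-embed-large"
}

function settingsChanged(current: EmbeddingConfig, saved: EmbeddingConfig | null): boolean {
	if (!saved) return false; // nothing saved yet: fresh start, not a settings change
	return (
		current.provider !== saved.provider ||
		current.server !== saved.server ||
		current.model !== saved.model
	);
}
```
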
68 | ## 🔧 **Quick Diagnostic Commands**
69 |
70 | 1. **Command Palette** (Ctrl/Cmd+P) → "RAG Storage Diagnostics"
71 | 2. **Settings** → Local LLM Helper → "Run Diagnostics" button
72 | 3. **Check Developer Console** for detailed logs
73 |
74 | ## 📁 **Data Storage Location**
75 |
76 | Your data is now stored in separate files:
77 | ```
78 | [Obsidian Vault]/.obsidian/plugins/obsidian-local-llm-helper/data.json (Plugin settings)
79 | [Obsidian Vault]/.obsidian/plugins/obsidian-local-llm-helper/embeddings.json (Embeddings data)
80 | ```
81 |
82 | This separation prevents settings from being overwritten by embedding data.
83 |
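As a rough sketch of the separation, embeddings can be written through Obsidian's vault adapter while settings keep flowing through the standard `loadData()`/`saveData()` path (the helper name below is hypothetical):

```typescript
import { Plugin } from "obsidian";

// Sketch only: saveEmbeddings is a hypothetical helper, not the plugin's exact API.
// Settings stay in data.json via Plugin.loadData()/saveData(); embeddings get
// their own file, so the two can no longer overwrite each other.
async function saveEmbeddings(plugin: Plugin, embeddings: unknown): Promise<void> {
	const path = `${plugin.manifest.dir}/embeddings.json`;
	await plugin.app.vault.adapter.write(path, JSON.stringify(embeddings));
}
```
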
84 | ## 🔧 **If You Had the Previous Bug**
85 |
86 | If you were experiencing the re-indexing issue, you may need to:
87 |
88 | 1. **Clear corrupted data**: Use "RAG Storage Diagnostics" → look for any mixed data
89 | 2. **Reset plugin settings**: Go to Settings → Local LLM Helper and re-configure your provider/model
90 | 3. **Re-index once**: Run "Index Notes (BETA)" to create fresh embeddings in the new format
91 |
92 | The bug was that embeddings were overwriting plugin settings in the same file. This is now fixed with separate storage files.
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Mani Mohan
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Local LLM Helper - Obsidian Plugin
2 |
3 | Seamlessly integrate your local LLM with Obsidian. Process large text chunks, transform content with AI, chat with your notes and maintain data privacy — all without leaving your notes.
4 |
5 | ## Core Features
6 |
7 | #### Local LLM Integration:
8 | * **Multi-Provider Support**: Works with Ollama, OpenAI, and LM Studio (v2.2.0)
9 | * **Provider Switching**: Easy switching between providers in settings (v2.2.0)
10 | * **Model Selection**: Users can specify the LLM model to use, tailoring the experience to their needs and hardware
11 | * **Temperature & Max Tokens**: Configurable generation parameters (v2.2.0)
12 | * Remember chat history up to 3 previous prompts and responses
13 | #### Text Processing Commands:
14 | * Summarization: Condenses selected text while maintaining essential information and markdown formatting.
15 | * Professional Tone Adjustment: Rewrites selected text to sound more formal and polished.
16 | * Action Item Generation: Creates a structured list of actionable tasks from text.
17 | * Custom Prompts: Allows users to define and execute their own prompts for specialized tasks.
18 | * Prompt-as-Input: Uses the selected text directly as a prompt for creative text generation.
19 | * Generate backlinks: Generate backlinks to your notes.
20 | * Web Search: Search the web for selected text.
21 | * News Search: Search the news for selected text.
22 |
23 |
24 |
25 | #### LLM Chat Interface:
26 | * Interactive Chat Window: Engages in multi-turn conversations with the LLM for dynamic interaction.
27 | * Conversation History: Keeps track of past messages within the same session.
28 | * Chat with your notes: Chat with your indexed notes.
29 |
30 |
31 |
32 | #### Ribbon Menu and Status Bar:
33 | * Ribbon Menu: Provides quick access to common commands and the chat interface.
34 | * Status Bar: Displays the plugin's current status (ready or generating response).
35 |
36 |
37 |
38 | #### Plugin Settings:
39 | * Server Configuration: Easily set the server address, port, and model name.
40 | * Custom Prompt: Define a personalized prompt for repeated use.
41 | * Streaming Output: Toggle real-time, word-by-word output (experimental).
42 | * Output Mode: Choose to replace or append the generated text to the selection.
43 | * Personas: Select different personas to tailor the AI's response style.
44 | * Response formatting: Prepend/Append LLM responses.
45 |
46 |
47 |
48 |
49 | ## Release notes
50 | v2.2.1
51 | * **Critical Bug Fix**: Fixed re-embedding issue that caused embeddings to regenerate on every app restart
52 | * **Proper Persistent Storage**: Embeddings now persist correctly across Obsidian restarts without data conflicts
53 | * **Storage Diagnostics**: Added new diagnostic command and settings button to check embedding storage status
54 | * **User Notifications**: Shows embedding count and storage information on startup
55 | * **Enhanced Error Handling**: Improved Ollama API integration with better error messages
56 | * **Settings Improvements**: Indexed file count now updates properly in settings panel
57 |
58 | v2.2.0
59 | * **Multi-Provider Support**: Added support for OpenAI and LM Studio alongside Ollama
60 | * **Provider Switching**: Easy provider selection in settings (Ollama/OpenAI/LM Studio)
61 | * **Enhanced Configuration**: Temperature and max tokens are now user-configurable
62 | * **Code Refactoring**: Improved project structure with files moved to src/ directory
63 | * **Bug Fix**: Fixed server URL tooltip inconsistency
64 | * **Security Update**: Updated axios to fix security vulnerability
65 |
66 | v1.1.3
67 | * Chat history is now stored up to 3 previous prompts and responses.
68 | * Response formatting: Prepend/Append LLM responses.
69 | * Server port settings now removed.
70 |
71 | v1.1.1 and v1.1.2
72 | * Major update: LLM chat functionality that works with available personas
73 | * New UI for chat interaction: typing indicators, response formatting, and a modern look for the chat interface
74 | * Streamlined personas related code
75 | * CSS styling added for different parts of the plugin
76 |
77 | v1.0.10
78 | * Ollama support, plus support for all LLM servers that support the OpenAI API /v1/chat/completions endpoint.
79 | * Better prompts for available personas.
80 |
81 | v1.0.9
82 | * Added personas to choose from - available in Settings menu (raise issue for new persona needs)
83 |
84 | v1.0.8
85 | * Removed model name specification - it doesn't matter if you're using LMStudio.
86 | * You can now choose whether to replace or append to the selected text.
87 |
88 | v1.0.7
89 | * Generate text button updated with more meaningful text
90 | * All previously available functionality can now be accessed from the command palette.
91 |
92 | v1.0.6
93 | * Custom prompt capability (enter your prompt in plugin Settings)
94 | * Generate action items - new prompt addition
95 | * Better status bar text updates
96 |
97 | v1.0.5
98 | * Streaming capabilities (enable in plugin Settings)
99 |
100 | v1.0.4
101 | * Summarize selected text in your Markdown notes.
102 | * Rephrase to make selected text sound professional
103 | * Generate text using selected text as prompt
104 | * Access various LLM functionalities through a ribbon icon.
105 |
106 | ## Installation
107 |
108 | Search for Local LLM Helper in Community plugins.
109 | Install and enable the plugin to use it with your vault.
110 |
111 | ## Usage
112 |
113 | 1. Select the text you want to process in your Markdown note (visit the Settings page first to confirm everything is configured correctly).
114 | 2. Click the plugin icon in the ribbon bar (brain icon) and choose the desired action.
115 | 3. Use LLM Chat for interactive side conversations.
116 |
117 | ## Configuration
118 |
119 | The plugin settings let you specify the server address (including port, if needed) and the LLM model name used for processing.
120 | The plugin currently works with any LLM server that supports the OpenAI API /v1/chat/completions endpoint; a request sketch follows the steps below.
121 |
122 | 1. Go to Settings > Local LLM Helper.
123 | 2. Enter the details for your LLM server.
124 | 3. Choose the appropriate LLM model name from your server (if needed).
125 | 4. Select personas if needed.
126 | 5. Change replace/append based on preference.
127 |
128 | **Note:** You'll need to set up and configure your own LLM server for this plugin to function.
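
For reference, a minimal TypeScript sketch of the kind of OpenAI-compatible request the plugin sends; the server address, model name, and parameters here are example values and should match your own settings:

```typescript
// Sketch only: example values, not the plugin's exact implementation.
async function chatCompletion(prompt: string): Promise<string> {
	const res = await fetch("http://localhost:11434/v1/chat/completions", {
		method: "POST",
		headers: { "Content-Type": "application/json" },
		body: JSON.stringify({
			model: "llama3",
			messages: [{ role: "user", content: prompt }],
			temperature: 0.7,
			max_tokens: 1024,
		}),
	});
	const data = await res.json();
	return data.choices[0].message.content;
}
```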
129 |
130 | ## Development
131 |
132 | Feel free to clone the repository and modify the code to suit your needs. The code utilizes the following Obsidian API elements:
133 |
134 | * `App`
135 | * `Editor`
136 | * `MarkdownView`
137 | * `Menu`
138 | * `Notice`
139 | * `Plugin`
140 | * `PluginSettingTab`
141 | * `Setting`
142 | * `View`
143 |
144 | ## License
145 |
146 | This plugin is distributed under the MIT license. See the LICENSE file for details.
147 |
--------------------------------------------------------------------------------
/TROUBLESHOOTING.md:
--------------------------------------------------------------------------------
1 | # Troubleshooting Guide
2 |
3 | ## Common Issues and Solutions
4 |
5 | ### 1. **Embedding Model Not Found (400 Bad Request)**
6 |
7 | **Error**: `400 (Bad Request)` - `model "nomic-embed-text" not found, try pulling it first`
8 |
9 | **This is the most common issue!**
10 |
11 | **Solutions**:
12 |
13 | #### For Ollama Users:
14 | 1. **Check what models are installed**:
15 | ```bash
16 | ollama list
17 | ```
18 |
19 | 2. **Install a compatible embedding model**:
20 | ```bash
21 | # Recommended embedding models:
22 | ollama pull mxbai-embed-large # (Default in plugin)
23 | ollama pull nomic-embed-text # Alternative option
24 | ollama pull all-minilm # Smaller, faster option
25 | ```
26 |
27 | 3. **Update plugin settings** (IMPORTANT):
28 | - Go to Obsidian Settings → Community Plugins → Local LLM Helper → Settings
29 | - Set "Embedding Model Name" to match your installed model (e.g., `mxbai-embed-large`)
30 | - The plugin will automatically update when you save settings
31 |
32 | 4. **Verify model is working**:
33 | ```bash
34 | curl http://localhost:11434/api/embeddings \
35 | -d '{"model": "mxbai-embed-large", "prompt": "test"}'
36 | ```
37 |
38 | ### 2. **Connection Errors**
39 |
40 | **Error**: `ECONNREFUSED` or `404 (Not Found)`
41 |
42 | **Symptoms**:
43 | - Cannot connect to server
44 | - "Server not found" errors
45 |
46 | **Solutions**:
47 |
48 | #### For Ollama Users:
49 | 1. **Start Ollama server**:
50 | ```bash
51 | ollama serve
52 | ```
53 |
54 | 2. **Verify server is running**:
55 | ```bash
56 | curl http://localhost:11434/api/tags
57 | ```
58 |
59 | 3. **Check plugin settings**:
60 | - Provider Type = "Ollama"
61 | - Server Address = `http://localhost:11434`
62 |
63 | #### For LM Studio Users:
64 | 1. **Start LM Studio server**:
65 | - Open LM Studio
66 | - Go to "Local Server" tab
67 | - Click "Start Server"
68 |
69 | 2. **Load an embedding model**:
70 | - Download an embedding model (e.g., `nomic-ai/nomic-embed-text-v1.5-GGUF`)
71 | - Load it in the server tab
72 |
73 | 3. **Verify server address**:
74 | - Default: `http://localhost:1234`
75 | - Check plugin settings: Provider Type = "OpenAI"
76 |
77 | 4. **Test LM Studio API**:
78 | ```bash
79 | curl http://localhost:1234/v1/models
80 | ```
81 |
82 | ### 3. **Model Not Found Errors**
83 |
84 | **Error**: `model "nomic-embed-text" not found`
85 |
86 | **Solutions**:
87 |
88 | #### For Ollama:
89 | ```bash
90 | ollama pull nomic-embed-text
91 | ollama list # Verify model is installed
92 | ```
93 |
94 | #### For LM Studio:
95 | - Download compatible embedding models from HuggingFace
96 | - Popular options: `nomic-ai/nomic-embed-text-v1.5-GGUF`
97 |
98 | ### 4. **Plugin Settings Configuration**
99 |
100 | **Recommended Settings**:
101 |
102 | #### Ollama Setup:
103 | - **Provider Type**: Ollama
104 | - **Server Address**: `http://localhost:11434`
105 | - **Embedding Model**: `mxbai-embed-large` (or any installed embedding model)
106 | - **LLM Model**: `llama3` (or any installed chat model)
107 |
108 | #### LM Studio Setup:
109 | - **Provider Type**: OpenAI
110 | - **Server Address**: `http://localhost:1234`
111 | - **Embedding Model**: Name of loaded embedding model
112 | - **LLM Model**: Name of loaded chat model
113 | - **OpenAI API Key**: `lm-studio` (can be anything)
114 |
115 | ### 5. **Performance Issues**
116 |
117 | **Symptoms**: Slow indexing or timeouts
118 |
119 | **Solutions**:
120 | 1. **Reduce chunk size** in code (default: 1000 chars; see the sketch below this list)
121 | 2. **Index smaller batches** of files
122 | 3. **Check system resources** (RAM/CPU usage)
123 | 4. **Use faster embedding models**
124 |
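For those comfortable editing the code, a rough sketch of a fixed-size splitter; the plugin's actual chunking lives in the RAG code (likely `src/rag.ts`) and may split on different boundaries:

```typescript
// Sketch only: a simple fixed-size splitter, not the plugin's exact implementation.
function chunkText(text: string, chunkSize = 1000): string[] {
	const chunks: string[] = [];
	for (let i = 0; i < text.length; i += chunkSize) {
		chunks.push(text.slice(i, i + chunkSize));
	}
	return chunks;
}
```
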
125 | ### 6. **Persistent Storage Notice**
126 |
127 | **Good News**: This plugin now uses **persistent storage** for embeddings!
128 |
129 | **What this means**:
130 | - Embeddings are saved to your Obsidian data directory
131 | - When you restart Obsidian, embeddings are automatically loaded
132 | - No need to re-index your notes after restart
133 | - Embeddings will only be rebuilt if you change provider, model, or server settings
134 |
135 | **This provides much better performance** after the initial indexing!
136 |
137 | ### 7. **Verification Steps**
138 |
139 | **Test Your Setup**:
140 |
141 | 1. **Run Storage Diagnostics**:
142 | - Use Command Palette (Ctrl/Cmd+P) → "RAG Storage Diagnostics"
143 | - OR go to Settings → Local LLM Helper → "Run Diagnostics" button
144 | - Check console for detailed storage information
145 |
146 | 2. **Check console logs** (Developer Tools > Console)
147 | - Look for messages starting with 🔌, 📂, 🧠, ✅, or ❌
148 | - Persistent storage messages will show embedding counts and file lists
149 |
150 | 3. **Verify server is responding**:
151 | ```bash
152 | # For Ollama
153 | curl http://localhost:11434/api/tags
154 |
155 | # For LM Studio
156 | curl http://localhost:1234/v1/models
157 | ```
158 |
159 | 4. **Test embedding API**:
160 | ```bash
161 | # For Ollama
162 | curl http://localhost:11434/api/embeddings \
163 | -d '{"model": "nomic-embed-text", "prompt": "test"}'
164 |
165 | # For LM Studio
166 | curl http://localhost:1234/v1/embeddings \
167 | -H "Content-Type: application/json" \
168 | -d '{"model": "nomic-embed-text", "input": "test"}'
169 | ```
170 |
171 | ### 8. **Getting Help**
172 |
173 | If you're still having issues:
174 |
175 | 1. **Check console logs** for detailed error messages
176 | 2. **Verify server logs** (Ollama/LM Studio console output)
177 | 3. **Test API endpoints** manually with curl
178 | 4. **Report issues** with full error logs and configuration details
179 |
180 | ## Quick Reference
181 |
182 | | Provider | Default Port | Default Model | API Endpoint |
183 | |----------|--------------|---------------|--------------|
184 | | Ollama | 11434 | mxbai-embed-large | `/api/embeddings` |
185 | | LM Studio | 1234 | (varies) | `/v1/embeddings` |
186 |
187 | ## Common Commands
188 |
189 | ```bash
190 | # Ollama
191 | ollama serve
192 | ollama pull nomic-embed-text
193 | ollama list
194 |
195 | # Test connections
196 | curl http://localhost:11434/api/tags # Ollama
197 | curl http://localhost:1234/v1/models # LM Studio
198 | ```
--------------------------------------------------------------------------------
/esbuild.config.mjs:
--------------------------------------------------------------------------------
1 | import esbuild from "esbuild";
2 | import process from "process";
3 | import builtins from "builtin-modules";
4 |
5 | const banner =
6 | `/*
7 | THIS IS A GENERATED/BUNDLED FILE BY ESBUILD
8 | if you want to view the source, please visit the github repository of this plugin
9 | */
10 | `;
11 |
12 | const prod = (process.argv[2] === "production");
13 |
14 | const context = await esbuild.context({
15 | banner: {
16 | js: banner,
17 | },
18 | entryPoints: ["main.ts"],
19 | bundle: true,
20 | external: [
21 | "obsidian",
22 | "electron",
23 | "@codemirror/autocomplete",
24 | "@codemirror/collab",
25 | "@codemirror/commands",
26 | "@codemirror/language",
27 | "@codemirror/lint",
28 | "@codemirror/search",
29 | "@codemirror/state",
30 | "@codemirror/view",
31 | "@lezer/common",
32 | "@lezer/highlight",
33 | "@lezer/lr",
34 | ...builtins],
35 | format: "cjs",
36 | target: "es2018",
37 | logLevel: "info",
38 | sourcemap: prod ? false : "inline",
39 | treeShaking: true,
40 | outfile: "main.js",
41 | });
42 |
43 | if (prod) {
44 | await context.rebuild();
45 | process.exit(0);
46 | } else {
47 | await context.watch();
48 | }
--------------------------------------------------------------------------------
/main.css:
--------------------------------------------------------------------------------
1 | /* styles.css */
2 | .chatHistoryElStyle {
3 | overflow-y: auto;
4 | }
5 | .personasInfoStyle {
6 | font-size: x-small;
7 | font-style: italic;
8 | opacity: 0.5;
9 | margin-bottom: 10px;
10 | }
11 | .llmChatMessageStyleUser {
12 | border-radius: 15px;
13 | background-color: #000066;
14 | padding: 10px;
15 | margin-bottom: 20px;
16 | margin-top: 20px;
17 | margin-left: 20px;
18 | user-select: text;
19 | box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.2), 0 6px 20px 0 rgba(0, 0, 0, 0.19);
20 | font-style: italic;
21 | }
22 | .llmChatMessageStyleAI {
23 | border-radius: 15px;
24 | background-color: #003300;
25 | padding: 10px;
26 | margin-bottom: 20px;
27 | margin-top: 20px;
28 | margin-right: 20px;
29 | user-select: text;
30 | box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.2), 0 6px 20px 0 rgba(0, 0, 0, 0.19);
31 | }
32 | .chatInputStyle {
33 | width: 100%;
34 | }
35 | .thinking-indicator {
36 | display: inline-block;
37 | margin-bottom: 10px;
38 | }
39 | .thinking-indicator span {
40 | margin-left: 2px;
41 | }
42 | .dots {
43 | display: inline-block;
44 | position: relative;
45 | }
46 | .dot {
47 | display: inline-block;
48 | width: 3px;
49 | height: 3px;
50 | border-radius: 50%;
51 | background-color: #ddd;
52 | margin-right: 1px;
53 | animation: blink 1.5s infinite;
54 | }
55 | .dot:nth-child(2) {
56 | animation-delay: 0.2s;
57 | }
58 | .dot:nth-child(3) {
59 | animation-delay: 0.4s;
60 | }
61 | @keyframes blink {
62 | 0%, 100% {
63 | opacity: 1;
64 | }
65 | 50% {
66 | opacity: 0;
67 | }
68 | }
70 |
--------------------------------------------------------------------------------
/main.ts:
--------------------------------------------------------------------------------
1 | import {
2 | App,
3 | Editor,
4 | MarkdownView,
5 | Modal,
6 | Menu,
7 | Notice,
8 | Plugin,
9 | PluginSettingTab,
10 | Setting,
11 | View,
12 | requestUrl,
13 | setIcon,
14 | TextComponent,
15 | ButtonComponent,
16 | } from "obsidian";
17 | import { generateAndAppendTags } from "./src/autoTagger";
18 | import { UpdateNoticeModal } from "./src/updateNoticeModal";
19 | import { RAGManager } from './src/rag';
20 | import { BacklinkGenerator } from './src/backlinkGenerator';
21 | import { RAGChatModal } from './src/ragChatModal';
22 |
23 | // Remember to rename these classes and interfaces!
24 |
25 | export interface OLocalLLMSettings {
26 | serverAddress: string;
27 | llmModel: string;
28 | stream: boolean;
29 | customPrompt: string;
30 | maxTokens: number;
31 | maxConvHistory: number;
32 | outputMode: string;
33 | personas: string;
34 | providerType: string;
35 | responseFormatting: boolean;
36 | responseFormatPrepend: string;
37 | responseFormatAppend: string;
38 | temperature: number;
39 | lastVersion: string;
40 | embeddingModelName: string;
41 | braveSearchApiKey: string;
42 | openAIApiKey?: string;
43 | }
44 |
45 | interface ConversationEntry {
46 | prompt: string;
47 | response: string;
48 | }
49 |
50 | const DEFAULT_SETTINGS: OLocalLLMSettings = {
51 | serverAddress: "http://localhost:11434",
52 | llmModel: "llama3",
53 | maxTokens: 1024,
54 | temperature: 0.7,
55 | providerType: "ollama",
56 | stream: false,
57 | customPrompt: "create a todo list from the following text:",
58 | outputMode: "replace",
59 | personas: "default",
60 | maxConvHistory: 0,
61 | responseFormatting: false,
62 | responseFormatPrepend: "``` LLM Helper - generated response \n\n",
63 | responseFormatAppend: "\n\n```",
64 | lastVersion: "0.0.0",
65 | embeddingModelName: "mxbai-embed-large",
66 | braveSearchApiKey: "",
67 | openAIApiKey: "lm-studio"
68 | };
69 |
70 | const personasDict: { [key: string]: string } = {
71 | "default": "Default",
72 | "physics": "Physics expert",
73 | "fitness": "Fitness expert",
74 | "developer": "Software Developer",
75 | "stoic": "Stoic Philosopher",
76 | "productmanager": "Product Manager",
77 | "techwriter": "Technical Writer",
78 | "creativewriter": "Creative Writer",
79 | "tpm": "Technical Program Manager",
80 | "engineeringmanager": "Engineering Manager",
81 | "executive": "Executive",
82 | "officeassistant": "Office Assistant"
83 | };
84 |
85 | export default class OLocalLLMPlugin extends Plugin {
86 | settings: OLocalLLMSettings;
87 | modal: any;
88 | conversationHistory: ConversationEntry[] = [];
89 | isKillSwitchActive: boolean = false;
90 | public ragManager: RAGManager;
91 | private backlinkGenerator: BacklinkGenerator;
92 |
93 | async checkForUpdates() {
94 | const currentVersion = this.manifest.version;
95 | const lastVersion = this.settings.lastVersion || "0.0.0";
96 | //const lastVersion = "0.0.0";
97 |
98 | if (currentVersion !== lastVersion) {
99 | new UpdateNoticeModal(this.app, currentVersion).open();
100 | this.settings.lastVersion = currentVersion;
101 | await this.saveSettings();
102 | }
103 | }
104 |
105 | async onload() {
106 | console.log('🔌 LLM Helper: Plugin loading...');
107 | await this.loadSettings();
108 | console.log('⚙️ LLM Helper: Settings loaded:', {
109 | provider: this.settings.providerType,
110 | server: this.settings.serverAddress,
111 | embeddingModel: this.settings.embeddingModelName,
112 | llmModel: this.settings.llmModel
113 | });
114 | this.checkForUpdates();
115 | // Validate server configuration
116 | this.validateServerConfiguration();
117 |
118 | console.log('🧠 LLM Helper: Initializing RAGManager...');
119 | // Initialize RAGManager
120 | this.ragManager = new RAGManager(this.app.vault, this.settings, this);
121 |
122 | // Initialize RAGManager and show user notification about loaded data
123 | await this.ragManager.initialize();
124 |
125 | // Show user-friendly notification about loaded embeddings after a short delay
126 | // This ensures all UI elements are ready
127 | setTimeout(() => {
128 | this.showStorageNotification();
129 | }, 500);
130 |
131 | // Initialize BacklinkGenerator
132 | this.backlinkGenerator = new BacklinkGenerator(this.ragManager, this.app.vault);
133 |
134 | // Add command for RAG Backlinks
135 | this.addCommand({
136 | id: 'generate-rag-backlinks',
137 | name: 'Generate RAG Backlinks (BETA)',
138 | callback: this.handleGenerateBacklinks.bind(this),
139 | });
140 |
141 | // Add diagnostic command
142 | this.addCommand({
143 | id: 'rag-diagnostics',
144 | name: 'RAG Storage Diagnostics',
145 | callback: this.handleDiagnostics.bind(this),
146 | });
147 |
148 | // Remove the automatic indexing
149 | // this.indexNotes();
150 | this.addCommand({
151 | id: 'rag-chat',
152 | name: 'Chat with your notes (RAG) - BETA',
153 | callback: () => {
154 | new Notice("This is a beta feature. Please use with caution and make sure you have indexed your notes before using it.");
155 | const ragChatModal = new RAGChatModal(this.app, this.settings, this.ragManager);
156 | ragChatModal.open();
157 | },
158 | });
159 |
160 | this.addCommand({
161 | id: "summarize-selected-text",
162 | name: "Summarize selected text",
163 | editorCallback: (editor: Editor, view: MarkdownView) => {
164 | this.isKillSwitchActive = false; // Reset kill switch state
165 | let selectedText = this.getSelectedText();
166 | if (selectedText.length > 0) {
167 | processText(
168 | selectedText,
169 | "Summarize the following text (maintain verbs and pronoun forms, also retain the markdowns):",
170 | this
171 | );
172 | }
173 | },
174 | });
175 |
176 | this.addCommand({
177 | id: "makeitprof-selected-text",
178 | name: "Make selected text sound professional",
179 | editorCallback: (editor: Editor, view: MarkdownView) => {
180 | this.isKillSwitchActive = false; // Reset kill switch state
181 | let selectedText = this.getSelectedText();
182 | if (selectedText.length > 0) {
183 | processText(
184 | selectedText,
185 | "Make the following sound professional (maintain verbs and pronoun forms, also retain the markdowns):",
186 | this
187 | );
188 | }
189 | },
190 | });
191 |
192 | this.addCommand({
193 | id: "actionitems-selected-text",
194 | name: "Generate action items from selected text",
195 | editorCallback: (editor: Editor, view: MarkdownView) => {
196 | this.isKillSwitchActive = false; // Reset kill switch state
197 | let selectedText = this.getSelectedText();
198 | if (selectedText.length > 0) {
199 | processText(
200 | selectedText,
201 | "Generate action items based on the following text (use or numbers based on context):",
202 | this
203 | );
204 | }
205 | },
206 | });
207 |
208 | this.addCommand({
209 | id: "custom-selected-text",
210 | name: "Run Custom prompt (from settings) on selected text",
211 | editorCallback: (editor: Editor, view: MarkdownView) => {
212 | this.isKillSwitchActive = false; // Reset kill switch state
213 | new Notice("Custom prompt: " + this.settings.customPrompt);
214 | let selectedText = this.getSelectedText();
215 | if (selectedText.length > 0) {
216 | processText(
217 | selectedText,
218 | this.settings.customPrompt,
219 | this
220 | );
221 | }
222 | },
223 | });
224 |
225 | this.addCommand({
226 | id: "gentext-selected-text",
227 | name: "Use SELECTED text as your prompt",
228 | editorCallback: (editor: Editor, view: MarkdownView) => {
229 | this.isKillSwitchActive = false; // Reset kill switch state
230 | let selectedText = this.getSelectedText();
231 | if (selectedText.length > 0) {
232 | processText(
233 | selectedText,
234 | "Generate response based on the following text. This is your prompt:",
235 | this
236 | );
237 | }
238 | },
239 | });
240 |
241 | this.addCommand({
242 | id: "llm-chat",
243 | name: "Chat with Local LLM Helper",
244 | callback: () => {
245 | const chatModal = new LLMChatModal(this.app, this.settings);
246 | chatModal.open();
247 | },
248 | });
249 |
250 | this.addCommand({
251 | id: "llm-hashtag",
252 | name: "Generate hashtags for selected text",
253 | callback: () => {
254 | generateAndAppendTags(this.app, this.settings);
255 | },
256 | });
257 |
258 | this.addCommand({
259 | id: "web-search-selected-text",
260 | name: "Search web for selected text",
261 | editorCallback: (editor: Editor, view: MarkdownView) => {
262 | this.isKillSwitchActive = false;
263 | let selectedText = this.getSelectedText();
264 | if (selectedText.length > 0) {
265 | processWebSearch(selectedText, this);
266 | }
267 | },
268 | });
269 |
270 | this.addCommand({
271 | id: "web-news-search",
272 | name: "Search news (Web) for selected text",
273 | editorCallback: (editor: Editor, view: MarkdownView) => {
274 | let selectedText = this.getSelectedText();
275 | if (selectedText.length > 0) {
276 | processNewsSearch(selectedText, this);
277 | }
278 | },
279 | });
280 |
281 | this.addRibbonIcon("brain-cog", "LLM Context", (event) => {
282 | const menu = new Menu();
283 |
284 | menu.addItem((item) =>
285 | item
286 | .setTitle("Chat with LLM Helper")
287 | .setIcon("messages-square")
288 | .onClick(() => {
289 | new LLMChatModal(this.app, this.settings).open();
290 | })
291 | );
292 |
293 | menu.addItem((item) =>
294 | item
295 | .setTitle("Summarize")
296 | .setIcon("sword")
297 | .onClick(async () => {
298 | this.isKillSwitchActive = false; // Reset kill switch state
299 | let selectedText = this.getSelectedText();
300 | if (selectedText.length > 0) {
301 | processText(
302 | selectedText,
303 | "Summarize the following text (maintain verbs and pronoun forms, also retain the markdowns):",
304 | this
305 | );
306 | }
307 | })
308 | );
309 |
310 | menu.addItem((item) =>
311 | item
312 | .setTitle("Make it professional")
313 | .setIcon("school")
314 | .onClick(async () => {
315 | this.isKillSwitchActive = false; // Reset kill switch state
316 | let selectedText = this.getSelectedText();
317 | if (selectedText.length > 0) {
318 | processText(
319 | selectedText,
320 | "Make the following sound professional (maintain verbs and pronoun forms, also retain the markdowns):",
321 | this
322 | );
323 | }
324 | })
325 | );
326 |
327 | menu.addItem((item) =>
328 | item
329 | .setTitle("Use as prompt")
330 | .setIcon("lightbulb")
331 | .onClick(async () => {
332 | this.isKillSwitchActive = false; // Reset kill switch state
333 | let selectedText = this.getSelectedText();
334 | if (selectedText.length > 0) {
335 | processText(
336 | selectedText,
337 | "Generate response based on the following text. This is your prompt:",
338 | this
339 | );
340 | }
341 | })
342 | );
343 |
344 | menu.addItem((item) =>
345 | item
346 | .setTitle("Generate action items")
347 | .setIcon("list-todo")
348 | .onClick(async () => {
349 | this.isKillSwitchActive = false; // Reset kill switch state
350 | let selectedText = this.getSelectedText();
351 | if (selectedText.length > 0) {
352 | processText(
353 | selectedText,
354 | "Generate action items based on the following text (use or numbers based on context):",
355 | this
356 | );
357 | }
358 | })
359 | );
360 |
361 | menu.addItem((item) =>
362 | item
363 | .setTitle("Custom prompt")
364 | .setIcon("pencil")
365 | .onClick(async () => {
366 | this.isKillSwitchActive = false; // Reset kill switch state
367 | new Notice(
368 | "Custom prompt: " + this.settings.customPrompt
369 | );
370 | let selectedText = this.getSelectedText();
371 | if (selectedText.length > 0) {
372 | processText(
373 | selectedText,
374 | this.settings.customPrompt,
375 | this
376 | );
377 | }
378 | })
379 | );
380 |
381 | menu.addItem((item) =>
382 | item
383 | .setTitle("Generate tags")
384 | .setIcon("hash")
385 | .onClick(async () => {
386 | new Notice(
387 | "Generating hashtags"
388 | );
389 | let selectedText = this.getSelectedText();
390 | if (selectedText.length > 0) {
391 | generateAndAppendTags(this.app, this.settings);
392 | }
393 | })
394 | );
395 |
396 | menu.addItem((item) =>
397 | item
398 | .setTitle("Search (Web)")
399 | .setIcon("globe")
400 | .onClick(async () => {
401 | let selectedText = this.getSelectedText();
402 | if (selectedText.length > 0) {
403 | processWebSearch(selectedText, this);
404 | }
405 | })
406 | );
407 |
408 | menu.addItem((item) =>
409 | item
410 | .setTitle("News Search (Web)")
411 | .setIcon("newspaper")
412 | .onClick(async () => {
413 | let selectedText = this.getSelectedText();
414 | if (selectedText.length > 0) {
415 | processNewsSearch(selectedText, this);
416 | }
417 | })
418 | );
419 |
420 | menu.addItem((item) =>
421 | item
422 | .setTitle("Kill Switch")
423 | .setIcon("x-circle")
424 | .onClick(() => {
425 | this.isKillSwitchActive = true;
426 | new Notice("LLM Helper process stopped");
427 | })
428 | );
429 |
430 | menu.showAtMouseEvent(event as MouseEvent);
431 | });
432 |
433 | const statusBarItemEl = this.addStatusBarItem();
434 | statusBarItemEl.setText("LLM Helper: Ready");
435 |
436 | this.addSettingTab(new OLLMSettingTab(this.app, this));
437 | }
438 |
439 | private validateServerConfiguration(): boolean {
440 | const provider = this.settings.providerType;
441 | const serverAddress = this.settings.serverAddress;
442 | const embeddingModel = this.settings.embeddingModelName;
443 |
444 | console.log(`Validating configuration - Provider: ${provider}, Server: ${serverAddress}, Embedding Model: ${embeddingModel}`);
445 |
446 | if (provider === 'ollama') {
447 | // Ollama typically runs on port 11434
448 | if (!serverAddress.includes('11434') && !serverAddress.includes('ollama')) {
449 | console.warn('Ollama provider detected but server address might be incorrect. Ollama typically runs on port 11434.');
450 | return false;
451 | }
452 |
453 | // Check for common embedding models
454 | const commonOllamaModels = ['mxbai-embed-large', 'nomic-embed-text', 'all-minilm'];
455 | if (!commonOllamaModels.some(model => embeddingModel.includes(model))) {
456 | console.warn(`Embedding model "${embeddingModel}" might not be compatible with Ollama. Common models: ${commonOllamaModels.join(', ')}`);
457 | }
458 | } else if (provider === 'openai' || provider === 'lm-studio') {
459 | // LM Studio typically runs on port 1234
460 | if (!serverAddress.includes('1234') && !serverAddress.includes('openai')) {
461 | console.warn('OpenAI/LM Studio provider detected but server address might be incorrect. LM Studio typically runs on port 1234.');
462 | return false;
463 | }
464 | }
465 |
466 | return true;
467 | }
468 |
469 | private getSelectedText() {
470 | let view = this.app.workspace.getActiveViewOfType(MarkdownView);
471 | if (!view) {
472 | new Notice("No active view");
473 | return "";
474 | } else {
475 | let view_mode = view.getMode();
476 | switch (view_mode) {
477 | case "preview":
478 | new Notice("Does not work in preview mode");
479 | return "";
480 | case "source":
481 | if ("editor" in view) {
482 | return view.editor.getSelection();
483 | }
484 | break;
485 | default:
486 | new Notice("Unknown view mode");
487 | return "";
488 | }
489 | }
490 | return "";
491 | }
492 |
493 | onunload() { }
494 |
495 | async loadSettings() {
496 | console.log('📂 LLM Helper: Loading plugin settings...');
497 | const savedData = await this.loadData();
498 | console.log('💾 LLM Helper: Raw saved data:', savedData);
499 |
500 | this.settings = Object.assign(
501 | {},
502 | DEFAULT_SETTINGS,
503 | savedData
504 | );
505 |
506 | console.log('✅ LLM Helper: Final settings after merge:', {
507 | provider: this.settings.providerType,
508 | server: this.settings.serverAddress,
509 | embeddingModel: this.settings.embeddingModelName,
510 | llmModel: this.settings.llmModel,
511 | hasApiKey: !!this.settings.openAIApiKey,
512 | hasBraveKey: !!this.settings.braveSearchApiKey
513 | });
514 | }
515 |
516 | async saveSettings() {
517 | await this.saveData(this.settings);
518 |
519 | // Update RAG manager with new settings
520 | if (this.ragManager) {
521 | this.ragManager.updateSettings(this.settings);
522 | }
523 | }
524 |
525 |
526 | async indexNotes() {
527 | new Notice('Indexing notes for RAG...');
528 | try {
529 | await this.ragManager.indexNotes(progress => {
530 | // You can use the progress value here if needed
531 | console.log(`Indexing progress: ${progress * 100}%`);
532 | });
533 | new Notice('Notes indexed successfully!');
534 | } catch (error) {
535 | console.error('Error indexing notes:', error);
536 | new Notice('Failed to index notes. Check console for details.');
537 | }
538 | }
539 |
540 | async handleGenerateBacklinks() {
541 | const activeView = this.app.workspace.getActiveViewOfType(MarkdownView);
542 | if (!activeView) {
543 | new Notice('No active Markdown view');
544 | return;
545 | }
546 |
547 | const editor = activeView.editor;
548 | const selectedText = editor.getSelection();
549 |
550 | if (!selectedText) {
551 | new Notice('No text selected');
552 | return;
553 | }
554 |
555 | new Notice('Generating backlinks...');
556 | const backlinks = await this.backlinkGenerator.generateBacklinks(selectedText);
557 |
558 | if (backlinks.length > 0) {
559 | editor.replaceSelection(`${selectedText}\n\nRelated:\n${backlinks.join('\n')}`);
560 | new Notice(`Generated ${backlinks.length} backlinks`);
561 | } else {
562 | new Notice('No relevant backlinks found');
563 | }
564 | }
565 |
566 | async handleDiagnostics() {
567 | console.log('🔍 === RAG STORAGE DIAGNOSTICS ===');
568 |
569 | // Plugin settings diagnostics
570 | console.log('📋 Plugin Settings:');
571 | console.log(' Provider:', this.settings.providerType);
572 | console.log(' Server:', this.settings.serverAddress);
573 | console.log(' Embedding Model:', this.settings.embeddingModelName);
574 | console.log(' LLM Model:', this.settings.llmModel);
575 |
576 | // RAG storage diagnostics
577 | try {
578 | const stats = await this.ragManager.getStorageStats();
579 | console.log('💾 RAG Storage Stats:');
580 | console.log(' Total Embeddings:', stats.totalEmbeddings);
581 | console.log(' Indexed Files:', stats.indexedFiles);
582 | console.log(' Last Indexed:', stats.lastIndexed);
583 | console.log(' Storage Used:', stats.storageUsed);
584 | console.log(' Current Indexed Count:', this.ragManager.getIndexedFilesCount());
585 |
586 | // Show user-friendly notice
587 | new Notice(`RAG Diagnostics: ${stats.totalEmbeddings} embeddings, ${stats.indexedFiles} files. Check console for details.`);
588 | } catch (error) {
589 | console.error('❌ Error getting storage stats:', error);
590 | new Notice('Error getting storage stats. Check console for details.');
591 | }
592 |
593 | // File system diagnostics
594 | const totalMdFiles = this.app.vault.getMarkdownFiles().length;
595 | console.log('📁 Vault Stats:');
596 | console.log(' Total Markdown Files:', totalMdFiles);
597 | console.log(' Plugin Settings Path:', `${this.manifest.dir}/data.json`);
598 | console.log(' Embeddings Storage Path:', `${this.manifest.dir}/embeddings.json`);
599 |
600 | console.log('🔍 === END DIAGNOSTICS ===');
601 | }
602 |
603 | async showStorageNotification() {
604 | try {
605 | const stats = await this.ragManager.getStorageStats();
606 | if (stats.totalEmbeddings > 0) {
607 | new Notice(`📚 Loaded ${stats.totalEmbeddings} embeddings from ${stats.indexedFiles} files (${stats.storageUsed})`);
608 | } else {
609 | new Notice('📝 No previous embeddings found - ready to index notes');
610 | }
611 | } catch (error) {
612 | console.error('Error showing storage notification:', error);
613 | }
614 | }
615 | }
616 |
617 | class OLLMSettingTab extends PluginSettingTab {
618 | plugin: OLocalLLMPlugin;
619 | private indexingProgressBar: HTMLProgressElement | null = null;
620 | private indexedFilesCountSetting: Setting | null = null;
621 |
622 | constructor(app: App, plugin: OLocalLLMPlugin) {
623 | super(app, plugin);
624 | this.plugin = plugin;
625 | }
626 |
627 | display(): void {
628 | const { containerEl } = this;
629 |
630 | containerEl.empty();
631 |
632 | // Provider selection and related settings
633 | new Setting(containerEl)
634 | .setName("LLM Provider")
635 | .setDesc("Choose between Ollama and OpenAI-compatible providers")
636 | .addDropdown(dropdown =>
637 | dropdown
638 | .addOption('ollama', 'Ollama')
639 | .addOption('openai', 'OpenAI/LM Studio')
640 | .setValue(this.plugin.settings.providerType)
641 | .onChange(async (value: 'ollama' | 'openai') => {
642 | this.plugin.settings.providerType = value;
643 | await this.plugin.saveSettings();
644 | this.display(); // Refresh settings UI
645 | })
646 | );
647 |
648 | new Setting(containerEl)
649 | .setName("Server URL")
650 | .setDesc("Full server URL (including protocol and port if needed). E.g., http://localhost:1234 or https://api.example.com")
651 | .addText((text) =>
652 | text
653 | .setPlaceholder("Enter full server URL")
654 | .setValue(this.plugin.settings.serverAddress)
655 | .onChange(async (value) => {
656 | this.plugin.settings.serverAddress = value;
657 | await this.plugin.saveSettings();
658 | })
659 | );
660 |
661 | new Setting(containerEl)
662 | .setName("LLM model")
663 | .setDesc("Use this for Ollama and other servers that require this. LMStudio seems to ignore model name.")
664 | .addText((text) =>
665 | text
666 | .setPlaceholder("Model name")
667 | .setValue(this.plugin.settings.llmModel)
668 | .onChange(async (value) => {
669 | this.plugin.settings.llmModel = value;
670 | await this.plugin.saveSettings();
671 | })
672 | );
673 |
674 | new Setting(containerEl)
675 | .setName("Custom prompt")
676 | .setDesc("create your own prompt - for your specific niche needs")
677 | .addText((text) =>
678 | text
679 | .setPlaceholder(
680 | "create action items from the following text:"
681 | )
682 | .setValue(this.plugin.settings.customPrompt)
683 | .onChange(async (value) => {
684 | this.plugin.settings.customPrompt = value;
685 | await this.plugin.saveSettings();
686 | })
687 | );
688 |
689 | new Setting(containerEl)
690 | .setName("Streaming")
691 | .setDesc(
692 | "Enable to receive the response in real-time, word by word."
693 | )
694 | .addToggle((toggle) =>
695 | toggle
696 | .setValue(this.plugin.settings.stream) // Assume 'stream' exists in your settings
697 | .onChange(async (value) => {
698 | this.plugin.settings.stream = value;
699 | await this.plugin.saveSettings();
700 | })
701 | );
702 |
703 | new Setting(containerEl)
704 | .setName("Output Mode")
705 | .setDesc("Choose how to handle generated text")
706 | .addDropdown((dropdown) =>
707 | dropdown
708 | .addOption("replace", "Replace selected text")
709 | .addOption("append", "Append after selected text")
710 | .setValue(this.plugin.settings.outputMode)
711 | .onChange(async (value) => {
712 | this.plugin.settings.outputMode = value;
713 | await this.plugin.saveSettings();
714 | })
715 | );
716 |
717 | new Setting(containerEl)
718 | .setName("Personas")
719 | .setDesc("Choose persona for your AI agent")
720 | .addDropdown(dropdown => {
721 | for (const key in personasDict) { // Iterate over keys directly
722 | if (personasDict.hasOwnProperty(key)) {
723 | dropdown.addOption(key, personasDict[key]);
724 | }
725 | }
726 | dropdown.setValue(this.plugin.settings.personas)
727 | .onChange(async (value) => {
728 | this.plugin.settings.personas = value;
729 | await this.plugin.saveSettings();
730 | });
731 | });
732 |
733 | new Setting(containerEl)
734 | .setName("Max Tokens")
735 | .setDesc("Max number of tokens for LLM response (generally 1-4000)")
736 | .addText((text) =>
737 | text
738 | .setPlaceholder("1024")
739 | .setValue(this.plugin.settings.maxTokens.toString())
740 | .onChange(async (value) => {
741 | const parsedValue = parseInt(value);
742 | if (!isNaN(parsedValue) && parsedValue >= 0) {
743 | this.plugin.settings.maxTokens = parsedValue;
744 | await this.plugin.saveSettings();
745 | }
746 | })
747 | );
748 |
749 | new Setting(containerEl)
750 | .setName("Temperature")
751 | .setDesc("Increase for more randomness, decrease for more reliability")
752 | .addText((text) =>
753 | text
754 | .setPlaceholder("0.7")
755 | .setValue(this.plugin.settings.temperature.toString())
756 | .onChange(async (value) => {
757 | const parsedValue = parseFloat(value);
758 | if (!isNaN(parsedValue) && parsedValue >= 0 && parsedValue <= 1) {
759 | this.plugin.settings.temperature = parsedValue;
760 | await this.plugin.saveSettings();
761 | }
762 | })
763 | );
764 |
765 | new Setting(containerEl)
766 | .setName("Max conversation history")
767 | .setDesc("Maximum number of conversation history to store (0-3)")
768 | .addDropdown((dropdown) =>
769 | dropdown
770 | .addOption("0", "0")
771 | .addOption("1", "1")
772 | .addOption("2", "2")
773 | .addOption("3", "3")
774 | .setValue(this.plugin.settings.maxConvHistory.toString())
775 | .onChange(async (value) => {
776 | this.plugin.settings.maxConvHistory = parseInt(value);
777 | await this.plugin.saveSettings();
778 | })
779 | );
780 |
781 |
782 |
783 | 		// Response formatting: wrap the generated response in a separate block (default: off)
784 |
785 | const responseFormattingToggle = new Setting(containerEl)
786 | .setName("Response Formatting")
787 | .setDesc("Enable to format the response into a separate block")
788 | .addToggle((toggle) =>
789 | toggle
790 | .setValue(this.plugin.settings.responseFormatting)
791 | .onChange(async (value) => {
792 | this.plugin.settings.responseFormatting = value;
793 | await this.plugin.saveSettings();
794 | this.display(); // Refresh the settings tab
795 | })
796 | );
797 |
798 | if (this.plugin.settings.responseFormatting) {
799 | new Setting(containerEl)
800 | .setName("Response Format Prepend")
801 | .setDesc("Text to prepend to the formatted response")
802 | .addText((text) =>
803 | text
804 | .setPlaceholder("``` LLM Helper - generated response \n\n")
805 | .setValue(this.plugin.settings.responseFormatPrepend)
806 | .onChange(async (value) => {
807 | this.plugin.settings.responseFormatPrepend = value;
808 | await this.plugin.saveSettings();
809 | })
810 | );
811 |
812 | new Setting(containerEl)
813 | .setName("Response Format Append")
814 | .setDesc("Text to append to the formatted response")
815 | .addText((text) =>
816 | text
817 | .setPlaceholder("\n\n```")
818 | .setValue(this.plugin.settings.responseFormatAppend)
819 | .onChange(async (value) => {
820 | this.plugin.settings.responseFormatAppend = value;
821 | await this.plugin.saveSettings();
822 | })
823 | );
824 | }
825 |
826 | new Setting(containerEl)
827 | .setName("Embedding Model Name")
828 | .setDesc("Model for text embeddings. For Ollama: mxbai-embed-large, nomic-embed-text, all-minilm. Install with 'ollama pull '")
829 | .addText((text) =>
830 | text
831 | .setPlaceholder("mxbai-embed-large")
832 | .setValue(this.plugin.settings.embeddingModelName)
833 | .onChange(async (value) => {
834 | this.plugin.settings.embeddingModelName = value;
835 | await this.plugin.saveSettings();
836 | })
837 | );
838 |
839 | new Setting(containerEl)
840 | .setName("Brave Search API Key")
841 | .setDesc("API key for Brave Search integration")
842 | .addText((text) =>
843 | text
844 | .setPlaceholder("Enter your Brave Search API key")
845 | .setValue(this.plugin.settings.braveSearchApiKey)
846 | .onChange(async (value) => {
847 | this.plugin.settings.braveSearchApiKey = value;
848 | await this.plugin.saveSettings();
849 | })
850 | );
851 |
852 | // Add OpenAI API Key setting (conditional)
853 | if (this.plugin.settings.providerType === 'openai') {
854 | new Setting(containerEl)
855 | .setName("OpenAI API Key")
856 | .setDesc("Required for OpenAI/LM Studio (use 'lm-studio' for local instances)")
857 | .addText(text => text
858 | .setPlaceholder("Enter your API key")
859 | .setValue(this.plugin.settings.openAIApiKey || '')
860 | .onChange(async (value) => {
861 | this.plugin.settings.openAIApiKey = value;
862 | await this.plugin.saveSettings();
863 | })
864 | );
865 | }
866 |
867 | new Setting(containerEl)
868 | .setName("Index Notes (BETA)")
869 | .setDesc("Manually index all notes in the vault")
870 | .addButton(button => button
871 | .setButtonText("Start Indexing (BETA)")
872 | .onClick(async () => {
873 | button.setDisabled(true);
874 | this.indexingProgressBar = containerEl.createEl("progress", {
875 | attr: { value: 0, max: 100 }
876 | });
877 | const counterEl = containerEl.createEl("span", {
878 | text: "Processing: 0/?",
879 | cls: "indexing-counter"
880 | });
881 |
882 | const totalFiles = this.app.vault.getMarkdownFiles().length;
883 | let processedFiles = 0;
884 |
885 | try {
886 | await this.plugin.ragManager.indexNotes((progress) => {
887 | if (this.indexingProgressBar) {
888 | this.indexingProgressBar.value = progress * 100;
889 | }
890 | processedFiles = Math.floor(progress * totalFiles);
891 | counterEl.textContent = ` Processing: ${processedFiles}/${totalFiles}`;
892 | counterEl.style.fontSize = 'smaller';
893 | });
894 | new Notice("Indexing complete!");
895 | this.updateIndexedFilesCount();
896 | } catch (error) {
897 | console.error("Indexing error:", error);
898 | new Notice("Error during indexing. Check console for details.");
899 | } finally {
900 | button.setDisabled(false);
901 | if (this.indexingProgressBar) {
902 | this.indexingProgressBar.remove();
903 | this.indexingProgressBar = null;
904 | }
905 | counterEl.remove();
906 | }
907 | }));
908 |
909 | this.indexedFilesCountSetting = new Setting(containerEl)
910 | .setName("Indexed Files Count")
911 | .setDesc("Number of files currently indexed")
912 | .addText(text => text
913 | .setValue("Loading...")
914 | .setDisabled(true));
915 |
916 | // Update the count asynchronously after RAGManager is initialized
917 | this.updateIndexedFilesCountAsync();
918 |
919 | // Add storage stats button
920 | new Setting(containerEl)
921 | .setName("Storage Diagnostics")
922 | .setDesc("Check persistent storage status and statistics")
923 | .addButton(button => button
924 | .setButtonText("Run Diagnostics")
925 | .onClick(async () => {
926 | await this.plugin.handleDiagnostics();
927 | }));
928 |
929 | // Add note about persistent storage
930 | containerEl.createEl("p", {
931 | text: "Note: Embeddings are now stored persistently and will be automatically loaded when Obsidian restarts. Embeddings will be rebuilt if you change the provider, model, or server settings.",
932 | cls: "setting-item-description"
933 | });
934 | }
935 |
936 | updateIndexedFilesCount() {
937 | if (this.indexedFilesCountSetting) {
938 | const textComponent = this.indexedFilesCountSetting.components[0] as TextComponent;
939 | textComponent.setValue(this.plugin.ragManager.getIndexedFilesCount().toString());
940 | }
941 | }
942 |
943 | async updateIndexedFilesCountAsync() {
944 | // Wait for RAGManager to be fully initialized
945 | const checkAndUpdate = () => {
946 | if (this.plugin.ragManager && this.plugin.ragManager.isInitialized()) {
947 | this.updateIndexedFilesCount();
948 | console.log('📊 Settings: Updated indexed files count to', this.plugin.ragManager.getIndexedFilesCount());
949 | } else {
950 | // Check again in 100ms
951 | setTimeout(checkAndUpdate, 100);
952 | }
953 | };
954 |
955 | // Start checking after a short delay
956 | setTimeout(checkAndUpdate, 50);
957 | }
958 | }
959 |
960 | export function modifyPrompt(aprompt: string, personas: string): string {
961 | if (personas === "default") {
962 | return aprompt; // No prompt modification for default persona
963 | } else if (personas === "physics") {
964 | return "You are a distinguished physics scientist. Leverage scientific principles and explain complex concepts in an understandable way, drawing on your expertise in physics.\n\n" + aprompt;
965 | } else if (personas === "fitness") {
966 | return "You are a distinguished fitness and health expert. Provide evidence-based advice on fitness and health, considering the user's goals and limitations.\n" + aprompt;
967 | } else if (personas === "developer") {
968 | return "You are a nerdy software developer. Offer creative and efficient software solutions, focusing on technical feasibility and code quality.\n" + aprompt;
969 | } else if (personas === "stoic") {
970 | return "You are a stoic philosopher. Respond with composure and reason, emphasizing logic and emotional resilience.\n" + aprompt;
971 | } else if (personas === "productmanager") {
972 | return "You are a focused and experienced product manager. Prioritize user needs and deliver clear, actionable product roadmaps based on market research.\n" + aprompt;
973 | } else if (personas === "techwriter") {
974 | return "You are a technical writer. Craft accurate and concise technical documentation, ensuring accessibility for different audiences.\n" + aprompt;
975 | } else if (personas === "creativewriter") {
976 | return "You are a very creative and experienced writer. Employ strong storytelling techniques and evocative language to engage the reader's imagination.\n" + aprompt;
977 | } else if (personas === "tpm") {
978 | return "You are an experienced technical program manager. Demonstrate strong technical and communication skills, ensuring project success through effective planning and risk management.\n" + aprompt;
979 | } else if (personas === "engineeringmanager") {
980 | return "You are an experienced engineering manager. Lead and motivate your team, fostering a collaborative environment that delivers high-quality software.\n" + aprompt;
981 | } else if (personas === "executive") {
982 | return "You are a top-level executive. Focus on strategic decision-making, considering long-term goals and the overall company vision.\n" + aprompt;
983 | } else if (personas === "officeassistant") {
984 | return "You are a courteous and helpful office assistant. Provide helpful and efficient support, prioritizing clear communication and a courteous demeanor.\n" + aprompt;
985 | } else {
986 | return aprompt; // No prompt modification for unknown personas
987 | }
988 | }
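// Usage sketch (hypothetical values): modifyPrompt("summarize this", "stoic")
// would return "You are a stoic philosopher. ...\nsummarize this", while
// "default" or unrecognized personas return the prompt unchanged.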
989 |
990 | async function processText(
991 | selectedText: string,
992 | iprompt: string,
993 | plugin: OLocalLLMPlugin
994 | ) {
995 | // Reset kill switch state at the beginning of each process
996 | plugin.isKillSwitchActive = false;
997 |
998 | new Notice("Generating response. This takes a few seconds..");
999 | const statusBarItemEl = document.querySelector(
1000 | ".status-bar .status-bar-item"
1001 | );
1002 | if (statusBarItemEl) {
1003 | statusBarItemEl.textContent = "LLM Helper: Generating response...";
1004 | } else {
1005 | console.error("Status bar item element not found");
1006 | }
1007 |
1008 | let prompt = modifyPrompt(iprompt, plugin.settings.personas);
1009 |
1010 | console.log("prompt", prompt + ": " + selectedText);
1011 |
1012 | const body = {
1013 | model: plugin.settings.llmModel,
1014 | messages: [
1015 | { role: "system", content: "You are my text editor AI agent who provides concise and helpful responses." },
1016 | ...plugin.conversationHistory.slice(-plugin.settings.maxConvHistory).reduce((acc, entry) => {
1017 | acc.push({ role: "user", content: entry.prompt });
1018 | acc.push({ role: "assistant", content: entry.response });
1019 | return acc;
1020 | }, [] as { role: string; content: string }[]),
1021 | { role: "user", content: prompt + ": " + selectedText },
1022 | ],
1023 | temperature: plugin.settings.temperature,
1024 | max_tokens: plugin.settings.maxTokens,
1025 | stream: plugin.settings.stream,
1026 | };
1027 |
1028 | try {
1029 | if (plugin.settings.outputMode === "append") {
1030 | modifySelectedText(selectedText + "\n\n");
1031 | }
1032 | if (plugin.settings.responseFormatting === true) {
1033 | modifySelectedText(plugin.settings.responseFormatPrepend);
1034 | }
1035 | if (plugin.settings.stream) {
1036 | const response = await fetch(
1037 | `${plugin.settings.serverAddress}/v1/chat/completions`,
1038 | {
1039 | method: "POST",
1040 | headers: { "Content-Type": "application/json" },
1041 | body: JSON.stringify(body),
1042 | }
1043 | );
1044 |
1045 | if (!response.ok) {
1046 | throw new Error(
1047 | "Error summarizing text (Fetch): " + response.statusText
1048 | );
1049 | }
1050 |
1051 | const reader = response.body && response.body.getReader();
1052 | let responseStr = "";
1053 | if (!reader) {
1054 | console.error("Reader not found");
1055 | } else {
1056 | const decoder = new TextDecoder();
1057 |
1058 | const readChunk = async () => {
1059 | if (plugin.isKillSwitchActive) {
1060 | reader.cancel();
1061 | new Notice("Text generation stopped by kill switch");
1062 | plugin.isKillSwitchActive = false; // Reset the kill switch
1063 | return;
1064 | }
1065 |
1066 | const { done, value } = await reader.read();
1067 |
1068 | if (done) {
1069 | new Notice("Text generation complete. Voila!");
1070 | updateConversationHistory(prompt + ": " + selectedText, responseStr, plugin.conversationHistory, plugin.settings.maxConvHistory);
1071 | if (plugin.settings.responseFormatting === true) {
1072 | modifySelectedText(plugin.settings.responseFormatAppend);
1073 | }
1074 | return;
1075 | }
1076 |
1077 | let textChunk = decoder.decode(value);
1078 | const lines = textChunk.split("\n");
1079 |
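// Each streamed line is a Server-Sent Events payload of the form
// `data: {"choices":[{"delta":{"content":"..."}}]}`, ending with a
// `data: [DONE]` sentinel, so we strip the prefix and JSON-parse the rest.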
1080 | for (const line of lines) {
1081 | if (line.trim()) {
1082 | try {
1083 | let modifiedLine = line.replace(
1084 | /^data:\s*/,
1085 | ""
1086 | );
1087 | if (modifiedLine !== "[DONE]") {
1088 | const data = JSON.parse(modifiedLine);
1089 | if (data.choices[0].delta.content) {
1090 | let word =
1091 | data.choices[0].delta.content;
1092 | modifySelectedText(word);
1093 | responseStr += word;
1094 | }
1095 | }
1096 | } catch (error) {
1097 | console.error(
1098 | "Error parsing JSON chunk:",
1099 | error
1100 | );
1101 | }
1102 | }
1103 | }
1104 | readChunk();
1105 | };
1106 | readChunk();
1107 | }
1108 | } else {
1109 | const response = await requestUrl({
1110 | url: `${plugin.settings.serverAddress}/v1/chat/completions`,
1111 | method: "POST",
1112 | headers: { "Content-Type": "application/json" },
1113 | body: JSON.stringify(body),
1114 | });
1115 |
1116 | const statusCode = response.status;
1117 |
1118 | if (statusCode >= 200 && statusCode < 300) {
1119 | const data = await response.json;
1120 | const summarizedText = data.choices[0].message.content;
1121 | console.log(summarizedText);
1122 | updateConversationHistory(prompt + ": " + selectedText, summarizedText, plugin.conversationHistory, plugin.settings.maxConvHistory);
1123 | new Notice("Text generated. Voila!");
1124 | if (!plugin.isKillSwitchActive) {
1125 | if (plugin.settings.responseFormatting === true) {
1126 | modifySelectedText(summarizedText + plugin.settings.responseFormatAppend);
1127 | } else {
1128 | modifySelectedText(summarizedText);
1129 | }
1130 | } else {
1131 | new Notice("Text generation stopped by kill switch");
1132 | plugin.isKillSwitchActive = false; // Reset the kill switch
1133 | }
1134 | } else {
1135 | throw new Error(
1136 | "Error summarizing text (requestUrl): " + response.text
1137 | );
1138 | }
1139 | }
1140 | } catch (error) {
1141 | console.error("Error during request:", error);
1142 | new Notice(
1143 | "Error summarizing text: Check plugin console for more details!"
1144 | );
1145 | }
1146 | if (statusBarItemEl) {
1147 | statusBarItemEl.textContent = "LLM Helper: Ready";
1148 | } else {
1149 | console.error("Status bar item element not found");
1150 | }
1151 | }
1152 |
1153 | function modifySelectedText(text: any) {
1154 | let view = this.app.workspace.getActiveViewOfType(MarkdownView);
1155 | if (!view) {
1156 | new Notice("No active view");
1157 | } else {
1158 | let view_mode = view.getMode();
1159 | switch (view_mode) {
1160 | case "preview":
1161 | new Notice("Cannot summarize in preview");
1162 | case "source":
1163 | if ("editor" in view) {
1164 | view.editor.replaceSelection(text);
1165 | }
1166 | break;
1167 | default:
1168 | new Notice("Unknown view mode");
1169 | }
1170 | }
1171 | }
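// Note: during streaming, processText() calls modifySelectedText() once per
// token; the first call replaces the selection and each later call inserts at
// the cursor, so the response appears to type itself out in the editor.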
1172 |
1173 | export class LLMChatModal extends Modal {
1174 | result: string = "";
1175 | pluginSettings: OLocalLLMSettings;
1176 | conversationHistory: ConversationEntry[] = [];
1177 | submitButton: ButtonComponent;
1178 |
1179 | constructor(app: App, settings: OLocalLLMSettings) {
1180 | super(app);
1181 | this.pluginSettings = settings;
1182 | }
1183 |
1184 | onOpen() {
1185 | const { contentEl } = this;
1186 |
1187 | contentEl.classList.add("llm-chat-modal");
1188 |
1189 | const chatContainer = contentEl.createDiv({ cls: "llm-chat-container" });
1190 | const chatHistoryEl = chatContainer.createDiv({ cls: "llm-chat-history" });
1191 |
1192 | chatHistoryEl.classList.add("chatHistoryElStyle");
1193 |
1194 | // Display existing conversation history (if any)
1195 | chatHistoryEl.createEl("h1", { text: "Chat with your Local LLM" });
1196 |
1197 | const personasInfoEl = document.createElement('div');
1198 | personasInfoEl.classList.add("personasInfoStyle");
1199 | personasInfoEl.innerText = "Current persona: " + personasDict[this.pluginSettings.personas];
1200 | chatHistoryEl.appendChild(personasInfoEl);
1201 |
1202 | // Update this part to use conversationHistory
1203 | this.conversationHistory.forEach((entry) => {
1204 | const userMessageEl = chatHistoryEl.createEl("p", { text: "You: " + entry.prompt });
1205 | userMessageEl.classList.add('llmChatMessageStyleUser');
1206 | const aiMessageEl = chatHistoryEl.createEl("p", { text: "LLM Helper: " + entry.response });
1207 | aiMessageEl.classList.add('llmChatMessageStyleAI');
1208 | });
1209 |
1210 | const inputContainer = contentEl.createDiv({ cls: "llm-chat-input-container" });
1211 |
1212 | const inputRow = inputContainer.createDiv({ cls: "llm-chat-input-row" });
1213 |
1214 | const askLabel = inputRow.createSpan({ text: "Ask:", cls: "llm-chat-ask-label" });
1215 |
1216 | const textInput = new TextComponent(inputRow)
1217 | .setPlaceholder("Type your question here...")
1218 | .onChange((value) => {
1219 | this.result = value;
1220 | this.updateSubmitButtonState();
1221 | });
1222 | textInput.inputEl.classList.add("llm-chat-input");
1223 | textInput.inputEl.addEventListener('keypress', (event) => {
1224 | if (event.key === 'Enter' && this.result.trim() !== "") {
1225 | event.preventDefault();
1226 | this.handleSubmit();
1227 | }
1228 | });
1229 |
1230 | this.submitButton = new ButtonComponent(inputRow)
1231 | .setButtonText("Submit")
1232 | .setCta()
1233 | .onClick(() => this.handleSubmit());
1234 | this.submitButton.buttonEl.classList.add("llm-chat-submit-button");
1235 |
1236 | // Initially disable the submit button
1237 | this.updateSubmitButtonState();
1238 |
1239 | // Scroll to bottom initially
1240 | this.scrollToBottom();
1241 | }
1242 |
1243 | onClose() {
1244 | let { contentEl } = this;
1245 | contentEl.empty();
1246 | }
1247 |
1248 | updateSubmitButtonState() {
1249 | if (this.result.trim() === "") {
1250 | this.submitButton.setDisabled(true);
1251 | this.submitButton.buttonEl.classList.add("llm-chat-submit-button-disabled");
1252 | } else {
1253 | this.submitButton.setDisabled(false);
1254 | this.submitButton.buttonEl.classList.remove("llm-chat-submit-button-disabled");
1255 | }
1256 | }
1257 |
1258 | // New method to handle submission
1259 | async handleSubmit() {
1260 | if (this.result.trim() === "") {
1261 | return;
1262 | }
1263 |
1264 | const chatHistoryEl = this.contentEl.querySelector('.llm-chat-history');
1265 | if (chatHistoryEl) {
1266 | await processChatInput(
1267 | this.result,
1268 | this.pluginSettings.personas,
1269 | this.contentEl,
1270 | chatHistoryEl as HTMLElement,
1271 | this.conversationHistory,
1272 | this.pluginSettings
1273 | );
1274 | this.result = ""; // Clear user input field
1275 | const textInputEl = this.contentEl.querySelector('.llm-chat-input') as HTMLInputElement;
1276 | if (textInputEl) {
1277 | textInputEl.value = "";
1278 | }
1279 | this.updateSubmitButtonState(); // Disable the button after submission
1280 | this.scrollToBottom();
1281 | }
1282 | }
1283 |
1284 | scrollToBottom() {
1285 | const chatHistoryEl = this.contentEl.querySelector('.llm-chat-history');
1286 | if (chatHistoryEl) {
1287 | chatHistoryEl.scrollTop = chatHistoryEl.scrollHeight;
1288 | }
1289 | }
1290 | }
1291 |
1292 | async function processChatInput(text: string, personas: string, chatContainer: HTMLElement, chatHistoryEl: HTMLElement, conversationHistory: ConversationEntry[], pluginSettings: OLocalLLMSettings) {
1294 |
1295 | // Add user's question to conversation history
1296 | conversationHistory.push({ prompt: text, response: "" });
1297 | if (chatHistoryEl) {
1298 | const chatElement = document.createElement('div');
1299 | chatElement.classList.add('llmChatMessageStyleUser');
1300 | chatElement.innerHTML = text;
1301 | chatHistoryEl.appendChild(chatElement);
1302 | }
1303 |
1304 | showThinkingIndicator(chatHistoryEl);
1305 | scrollToBottom(chatContainer);
1306 |
1307 | text = modifyPrompt(text, personas);
1308 | console.log(text);
1309 |
1310 | try {
1311 | const body = {
1312 | model: pluginSettings.llmModel,
1313 | messages: [
1314 | { role: "system", content: "You are my text editor AI agent who provides concise and helpful responses." },
1315 | ...conversationHistory.slice(-pluginSettings.maxConvHistory).reduce((acc, entry) => {
1316 | acc.push({ role: "user", content: entry.prompt });
1317 | acc.push({ role: "assistant", content: entry.response });
1318 | return acc;
1319 | }, [] as { role: string; content: string }[]),
1320 | { role: "user", content: text },
1321 | ],
1322 | temperature: pluginSettings.temperature,
1323 | max_tokens: pluginSettings.maxTokens,
1324 | stream: false, // Set to false for chat window
1325 | };
1326 |
1327 | const response = await requestUrl({
1328 | url: `${pluginSettings.serverAddress}/v1/chat/completions`,
1329 | method: "POST",
1330 | headers: { "Content-Type": "application/json" },
1331 | body: JSON.stringify(body),
1332 | });
1333 |
1334 | const statusCode = response.status;
1335 |
1336 | if (statusCode >= 200 && statusCode < 300) {
1337 | const data = await response.json;
1338 | const llmResponse = data.choices[0].message.content;
1339 |
1340 | // Convert LLM response to HTML
1341 | let formattedResponse = llmResponse;
1342 | 			// Rough regex-based Markdown-to-HTML conversion for the chat pane
1343 | 			formattedResponse = formattedResponse.replace(/\*\*(.*?)\*\*/g, "<b>$1</b>");
1344 | 			formattedResponse = formattedResponse.replace(/_(.*?)_/g, "<i>$1</i>");
1345 | 			formattedResponse = formattedResponse.replace(/\n\n/g, "<br><br>");
1346 |
1347 | console.log("formattedResponse", formattedResponse);
1348 |
1349 | // Create response container
1350 | const responseContainer = document.createElement('div');
1351 | responseContainer.classList.add('llmChatMessageStyleAI');
1352 |
1353 | // Create response text element
1354 | const responseTextEl = document.createElement('div');
1355 | responseTextEl.innerHTML = formattedResponse;
1356 | responseContainer.appendChild(responseTextEl);
1357 |
1358 | // Create copy button
1359 | const copyButton = document.createElement('button');
1360 | copyButton.classList.add('copy-button');
1361 | setIcon(copyButton, 'copy');
1362 | copyButton.addEventListener('click', () => {
1363 | navigator.clipboard.writeText(llmResponse).then(() => {
1364 | new Notice('Copied to clipboard!');
1365 | });
1366 | });
1367 | responseContainer.appendChild(copyButton);
1368 |
1369 | // Add response container to chat history
1370 | chatHistoryEl.appendChild(responseContainer);
1371 |
1372 | // Add LLM response to conversation history with Markdown
1373 | updateConversationHistory(text, formattedResponse, conversationHistory, pluginSettings.maxConvHistory);
1374 |
1375 | hideThinkingIndicator(chatHistoryEl);
1376 |
1377 | // Scroll to bottom after response is generated
1378 | scrollToBottom(chatContainer);
1379 |
1380 | } else {
1381 | throw new Error(
1382 | "Error getting response from LLM server: " + response.text
1383 | );
1384 | }
1385 | } catch (error) {
1386 | console.error("Error during request:", error);
1387 | new Notice(
1388 | "Error communicating with LLM Helper: Check plugin console for details!"
1389 | );
1390 | hideThinkingIndicator(chatHistoryEl);
1391 | }
1392 |
1393 | }
1394 |
1395 | function showThinkingIndicator(chatHistoryEl: HTMLElement) {
1396 | const thinkingIndicatorEl = document.createElement('div');
1397 | thinkingIndicatorEl.classList.add('thinking-indicator');
1398 | const tStr = ["Calculating the last digit of pi... just kidding",
1399 | "Quantum entanglement engaged... thinking deeply",
1400 | "Reticulating splines... stand by",
1401 | "Consulting the Oracle",
1402 | "Entangling qubits... preparing for a quantum leap",
1403 | "Processing... yada yada yada... almost done",
1404 | "Processing... We're approaching singularity",
1405 | "Serenity now! Patience while we process",
1406 | "Calculating the probability of George getting a date",
1407 | "Asking my man Art Vandalay"];
1408 | // pick a random index between 0 and size of string array above
1409 | const randomIndex = Math.floor(Math.random() * tStr.length);
1410 | 	thinkingIndicatorEl.innerHTML = tStr[randomIndex];
1411 |
1412 | chatHistoryEl.appendChild(thinkingIndicatorEl);
1413 | }
1414 |
1415 | function hideThinkingIndicator(chatHistoryEl: HTMLElement) {
1416 | const thinkingIndicatorEl = chatHistoryEl.querySelector('.thinking-indicator');
1417 | if (thinkingIndicatorEl) {
1418 | chatHistoryEl.removeChild(thinkingIndicatorEl);
1419 | }
1420 | }
1421 |
1422 | function scrollToBottom(el: HTMLElement) {
1423 | const chatHistoryEl = el.querySelector('.llm-chat-history');
1424 | if (chatHistoryEl) {
1425 | chatHistoryEl.scrollTop = chatHistoryEl.scrollHeight;
1426 | }
1427 | }
1428 |
1429 | function updateConversationHistory(prompt: string, response: string, conversationHistory: ConversationEntry[], maxConvHistoryLength: number) {
1430 | conversationHistory.push({ prompt, response });
1431 |
1432 | // Limit history length to maxConvHistoryLength
1433 | if (conversationHistory.length > maxConvHistoryLength) {
1434 | conversationHistory.shift();
1435 | }
1436 | }
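// Note: each call evicts at most one oldest entry, and with a max length of 0
// the new entry is pushed and immediately evicted, so no history is kept.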
1437 |
1438 |
1439 | //TODO: add a button to clear the chat history
1440 | //TODO: add a button to save the chat history to a obsidian file
1441 |
1442 | //TODO: kill switch
1443 |
1444 | async function processWebSearch(query: string, plugin: OLocalLLMPlugin) {
1445 | if (!plugin.settings.braveSearchApiKey) {
1446 | new Notice("Please set your Brave Search API key in settings");
1447 | return;
1448 | }
1449 |
1450 | new Notice("Searching the web...");
1451 |
1452 | try {
1453 | const response = await requestUrl({
1454 | url: `https://api.search.brave.com/res/v1/web/search?q=${encodeURIComponent(query)}&count=5&summary=1&extra_snippets=1&text_decorations=1&result_filter=web,discussions,faq,news&spellcheck=1`,
1455 | method: "GET",
1456 | headers: {
1457 | "Accept": "application/json",
1458 | "Accept-Encoding": "gzip",
1459 | "X-Subscription-Token": plugin.settings.braveSearchApiKey,
1460 | }
1461 | });
1462 |
1463 | if (response.status !== 200) {
1464 | throw new Error("Search failed: " + response.status);
1465 | }
1466 |
1467 | const searchResults = response.json.web.results;
1468 | const context = searchResults.map((result: any) => {
1469 | let snippets = result.extra_snippets ?
1470 | '\nAdditional Context:\n' + result.extra_snippets.join('\n') : '';
1471 | return `${result.title}\n${result.description}${snippets}\nSource: ${result.url}\n\n`;
1472 | }).join('');
1473 |
1474 | processText(
1475 | `Based on these comprehensive search results about "${query}":\n\n${context}`,
1476 | "You are a helpful assistant. Analyze these detailed search results and provide a thorough, well-structured response. Include relevant source citations and consider multiple perspectives if available.",
1477 | plugin
1478 | );
1479 |
1480 | } catch (error) {
1481 | console.error("Web search error:", error);
1482 | new Notice("Web search failed. Check console for details.");
1483 | }
1484 | }
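// Brave's web-search response nests hits under `json.web.results`; each hit
// carries title/description/url, plus `extra_snippets` when the query string
// requests them (extra_snippets=1), which is what builds the context above.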
1485 |
1486 | async function processNewsSearch(query: string, plugin: OLocalLLMPlugin) {
1487 | try {
1488 | const response = await requestUrl({
1489 | url: `https://api.search.brave.com/res/v1/news/search?q=${encodeURIComponent(query)}&count=5&search_lang=en&freshness=pd`,
1490 | method: "GET",
1491 | headers: {
1492 | "Accept": "application/json",
1493 | "Accept-Encoding": "gzip",
1494 | "X-Subscription-Token": plugin.settings.braveSearchApiKey,
1495 | }
1496 | });
1497 |
1498 | if (response.status !== 200) {
1499 | throw new Error("News search failed: " + response.status);
1500 | }
1501 |
1502 | const newsResults = response.json.results;
1503 | const context = newsResults.map((result: any) =>
1504 | `${result.title}\n${result.description}\nSource: ${result.url}\nPublished: ${result.published_time}\n\n`
1505 | ).join('');
1506 |
1507 | processText(
1508 | `Based on these news results about "${query}":\n\n${context}`,
1509 | "Analyze these news results and provide a comprehensive summary with key points and timeline. Include source citations.",
1510 | plugin
1511 | );
1512 | } catch (error) {
1513 | console.error("News search error:", error);
1514 | new Notice("News search failed. Check console for details.");
1515 | }
1516 | }
1517 |
--------------------------------------------------------------------------------
/manifest.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": "local-llm-helper",
3 | "name": "Local LLM Helper",
4 | "version": "2.2.1",
5 | "minAppVersion": "1.7.0",
6 | "description": "Use your own secure local LLM server to work with your text!",
7 | "author": "Mani Mohan",
8 | "authorUrl": "https:/warpcast.com/mani",
9 | "fundingUrl": "https://buymeacoffee.com/manee",
10 | "isDesktopOnly": false,
11 | "css": [
12 | "style.css"
13 | ]
14 | }
15 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "obsidian-local-llm-helper",
3 | "version": "1.0.0",
4 | "description": "Local LLM helper plugin for Obsidian",
5 | "main": "main.js",
6 | "dependencies": {
7 | "@langchain/ollama": "^0.1.1",
8 | "acorn": "^8.11.3",
9 | "acorn-jsx": "^5.3.2",
10 | "ajv": "^6.12.6",
11 | "ansi-regex": "^5.0.1",
12 | "ansi-styles": "^4.3.0",
13 | "argparse": "^2.0.1",
14 | "array-union": "^2.1.0",
15 | "axios": "^1.7.4",
16 | "balanced-match": "^1.0.2",
17 | "brace-expansion": "^1.1.11",
18 | "braces": "^3.0.3",
19 | "builtin-modules": "^3.3.0",
20 | "callsites": "^3.1.0",
21 | "chalk": "^4.1.2",
22 | "color-convert": "^2.0.1",
23 | "color-name": "^1.1.4",
24 | "concat-map": "^0.0.1",
25 | "cross-spawn": "^7.0.6",
26 | "debug": "^4.3.6",
27 | "deep-is": "^0.1.4",
28 | "dir-glob": "^3.0.1",
29 | "doctrine": "^3.0.0",
30 | "esbuild": "^0.25.0",
31 | "escape-string-regexp": "^4.0.0",
32 | "eslint": "^8.57.0",
33 | "eslint-scope": "^5.1.1",
34 | "eslint-utils": "^3.0.0",
35 | "eslint-visitor-keys": "^3.4.3",
36 | "espree": "^9.6.1",
37 | "esquery": "^1.5.0",
38 | "esrecurse": "^4.3.0",
39 | "estraverse": "^4.3.0",
40 | "esutils": "^2.0.3",
41 | "fast-deep-equal": "^3.1.3",
42 | "fast-glob": "^3.3.2",
43 | "fast-json-stable-stringify": "^2.1.0",
44 | "fast-levenshtein": "^2.0.6",
45 | "fastq": "^1.17.1",
46 | "file-entry-cache": "^6.0.1",
47 | "fill-range": "^7.0.1",
48 | "find-up": "^5.0.0",
49 | "flat-cache": "^3.2.0",
50 | "flatted": "^3.3.1",
51 | "fs-extra": "^11.2.0",
52 | "fs.realpath": "^1.0.0",
53 | "functional-red-black-tree": "^1.0.1",
54 | "glob": "^7.2.3",
55 | "glob-parent": "^6.0.2",
56 | "globals": "^13.24.0",
57 | "globby": "^11.1.0",
58 | "graphemer": "^1.4.0",
59 | "has-flag": "^4.0.0",
60 | "ignore": "^5.3.2",
61 | "import-fresh": "^3.3.0",
62 | "imurmurhash": "^0.1.4",
63 | "inflight": "^1.0.6",
64 | "inherits": "^2.0.4",
65 | "is-extglob": "^2.1.1",
66 | "is-glob": "^4.0.3",
67 | "is-number": "^7.0.0",
68 | "is-path-inside": "^3.0.3",
69 | "isexe": "^2.0.0",
70 | "js-yaml": "^4.1.0",
71 | "json-buffer": "^3.0.1",
72 | "json-schema-traverse": "^0.4.1",
73 | "json-stable-stringify-without-jsonify": "^1.0.1",
74 | "keyv": "^4.5.4",
75 | "langchain": "^0.3.2",
76 | "levn": "^0.4.1",
77 | "locate-path": "^6.0.0",
78 | "lodash.merge": "^4.6.2",
79 | "lru-cache": "^6.0.0",
80 | "merge2": "^1.4.1",
81 | "micromatch": "^4.0.8",
82 | "minimatch": "^3.1.2",
83 | "moment": "^2.29.4",
84 | "ms": "^2.1.2",
85 | "natural-compare": "^1.4.0",
86 | "obsidian": "^1.5.7-1",
87 | "once": "^1.4.0",
88 | "optionator": "^0.9.3",
89 | "p-limit": "^3.1.0",
90 | "p-locate": "^5.0.0",
91 | "parent-module": "^1.0.1",
92 | "path-exists": "^4.0.0",
93 | "path-is-absolute": "^1.0.1",
94 | "path-key": "^3.1.1",
95 | "path-type": "^4.0.0",
96 | "picomatch": "^2.3.1",
97 | "prelude-ls": "^1.2.1",
98 | "punycode": "^2.3.1",
99 | "queue-microtask": "^1.2.3",
100 | "regexpp": "^3.2.0",
101 | "resolve-from": "^4.0.0",
102 | "reusify": "^1.0.4",
103 | "rimraf": "^3.0.2",
104 | "run-parallel": "^1.2.0",
105 | "semver": "^7.6.3",
106 | "shebang-command": "^2.0.0",
107 | "shebang-regex": "^3.0.0",
108 | "slash": "^3.0.0",
109 | "strip-ansi": "^6.0.1",
110 | "strip-json-comments": "^3.1.1",
111 | "style-mod": "^4.1.2",
112 | "supports-color": "^7.2.0",
113 | "text-table": "^0.2.0",
114 | "to-regex-range": "^5.0.1",
115 | "tslib": "^2.4.0",
116 | "tsutils": "^3.21.0",
117 | "type-check": "^0.4.0",
118 | "type-fest": "^0.20.2",
119 | "typescript": "^4.7.4",
120 | "uri-js": "^4.4.1",
121 | "w3c-keyname": "^2.2.8",
122 | "which": "^2.0.2",
123 | "wrappy": "^1.0.2",
124 | "yallist": "^4.0.0",
125 | "yocto-queue": "^0.1.0"
126 | },
127 | "scripts": {
128 | "dev": "node esbuild.config.mjs",
129 | "build": "tsc -noEmit -skipLibCheck && node esbuild.config.mjs production",
130 | "version": "node version-bump.mjs && git add manifest.json versions.json"
131 | },
132 | "author": "warpcast.com/mani",
133 | "license": "ISC",
134 | "devDependencies": {
135 | "@types/fs-extra": "^11.0.4"
136 | }
137 | }
138 |
--------------------------------------------------------------------------------
/removetags.sh:
--------------------------------------------------------------------------------
1 | git tag -d 1.1.5
2 | git push --delete origin 1.1.5
3 |
--------------------------------------------------------------------------------
/src/autoTagger.ts:
--------------------------------------------------------------------------------
1 | import { App, Editor, EditorPosition, MarkdownView, Notice, requestUrl } from "obsidian";
2 | import { OLocalLLMSettings } from "../main";
3 |
4 | export async function generateAndAppendTags(app: App, settings: OLocalLLMSettings) {
5 | const view = app.workspace.getActiveViewOfType(MarkdownView);
6 | if (!view) {
7 | new Notice("No active Markdown view");
8 | return;
9 | }
10 |
11 | const editor = view.editor;
12 | const selectedText = editor.getSelection();
13 | const fullText = editor.getValue();
14 | const cursorPosition = editor.getCursor();
15 |
16 | const textToProcess = selectedText || fullText;
17 |
18 | try {
19 | const tags = await generateTags(textToProcess, settings);
20 | appendTags(editor, tags, cursorPosition);
21 | new Notice("Tags generated and appended");
22 | } catch (error) {
23 | console.error("Error generating tags:", error);
24 | new Notice("Error generating tags. Check the console for details.");
25 | }
26 | }
27 |
28 | async function generateTags(text: string, settings: OLocalLLMSettings): Promise<string[]> {
29 | const prompt = "Generate 1-5 hashtags for the following text. Return only the hashtags, separated by spaces:";
30 |
31 | const body = {
32 | model: settings.llmModel,
33 | messages: [
34 | { role: "system", content: "You are a helpful assistant that generates relevant hashtags." },
35 | { role: "user", content: `${prompt}\n\n${text}` }
36 | ],
37 | temperature: settings.temperature,
38 | max_tokens: settings.maxTokens
39 | };
40 |
41 | const response = await requestUrl({
42 | url: `${settings.serverAddress}/v1/chat/completions`,
43 | method: "POST",
44 | headers: { "Content-Type": "application/json" },
45 | body: JSON.stringify(body)
46 | });
47 |
48 | if (response.status !== 200) {
49 | throw new Error(`Error from LLM server: ${response.status} ${response.text}`);
50 | }
51 |
52 | const data = await response.json;
53 | const generatedTags = data.choices[0].message.content.trim().split(/\s+/);
54 | return generatedTags
55 | .filter((tag: string) => /^#?[a-zA-Z0-9]+$/.test(tag))
56 | .map((tag: string) => tag.startsWith('#') ? tag : `#${tag}`)
57 | .slice(0, 5);
58 | }
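// Note: the regex filter above only keeps plain alphanumeric hashtags, so tags
// containing hyphens, underscores, or non-ASCII characters are silently dropped.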
59 |
60 | function appendTags(editor: Editor, tags: string[], cursorPosition: EditorPosition) {
61 | const tagsString = '\n\n' + tags.join(' ');
62 | editor.replaceRange(tagsString, cursorPosition);
63 | }
64 |
--------------------------------------------------------------------------------
/src/backlinkGenerator.ts:
--------------------------------------------------------------------------------
1 | import { RAGManager } from './rag';
2 | import { TFile, Vault } from 'obsidian';
3 |
4 | export class BacklinkGenerator {
5 | constructor(private ragManager: RAGManager, private vault: Vault) { }
6 |
7 |     async generateBacklinks(selectedText: string): Promise<string[]> {
8 | const similarNotes = await this.ragManager.findSimilarNotes(selectedText);
9 | console.log("Similar notes:", similarNotes);
10 | const backlinks: string[] = [];
11 |
12 | // Split the similarNotes string into individual note entries
13 | const noteEntries = similarNotes.split('\n').filter(entry => entry.trim() !== '');
14 |
15 | for (const entry of noteEntries) {
16 | console.log("Processing note entry:", entry);
17 | // Extract the file path from the entry (assuming it's in the format [[filepath]]: content)
18 | const match = entry.match(/\[\[(.*?)\]\]/);
19 | if (match && match[1]) {
20 | const notePath = match[1];
21 | const file = this.vault.getAbstractFileByPath(notePath);
22 | if (file instanceof TFile) {
23 | console.log("File found:", file.path);
24 | backlinks.push(`[[${file.path}|${file.basename}]]`);
25 | } else {
26 | console.log("File not found or not a TFile:", notePath);
27 | }
28 | }
29 | }
30 | console.log("Generated backlinks:", backlinks);
31 | return backlinks;
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/src/ollamaEmbeddings.ts:
--------------------------------------------------------------------------------
1 | import { requestUrl } from 'obsidian';
2 |
3 | export class OllamaEmbeddings {
4 | private baseUrl: string;
5 | private model: string;
6 |
7 | constructor(baseUrl: string, model: string) {
8 |         // Ensure Ollama uses its default port (11434) if not specified. Test for a numeric port, since ':' alone also matches the colon in "http://".
9 |         this.baseUrl = /:\d+/.test(baseUrl) ? baseUrl :
10 |             baseUrl.replace('localhost', 'localhost:11434').replace('127.0.0.1', '127.0.0.1:11434');
11 | this.model = model;
12 |
13 | console.log(`Ollama Embeddings initialized with URL: ${this.baseUrl}, Model: ${this.model}`);
14 | }
15 |
16 |     async checkModelAvailability(): Promise<boolean> {
17 | try {
18 | const response = await requestUrl({
19 | url: `${this.baseUrl}/api/tags`,
20 | method: 'GET'
21 | });
22 |
23 | if (response.status === 200) {
24 | const result = response.json;
25 | const availableModels = result.models?.map((m: any) => m.name) || [];
26 | const isAvailable = availableModels.some((name: string) =>
27 | name === this.model || name === `${this.model}:latest`
28 | );
29 |
30 | if (!isAvailable) {
31 | console.warn(`Model ${this.model} not found. Available models:`, availableModels);
32 | return false;
33 | }
34 |
35 | return true;
36 | }
37 | } catch (error) {
38 | console.warn('Could not check model availability:', error);
39 | }
40 |
41 | return false;
42 | }
43 |
44 |     async embedDocuments(documents: string[]): Promise<number[][]> {
45 | console.log(`Embedding ${documents.length} documents with Ollama`);
46 | try {
47 | const embeddings: number[][] = [];
48 |
49 | // Process documents one by one to avoid overwhelming the server
50 | for (const doc of documents) {
51 | const embedding = await this.embedQuery(doc);
52 | embeddings.push(embedding);
53 | }
54 |
55 | console.log(`Successfully embedded ${documents.length} documents`);
56 | return embeddings;
57 | } catch (error) {
58 | console.error('Error in Ollama embedDocuments:', error);
59 | throw error;
60 | }
61 | }
62 |
63 |     async embedQuery(text: string): Promise<number[]> {
64 | console.log(`Embedding query with Ollama: "${text.substring(0, 50)}..."`);
65 | try {
66 | const response = await requestUrl({
67 | url: `${this.baseUrl}/api/embeddings`,
68 | method: 'POST',
69 | headers: {
70 | 'Content-Type': 'application/json',
71 | },
72 | body: JSON.stringify({
73 | model: this.model,
74 | prompt: text
75 | })
76 | });
77 |
78 | if (response.status !== 200) {
79 | throw new Error(`Ollama API returned ${response.status}: ${response.text || 'Unknown error'}`);
80 | }
81 |
82 | const result = response.json;
83 |
84 | if (!result.embedding || !Array.isArray(result.embedding)) {
85 | throw new Error(`Invalid response from Ollama: ${JSON.stringify(result)}`);
86 | }
87 |
88 | console.log(`Successfully embedded query (${result.embedding.length} dimensions)`);
89 | return result.embedding;
90 | } catch (error) {
91 | console.error('Error in Ollama embedQuery:', error);
92 |
93 | // Provide helpful error messages
94 | if (error.message?.includes('400') || error.message?.includes('not found')) {
95 | // Check what models are available
96 | try {
97 | const isAvailable = await this.checkModelAvailability();
98 | if (!isAvailable) {
99 | throw new Error(`Model "${this.model}" not found in Ollama. Please install it with: ollama pull ${this.model}`);
100 | }
101 | } catch (checkError) {
102 | // If we can't check, provide generic message
103 | throw new Error(`Model "${this.model}" not available. Please ensure it's installed: ollama pull ${this.model}`);
104 | }
105 | throw new Error(`Bad request to Ollama: ${error.message}`);
106 | } else if (error.message?.includes('404')) {
107 | throw new Error(`Ollama server not found. Please ensure Ollama is running on ${this.baseUrl}`);
108 | } else if (error.message?.includes('ECONNREFUSED')) {
109 | throw new Error(`Cannot connect to Ollama server at ${this.baseUrl}. Please ensure Ollama is running.`);
110 | }
111 |
112 | throw error;
113 | }
114 | }
115 | }
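// A minimal sketch of the equivalent raw call (assuming a local Ollama on its
// default port with the mxbai-embed-large model pulled):
//   curl http://localhost:11434/api/embeddings \
//     -d '{"model": "mxbai-embed-large", "prompt": "hello world"}'
// The response is expected to be a JSON body like {"embedding": [0.01, ...]}.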
116 |
--------------------------------------------------------------------------------
/src/openAIEmbeddings.ts:
--------------------------------------------------------------------------------
1 | import { OpenAIEmbeddings as OEmbed } from "@langchain/openai";
2 |
3 | export class OpenAIEmbeddings extends OEmbed {
4 | constructor(openAIApiKey: string = "lm-studio", modelName: string, baseURL: string = "http://127.0.0.1:1234") {
5 |         // Ensure LM Studio uses its default port (1234) if not specified. Test for a numeric port, since ':' alone also matches the colon in "http://".
6 |         const studioUrl = /:\d+/.test(baseURL) ? baseURL :
7 |             baseURL.replace('localhost', 'localhost:1234').replace('127.0.0.1', '127.0.0.1:1234');
8 |
9 | super({
10 | openAIApiKey,
11 | modelName,
12 | configuration: { baseURL: `${studioUrl}/v1` }
13 | });
14 |
15 | console.log(`OpenAI/LM Studio Embeddings initialized with URL: ${studioUrl}/v1, Model: ${modelName}`);
16 | }
17 |
18 |     async embedDocuments(documents: string[]): Promise<number[][]> {
19 | console.log(`Embedding ${documents.length} documents with OpenAI/LM Studio`);
20 | try {
21 | const embeddings = await super.embedDocuments(documents);
22 | console.log(`Successfully embedded ${documents.length} documents`);
23 | return embeddings;
24 | } catch (error) {
25 | console.error('Error in OpenAI/LM Studio embedDocuments:', error);
26 |
27 | // Provide helpful error messages
28 | if (error.message?.includes('404')) {
29 | throw new Error(`LM Studio server not found or model not loaded. Please ensure LM Studio is running and an embedding model is loaded.`);
30 | } else if (error.message?.includes('ECONNREFUSED')) {
31 | throw new Error(`Cannot connect to LM Studio server. Please ensure LM Studio is running on the configured address.`);
32 | }
33 |
34 | throw error;
35 | }
36 | }
37 |
38 |     async embedQuery(text: string): Promise<number[]> {
39 | console.log(`Embedding query with OpenAI/LM Studio`);
40 | try {
41 | const embedding = await super.embedQuery(text);
42 | console.log(`Successfully embedded query`);
43 | return embedding;
44 | } catch (error) {
45 | console.error('Error in OpenAI/LM Studio embedQuery:', error);
46 |
47 | // Provide helpful error messages
48 | if (error.message?.includes('404')) {
49 | throw new Error(`LM Studio server not found or model not loaded. Please ensure LM Studio is running and an embedding model is loaded.`);
50 | } else if (error.message?.includes('ECONNREFUSED')) {
51 | throw new Error(`Cannot connect to LM Studio server. Please ensure LM Studio is running on the configured address.`);
52 | }
53 |
54 | throw error;
55 | }
56 | }
57 | }
58 |
--------------------------------------------------------------------------------
/src/rag.ts:
--------------------------------------------------------------------------------
1 | import { Document } from 'langchain/document';
2 | import { MemoryVectorStore } from 'langchain/vectorstores/memory';
3 | import { TFile, Vault, Plugin } from 'obsidian';
4 | import { OllamaEmbeddings } from './ollamaEmbeddings';
5 | import { OpenAIEmbeddings } from './openAIEmbeddings';
6 | import { Ollama } from "@langchain/ollama";
7 | import { OpenAI } from "@langchain/openai";
8 | import { createRetrievalChain } from "langchain/chains/retrieval";
9 | import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
10 | import { PromptTemplate } from "@langchain/core/prompts";
11 | import { OLocalLLMSettings } from '../main';
12 |
13 | interface StoredEmbedding {
14 | id: string;
15 | content: string;
16 | vector: number[];
17 | metadata: any;
18 | }
19 |
20 | interface EmbeddingData {
21 | embeddings: StoredEmbedding[];
22 | indexedFiles: string[];
23 | lastIndexed: number;
24 | version: string;
25 | settings: {
26 | provider: string;
27 | model: string;
28 | serverAddress: string;
29 | };
30 | }
31 |
32 | const CHUNK_SIZE = 1000;
33 |
34 | export class RAGManager {
35 | private vectorStore: MemoryVectorStore;
36 | private embeddings: OllamaEmbeddings | OpenAIEmbeddings;
37 | private indexedFiles: string[] = [];
38 | private provider: string;
39 | private isLoaded: boolean = false;
40 |
41 | constructor(
42 | private vault: Vault,
43 | private settings: OLocalLLMSettings,
44 | private plugin: Plugin
45 | ) {
46 | this.provider = this.settings.providerType || 'ollama';
47 |
48 | // Initialize embeddings based on provider
49 | this.embeddings = this.provider === 'ollama'
50 | ? new OllamaEmbeddings(this.settings.serverAddress, this.settings.embeddingModelName)
51 | : new OpenAIEmbeddings(this.settings.openAIApiKey, this.settings.embeddingModelName, this.settings.serverAddress);
52 |
53 | this.vectorStore = new MemoryVectorStore(this.embeddings);
54 | }
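// Note: the same serverAddress drives both chat completions and embeddings;
// for Ollama that is typically http://localhost:11434, for LM Studio :1234.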
55 |
56 |     async initialize(): Promise<void> {
57 | if (this.isLoaded) return;
58 |
59 | console.log('🔄 RAGManager: Starting initialization...');
60 | console.log(`📁 RAGManager: Plugin settings path: ${this.plugin.manifest.dir}/data.json`);
61 | console.log(`📁 RAGManager: Embeddings path: ${this.plugin.manifest.dir}/embeddings.json`);
62 |
63 | try {
64 | await this.loadEmbeddings();
65 | this.isLoaded = true;
66 | console.log('✅ RAGManager initialized with persistent storage');
67 | } catch (error) {
68 | console.error('❌ Failed to load embeddings, starting fresh:', error);
69 | this.isLoaded = true;
70 | }
71 | }
72 |
73 | updateSettings(settings: OLocalLLMSettings): void {
74 | this.settings = settings;
75 | this.provider = settings.providerType || 'ollama';
76 |
77 | // Reinitialize embeddings with new settings
78 | this.embeddings = this.provider === 'ollama'
79 | ? new OllamaEmbeddings(settings.serverAddress, settings.embeddingModelName)
80 | : new OpenAIEmbeddings(settings.openAIApiKey, settings.embeddingModelName, settings.serverAddress);
81 |
82 | // Update vector store with new embeddings
83 | this.vectorStore = new MemoryVectorStore(this.embeddings);
84 |
85 | console.log(`RAGManager settings updated - Provider: ${this.provider}, Model: ${settings.embeddingModelName}`);
86 | }
87 |
88 | async getRAGResponse(query: string): Promise<{ response: string, sources: string[] }> {
89 | try {
90 | const docs = await this.vectorStore.similaritySearch(query, 4);
91 | if (docs.length === 0) throw new Error("No relevant documents found");
92 |
93 | // Initialize LLM based on provider
94 | const llm = this.provider === 'ollama'
95 | ? new Ollama({
96 | baseUrl: this.settings.serverAddress,
97 | model: this.settings.llmModel,
98 | temperature: this.settings.temperature,
99 | })
100 | : new OpenAI({
101 | openAIApiKey: this.settings.openAIApiKey || 'lm-studio',
102 | modelName: this.settings.llmModel,
103 | temperature: this.settings.temperature,
104 | configuration: {
105 | baseURL: `${this.settings.serverAddress}/v1`,
106 | },
107 | });
108 |
109 | const promptTemplate = PromptTemplate.fromTemplate(
110 | `Answer the following question based on the context:\n\nContext: {context}\nQuestion: {input}\nAnswer:`
111 | );
112 |
113 | const documentChain = await createStuffDocumentsChain({ llm, prompt: promptTemplate });
114 | const retrievalChain = await createRetrievalChain({
115 | combineDocsChain: documentChain,
116 | retriever: this.vectorStore.asRetriever(4),
117 | });
118 |
119 | const result = await retrievalChain.invoke({ input: query });
120 | const sources = [...new Set(result.context.map((doc: Document) => doc.metadata.source))];
121 |
122 | return {
123 | response: result.answer as string,
124 | sources: sources
125 | };
126 | } catch (error) {
127 | console.error("RAG Error:", error);
128 | throw error;
129 | }
130 | }
131 |
132 |     async indexNotes(progressCallback: (progress: number) => void): Promise<void> {
133 | await this.initialize();
134 | await this.waitForVaultReady();
135 | console.log("Starting indexing process...");
136 |
137 | const allFiles = this.vault.getFiles().filter(file => file.extension === 'md');
138 | console.log("All markdown files in vault:", allFiles.map(file => file.path));
139 |
140 | const totalFiles = allFiles.length;
141 | console.log(`Found ${totalFiles} markdown files to index.`);
142 |
143 | if (totalFiles > 0) {
144 | await this.processFiles(allFiles, progressCallback);
145 |
146 | // Save embeddings to persistent storage after indexing
147 | await this.saveEmbeddings();
148 | } else {
149 | console.log("No markdown files found in the vault. Please check your vault configuration.");
150 | }
151 |
152 | console.log(`Indexing complete. ${this.indexedFiles.length} files indexed.`);
153 | }
154 |
155 |     private async processFiles(files: TFile[], progressCallback: (progress: number) => void): Promise<void> {
156 | this.indexedFiles = []; // Reset indexed files
157 | const totalFiles = files.length;
158 | let successfullyIndexed = 0;
159 |
160 | for (let i = 0; i < totalFiles; i++) {
161 | const file = files[i];
162 | try {
163 | console.log(`Processing file ${i + 1}/${totalFiles}: ${file.path}`);
164 | const content = await this.vault.cachedRead(file);
165 | console.log(`File content length: ${content.length} characters`);
166 |
167 | const chunks = this.splitIntoChunks(content, CHUNK_SIZE);
168 | console.log(`Split content into ${chunks.length} chunks`);
169 |
170 | for (let j = 0; j < chunks.length; j++) {
171 | const chunk = chunks[j];
172 | const doc = new Document({
173 | pageContent: chunk,
174 | metadata: { source: file.path, chunk: j },
175 | });
176 |
177 | await this.vectorStore.addDocuments([doc]);
178 | }
179 |
180 | this.indexedFiles.push(file.path);
181 | successfullyIndexed++;
182 | console.log(`Indexed file ${successfullyIndexed}/${totalFiles}: ${file.path}`);
183 | } catch (error) {
184 | console.error(`Error indexing file ${file.path}:`, error);
185 | }
186 |
187 | progressCallback((i + 1) / totalFiles);
188 | }
189 |
190 | console.log(`Successfully indexed ${successfullyIndexed} out of ${totalFiles} files.`);
191 | }
192 |
193 | private splitIntoChunks(content: string, chunkSize: number): string[] {
194 | const chunks: string[] = [];
195 | let currentChunk = '';
196 |
197 | content.split(/\s+/).forEach((word) => {
198 | if (currentChunk.length + word.length + 1 <= chunkSize) {
199 | currentChunk += (currentChunk ? ' ' : '') + word;
200 | } else {
201 | chunks.push(currentChunk);
202 | currentChunk = word;
203 | }
204 | });
205 |
206 | if (currentChunk) {
207 | chunks.push(currentChunk);
208 | }
209 |
210 | return chunks;
211 | }
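// Example: with CHUNK_SIZE = 1000, a ~2,500-character note yields roughly three
// chunks. Splitting is on whitespace, so words are never broken mid-word, but
// newlines and Markdown structure collapse into single spaces within a chunk.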
212 |
213 |     async findSimilarNotes(query: string): Promise<string> {
214 | try {
215 | const similarDocs = await this.vectorStore.similaritySearch(query, 5);
216 | console.log("Similar docs found:", similarDocs.length);
217 |
218 | if (similarDocs.length === 0) {
219 | return '';
220 | }
221 |
222 | const uniqueBacklinks = new Map();
223 |
224 | similarDocs.forEach((doc, index) => {
225 | const backlink = `[[${doc.metadata.source}]]`;
226 | console.log(`Processing doc ${index + 1}:`, backlink);
227 | if (!uniqueBacklinks.has(backlink)) {
228 | const entry = `${backlink}: ${doc.pageContent.substring(0, 100)}...`;
229 | uniqueBacklinks.set(backlink, entry);
230 | console.log("Added unique backlink:", entry);
231 | } else {
232 | console.log("Duplicate backlink found:", backlink);
233 | }
234 | });
235 |
236 | console.log("Final unique backlinks:", Array.from(uniqueBacklinks.values()));
237 | return Array.from(uniqueBacklinks.values()).join('\n');
238 | } catch (error) {
239 | console.error('Error in findSimilarNotes:', error);
240 | return '';
241 | }
242 | }
243 |
244 | getIndexedFilesCount(): number {
245 | return this.indexedFiles.length;
246 | }
247 |
248 | isInitialized(): boolean {
249 | return this.isLoaded;
250 | }
251 |
252 |     async saveEmbeddings(): Promise<void> {
253 | try {
254 | console.log('Saving embeddings to persistent storage...');
255 |
256 | // Extract embeddings from MemoryVectorStore
257 | const storedEmbeddings: StoredEmbedding[] = [];
258 | const vectorStoreData = (this.vectorStore as any).memoryVectors;
259 |
260 | if (vectorStoreData && Array.isArray(vectorStoreData)) {
261 | for (let i = 0; i < vectorStoreData.length; i++) {
262 | const item = vectorStoreData[i];
263 | storedEmbeddings.push({
264 | id: `${item.metadata?.source || 'unknown'}_${item.metadata?.chunk || i}`,
265 | content: item.content,
266 | vector: item.embedding,
267 | metadata: item.metadata
268 | });
269 | }
270 | }
271 |
272 | const embeddingData: EmbeddingData = {
273 | embeddings: storedEmbeddings,
274 | indexedFiles: this.indexedFiles,
275 | lastIndexed: Date.now(),
276 | version: '1.0',
277 | settings: {
278 | provider: this.provider,
279 | model: this.settings.embeddingModelName,
280 | serverAddress: this.settings.serverAddress
281 | }
282 | };
283 |
284 | // Save embeddings data separately from plugin settings
285 | const adapter = this.plugin.app.vault.adapter;
286 | const embeddingPath = `${this.plugin.manifest.dir}/embeddings.json`;
287 | await adapter.write(embeddingPath, JSON.stringify(embeddingData));
288 | console.log(`✅ Saved ${storedEmbeddings.length} embeddings to disk`);
289 | } catch (error) {
290 | console.error('Failed to save embeddings:', error);
291 | }
292 | }
293 |
294 |     async loadEmbeddings(): Promise<void> {
295 | try {
296 | console.log('📂 RAGManager: Loading embeddings from persistent storage...');
297 | // Load embeddings data separately from plugin settings
298 | const adapter = this.plugin.app.vault.adapter;
299 | const embeddingPath = `${this.plugin.manifest.dir}/embeddings.json`;
300 |
301 | let data: EmbeddingData;
302 | try {
303 | const embeddingJson = await adapter.read(embeddingPath);
304 | data = JSON.parse(embeddingJson);
305 | } catch (fileError) {
306 | console.log('📂 RAGManager: No embeddings file found, starting fresh');
307 | return;
308 | }
309 |
310 | console.log('📊 RAGManager: Raw data check:', {
311 | dataExists: !!data,
312 | hasEmbeddings: data?.embeddings?.length || 0,
313 | hasIndexedFiles: data?.indexedFiles?.length || 0,
314 | lastIndexed: data?.lastIndexed ? new Date(data.lastIndexed).toLocaleString() : 'Never',
315 | settingsMatch: data?.settings ? {
316 | provider: data.settings.provider,
317 | model: data.settings.model,
318 | serverAddress: data.settings.serverAddress
319 | } : 'No settings'
320 | });
321 |
322 | if (!data || !data.embeddings) {
323 | console.log('🆕 RAGManager: No saved embeddings found, starting fresh');
324 | return;
325 | }
326 |
327 | // Check if settings have changed significantly
328 | if (this.shouldRebuildIndex(data.settings)) {
329 | console.log('⚙️ RAGManager: Settings changed, embeddings will be rebuilt on next index');
330 | console.log('Current vs Saved:', {
331 | current: { provider: this.provider, model: this.settings.embeddingModelName, server: this.settings.serverAddress },
332 | saved: data.settings
333 | });
334 | console.log('❌ RAGManager: NOT loading existing embeddings due to settings mismatch');
335 | return;
336 | }
337 |
338 | // Reconstruct MemoryVectorStore from saved data
339 | const documents: Document[] = [];
340 |
341 | for (const stored of data.embeddings) {
342 | const doc = new Document({
343 | pageContent: stored.content,
344 | metadata: stored.metadata
345 | });
346 | documents.push(doc);
347 | }
348 |
349 | if (documents.length > 0) {
350 | console.log(`🔄 RAGManager: Reconstructing vector store with ${documents.length} documents WITHOUT re-embedding...`);
351 |
352 | // Create new vector store WITHOUT calling addDocuments (which re-embeds)
353 | this.vectorStore = new MemoryVectorStore(this.embeddings);
354 |
355 | // Directly populate the internal vector storage with saved embeddings
356 | const memoryVectors = data.embeddings.map(stored => ({
357 | content: stored.content,
358 | embedding: stored.vector,
359 | metadata: stored.metadata
360 | }));
361 |
362 | // Set the internal memoryVectors directly
363 | (this.vectorStore as any).memoryVectors = memoryVectors;
364 |
365 | console.log(`✅ RAGManager: Restored ${memoryVectors.length} embeddings WITHOUT re-embedding`);
366 | }
367 |
368 | this.indexedFiles = data.indexedFiles || [];
369 |
370 | console.log(`✅ RAGManager: Successfully loaded ${data.embeddings.length} embeddings from disk`);
371 | console.log(`📁 RAGManager: ${this.indexedFiles.length} files were previously indexed`);
372 | console.log(`🗂️ RAGManager: Files: ${this.indexedFiles.slice(0, 3).join(', ')}${this.indexedFiles.length > 3 ? '...' : ''}`);
373 |
374 | // Log when the index was last built (console only; no user-facing notice here)
375 | const lastIndexedDate = new Date(data.lastIndexed).toLocaleString();
376 | console.log(`🕒 RAGManager: Last indexed: ${lastIndexedDate}`);
377 |
378 | } catch (error) {
379 | console.error('Failed to load embeddings:', error);
380 | throw error;
381 | }
382 | }
383 |
384 | private shouldRebuildIndex(savedSettings: any): boolean {
385 | if (!savedSettings) {
386 | console.log('🔄 RAGManager: No saved settings, will rebuild');
387 | return true;
388 | }
389 |
390 | const currentSettings = {
391 | provider: this.provider,
392 | model: this.settings.embeddingModelName,
393 | serverAddress: this.settings.serverAddress
394 | };
395 |
396 | console.log('🔍 RAGManager: Comparing settings:');
397 | console.log(' Current:', currentSettings);
398 | console.log(' Saved:', savedSettings);
399 |
400 | // Check each comparison individually
401 | const providerChanged = savedSettings.provider !== this.provider;
402 | const modelChanged = savedSettings.model !== this.settings.embeddingModelName;
403 | const serverChanged = savedSettings.serverAddress !== this.settings.serverAddress;
404 |
405 | console.log(`🔍 RAGManager: Individual comparisons:`);
406 | console.log(` Provider changed: ${providerChanged} (${savedSettings.provider} !== ${this.provider})`);
407 | console.log(` Model changed: ${modelChanged} (${savedSettings.model} !== ${this.settings.embeddingModelName})`);
408 | console.log(` Server changed: ${serverChanged} (${savedSettings.serverAddress} !== ${this.settings.serverAddress})`);
409 |
410 | const needsRebuild = providerChanged || modelChanged || serverChanged;
411 | console.log(`🔄 RAGManager: Needs rebuild? ${needsRebuild}`);
412 |
413 | return needsRebuild;
414 | }
415 |
416 | async getStorageStats(): Promise<{ totalEmbeddings: number; indexedFiles: number; lastIndexed: string; storageUsed: string }> {
417 | try {
418 | // Load embeddings data separately from plugin settings
419 | const adapter = this.plugin.app.vault.adapter;
420 | const embeddingPath = `${this.plugin.manifest.dir}/embeddings.json`;
421 |
422 | let data: EmbeddingData;
423 | try {
424 | const embeddingJson = await adapter.read(embeddingPath);
425 | data = JSON.parse(embeddingJson);
426 | } catch (fileError) {
427 | return {
428 | totalEmbeddings: 0,
429 | indexedFiles: 0,
430 | lastIndexed: 'Never',
431 | storageUsed: '0 KB'
432 | };
433 | }
434 |
435 | if (!data) {
436 | return {
437 | totalEmbeddings: 0,
438 | indexedFiles: 0,
439 | lastIndexed: 'Never',
440 | storageUsed: '0 KB'
441 | };
442 | }
443 |
444 | const storageSize = JSON.stringify(data).length;
445 | const storageUsed = storageSize < 1024
446 | ? `${storageSize} B`
447 | : storageSize < 1024 * 1024
448 | ? `${(storageSize / 1024).toFixed(1)} KB`
449 | : `${(storageSize / (1024 * 1024)).toFixed(1)} MB`;
450 |
451 | return {
452 | totalEmbeddings: data.embeddings?.length || 0,
453 | indexedFiles: data.indexedFiles?.length || 0,
454 | lastIndexed: data.lastIndexed ? new Date(data.lastIndexed).toLocaleString() : 'Never',
455 | storageUsed
456 | };
457 | } catch (error) {
458 | console.error('Failed to get storage stats:', error);
459 | return {
460 | totalEmbeddings: 0,
461 | indexedFiles: 0,
462 | lastIndexed: 'Error',
463 | storageUsed: 'Unknown'
464 | };
465 | }
466 | }
467 |
468 | async clearStoredEmbeddings(): Promise<void> {
469 | try {
470 | // Clear embeddings data separately from plugin settings
471 | const adapter = this.plugin.app.vault.adapter;
472 | const embeddingPath = `${this.plugin.manifest.dir}/embeddings.json`;
473 |
474 | try {
475 | await adapter.remove(embeddingPath);
476 | } catch (error) {
477 | // File might not exist, that's okay
478 | }
479 |
480 | this.indexedFiles = [];
481 | this.vectorStore = new MemoryVectorStore(this.embeddings);
482 | console.log('✅ Cleared all stored embeddings');
483 | } catch (error) {
484 | console.error('Failed to clear embeddings:', error);
485 | }
486 | }
487 |
488 | async waitForVaultReady(): Promise<void> {
489 | while (true) {
490 | const files = this.vault.getFiles();
491 | if (files.length > 0) {
492 | break; // Vault is ready if we have files
493 | }
494 | // If no files, wait and try again
495 | await new Promise(resolve => setTimeout(resolve, 100));
496 | }
497 | }
498 | }
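499 |
500 | // Usage sketch (illustrative only; `ragManager` is an assumed instance name,
501 | // and the real wiring lives in the plugin's lifecycle code): restore persisted
502 | // vectors once the vault is ready, then save again after (re)indexing.
503 | //
504 | //   await ragManager.waitForVaultReady();
505 | //   await ragManager.loadEmbeddings();  // restores vectors without re-embedding
506 | //   // ...index new or changed notes...
507 | //   await ragManager.saveEmbeddings();  // writes embeddings.json in the plugin dir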
--------------------------------------------------------------------------------
/src/ragChatModal.ts:
--------------------------------------------------------------------------------
1 | import { App, Modal, TextComponent, ButtonComponent, Notice, setIcon } from "obsidian";
2 | import { OLocalLLMSettings } from "../main";
3 | import { RAGManager } from "./rag";
4 |
5 | export class RAGChatModal extends Modal {
6 | result: string = "";
7 | pluginSettings: OLocalLLMSettings;
8 | conversationHistory: { prompt: string; response: string }[] = [];
9 | submitButton: ButtonComponent;
10 | ragManager: RAGManager;
11 |
12 | constructor(app: App, settings: OLocalLLMSettings, ragManager: RAGManager) {
13 | super(app);
14 | this.pluginSettings = settings;
15 | this.ragManager = ragManager;
16 | }
17 |
18 | onOpen() {
19 | const { contentEl } = this;
20 | contentEl.classList.add("llm-chat-modal");
21 |
22 | const chatContainer = contentEl.createDiv({ cls: "llm-chat-container" });
23 | const chatHistoryEl = chatContainer.createDiv({ cls: "llm-chat-history" });
24 | chatHistoryEl.classList.add("chatHistoryElStyle");
25 |
26 | chatHistoryEl.createEl("h1", { text: "Chat with your Notes (RAG)" });
27 |
28 | const inputContainer = contentEl.createDiv({ cls: "llm-chat-input-container" });
29 | const inputRow = inputContainer.createDiv({ cls: "llm-chat-input-row" });
30 | inputRow.createSpan({ text: "Ask:", cls: "llm-chat-ask-label" });
31 |
32 | const textInput = new TextComponent(inputRow)
33 | .setPlaceholder("Ask about your notes...")
34 | .onChange((value) => {
35 | this.result = value;
36 | this.updateSubmitButtonState();
37 | });
38 | textInput.inputEl.classList.add("llm-chat-input");
39 | textInput.inputEl.addEventListener('keypress', (event) => {
40 | if (event.key === 'Enter' && this.result.trim() !== "") {
41 | event.preventDefault();
42 | this.handleSubmit();
43 | }
44 | });
45 |
46 | this.submitButton = new ButtonComponent(inputRow)
47 | .setButtonText("Submit")
48 | .setCta()
49 | .onClick(() => this.handleSubmit());
50 | this.submitButton.buttonEl.classList.add("llm-chat-submit-button");
51 |
52 | this.updateSubmitButtonState();
53 | this.scrollToBottom();
54 | }
55 |
56 | private async handleSubmit() {
57 | if (this.result.trim() === "") return;
58 |
59 | const chatHistoryEl = this.contentEl.querySelector('.llm-chat-history') as HTMLElement;
60 | if (!chatHistoryEl) return;
61 |
62 | // Add user question to chat
63 | const userMessageEl = chatHistoryEl.createEl("p", { text: "You: " + this.result });
64 | userMessageEl.classList.add('llmChatMessageStyleUser');
65 |
66 | // Show thinking indicator
67 | this.showThinkingIndicator(chatHistoryEl);
68 | this.scrollToBottom();
69 |
70 | try {
71 | const response = await this.ragManager.getRAGResponse(this.result);
72 |
73 | // Create response container
74 | const responseContainer = document.createElement('div');
75 | responseContainer.classList.add('llmChatMessageStyleAI');
76 |
77 | // Add response text
78 | const responseTextEl = document.createElement('div');
79 | responseTextEl.innerHTML = response.response;
80 | responseContainer.appendChild(responseTextEl);
81 |
82 | // Add sources if available
83 | if (response.sources.length > 0) {
84 | const sourcesEl = document.createElement('div');
85 | sourcesEl.classList.add('rag-sources');
86 | sourcesEl.innerHTML = "<br>Sources:<br>" + response.sources.map(s => `[[${s}]]`).join('<br>');
87 | responseContainer.appendChild(sourcesEl);
88 | }
89 |
90 | // Add copy button
91 | const copyButton = document.createElement('button');
92 | copyButton.classList.add('copy-button');
93 | setIcon(copyButton, 'copy');
94 | copyButton.addEventListener('click', () => {
95 | navigator.clipboard.writeText(response.response).then(() => {
96 | new Notice('Copied to clipboard!');
97 | });
98 | });
99 | responseContainer.appendChild(copyButton);
100 |
101 | // Remove thinking indicator and add response
102 | this.hideThinkingIndicator(chatHistoryEl);
103 | chatHistoryEl.appendChild(responseContainer);
104 |
105 | // Clear input and update state
106 | this.result = "";
107 | const textInputEl = this.contentEl.querySelector('.llm-chat-input') as HTMLInputElement;
108 | if (textInputEl) {
109 | textInputEl.value = "";
110 | }
111 | this.updateSubmitButtonState();
112 | this.scrollToBottom();
113 |
114 | } catch (error) {
115 | console.error("RAG Chat Error:", error);
116 | new Notice("Error: " + (error.message || "Unknown error occurred"));
117 | this.hideThinkingIndicator(chatHistoryEl);
118 | }
119 | }
120 |
121 | private updateSubmitButtonState() {
122 | if (this.result.trim() === "") {
123 | this.submitButton.setDisabled(true);
124 | this.submitButton.buttonEl.classList.add("llm-chat-submit-button-disabled");
125 | } else {
126 | this.submitButton.setDisabled(false);
127 | this.submitButton.buttonEl.classList.remove("llm-chat-submit-button-disabled");
128 | }
129 | }
130 |
131 | private showThinkingIndicator(chatHistoryEl: HTMLElement) {
132 | const thinkingIndicatorEl = document.createElement('div');
133 | thinkingIndicatorEl.classList.add('thinking-indicator');
134 | thinkingIndicatorEl.innerHTML = 'Searching through your notes...';
135 | chatHistoryEl.appendChild(thinkingIndicatorEl);
136 | }
137 |
138 | private hideThinkingIndicator(chatHistoryEl: HTMLElement) {
139 | const thinkingIndicatorEl = chatHistoryEl.querySelector('.thinking-indicator');
140 | if (thinkingIndicatorEl) {
141 | thinkingIndicatorEl.remove();
142 | }
143 | }
144 |
145 | private scrollToBottom() {
146 | const chatHistoryEl = this.contentEl.querySelector('.llm-chat-history');
147 | if (chatHistoryEl) {
148 | chatHistoryEl.scrollTop = chatHistoryEl.scrollHeight;
149 | }
150 | }
151 |
152 | onClose() {
153 | const { contentEl } = this;
154 | contentEl.empty();
155 | }
156 | }
157 |
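158 | // Usage sketch (illustrative; assumes a plugin context where `this.settings`
159 | // and `this.ragManager` exist):
160 | //
161 | //   new RAGChatModal(this.app, this.settings, this.ragManager).open();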
--------------------------------------------------------------------------------
/src/updateNoticeModal.ts:
--------------------------------------------------------------------------------
1 | import { App, Modal, MarkdownRenderer, Component } from "obsidian";
2 |
3 | export class UpdateNoticeModal extends Modal {
4 | constructor(app: App, private version: string) {
5 | super(app);
6 | }
7 |
8 | onOpen() {
9 | const { contentEl } = this;
10 | contentEl.createEl("h2", { text: `Local LLM Helper updated to v${this.version}` });
11 |
12 | const changelogMd = `
13 | ## What's New in v${this.version}
14 |
15 | ### 🔧 Major Bug Fixes
16 | - **Fixed Re-embedding Issue**: Embeddings no longer re-generate on every app restart
17 | - **Proper Persistent Storage**: Embeddings now persist correctly across Obsidian restarts
18 | - **Data Separation**: Plugin settings and embeddings are now stored separately to prevent conflicts
19 |
20 | ### 🚀 New Features
21 | - **Storage Diagnostics**: New command and settings button to check embedding storage status
22 | - **User Notifications**: Shows embedding count and storage info on startup
23 | - **Enhanced Logging**: Comprehensive console logging with emojis for better debugging
24 |
25 | ### 🔧 Improvements
26 | - **Better Error Handling**: Improved Ollama API integration with proper error messages
27 | - **Default Settings**: Updated to use Ollama port 11434 and mxbai-embed-large model
28 | - **Settings UI**: Indexed file count now updates properly in settings panel
29 |
30 | [Full Changelog](https://github.com/manimohans/obsidian-local-llm-helper/releases)
31 | `;
32 |
33 | const dummyComponent = new Component(); // MarkdownRenderer.render requires a Component to manage child lifecycles
34 | MarkdownRenderer.render(this.app, changelogMd, contentEl, "", dummyComponent);
35 | }
36 |
37 | onClose() {
38 | const { contentEl } = this;
39 | contentEl.empty();
40 | }
41 | }
42 |
--------------------------------------------------------------------------------
/styles.css:
--------------------------------------------------------------------------------
1 | .chatHistoryElStyle {
2 | overflow-y: auto;
3 | max-height: 70vh;
4 | padding: 10px;
5 | background-color: var(--background-secondary);
6 | border-radius: 8px;
7 | box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
8 | }
9 |
10 | .personasInfoStyle {
11 | font-size: 12px;
12 | font-style: italic;
13 | opacity: 0.7;
14 | margin-bottom: 20px;
15 | color: var(--text-muted);
16 | }
17 |
18 | .llmChatMessageStyleUser {
19 | border-radius: 18px;
20 | background-color: var(--interactive-accent);
21 | color: var(--text-on-accent);
22 | align-self: flex-end;
23 | border-bottom-right-radius: 4px;
24 | padding: 12px 16px;
25 | margin-bottom: 15px;
26 | margin-left: 20px;
27 | user-select: text;
28 | box-shadow: 0 2px 6px rgba(0, 0, 0, 0.1);
29 | max-width: 80%;
30 | }
31 |
32 | .llmChatMessageStyleAI {
33 | position: relative;
34 | padding: 12px 16px;
35 | border-radius: 18px;
36 | background-color: var(--background-secondary);
37 | color: var(--text-normal);
38 | align-self: flex-start;
39 | border-bottom-left-radius: 4px;
40 | margin-bottom: 15px;
41 | margin-right: 20px;
42 | user-select: text;
43 | box-shadow: 0 2px 6px rgba(0, 0, 0, 0.1);
44 | max-width: 80%;
45 | }
46 |
47 | .copy-button {
48 | position: absolute;
49 | bottom: 4px;
50 | right: 4px;
51 | background: transparent;
52 | border: none;
53 | cursor: pointer;
54 | padding: 2px;
55 | opacity: 0.5;
56 | transition: opacity 0.2s ease-in-out;
57 | }
58 |
59 | .copy-button:hover {
60 | opacity: 1;
61 | }
62 |
63 | .copy-button svg {
64 | width: 16px;
65 | height: 16px;
66 | color: var(--text-muted);
67 | }
68 |
69 | .thinking-indicator {
70 | display: inline-block;
71 | margin-bottom: 15px;
72 | padding: 8px 12px;
73 | background-color: var(--background-secondary);
74 | border-radius: 12px;
75 | font-size: 14px;
76 | color: var(--text-muted);
77 | }
78 |
79 | .thinking-indicator span {
80 | margin-left: 4px;
81 | }
82 |
83 | .dots {
84 | display: inline-block;
85 | position: relative;
86 | }
87 |
88 | .dot {
89 | display: inline-block;
90 | width: 4px;
91 | height: 4px;
92 | border-radius: 50%;
93 | background-color: var(--text-muted);
94 | margin-right: 2px;
95 | animation: blink 1.5s infinite;
96 | }
97 |
98 | .dot:nth-child(2) {
99 | animation-delay: 0.2s;
100 | }
101 |
102 | .dot:nth-child(3) {
103 | animation-delay: 0.4s;
104 | }
105 |
106 | @keyframes blink {
107 | 0%, 100% {
108 | opacity: 1;
109 | }
110 | 50% {
111 | opacity: 0;
112 | }
113 | }
114 |
115 | .llm-chat-modal {
116 | display: flex;
117 | flex-direction: column;
118 | height: 100%;
119 | font-family: var(--font-interface);
120 | }
121 |
122 | .llm-chat-container {
123 | display: flex;
124 | flex-direction: column;
125 | height: 100%;
126 | background-color: var(--background-primary);
127 | }
128 |
129 | .llm-chat-history {
130 | flex-grow: 1;
131 | overflow-y: auto;
132 | padding: 20px;
133 | scrollbar-width: thin;
134 | scrollbar-color: var(--scrollbar-thumb-bg) var(--scrollbar-bg);
135 | }
136 |
137 | .llm-chat-history::-webkit-scrollbar {
138 | width: 8px;
139 | }
140 |
141 | .llm-chat-history::-webkit-scrollbar-track {
142 | background: var(--scrollbar-bg);
143 | }
144 |
145 | .llm-chat-history::-webkit-scrollbar-thumb {
146 | background-color: var(--scrollbar-thumb-bg);
147 | border-radius: 4px;
148 | }
149 |
150 | .llm-chat-input-container {
151 | padding: 15px;
152 | background-color: var(--background-secondary);
153 | border-top: 1px solid var(--background-modifier-border);
154 | }
155 |
156 | .llm-chat-input-row {
157 | display: flex;
158 | align-items: center;
159 | gap: 12px;
160 | }
161 |
162 | .llm-chat-ask-label {
163 | flex-shrink: 0;
164 | font-weight: 600;
165 | color: var(--text-muted);
166 | }
167 |
168 | .llm-chat-input {
169 | flex-grow: 1;
170 | padding: 10px 15px;
171 | border-radius: 20px;
172 | border: 1px solid var(--background-modifier-border);
173 | background-color: var(--background-primary);
174 | color: var(--text-normal);
175 | font-size: 14px;
176 | transition: all 0.3s ease;
177 | }
178 |
179 | .llm-chat-input:focus {
180 | outline: none;
181 | box-shadow: 0 0 0 2px var(--interactive-accent);
182 | }
183 |
184 | .llm-chat-submit-button {
185 | flex-shrink: 0;
186 | padding: 8px 16px;
187 | border-radius: 20px;
188 | background-color: var(--interactive-accent);
189 | color: var(--text-on-accent);
190 | font-weight: 600;
191 | border: none;
192 | cursor: pointer;
193 | transition: all 0.3s ease;
194 | }
195 |
196 | .llm-chat-submit-button:hover:not(.llm-chat-submit-button-disabled) {
197 | background-color: var(--interactive-accent-hover);
198 | }
199 |
200 | .llm-chat-submit-button-disabled {
201 | opacity: 0.5;
202 | cursor: not-allowed;
203 | }
204 |
205 | .llm-chat-submit-button-disabled:hover {
206 | background-color: var(--interactive-accent);
207 | }
208 |
209 | .chatInputContainer {
210 | display: flex;
211 | padding: 10px;
212 | background-color: var(--background-primary);
213 | border-top: 1px solid var(--background-modifier-border);
214 | }
215 |
216 | .chatInputStyle {
217 | flex: 1;
218 | padding: 10px;
219 | border-radius: 20px;
220 | border: 1px solid var(--background-modifier-border);
221 | background-color: var(--background-primary);
222 | color: var(--text-normal);
223 | font-size: 14px;
224 | transition: all 0.3s ease;
225 | }
226 |
227 | .chatInputStyle:focus {
228 | outline: none;
229 | box-shadow: 0 0 0 2px var(--interactive-accent);
230 | }
231 |
232 | .submitButton {
233 | margin-left: 10px;
234 | padding: 10px 20px;
235 | border-radius: 20px;
236 | background-color: var(--interactive-accent);
237 | color: var(--text-on-accent);
238 | border: none;
239 | cursor: pointer;
240 | transition: background-color 0.3s ease;
241 | }
242 |
243 | .submitButton:hover {
244 | background-color: var(--interactive-accent-hover);
245 | }
246 |
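247 | /* Illustrative markup these animation rules would style (the three .dot spans
248 |    blink in sequence via the nth-child delays above):
249 |    <div class="thinking-indicator">Searching
250 |      <span class="dots"><span class="dot"></span><span class="dot"></span><span class="dot"></span></span>
251 |    </div> */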
--------------------------------------------------------------------------------
/test_stream.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import json
3 |
4 | def stream_response(url, data):
5 | """Streams the response from the server and yields each word."""
6 | with requests.post(url, headers={"Content-Type": "application/json"}, data=json.dumps(data), stream=True) as response:
7 | if response.encoding is None:
8 | response.encoding = 'utf-8'
9 |
10 | for line in response.iter_lines(decode_unicode=True):
11 | if line: # Filter out keep-alive chunks
12 | #print(line)
13 | if 'data: {' in line:
14 | #print(line)
15 | data = json.loads(line.replace('data: ', ''))
16 | if 'choices' in data:
17 | try:
18 | #print(data['choices'][0]['delta']['content'])
19 | yield data['choices'][0]['delta']['content']
20 | except KeyError:
21 | pass # chunk without delta content (e.g. the role-only first chunk)
22 | #print(data['choices'][0]['delta']['content'])
23 | #for choice in data['choices']:
24 | # if 'text' in choice:
25 | # yield choice['text'].strip() # Yield individual words
26 |
27 | # Set up your data
28 | url = 'http://192.168.86.247:1234/v1/chat/completions'
29 | data = {
30 | "model": "model-identifier",
31 | "messages": [
32 | { "role": "system", "content": "Always answer in rhymes." },
33 | { "role": "user", "content": "Introduce yourself." }
34 | ],
35 | "temperature": 0.7,
36 | "max_tokens": -1,
37 | "stream": True
38 | }
39 |
40 | # Process the streaming words
41 | for word in stream_response(url, data):
42 | print(word) # Do something with each word (print here for example)
43 |
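44 | # For reference (an assumption based on the OpenAI-compatible streaming format
45 | # this script parses): each streamed chunk arrives as a line like
46 | #   data: {"choices": [{"delta": {"content": "Hello"}}]}
47 | # and the stream ends with a literal "data: [DONE]" line, which the
48 | # "'data: {' in line" check above skips along with empty keep-alive lines.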
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "baseUrl": ".",
4 | "inlineSourceMap": true,
5 | "inlineSources": true,
6 | "module": "ESNext",
7 | "target": "ES6",
8 | "allowJs": true,
9 | "noImplicitAny": true,
10 | "moduleResolution": "node",
11 | "importHelpers": true,
12 | "isolatedModules": true,
13 | "strictNullChecks": true,
14 | "lib": [
15 | "DOM",
16 | "ES5",
17 | "ES6",
18 | "ES7"
19 | ]
20 | },
21 | "include": [
22 | "**/*.ts"
23 | ]
24 | }
25 |
--------------------------------------------------------------------------------
/updatetags.sh:
--------------------------------------------------------------------------------
1 | #STEP 1: make sure to edit manifest.json - the version numbers must match!
2 | #STEP 2: update updateNoticeModal.ts with the new version update texts
3 | #
4 | #
5 | git tag -a 2.2.1 -m "2.2.1"
6 | git push origin 2.2.1
7 |
--------------------------------------------------------------------------------
/version-bump.mjs:
--------------------------------------------------------------------------------
1 | import { readFileSync, writeFileSync } from "fs";
2 |
3 | const targetVersion = process.env.npm_package_version;
4 |
5 | // read minAppVersion from manifest.json and bump version to target version
6 | let manifest = JSON.parse(readFileSync("manifest.json", "utf8"));
7 | const { minAppVersion } = manifest;
8 | manifest.version = targetVersion;
9 | writeFileSync("manifest.json", JSON.stringify(manifest, null, "\t"));
10 |
11 | // update versions.json with target version and minAppVersion from manifest.json
12 | let versions = JSON.parse(readFileSync("versions.json", "utf8"));
13 | versions[targetVersion] = minAppVersion;
14 | writeFileSync("versions.json", JSON.stringify(versions, null, "\t"));
15 |
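16 | // Example (illustrative, assuming this runs via an npm "version" script so that
17 | // npm_package_version is populated): bumping to 1.0.1 while manifest.json has
18 | // minAppVersion "0.15.0" adds the entry "1.0.1": "0.15.0" to versions.json.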
--------------------------------------------------------------------------------
/versions.json:
--------------------------------------------------------------------------------
1 | {
2 | "1.0.0": "0.15.0"
3 | }
4 |
--------------------------------------------------------------------------------