├── .gitignore
├── LICENSE
├── README.md
├── docs
├── enable_gpu_support.md
├── images
│ ├── architecture.png
│ ├── chatd_ollama_running.png
│ └── select_a_model.png
└── select_a_custom_model.md
├── forge.config.js
├── package-lock.json
├── package.json
├── public
├── chatd.icns
├── chatd.ico
├── chatd.iconset
│ ├── icon_128x128.png
│ ├── icon_128x128@2x.png
│ ├── icon_16x16.png
│ ├── icon_16x16@2x.png
│ ├── icon_256x256.png
│ ├── icon_256x256@2x.png
│ ├── icon_32x32.png
│ ├── icon_32x32@2x.png
│ ├── icon_512x512.png
│ └── icon_512x512@2x.png
├── chatd.png
├── dark-logo.png
└── prism
│ ├── prism.css
│ └── prism.js
├── screenshots
├── chat_screen.png
├── home_screen.png
├── logo.png
└── welcome_screen.png
└── src
├── api.js
├── client.js
├── index.css
├── index.html
├── index.js
├── preload.js
└── service
├── document
├── parse
│ ├── clean.js
│ ├── docx.js
│ ├── html.js
│ ├── index.js
│ ├── md.js
│ ├── odt.js
│ ├── pdf.js
│ └── txt.js
└── reader.js
├── embedding.js
├── logger.js
├── ollama
├── ollama.js
└── runners
│ └── README.md
├── vector.js
└── worker.js
/.gitignore:
--------------------------------------------------------------------------------
1 | # Logs
2 | logs
3 | *.log
4 | npm-debug.log*
5 | yarn-debug.log*
6 | yarn-error.log*
7 | lerna-debug.log*
8 |
9 | # Diagnostic reports (https://nodejs.org/api/report.html)
10 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
11 |
12 | # Runtime data
13 | pids
14 | *.pid
15 | *.seed
16 | *.pid.lock
17 | .DS_Store
18 |
19 | # Directory for instrumented libs generated by jscoverage/JSCover
20 | lib-cov
21 |
22 | # Coverage directory used by tools like istanbul
23 | coverage
24 | *.lcov
25 |
26 | # nyc test coverage
27 | .nyc_output
28 |
29 | # node-waf configuration
30 | .lock-wscript
31 |
32 | # Compiled binary addons (https://nodejs.org/api/addons.html)
33 | build/Release
34 |
35 | # Dependency directories
36 | node_modules/
37 | jspm_packages/
38 |
39 | # TypeScript v1 declaration files
40 | typings/
41 |
42 | # TypeScript cache
43 | *.tsbuildinfo
44 |
45 | # Optional npm cache directory
46 | .npm
47 |
48 | # Optional eslint cache
49 | .eslintcache
50 |
51 | # Optional REPL history
52 | .node_repl_history
53 |
54 | # Output of 'npm pack'
55 | *.tgz
56 |
57 | # Yarn Integrity file
58 | .yarn-integrity
59 |
60 | # dotenv environment variables file
61 | .env
62 | .env.test
63 |
64 | # parcel-bundler cache (https://parceljs.org/)
65 | .cache
66 |
67 | # next.js build output
68 | .next
69 |
70 | # nuxt.js build output
71 | .nuxt
72 |
73 | # vuepress build output
74 | .vuepress/dist
75 |
76 | # Serverless directories
77 | .serverless/
78 |
79 | # FuseBox cache
80 | .fusebox/
81 |
82 | # DynamoDB Local files
83 | .dynamodb/
84 |
85 | # Webpack
86 | .webpack/
87 |
88 | # Vite
89 | .vite/
90 |
91 | # Electron-Forge
92 | out/
93 |
94 | # this is where large ollama executables are stored
95 | src/service/ollama/runners
96 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) chatd
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | [See a video demo.](https://github.com/BruceMacD/chatd/assets/5853428/a7d8b77c-beae-41a4-bfd6-0fd5cf932b43)
6 |
7 | Chat with your documents using local AI. All your data stays on your computer and is never sent to the cloud. Chatd is a completely private and secure way to interact with your documents.
8 |
9 | Chatd is a desktop application that lets you use a local large language model (`Mistral-7B`) to chat with your documents. What makes chatd different from other "chat with local documents" apps is that it comes with the local LLM runner packaged in. This means that you don't need to install anything else to use chatd, just run the executable.
10 |
11 | Chatd uses Ollama to run the LLM. Ollama is an LLM server that provides a cross-platform LLM runner API. If you already have an Ollama instance running locally, chatd will automatically use it. Otherwise, chatd will start an Ollama server for you and manage its lifecycle.
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 | ## Quickstart
20 |
21 | 1. Download the latest release from [chatd.ai](https://chatd.ai) or the [releases page](https://github.com/BruceMacD/chatd/releases).
22 | 2. Unzip the downloaded file.
23 | 3. Run the `chatd` executable.
24 |
25 | ## Advanced Setup
26 |
27 | - [Enable GPU support.](docs/enable_gpu_support.md)
28 | - [Select a custom model.](docs/select_a_custom_model.md)
29 |
30 | ## Links
31 |
32 | - [chatd.ai](https://chatd.ai)
33 | - [ollama.ai](https://ollama.ai)
34 |
35 | ## Development
36 |
37 | Run the following commands in the root directory.
38 |
39 | ```bash
40 | npm install
41 | npm run start
42 | ```
43 |
44 | ## Packaging and Distribution
45 |
46 | ### MacOS
47 |
48 | 1. Download the latest `ollama-darwin` release for MacOS from [here](https://github.com/ollama/ollama/releases).
49 | 2. Make the downloaded binary executable: `chmod +x path/to/ollama-darwin`
50 | 3. Copy the `ollama-darwin` executable to the `chatd/src/service/ollama/runners` directory.
51 | 4. Optional: The Electron app needs to be signed to be able to run on MacOS systems other than the one it was compiled on, so you need a developer certificate. To sign the app, set the following environment variables:
52 |
53 | ```bash
54 | APPLE_ID=your_apple_id@example.com
55 | APPLE_IDENTITY="Developer ID Application: Your Name (ABCDEF1234)"
56 | APPLE_ID_PASSWORD=your_apple_id_app_specific_password
57 | APPLE_TEAM_ID=ABCDEF1234
58 | ```
59 |
60 | You can find your Apple ID, Apple Team ID, and Developer ID Application identity in your Apple Developer account. You can create an app-specific password [here](https://appleid.apple.com/account/manage).
61 |
62 | 5. Run `npm run package` to package the app.
63 |
64 | ### Windows
65 |
66 | 1. Download the latest `ollama-windows-amd64.zip` release from [here](https://github.com/ollama/ollama/releases).
67 | 2. Copy the contents of the zip into `chatd/src/service/ollama/runners/`.
68 | 3. Run `npm run package` to package the app.
69 |
70 | Note: The Windows app is not signed, so you will get a warning when you run it.
71 |
72 | ### Linux
73 |
74 | 1. Download the latest `ollama-linux-amd64` release from [here](https://github.com/ollama/ollama/releases).
75 | 2. Copy the `ollama` executable to `chatd/src/service/ollama/runners/ollama-linux`.
76 | 3. Run `npm run package` to package the app.
77 |
--------------------------------------------------------------------------------
/docs/enable_gpu_support.md:
--------------------------------------------------------------------------------
1 | # How do I use my GPU to make chatd faster?
2 |
3 | If you have a GPU it is possible for chatd to use it to speed up the chat response time. Currently only NVIDIA GPUs are supported.
4 |
5 | ## Requirements
6 |
7 | - NVIDIA GPU with CUDA support
8 | - Linux (Windows support is planned)
9 |
10 | ## Enable GPU support
11 |
12 | 1. Download [Ollama](https://www.ollama.ai/download/linux) for Linux:
13 |
14 | ```bash
15 | curl https://ollama.ai/install.sh | sh
16 | ```
17 |
18 | 2. After installing Ollama, chatd will automatically use your GPU to speed up chat response time. You can verify that chatd is connected to Ollama by checking for the `ollama is running` message on the home screen.
19 |
20 |
21 |
--------------------------------------------------------------------------------
/docs/images/architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BruceMacD/chatd/a2160988a7f08ca682cbe4b07345868f7c1bc2d3/docs/images/architecture.png
--------------------------------------------------------------------------------
/docs/images/chatd_ollama_running.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BruceMacD/chatd/a2160988a7f08ca682cbe4b07345868f7c1bc2d3/docs/images/chatd_ollama_running.png
--------------------------------------------------------------------------------
/docs/images/select_a_model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BruceMacD/chatd/a2160988a7f08ca682cbe4b07345868f7c1bc2d3/docs/images/select_a_model.png
--------------------------------------------------------------------------------
/docs/select_a_custom_model.md:
--------------------------------------------------------------------------------
1 | # Select a Custom Model
2 |
3 | By default, chatd manages the AI model for you. This means that chatd will download the model, cache it, and keep it up to date. If you want to use a custom model, you can do so by following the steps below.
4 |
5 | ## Requirements
6 |
7 | - [Ollama](https://ollama.ai/download)
8 |
9 | ## Steps
10 |
11 | 1. Download [Ollama](https://www.ollama.ai/download) for your platform.
12 | 2. Select the model you want to use from the [Ollama library](https://www.ollama.ai/library) page.
13 | 3. Open a terminal and run the following command to download the model:
14 |
15 | ```bash
16 | ollama pull <model_name>
17 | ```
18 |
19 | 4. While Ollama is running, open chatd and click the settings button on the home screen (top right).
20 |
21 |
22 |
23 | 5. Type the name of the model you want to use in the `Model Name` field and click `Save`.
24 |
25 |
26 |
--------------------------------------------------------------------------------
/forge.config.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | rebuildConfig: {},
3 | makers: [
4 | {
5 | name: "@electron-forge/maker-squirrel",
6 | config: {},
7 | },
8 | {
9 | name: "@electron-forge/maker-zip",
10 | platforms: ["darwin"],
11 | },
12 | {
13 | name: "@electron-forge/maker-deb",
14 | config: {},
15 | },
16 | {
17 | name: "@electron-forge/maker-rpm",
18 | config: {},
19 | },
20 | ],
21 | packagerConfig: {
22 | icon: "./public/chatd",
23 | osxSign: {
24 | identity: process.env.APPLE_IDENTITY,
25 | },
26 | osxNotarize: {
27 | tool: "notarytool",
28 | appleId: process.env.APPLE_ID,
29 | appleIdPassword: process.env.APPLE_PASSWORD,
30 | teamId: process.env.APPLE_TEAM_ID,
31 | },
32 | },
33 | };
34 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "chatd",
3 | "productName": "chatd",
4 | "version": "1.0.0",
5 | "description": "Chat with your documents using local AI",
6 | "main": "src/index.js",
7 | "scripts": {
8 | "start": "electron-forge start",
9 | "package": "electron-forge package",
10 | "make": "electron-forge make",
11 | "publish": "electron-forge publish",
12 | "lint": "echo \"No linting configured\""
13 | },
14 | "keywords": [],
15 | "author": "BruceMacD",
16 | "license": "MIT",
17 | "dependencies": {
18 | "@opendocsg/pdf2md": "^0.1.28",
19 | "@orama/orama": "^2.0.7",
20 | "@xenova/transformers": "^2.6.2",
21 | "electron-squirrel-startup": "^1.0.0",
22 | "langchain": "^0.0.170",
23 | "mammoth": "^1.6.0",
24 | "odt2html": "^1.0.1",
25 | "ollama": "^0.4.6",
26 | "winston": "^3.11.0"
27 | },
28 | "devDependencies": {
29 | "@electron-forge/cli": "^6.1.1",
30 | "@electron-forge/maker-deb": "^6.1.1",
31 | "@electron-forge/maker-rpm": "^6.1.1",
32 | "@electron-forge/maker-squirrel": "^6.1.1",
33 | "@electron-forge/maker-zip": "^6.1.1",
34 | "electron": "^24.1.1"
35 | }
36 | }
37 |
--------------------------------------------------------------------------------
/public/chatd.icns:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BruceMacD/chatd/a2160988a7f08ca682cbe4b07345868f7c1bc2d3/public/chatd.icns
--------------------------------------------------------------------------------
/public/chatd.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BruceMacD/chatd/a2160988a7f08ca682cbe4b07345868f7c1bc2d3/public/chatd.ico
--------------------------------------------------------------------------------
/public/chatd.iconset/icon_128x128.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BruceMacD/chatd/a2160988a7f08ca682cbe4b07345868f7c1bc2d3/public/chatd.iconset/icon_128x128.png
--------------------------------------------------------------------------------
/public/chatd.iconset/icon_128x128@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BruceMacD/chatd/a2160988a7f08ca682cbe4b07345868f7c1bc2d3/public/chatd.iconset/icon_128x128@2x.png
--------------------------------------------------------------------------------
/public/chatd.iconset/icon_16x16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BruceMacD/chatd/a2160988a7f08ca682cbe4b07345868f7c1bc2d3/public/chatd.iconset/icon_16x16.png
--------------------------------------------------------------------------------
/public/chatd.iconset/icon_16x16@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BruceMacD/chatd/a2160988a7f08ca682cbe4b07345868f7c1bc2d3/public/chatd.iconset/icon_16x16@2x.png
--------------------------------------------------------------------------------
/public/chatd.iconset/icon_256x256.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BruceMacD/chatd/a2160988a7f08ca682cbe4b07345868f7c1bc2d3/public/chatd.iconset/icon_256x256.png
--------------------------------------------------------------------------------
/public/chatd.iconset/icon_256x256@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BruceMacD/chatd/a2160988a7f08ca682cbe4b07345868f7c1bc2d3/public/chatd.iconset/icon_256x256@2x.png
--------------------------------------------------------------------------------
/public/chatd.iconset/icon_32x32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BruceMacD/chatd/a2160988a7f08ca682cbe4b07345868f7c1bc2d3/public/chatd.iconset/icon_32x32.png
--------------------------------------------------------------------------------
/public/chatd.iconset/icon_32x32@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BruceMacD/chatd/a2160988a7f08ca682cbe4b07345868f7c1bc2d3/public/chatd.iconset/icon_32x32@2x.png
--------------------------------------------------------------------------------
/public/chatd.iconset/icon_512x512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BruceMacD/chatd/a2160988a7f08ca682cbe4b07345868f7c1bc2d3/public/chatd.iconset/icon_512x512.png
--------------------------------------------------------------------------------
/public/chatd.iconset/icon_512x512@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BruceMacD/chatd/a2160988a7f08ca682cbe4b07345868f7c1bc2d3/public/chatd.iconset/icon_512x512@2x.png
--------------------------------------------------------------------------------
/public/chatd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BruceMacD/chatd/a2160988a7f08ca682cbe4b07345868f7c1bc2d3/public/chatd.png
--------------------------------------------------------------------------------
/public/dark-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BruceMacD/chatd/a2160988a7f08ca682cbe4b07345868f7c1bc2d3/public/dark-logo.png
--------------------------------------------------------------------------------
/public/prism/prism.css:
--------------------------------------------------------------------------------
1 | /* PrismJS 1.29.0
2 | https://prismjs.com/download.html#themes=prism-twilight&languages=markup+css+clike+javascript+abap+abnf+actionscript+ada+agda+al+antlr4+apacheconf+apex+apl+applescript+aql+arduino+arff+armasm+arturo+asciidoc+aspnet+asm6502+asmatmel+autohotkey+autoit+avisynth+avro-idl+awk+bash+basic+batch+bbcode+bbj+bicep+birb+bison+bnf+bqn+brainfuck+brightscript+bro+bsl+c+csharp+cpp+cfscript+chaiscript+cil+cilkc+cilkcpp+clojure+cmake+cobol+coffeescript+concurnas+csp+cooklang+coq+crystal+css-extras+csv+cue+cypher+d+dart+dataweave+dax+dhall+diff+django+dns-zone-file+docker+dot+ebnf+editorconfig+eiffel+ejs+elixir+elm+etlua+erb+erlang+excel-formula+fsharp+factor+false+firestore-security-rules+flow+fortran+ftl+gml+gap+gcode+gdscript+gedcom+gettext+gherkin+git+glsl+gn+linker-script+go+go-module+gradle+graphql+groovy+haml+handlebars+haskell+haxe+hcl+hlsl+hoon+http+hpkp+hsts+ichigojam+icon+icu-message-format+idris+ignore+inform7+ini+io+j+java+javadoc+javadoclike+javastacktrace+jexl+jolie+jq+jsdoc+js-extras+json+json5+jsonp+jsstacktrace+js-templates+julia+keepalived+keyman+kotlin+kumir+kusto+latex+latte+less+lilypond+liquid+lisp+livescript+llvm+log+lolcode+lua+magma+makefile+markdown+markup-templating+mata+matlab+maxscript+mel+mermaid+metafont+mizar+mongodb+monkey+moonscript+n1ql+n4js+nand2tetris-hdl+naniscript+nasm+neon+nevod+nginx+nim+nix+nsis+objectivec+ocaml+odin+opencl+openqasm+oz+parigp+parser+pascal+pascaligo+psl+pcaxis+peoplecode+perl+php+phpdoc+php-extras+plant-uml+plsql+powerquery+powershell+processing+prolog+promql+properties+protobuf+pug+puppet+pure+purebasic+purescript+python+qsharp+q+qml+qore+r+racket+cshtml+jsx+tsx+reason+regex+rego+renpy+rescript+rest+rip+roboconf+robotframework+ruby+rust+sas+sass+scss+scala+scheme+shell-session+smali+smalltalk+smarty+sml+solidity+solution-file+soy+sparql+splunk-spl+sqf+sql+squirrel+stan+stata+iecst+stylus+supercollider+swift+systemd+t4-templating+t4-cs+t4-vb+tap+tcl+tt2+textile+toml+tremor+turtle+twig+typescript+typoscript+unrealscript+u
orazor+uri+v+vala+vbnet+velocity+verilog+vhdl+vim+visual-basic+warpscript+wasm+web-idl+wgsl+wiki+wolfram+wren+xeora+xml-doc+xojo+xquery+yaml+yang+zig&plugins=toolbar+copy-to-clipboard */
3 | code[class*=language-],pre[class*=language-]{color:#fff;background:0 0;font-family:Consolas,Monaco,'Andale Mono','Ubuntu Mono',monospace;font-size:1em;text-align:left;text-shadow:0 -.1em .2em #000;white-space:pre;word-spacing:normal;word-break:normal;word-wrap:normal;line-height:1.5;-moz-tab-size:4;-o-tab-size:4;tab-size:4;-webkit-hyphens:none;-moz-hyphens:none;-ms-hyphens:none;hyphens:none}:not(pre)>code[class*=language-],pre[class*=language-]{background:#141414}pre[class*=language-]{border-radius:.5em;border:.3em solid #545454;box-shadow:1px 1px .5em #000 inset;margin:.5em 0;overflow:auto;padding:1em}pre[class*=language-]::-moz-selection{background:#27292a}pre[class*=language-]::selection{background:#27292a}code[class*=language-] ::-moz-selection,code[class*=language-]::-moz-selection,pre[class*=language-] ::-moz-selection,pre[class*=language-]::-moz-selection{text-shadow:none;background:hsla(0,0%,93%,.15)}code[class*=language-] ::selection,code[class*=language-]::selection,pre[class*=language-] ::selection,pre[class*=language-]::selection{text-shadow:none;background:hsla(0,0%,93%,.15)}:not(pre)>code[class*=language-]{border-radius:.3em;border:.13em solid #545454;box-shadow:1px 1px .3em -.1em #000 inset;padding:.15em .2em .05em;white-space:normal}.token.cdata,.token.comment,.token.doctype,.token.prolog{color:#777}.token.punctuation{opacity:.7}.token.namespace{opacity:.7}.token.boolean,.token.deleted,.token.number,.token.tag{color:#ce6849}.token.builtin,.token.constant,.token.keyword,.token.property,.token.selector,.token.symbol{color:#f9ed99}.language-css .token.string,.style .token.string,.token.attr-name,.token.attr-value,.token.char,.token.entity,.token.inserted,.token.operator,.token.string,.token.url,.token.variable{color:#909e6a}.token.atrule{color:#7385a5}.token.important,.token.regex{color:#e8c062}.token.bold,.token.important{font-weight:700}.token.italic{font-style:italic}.token.entity{cursor:help}.language-markup .token.attr-name,.language-markup 
.token.punctuation,.language-markup .token.tag{color:#ac885c}.token{position:relative;z-index:1}.line-highlight.line-highlight{background:hsla(0,0%,33%,.25);background:linear-gradient(to right,hsla(0,0%,33%,.1) 70%,hsla(0,0%,33%,0));border-bottom:1px dashed #545454;border-top:1px dashed #545454;margin-top:.75em;z-index:0}.line-highlight.line-highlight:before,.line-highlight.line-highlight[data-end]:after{background-color:#8693a6;color:#f4f1ef}
4 | div.code-toolbar{position:relative}div.code-toolbar>.toolbar{position:absolute;z-index:10;top:.3em;right:.2em;transition:opacity .3s ease-in-out;opacity:0}div.code-toolbar:hover>.toolbar{opacity:1}div.code-toolbar:focus-within>.toolbar{opacity:1}div.code-toolbar>.toolbar>.toolbar-item{display:inline-block}div.code-toolbar>.toolbar>.toolbar-item>a{cursor:pointer}div.code-toolbar>.toolbar>.toolbar-item>button{background:0 0;border:0;color:inherit;font:inherit;line-height:normal;overflow:visible;padding:0;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none}div.code-toolbar>.toolbar>.toolbar-item>a,div.code-toolbar>.toolbar>.toolbar-item>button,div.code-toolbar>.toolbar>.toolbar-item>span{color:#bbb;font-size:.8em;padding:0 .5em;background:#f5f2f0;background:rgba(224,224,224,.2);box-shadow:0 2px 0 0 rgba(0,0,0,.2);border-radius:.5em}div.code-toolbar>.toolbar>.toolbar-item>a:focus,div.code-toolbar>.toolbar>.toolbar-item>a:hover,div.code-toolbar>.toolbar>.toolbar-item>button:focus,div.code-toolbar>.toolbar>.toolbar-item>button:hover,div.code-toolbar>.toolbar>.toolbar-item>span:focus,div.code-toolbar>.toolbar>.toolbar-item>span:hover{color:inherit;text-decoration:none}
5 |
--------------------------------------------------------------------------------
/screenshots/chat_screen.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BruceMacD/chatd/a2160988a7f08ca682cbe4b07345868f7c1bc2d3/screenshots/chat_screen.png
--------------------------------------------------------------------------------
/screenshots/home_screen.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BruceMacD/chatd/a2160988a7f08ca682cbe4b07345868f7c1bc2d3/screenshots/home_screen.png
--------------------------------------------------------------------------------
/screenshots/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BruceMacD/chatd/a2160988a7f08ca682cbe4b07345868f7c1bc2d3/screenshots/logo.png
--------------------------------------------------------------------------------
/screenshots/welcome_screen.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BruceMacD/chatd/a2160988a7f08ca682cbe4b07345868f7c1bc2d3/screenshots/welcome_screen.png
--------------------------------------------------------------------------------
/src/api.js:
--------------------------------------------------------------------------------
1 | const { Worker } = require('worker_threads');
2 | const { dialog } = require("electron");
3 | const path = require("path");
4 | const { embed } = require("./service/embedding.js");
5 | const {
6 | store,
7 | search,
8 | clearVectorStore,
9 | vectorStoreSize,
10 | } = require("./service/vector.js");
11 | const {
12 | abort,
13 | run,
14 | chat,
15 | stop,
16 | serve,
17 | } = require("./service/ollama/ollama.js");
18 |
// Name of the Ollama model used for chat; changed at runtime via setModel.
let model = "mistral";
// True while a document is being parsed/embedded on the worker thread; used
// by sendChat to warn the user that answers may lack full document context.
let loadingDoc = false;
21 |
/**
 * Log a message to the console, but only when global debug mode is enabled.
 * @param {*} msg - value to log
 */
function debugLog(msg) {
  if (!global.debug) {
    return;
  }
  console.log(msg);
}
27 |
/**
 * Set the model name used for subsequent chats.
 * @param {*} event - unused; present to match the IPC handler signature
 * @param {string} msg - the model name to switch to (e.g. "mistral")
 */
async function setModel(event, msg) {
  model = msg;
}
31 |
/**
 * Reply to the renderer with the currently selected model name on "model:get".
 * @param {*} event - IPC event whose reply channel receives the model name
 */
async function getModel(event) {
  event.reply("model:get", { success: true, content: model });
}
35 |
/**
 * Load the active model into Ollama's memory (downloading it first if
 * needed) and stream progress updates to the renderer on "ollama:run".
 * @param {*} event - IPC event used for progress replies
 * @param {*} msg - unused; present to match the IPC handler signature
 */
async function runOllamaModel(event, msg) {
  const reply = (payload) => event.reply("ollama:run", payload);
  try {
    // Send an empty message to the model to load it into memory.
    await run(model, (update) => {
      // A status field is present while the model is being downloaded.
      const status = update.status;
      if (status && status.includes("pulling")) {
        const percent = Math.round((update.completed / update.total) * 100);
        const content = isNaN(percent)
          ? "Downloading AI model..."
          : `Downloading AI model... ${percent}%`;
        reply({ success: true, content: content });
        return;
      }
      if (status && status.includes("verifying")) {
        reply({ success: true, content: `Verifying AI model...` });
        return;
      }
      if (update.done) {
        // Model is loaded; the renderer switches to the chat view on this.
        reply({ success: true, content: update });
        return;
      }
      reply({ success: true, content: "Initializing..." });
    });
  } catch (err) {
    console.log(err);
    reply({ success: false, content: err.message });
  }
}
67 |
/**
 * Send a chat message to the model, augmenting the prompt with relevant
 * document context when a document has been loaded, and stream the model's
 * response back to the renderer on "chat:reply".
 * @param {*} event - IPC event used for streaming replies
 * @param {string} msg - the user's chat message
 */
async function sendChat(event, msg) {
  let prompt = msg;
  const size = await vectorStoreSize();
  if (size > 0) {
    // Embed the user message and retrieve the most similar document chunks.
    const msgEmbeds = await embed({
      data: [
        {
          section: "",
          content: [msg],
        },
      ],
    });
    const searchResults = await search(msgEmbeds[0].embedding, 20);
    // format the system context search results
    let documentString = searchResults.join("\n\n");
    // Ensure the contextString does not exceed 500 characters
    if (documentString.length > 500) {
      documentString = documentString.substring(0, 497) + "...";
    }
    // Fixed: the prompt text referred to `document` and `user` html blocks but
    // never emitted the delimiters, and contained a duplicated "is is" typo.
    prompt = `Using the provided document, answer the user question to the best of your ability. You must try to use information from the provided document. Combine information in the document into a coherent answer.
If there is nothing in the document relevant to the user question, say "Hmm, I don't see anything about that in this document." before providing any other information you know.
Anything between the following \`document\` html blocks is retrieved from a knowledge bank, not part of the conversation with the user.
<document>
${documentString}
</document>

If there is no relevant information within the document, say "Hmm, I don't see anything about that in this document." before providing any other information you know. Anything between the preceding 'document' html blocks is retrieved from a knowledge bank, not part of the conversation with the user.

Anything between the following \`user\` html blocks is part of the conversation with the user.
<user>
${msg}
</user>
`;
  }
  if (loadingDoc) {
    // Fixed grammar in the instruction string: "still process" -> "still processing".
    prompt += "Start your response by saying some variation on 'The document is still processing, but I will answer to the best of my abilities.'.";
  }
  try {
    debugLog("Sending prompt to Ollama...");
    debugLog(prompt);
    await chat(model, prompt, (json) => {
      // Reply with the content every time we receive data
      event.reply("chat:reply", { success: true, content: json });
    });
  } catch (err) {
    console.log(err);
    event.reply("chat:reply", { success: false, content: err.message });
  }
}
117 |
/**
 * Abort any in-flight chat generation request.
 */
async function stopChat() {
  await abort();
}
121 |
/**
 * Prompt the user for a document, then parse and embed it on a worker thread.
 * Any previously stored document vectors are cleared first.
 * @param {*} event - IPC event used to report load status on "doc:load"
 */
async function loadDocument(event) {
  loadingDoc = true;
  try {
    clearVectorStore();
    const selectedPath = await selectDocumentFile();
    debugLog(`Loading file: ${selectedPath}`);
    processDocument(selectedPath, event);
  } catch (error) {
    handleDocumentLoadError(error, event);
  }
}
133 |
/**
 * Show an open-file dialog restricted to the supported document types.
 * @returns {Promise<string>} absolute path of the chosen file
 * @throws {Error} when the user cancels or selects nothing
 */
async function selectDocumentFile() {
  const supportedExtensions = ["docx", "md", "odt", "pdf", "txt", "html", "htm"];
  const result = await dialog.showOpenDialog({
    properties: ["openFile"],
    filters: [{ name: "Text Files", extensions: supportedExtensions }],
  });

  if (result.canceled || result.filePaths.length === 0) {
    throw new Error("No file selected");
  }

  return result.filePaths[0];
}
147 |
/**
 * Parse and embed the document at filePath on a worker thread, store the
 * resulting embeddings, and notify the renderer on "doc:load".
 * @param {string} filePath - path of the document to process
 * @param {*} event - IPC event used to report the result
 */
function processDocument(filePath, event) {
  const worker = new Worker("./src/service/worker.js");
  worker.postMessage(filePath);

  worker.on("message", async (result) => {
    if (!result.success) {
      event.reply("doc:load", { success: false, content: result.content });
      loadingDoc = false;
      return;
    }
    debugLog("Storing embeddings...");
    await store(result.embeddings);
    debugLog("Embeddings stored");
    event.reply("doc:load", {
      success: true,
      content: path.basename(filePath),
    });
    loadingDoc = false;
  });

  worker.on("error", (err) => handleDocumentLoadError(err, event));
}
167 |
/**
 * Reset document-loading state and report a load failure to the renderer.
 * @param {Error} err - the error raised while loading the document
 * @param {*} event - IPC event used to report the failure on "doc:load"
 */
function handleDocumentLoadError(err, event) {
  loadingDoc = false;
  // Fixed: errors belong on stderr; console.log sent them to stdout.
  console.error('Error:', err);
  event.reply("doc:load", { success: false, content: err.message });
}
173 |
/**
 * Start (or connect to) the Ollama server and tell the renderer how it was
 * started on "ollama:serve" ("system" means an already-running external
 * instance was reused).
 * @param {*} event - IPC event used for the reply
 */
async function serveOllama(event) {
  try {
    const serveType = await serve();
    event.reply("ollama:serve", { success: true, content: serveType });
  } catch (err) {
    event.reply("ollama:serve", { success: false, content: err.message });
  }
}
182 |
/**
 * Stop the Ollama server managed by this app.
 * @param {*} event - unused; present to match the IPC handler signature
 */
function stopOllama(event) {
  stop();
}
186 |
// IPC handlers exposed to the main process (src/index.js) for wiring to
// ipcMain channels.
module.exports = {
  setModel,
  getModel,
  stopChat,
  sendChat,
  loadDocument,
  serveOllama,
  runOllamaModel,
  stopOllama,
};
197 |
--------------------------------------------------------------------------------
/src/client.js:
--------------------------------------------------------------------------------
// This script handles interaction with the user interface, as well as communication
// between the renderer thread (UI) and the worker thread (processing).

// Cached references to the static DOM elements the renderer manipulates.
const userInput = document.getElementById("user-input-text");
const historyContainer = document.getElementById("history");
const openFileButton = document.getElementById("file-open");
const fileButtonText = document.getElementById("file-button-text");
// NOTE(review): "initalSpinner" is a typo for "initialSpinner"; left as-is
// because it may be referenced elsewhere in this file.
const initalSpinner = document.getElementById("spinner");
const statusMsg = document.getElementById("status-msg");
const settingsIcon = document.getElementById("settings-icon");
const statusContainer = document.getElementById("status-container");
const stopRequestContainer = document.getElementById("stop-request-container");
const stopRequestBtn = document.getElementById("stop-request-btn");
const chatView = document.getElementById("chat-view");
const settingsView = document.getElementById("settings-view");
const settingsCancelBtn = document.getElementById("cancel-btn");
const settingsSaveBtn = document.getElementById("save-btn");
const modelSelectInput = document.getElementById("model-select");

// Element that accumulates the streamed model response for the current reply.
let responseElem;
21 |
22 | /**
23 | * This is the initial chain of events that must run on start-up.
24 | * 1. Start the Ollama server.
25 | * 2. Run the model. This will load the model into memory so that first chat is not slow.
26 | * This step will also download the model if it is not already downloaded.
27 | * 3. Monitor the run status
28 | * 4. Load the chat
29 | */
30 |
31 | // 1. Start the Ollama server
32 | window.electronAPI.serveOllama();
33 | // 2. Run the model
34 | window.electronAPI.onOllamaServe((event, data) => {
35 | if (!data.success) {
36 | initalSpinner.style.display = "none";
37 | statusMsg.textContent =
38 | "Error: " + (data.content || "Unknown error occurred.");
39 | return;
40 | }
41 | if (data.content === "system") {
42 | // Ollama was already running, and we just connected to it, let the user know
43 | document.getElementById("status-container").style.display = "flex";
44 | settingsIcon.style.display = "inline-block";
45 | }
46 | window.electronAPI.runOllama();
47 | });
48 | // 3. Monitor the run status
49 | window.electronAPI.onOllamaRun((event, data) => {
50 | if (!data.success) {
51 | initalSpinner.style.display = "none";
52 | statusMsg.textContent = "Error: " + data.content;
53 | return;
54 | }
55 | if (data.content.done) {
56 | // 4. Load the chat
57 | document.getElementById("initial-view").style.display = "none";
58 | chatView.style.display = "block";
59 | userInput.focus();
60 | return;
61 | }
62 | statusMsg.textContent = data.content;
63 | });
64 |
65 | // Update the display when a document is loaded
66 | window.electronAPI.onDocumentLoaded((event, data) => {
67 | document.getElementById("file-spinner").style.display = "none";
68 | fileButtonText.innerText = data.content; // change the button to say the name of the document
69 | userInput.focus();
70 | });
71 |
// Send chat on Enter key (Shift+Enter is left alone so it inserts a newline).
userInput.addEventListener("keydown", function (event) {
  if (event.key === "Enter" && !event.shiftKey) {
    event.preventDefault();
    statusContainer.style.display = "none"; // once the first chat is sent, hide the initial status message
    settingsIcon.style.display = "none"; // once the first chat is sent, hide the settings icon
    stopRequestContainer.style.display = "flex";
    // Disable input while processing
    userInput.disabled = true;
    userInput.placeholder = "";

    const message = userInput.value;
    userInput.value = "";
    userInput.style.height = ""; // reset the height of the input box

    // Create a new text block echoing the user's message into the history
    const historyMessage = document.createElement("div");
    historyMessage.className = "history-user-message";
    historyMessage.innerText = message;
    historyContainer.appendChild(historyMessage);

    // Add the element that will display the response (module-level so the
    // streaming onChatReply handler can append to it)
    responseElem = document.createElement("div");
    responseElem.className = "history-chat-response";
    historyContainer.appendChild(responseElem);

    // Add loading animation (three bouncing dots, removed on first reply)
    const loadingAnimation = document.createElement("div");
    loadingAnimation.className = "dots-loading";
    for (let i = 0; i < 3; i++) {
      const dot = document.createElement("div");
      loadingAnimation.appendChild(dot);
    }
    responseElem.appendChild(loadingAnimation);

    // Send chat to Ollama server
    window.electronAPI.sendChat(message);
    chatView.scrollTop = chatView.scrollHeight;
    // The response will be received in the onChatReply event
  }
});
113 |
// Receive streamed chat response chunks from the Ollama server. Each `data`
// carries either an error, a partial message, or a final chunk with
// `content.done` set.
window.electronAPI.onChatReply((event, data) => {
  // clear loading animation (present only until the first chunk arrives)
  const loadingDots = responseElem.querySelector(".dots-loading");
  if (loadingDots) {
    loadingDots.remove();
  }

  if (!data.success) {
    if (data.content !== "The operation was aborted.") {
      // Don't display an error if the user stopped the request
      responseElem.innerText = "Error: " + data.content;
    }
    stopRequestContainer.style.display = "none";
    userInput.disabled = false;
    userInput.focus();
    return;
  }

  if (data.content.message.content) {
    displayResponse(data.content.message.content);
  }

  if (data.content.done) {
    // The chat is done, remove the stop request button and re-enable input
    stopRequestContainer.style.display = "none";
    userInput.disabled = false;
    userInput.focus();
  }

  // Check if the view is already at the bottom of the content
  const isAtBottom =
    chatView.scrollTop + chatView.clientHeight >= chatView.scrollHeight - 50; // 50px tolerance

  // If they're at the bottom, auto-scroll to keep following the stream;
  // otherwise leave the user's scroll position alone.
  if (isAtBottom) {
    chatView.scrollTop = chatView.scrollHeight;
  }
});
153 |
// Streamed response text not yet flushed to the DOM, plus a flag tracking
// whether we are currently inside a fenced (```) Markdown/code block.
let responseBuffer = '';
let isBufferingMarkdown = false;

/**
 * Accumulate a streamed response chunk and flush it to the chat view.
 * Plain text is appended directly; anything inside a ``` fence is routed
 * through processMarkdownResponse() for Prism highlighting. A trailing
 * lone backtick is held back in the buffer in case it is the start of a
 * fence split across chunks.
 * @param {string} response - The next text token from the Ollama stream.
 */
function displayResponse(response) {
  responseBuffer += response;

  if (isBufferingMarkdown || responseBuffer.includes('```')) {
    processMarkdownResponse();
  } else if (!responseBuffer.endsWith('`')) {
    // Safe to flush as plain text. (The original condition also checked
    // `response.done`, but `response` is a string, so that property was
    // always undefined — dead code, removed with no behavior change.)
    displayRegularText(responseBuffer);
    responseBuffer = '';
  }
}
169 |
/**
 * Append plain (non-Markdown) text to the current response element as a
 * text node, so the content is never interpreted as HTML.
 * @param {string} text - Text to append verbatim.
 */
function displayRegularText(text) {
  responseElem.appendChild(document.createTextNode(text));
}
174 |
// Render the buffered stream while inside a fenced (```) block: on entry,
// flush any plain text before the fence and create a <pre><code> element;
// then keep re-rendering the buffer into that element (with Prism
// highlighting) until the closing fence arrives.
function processMarkdownResponse() {
  if (!isBufferingMarkdown) {
    // Write out any text before the Markdown block
    const splitIndex = responseBuffer.indexOf('```');
    const textBeforeMarkdown = responseBuffer.substring(0, splitIndex);
    displayRegularText(textBeforeMarkdown);
    // Keep the buffer from the opening ``` onward (the fence itself stays in
    // the buffer and is rendered inside the code element)
    responseBuffer = responseBuffer.substring(splitIndex);

    // Handle the start of a Markdown block
    const markdownElem = document.createElement('pre');
    const codeElem = document.createElement('code');
    codeElem.className = 'language-markdown';
    markdownElem.appendChild(codeElem);
    responseElem.appendChild(markdownElem);

    isBufferingMarkdown = true;
  }

  // Update Markdown content and apply highlighting
  // (isBufferingMarkdown is always true at this point — it was either true on
  // entry or was just set above)
  if (isBufferingMarkdown) {
    let contentAfterMarkdown = ''; // this will store any content after the closing ``` if there is any
    // Check if there is more than one occurrence of '```', which indicates the end of the Markdown block
    if (responseBuffer.match(/```/g)?.length > 1) {
      // Clear the buffer for the next content after the closing ```
      const endIndex = responseBuffer.lastIndexOf('```') + 3;
      contentAfterMarkdown = responseBuffer.substring(endIndex);
      responseBuffer = responseBuffer.substring(0, endIndex); // cut off the content after the closing ```
      isBufferingMarkdown = false;
    }

    // Update the Markdown content in the most recently created code element
    const markdownElems = document.querySelectorAll('pre > .language-markdown');
    const lastMarkdownElem = markdownElems[markdownElems.length - 1];
    lastMarkdownElem.textContent = responseBuffer;
    Prism.highlightElement(lastMarkdownElem);

    // if the Markdown block is done, append any content after the closing ```
    if (!isBufferingMarkdown) {
      displayRegularText(contentAfterMarkdown);
      responseBuffer = '';
    }
  }
}
219 |
// Open file dialog: show the loading spinner until the "doc:load" reply
// arrives (handled by onDocumentLoaded above).
openFileButton.addEventListener("click", () => {
  document.getElementById("file-open-icon").style.display = "none";
  document.getElementById("file-spinner").style.display = "inline-block";
  fileButtonText.innerText = "Loading...";
  window.electronAPI.loadDocument();
});

// Stop request button that appears while a request is in progress: abort the
// chat in the main process and re-enable input immediately.
stopRequestBtn.addEventListener("click", () => {
  window.electronAPI.stopChat();
  stopRequestContainer.style.display = "none";
  userInput.disabled = false;
  userInput.focus();
});
235 |
// Settings (gear) icon: ask the main process for the current model. The
// settings view is shown when the "model:get" reply arrives (see onModelGet
// below). Note: getModel() is a fire-and-forget ipcRenderer.send and returns
// undefined, so the original `modelSelectInput.value = ...getModel()`
// assignment only ever wrote undefined; the reply handler is what actually
// populates the input.
settingsIcon.addEventListener("click", () => {
  window.electronAPI.getModel();
});
240 |
// A modelGet response means the settings view should be displayed, it is checking what the current loaded model is
window.electronAPI.onModelGet((event, data) => {
  if (!data.success) {
    // NOTE(review): on failure `data.content` is an error message, yet it is
    // still written into the model input below — confirm this is intended.
    console.log("Error getting model: " + data.content);
  }
  modelSelectInput.value = data.content;
  chatView.style.display = "none";
  settingsView.style.display = "flex";
});

// Cancel button in the settings view: discard the input and return to chat.
settingsCancelBtn.addEventListener("click", () => {
  chatView.style.display = "block";
  settingsView.style.display = "none";
});

// Save button in the settings view: send the chosen model to the main
// process, then return to chat.
settingsSaveBtn.addEventListener("click", () => {
  window.electronAPI.setModel(modelSelectInput.value);
  chatView.style.display = "block";
  settingsView.style.display = "none";
});

// Auto-resize the input box to fit the text as the user types.
userInput.addEventListener("input", function () {
  this.style.height = "auto";
  this.style.height = this.scrollHeight + "px";
  chatView.scrollTop = chatView.scrollHeight; // scroll to bottom of the screen
});
270 |
--------------------------------------------------------------------------------
/src/index.css:
--------------------------------------------------------------------------------
1 | * {
2 | padding: 0;
3 | margin: 0;
4 | box-sizing: border-box;
5 | font-family: 'Roboto', sans-serif;
6 | }
7 |
8 | html,
9 | body {
10 | height: 100%;
11 | background-color: #181818;
12 | color: #C8C8C8;
13 | }
14 |
15 | body {
16 | display: flex;
17 | justify-content: center;
18 | align-items: center;
19 | }
20 |
21 | textarea {
22 | width: 100%;
23 | padding: 20px;
24 | background-color: #181818;
25 | margin-bottom: 16px;
26 | font-size: 1rem;
27 | color: #C8C8C8;
28 | border: none;
29 | resize: none; /* This will prevent users from resizing the textarea manually */
30 | overflow-y: hidden; /* In case the text exceeds the height */
31 | height: auto; /* This will ensure the height changes according to the content */
32 | }
33 |
34 | textarea:focus {
35 | border: none;
36 | outline: none;
37 | }
38 |
39 | button {
40 | background-color: transparent;
41 | border: 1px solid #C8C8C8;
42 | color: #C8C8C8;
43 | padding: 8px 16px;
44 | font-size: 1rem;
45 | border-radius: 4px;
46 | cursor: pointer;
47 | transition: background-color 0.3s ease;
48 | }
49 |
50 | button:hover {
51 | background-color: #2a2a2a;
52 | }
53 |
54 | button:active {
55 | background-color: #1e1e1e;
56 | }
57 |
58 | button:focus {
59 | outline: none;
60 | box-shadow: 0 0 0 2px #656565;
61 | }
62 |
63 | #status-msg {
64 | padding-top: 5%;
65 | }
66 |
67 | .chat {
68 | width: 100%;
69 | padding: 20px;
70 | padding-top: 40px;
71 | }
72 |
73 | .history {
74 | width: 100%;
75 | box-shadow: 0 2px 4px rgba(0, 0, 0, 0.2);
76 | }
77 |
78 | .history-user-message {
79 | width: 100%;
80 | padding: 20px;
81 | background-color: #181818;
82 | }
83 |
84 | .history-chat-response {
85 | width: 100%;
86 | border-radius: 4px;
87 | background-color: #202020;
88 | padding: 20px;
89 | box-shadow: 0 2px 4px rgba(0, 0, 0, 0.2);
90 | white-space: pre-wrap;
91 | }
92 |
93 | .user-message {
94 | width: 100%;
95 | background-color: #202020;
96 | padding: 20px;
97 | }
98 |
99 | .user-input {
100 | width: 100%;
101 | }
102 |
103 | #user-input-text:focus {
104 | border: none;
105 | outline: none;
106 | }
107 |
108 | #chat-view {
109 | display: none;
110 | position: relative;
111 | width: 100%;
112 | height: 100%;
113 | overflow-y: scroll;
114 | }
115 |
116 | #chat-view::before {
117 | content: "";
118 | position: fixed;
119 | top: 0;
120 | left: 0;
121 | right: 0;
122 | height: 5%;
123 | background: linear-gradient(to bottom, rgba(24, 24, 24, 1), rgba(24, 24, 24, 0));
124 | z-index: 999;
125 | pointer-events: none;
126 | overflow-y: auto;
127 | }
128 |
/* override Prism markdown styles */
130 | pre[class*=language-] {
131 | border: none !important;
132 | box-shadow: none !important;
133 | }
134 |
135 | #settings-view {
136 | display: none;
137 | width: 100%;
138 | height: 100%;
139 | justify-content: center;
140 | align-items: center;
141 | }
142 |
143 | .settings-content {
144 | width: 60%;
145 | display: flex;
146 | flex-direction: column;
147 | gap: 16px;
148 | }
149 |
150 | #model-label {
151 | color: #C8C8C8;
152 | font-size: 0.9rem;
153 | }
154 |
155 | #model-select {
156 | padding: 10px;
157 | background-color: #202020;
158 | border: 1px solid #C8C8C8;
159 | color: #C8C8C8;
160 | border-radius: 4px;
161 | font-size: 1rem;
162 | width: 100%;
163 | }
164 |
165 | #model-select:focus {
166 | outline: none;
167 | }
168 |
169 | .button-group {
170 | display: flex;
171 | justify-content: flex-end;
172 | }
173 |
174 | #cancel-btn {
175 | background-color: #C8C8C8;
176 | color: #202020;
177 | margin-right: 4px;
178 | }
179 |
180 | #cancel-btn:hover {
181 | background-color: #B0B0B0;
182 | color: #1a1a1a;
183 | }
184 |
185 | #initial-view {
186 | display: flex;
187 | flex-direction: column;
188 | align-items: center;
189 | justify-content: center;
190 | height: 100vh;
191 | text-align: center;
192 | }
193 |
194 | #app-logo {
195 | max-width: 50%;
196 | height: auto;
197 | margin-bottom: 20px;
198 | }
199 |
200 | #file-open {
201 | background-color: rgba(32, 32, 32, 0.85);
202 | border: 0.5px solid rgba(200, 200, 200, 0.8);
203 | cursor: pointer;
204 | padding: 2.5px 5px;
205 | display: flex;
206 | align-items: center;
207 | gap: 8px;
208 | position: fixed;
209 | top: 10px;
210 | left: 50%;
211 | transform: translateX(-50%);
212 | z-index: 1000; /* To make sure the button is always on top of the chat view */
213 | }
214 |
215 | #file-open svg {
216 | width: 18px;
217 | height: 18px;
218 | display: block;
219 | }
220 |
221 | #file-open svg {
222 | display: block;
223 | }
224 |
225 | #file-open:active svg rect {
226 | fill: #1e1e1e;
227 | }
228 |
229 | #file-open:focus {
230 | outline: none;
231 | box-shadow: 0 0 0 2px #656565;
232 | }
233 |
234 | #file-open:hover {
235 | background-color: #C8C8C8;
236 | color: #181818;
237 | }
238 |
239 | #file-open:hover .folder-shape {
240 | fill: #181818;
241 | }
242 |
243 | #file-open:hover .plus-sign {
244 | stroke: #C8C8C8;
245 | }
246 |
247 | #file-button-text {
248 | font-size: 0.8rem;
249 | }
250 |
251 | #file-open-err-msg {
252 | padding-top: 10px;
253 | }
254 |
255 | #settings-icon {
256 | display: none;
257 | position: fixed;
258 | top: 10px;
259 | right: 10px;
260 | cursor: pointer;
261 | z-index: 1001;
262 | width: 16px;
263 | height: 16px;
264 | }
265 |
266 | #stop-request-container {
267 | display: none;
268 | align-items: center;
269 | gap: 5px;
270 | position: fixed;
271 | bottom: 10px;
272 | left: 50%;
273 | transform: translateX(-50%);
274 | font-size: 0.75rem;
275 | }
276 |
277 | #stop-request-btn {
278 | font-size: 0.75rem;
279 | background-color: rgba(32, 32, 32, 0.85);
280 | }
281 |
282 | #status-container {
283 | display: none;
284 | align-items: center;
285 | gap: 5px;
286 | position: fixed;
287 | bottom: 10px;
288 | left: 50%;
289 | transform: translateX(-50%);
290 | font-size: 0.75rem;
291 | }
292 |
293 | /* Loading Animation Styles */
294 | .dots-loading {
295 | display: flex;
296 | justify-content: center;
297 | align-items: center;
298 | }
299 |
300 | .dots-loading>div {
301 | width: 4px;
302 | height: 4px;
303 | background-color: #C8C8C8;
304 | border-radius: 50%;
305 | margin: 0 2px;
306 | animation: bounce 0.6s infinite alternate;
307 | }
308 |
309 | .dots-loading>div:nth-child(2) {
310 | animation-delay: 0.2s;
311 | }
312 |
313 | .dots-loading>div:nth-child(3) {
314 | animation-delay: 0.4s;
315 | }
316 |
317 | @keyframes bounce {
318 | to {
319 | transform: translateY(-6px);
320 | }
321 | }
322 |
323 | #spinner {
324 | border: 2px solid #F8F8F8;
325 | border-radius: 50%;
326 | border-top: 4px solid #181818;
327 | width: 20px;
328 | height: 20px;
329 | animation: spin 1s linear infinite;
330 | }
331 |
332 | #file-spinner {
333 | border: 2px solid #181818;
334 | border-radius: 50%;
335 | border-top: 2px solid #656565;
336 | width: 10px;
337 | height: 10px;
338 | animation: spin 1s linear infinite;
339 | display: none;
340 | }
341 |
342 | @keyframes spin {
343 | 0% {
344 | transform: rotate(0deg);
345 | }
346 |
347 | 100% {
348 | transform: rotate(360deg);
349 | }
350 | }
--------------------------------------------------------------------------------
/src/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | chatd
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
42 |
43 |
44 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
66 | ollama is running
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
--------------------------------------------------------------------------------
/src/index.js:
--------------------------------------------------------------------------------
1 | const {
2 | app,
3 | dialog,
4 | BrowserWindow,
5 | session,
6 | ipcMain,
7 | autoUpdater,
8 | } = require("electron");
9 | const os = require("os");
10 | const path = require("path");
11 | const winston = require("winston");
12 | const {
13 | getModel,
14 | setModel,
15 | sendChat,
16 | stopChat,
17 | serveOllama,
18 | stopOllama,
19 | loadDocument,
20 | runOllamaModel,
21 | } = require("./api.js");
22 |
// When debug is set to true, the app will log debug messages to the console
// This will be turned on by default when running the app in non-packaged mode
global.debug = false;

const appVersion = app.getVersion();
const osType = os.type(); // e.g., 'Darwin', 'Windows_NT', etc.
const osArch = os.arch(); // e.g., 'x64', 'ia32', etc.
// Update feed carries version/OS/arch so the server can serve the right build.
const updateURL = `https://chatd.ai/api/update?version=${appVersion}&os=${osType}&arch=${osArch}`;
// Main-process logger: mirrors output to the console and to ~/.chatd/app.log.
const logger = winston.createLogger({
  format: winston.format.simple(),
  transports: [
    new winston.transports.Console(),
    new winston.transports.File({
      filename: path.join(app.getPath("home"), ".chatd", "app.log"),
      // winston's File transport expects lowercase `maxsize`; the previous
      // `maxSize` spelling was silently ignored, so the log never rotated.
      maxsize: 1000000, // 1 MB
      maxFiles: 1,
    }),
  ],
});

// Handle creating/removing shortcuts on Windows when installing/uninstalling.
if (require("electron-squirrel-startup")) {
  app.quit();
}
47 |
// Create the main application window and load the UI. When running
// unpackaged (development), also enable debug logging and open DevTools.
const createWindow = () => {
  const mainWindow = new BrowserWindow({
    width: 800,
    height: 600,
    webPreferences: {
      // preload.js exposes the IPC bridge (window.electronAPI) to the renderer
      preload: path.join(__dirname, "preload.js"),
    },
    autoHideMenuBar: true,
  });

  mainWindow.loadFile(path.join(__dirname, "index.html"));

  if (!app.isPackaged) {
    global.debug = true;
    mainWindow.webContents.openDevTools();
  }
};
65 |
app.on("ready", () => {
  // Add a handler for the interprocess events. This enables 2-way communication
  // between the renderer process (UI) and the main process.
  ipcMain.on("model:set", setModel);
  ipcMain.on("model:get", getModel);
  ipcMain.on("chat:send", sendChat);
  ipcMain.on("chat:stop", stopChat);
  ipcMain.on("doc:load", loadDocument);
  ipcMain.on("ollama:serve", serveOllama);
  ipcMain.on("ollama:run", runOllamaModel);
  ipcMain.on("ollama:stop", stopOllama);

  if (app.isPackaged) {
    // Check app location: on macOS, offer to move the app to /Applications.
    if (process.platform === "darwin" && !app.isInApplicationsFolder()) {
      const chosen = dialog.showMessageBoxSync({
        type: "question",
        buttons: ["Move to Applications", "Do Not Move"],
        message:
          "Would you like to move this chatd to the Applications folder?",
        defaultId: 0,
        cancelId: 1,
      });

      if (chosen === 0) {
        try {
          app.moveToApplicationsFolder();
        } catch (err) {
          dialog.showErrorBox(
            "Unable to move to Applications folder",
            err.message
          );
        }
      }
    }

    // TODO: auto-update on Windows, there is a squirrel error that needs to be fixed
    // BUGFIX: the original condition was `!process.platform === "win32"`, which
    // negates the string first (to false) and compares false === "win32", so it
    // was always false and auto-update never ran on ANY platform. The intent,
    // per the TODO above, is to skip auto-update on Windows only.
    if (process.platform !== "win32") {
      autoUpdater.setFeedURL({
        url: updateURL,
      });
      autoUpdater.checkForUpdates();

      setInterval(() => {
        autoUpdater.checkForUpdates();
      }, 3600000); // Check every hour

      autoUpdater.on("update-available", (info) => {
        logger.info("Update available");
      });

      autoUpdater.on("update-downloaded", () => {
        // The update is ready to be installed on app restart.
        logger.info("Update downloaded");
      });

      autoUpdater.on("error", (err) => {
        logger.error("Error in auto-updater: ", err);
      });
    }
  }

  createWindow();

  // Define a custom Content Security Policy to only allow loading resources
  // from the app's origin; this is needed to call across the interprocess boundary
  session.defaultSession.webRequest.onHeadersReceived((details, callback) => {
    callback({
      responseHeaders: {
        ...details.responseHeaders,
        "Content-Security-Policy": ["default-src 'self'"],
      },
    });
  });
});
140 |
// Quit when all windows are closed, except on macOS. There, it's common
// for applications and their menu bar to stay active until the user quits
// explicitly with Cmd + Q.
app.on("window-all-closed", () => {
  // Stop the ollama server when the app is closed (runs on macOS too, even
  // though the app itself stays alive there)
  stopOllama();
  if (process.platform !== "darwin") {
    app.quit();
  }
});
151 |
app.on("activate", () => {
  // On OS X it's common to re-create a window in the app when the
  // dock icon is clicked and there are no other windows open.
  if (BrowserWindow.getAllWindows().length === 0) {
    createWindow();
  }
});
159 |
--------------------------------------------------------------------------------
/src/preload.js:
--------------------------------------------------------------------------------
1 | // See the Electron documentation for details on how to use preload scripts:
2 | // https://www.electronjs.org/docs/latest/tutorial/process-model#preload-scripts
3 |
const { contextBridge, ipcRenderer } = require("electron");

// Build a subscription helper for one IPC channel: the returned function
// registers `callback`, which is invoked with (event, data) for every
// message received on that channel.
const makeListener = (channel) => (callback) => {
  ipcRenderer.on(channel, (event, data) => callback(event, data));
};

// Here, we use the `contextBridge` API to expose a custom API to the renderer process.
// This API allows the renderer process to invoke events in the main process which interact with the operating system.
contextBridge.exposeInMainWorld("electronAPI", {
  sendChat: (text) => ipcRenderer.send("chat:send", text),
  onChatReply: makeListener("chat:reply"),
  stopChat: () => ipcRenderer.send("chat:stop"),
  loadDocument: () => ipcRenderer.send("doc:load"),
  onDocumentLoaded: makeListener("doc:load"),
  serveOllama: () => ipcRenderer.send("ollama:serve"),
  onOllamaServe: makeListener("ollama:serve"),
  runOllama: () => ipcRenderer.send("ollama:run"),
  onOllamaRun: makeListener("ollama:run"),
  getModel: () => ipcRenderer.send("model:get"),
  onModelGet: makeListener("model:get"),
  setModel: (model) => ipcRenderer.send("model:set", model),
});
42 |
--------------------------------------------------------------------------------
/src/service/document/parse/clean.js:
--------------------------------------------------------------------------------
/**
 * Strip embedded image references of the form `![[...]]` from the text.
 * @param {string} content - Raw document text.
 * @returns {string} The text with every image embed removed.
 */
function removeImages(content) {
  const imagePattern = /!\[\[.*?\]\]/g;
  return content.replace(imagePattern, "");
}
4 |
/**
 * Replace Markdown hyperlinks (`[label](url)`) with just their label text.
 * @param {string} content - Raw document text.
 * @returns {string} The text with link targets stripped.
 */
function removeHyperlinks(content) {
  const linkPattern = /\[(.*?)\]\((.*?)\)/g;
  return content.replace(linkPattern, "$1");
}
8 |
/**
 * Remove numeric citation markers such as `[85]` from the text. Non-numeric
 * bracketed spans are left untouched.
 * @param {string} content - Raw document text.
 * @returns {string} The text with citation markers removed.
 */
function removeCitations(content) {
  const citationPattern = /\[\d+\]/g;
  return content.replace(citationPattern, "");
}
12 |
13 | function splitText(text) {
14 | let cleanedText = text.replace(/\n/g, " "); // Replace newlines with spaces
15 | cleanedText = cleanedText.replace(/\s+/g, " "); // Replace multiple spaces with single space
16 | cleanedText = removeCitations(cleanedText); // Remove citation numbers like [85]
17 | cleanedText = removeHyperlinks(cleanedText); // Remove hyperlinks
18 | cleanedText = removeImages(cleanedText); // Remove images
19 |
20 | if (cleanedText.includes(".")) {
21 | // This regex pattern attempts to split the text into sentences based on periods
22 | const sentenceRegex = /(? to
and
5 | let cleanedHtml = html.replace(/<(?!\/?(h[1-6]|p)(?=>|\s.*>))\/?.*?>/gi, '');
6 |
7 | // Split into sections based on heading tags
8 | const sectionRegex = /]*>(.*?)<\/h[1-6]>|
]*>(.*?)<\/p>/gi;
9 | let match;
10 | let currentSection = { section: '', content: '' };
11 | const extractedSections = [];
12 |
13 | while ((match = sectionRegex.exec(cleanedHtml)) !== null) {
14 | if (match[0].startsWith(' {
12 | const isHeader = line.match(/^#+\s/);
13 | if (isHeader) {
14 | if (header !== null) {
15 | if (text === "") {
16 | // found a section with no text
17 | return;
18 | }
19 | sections.push({
20 | section: header,
21 | content: splitText(text),
22 | });
23 | }
24 |
25 | header = line.replace(/#/g, "").trim();
26 | text = ""; // begin searching for text
27 | } else {
28 | text += line + "\n";
29 | }
30 | });
31 |
32 | if (header !== null) {
33 | sections.push({
34 | section: header,
35 | content: splitText(text),
36 | });
37 | }
38 |
39 | return sections;
40 | }
41 |
42 | module.exports = {
43 | parseMd,
44 | };
45 |
--------------------------------------------------------------------------------
/src/service/document/parse/odt.js:
--------------------------------------------------------------------------------
1 | const odt2html = require('odt2html');
2 | const { removeCitations, removeHyperlinks } = require("./clean");
3 | const { extractSectionsAndContent } = require('./html');
4 | const { error } = require("../../logger.js");
5 |
6 | async function parseOdt(odtFilePath) {
7 | try {
8 | let html = await odt2html.toHTML({ path: odtFilePath });
9 |
10 | if (!html || !html.value) {
11 | return [];
12 | }
13 |
14 | html = removeCitations(html);
15 | html = removeHyperlinks(html);
16 |
17 | return extractSectionsAndContent(html);
18 | } catch (err) {
19 | error("Error parsing ODT file:", err);
20 | return [];
21 | }
22 | }
23 |
24 | module.exports = {
25 | parseOdt,
26 | };
27 |
--------------------------------------------------------------------------------
/src/service/document/parse/pdf.js:
--------------------------------------------------------------------------------
1 | const path = require("path");
2 | const fs = require("fs").promises;
3 | const pdf2md = require('@opendocsg/pdf2md');
4 | const { parseMd } = require("./md");
5 |
/**
 * Parse a PDF document: convert it to Markdown with pdf2md, then reuse the
 * Markdown section parser to extract sections and content.
 * @param {string} filePath - Path to the PDF file on disk.
 * @returns {Promise<{fileName: string, data: Array}>} Parsed document.
 */
async function parsePdf(filePath) {
  const buffer = await fs.readFile(filePath);
  const markdown = await pdf2md(buffer);
  return {
    fileName: path.basename(filePath),
    data: parseMd(markdown),
  };
}
14 |
15 | module.exports = {
16 | parsePdf,
17 | };
18 |
--------------------------------------------------------------------------------
/src/service/document/parse/txt.js:
--------------------------------------------------------------------------------
1 | function parseTxt(data) {
2 | // chunk the data based on new line characters
3 | let lines = data.split("\n");
4 | // further chunk the data based on patterns which indicate a new sentence
5 | let chunks = [];
6 | lines.forEach((line) => {
7 | // Split the content by periods that are followed by a space and a capital letter,
8 | // a question mark or exclamation mark followed by a space and a capital letter,
9 | // or the end of the content.
10 | // This regular expression tries to account for periods used in abbreviations,
11 | // decimal numbers, etc., by not splitting in those cases.
12 | const sentenceEndings =
13 | /(? chunk !== "");
19 | // TODO: add metadata and info here too as keys in map (see md.js)
20 | return [
21 | {
22 | section: content[0], // make the first sentence the section name
23 | content: content,
24 | },
25 | ];
26 | }
27 |
28 | module.exports = {
29 | parseTxt,
30 | };
31 |
--------------------------------------------------------------------------------
/src/service/document/reader.js:
--------------------------------------------------------------------------------
1 | const path = require("path");
2 | const fs = require("fs").promises;
3 | const { parsePdf, parseMd, parseOdt, parseTxt, parseDocx } = require("./parse");
4 |
/**
 * Read a document from disk and parse it into sections based on its file
 * extension. Unrecognized extensions are treated as plain text.
 * @param {string} filePath - Path of the document to load.
 * @returns {Promise<{fileName: string, data: Array}>} Parsed document.
 */
async function loadFile(filePath) {
  const fileName = path.basename(filePath);

  switch (path.extname(filePath).toLowerCase()) {
    case ".docx": {
      const docxBuffer = await fs.readFile(filePath);
      return { fileName, data: await parseDocx(docxBuffer) };
    }
    case ".md": {
      const markdown = await fs.readFile(filePath, "utf-8");
      return { fileName, data: parseMd(markdown) };
    }
    case ".odt":
      return { fileName, data: await parseOdt(filePath) };
    case ".pdf":
      // parsePdf builds its own { fileName, data } result
      return await parsePdf(filePath);
    default: {
      // just try to parse it as a text file
      const rawText = await fs.readFile(filePath, "utf-8");
      return { fileName, data: parseTxt(rawText) };
    }
  }
}
37 |
38 | module.exports = {
39 | loadFile,
40 | };
41 |
--------------------------------------------------------------------------------
/src/service/embedding.js:
--------------------------------------------------------------------------------
1 | // This file contains all the logic for loading the model and creating embeddings.
2 |
// Lazily-initialized singleton wrapper around the Transformers.js
// feature-extraction pipeline used to embed document text.
class ExtractorPipeline {
  static task = "feature-extraction";
  static model = "Xenova/all-MiniLM-L6-v2"; // if you want to use a different model, change the vector size in the vector store
  static instance = null;

  // Return the shared pipeline, loading it on first use. The optional
  // progress_callback only applies to the call that triggers the load.
  static async getInstance(progress_callback = null) {
    if (this.instance === null) {
      // Dynamically import the Transformers.js library (it is ESM-only).
      // The previously-destructured `env` binding was unused and is dropped.
      const { pipeline } = await import("@xenova/transformers");
      // Cache the pipeline *promise* (not the resolved pipeline) so that
      // concurrent callers all share a single model load; awaiting the
      // returned value resolves to the pipeline itself.
      this.instance = pipeline(this.task, this.model, { progress_callback });
    }

    return this.instance;
  }
}
18 |
// The embed function is used by the `doc:load` event handler.
/**
 * Compute an embedding vector for every content line in the parsed document.
 * @param {{data: Array<{content: string[]}>}} doc - Parsed document sections.
 * @returns {Promise<Array<{content: string, embedding: number[]}>>} One entry
 *   per input line, in the same order as the document's lines.
 */
async function embed(doc) {
  // Load the model (shared singleton)
  const extractor = await ExtractorPipeline.getInstance();

  // Flatten all section content into a single list of lines.
  const lines = doc.data.flatMap((section) => section.content);

  // Run all extractions in parallel. Promise.all preserves input order,
  // unlike the previous push-on-resolve approach, whose output order
  // depended on which extraction happened to finish first.
  return Promise.all(
    lines.map(async (line) => {
      const output = await extractor(line, {
        pooling: "mean",
        normalize: true,
      });
      return {
        content: line,
        embedding: Array.from(output.data),
      };
    })
  );
}
49 |
50 | module.exports = {
51 | embed,
52 | };
53 |
--------------------------------------------------------------------------------
/src/service/logger.js:
--------------------------------------------------------------------------------
1 | const path = require("path");
2 | const os = require("os");
3 | const winston = require("winston");
4 |
class Logger {
  static instance = null; // singleton — obtain via getLogger(), not `new Logger()`

  constructor() {
    // Logs go both to the console and to ~/.chatd/service.log.
    this.logger = winston.createLogger({
      format: winston.format.simple(),
      transports: [
        new winston.transports.Console(),
        new winston.transports.File({
          filename: path.join(os.homedir(), ".chatd", "service.log"),
          // winston's File transport expects lowercase `maxsize`; the
          // previous `maxSize` was silently ignored, so the log file could
          // grow without bound.
          maxsize: 1000000, // 1 MB
          maxFiles: 1,
        })
      ]
    });
  }

  /**
   * Returns the shared Logger, creating it on first use.
   * @return {Logger}
   */
  static getLogger() {
    if (this.instance === null) {
      this.instance = new this();
    }
    return this.instance;
  }
}
29 |
// Logs an informational message to both the console and the log file.
function info(msg) {
  console.log(msg);
  const { logger } = Logger.getLogger();
  logger.info(msg);
}
34 |
35 | function error(msg) {
36 | console.log(msg);
37 | Logger.getLogger().logger.error(msg);
38 | }
39 |
40 | module.exports = {
41 | logInfo: info,
42 | logErr: error
43 | }
--------------------------------------------------------------------------------
/src/service/ollama/ollama.js:
--------------------------------------------------------------------------------
1 | const fs = require("fs");
2 | const os = require("os");
3 | const path = require("path");
4 | const { exec } = require("child_process");
5 | const { logInfo, logErr } = require("../logger.js");
6 |
/**
 * How the Ollama server ended up being served.
 * @enum {string}
 */
const OllamaServeType = {
  SYSTEM: "system", // ollama is installed on the system
  PACKAGED: "packaged", // ollama is packaged with the app
};
11 |
class OllamaOrchestrator {
  static instance = null; // singleton — obtain via getOllama()

  /**
   * @param {object} ollamaModule The dynamically imported `ollama` package.
   */
  constructor(ollamaModule) {
    this.childProcess = null; // set when we spawn `ollama serve` ourselves
    this.messages = []; // stores the chat message history for the current session
    this.host = process.env.OLLAMA_HOST || "http://127.0.0.1:11434";
    this.ollama = new ollamaModule.Ollama({ host: this.host });
  }

  /**
   * Returns the singleton orchestrator, importing the ESM-only `ollama`
   * package on first use.
   * @return {Promise<OllamaOrchestrator>}
   */
  static async getOllama() {
    if (this.instance === null) {
      const ollamaModule = await import("ollama");
      this.instance = new this(ollamaModule);
    }
    return this.instance;
  }

  /**
   * Start Ollama to serve an LLM.
   *
   * Resolution order: an already-running server, the system-wide `ollama`
   * command, and finally the binary packaged with the app.
   *
   * @throws {Error} If no server could be started or the platform is unsupported.
   * @return {OllamaStatus} The status of the Ollama server.
   */
  async serve() {
    try {
      // see if ollama is already running
      await this.ping();
      return OllamaServeType.SYSTEM;
    } catch (err) {
      // this is fine, we just need to start ollama
      logInfo(`Ollama is not running: ${err}`);
    }

    try {
      // See if 'ollama serve' command is available on the system
      await this.execServe("ollama");
      return OllamaServeType.SYSTEM;
    } catch (err) {
      // ollama is not installed, run the binary directly
      logInfo(`Ollama is not installed on the system: ${err}`);
    }

    // start the packaged ollama server
    let exe = "";
    let appDataPath = "";
    switch (process.platform) {
      case "win32":
        exe = "ollama.exe";
        appDataPath = path.join(os.homedir(), "AppData", "Local", "chatd");
        break;
      case "darwin":
        exe = "ollama-darwin";
        appDataPath = path.join(
          os.homedir(),
          "Library",
          "Application Support",
          "chatd"
        );
        break;
      case "linux":
        exe = "ollama-linux"; // x64 only
        appDataPath = path.join(os.homedir(), ".config", "chatd");
        break;
      default:
        logErr(`unsupported platform: ${process.platform}`);
        // Fix: this previously called `reject(...)`, which is undefined here
        // (serve() is a plain async function, not a Promise executor) and
        // raised a ReferenceError instead of the intended error.
        throw new Error(`Unsupported platform: ${process.platform}`);
    }

    const pathToBinary = path.join(__dirname, "runners", exe);
    try {
      await this.execServe(pathToBinary, appDataPath);
      return OllamaServeType.PACKAGED;
    } catch (err) {
      logErr(`Failed to start Ollama: ${err}`);
      throw new Error(`Failed to start Ollama: ${err}`);
    }
  }

  /**
   * Runs `<binary> serve` and resolves once the server answers a ping.
   *
   * @param {string} binaryPath The ollama binary to run — either a full path
   *     or a command resolved via PATH. (Renamed from `path` to avoid
   *     shadowing the `path` module.)
   * @param {string} [appDataDirectory] Optional directory to store models in;
   *     created if missing and exported to the child as OLLAMA_MODELS.
   * @return {Promise<void>}
   */
  async execServe(binaryPath, appDataDirectory) {
    return new Promise((resolve, reject) => {
      // Only create/export the models directory when one was requested.
      // Previously `fs.mkdirSync(undefined)` threw when this was called for
      // the system-wide binary, rejecting that path unconditionally.
      if (appDataDirectory && !fs.existsSync(appDataDirectory)) {
        fs.mkdirSync(appDataDirectory, { recursive: true });
      }
      const env = { ...process.env };
      if (appDataDirectory) {
        env.OLLAMA_MODELS = appDataDirectory;
      }
      this.childProcess = exec(
        // Quote the binary path: the packaged binary can live under a
        // directory containing spaces (e.g. "Application Support").
        `"${binaryPath}" serve`,
        { env },
        (err, stdout, stderr) => {
          // This callback only fires when the process exits, which is always
          // unexpected for a long-running server — reject in every case.
          if (err) {
            reject(`exec error: ${err}`);
            return;
          }

          if (stderr) {
            reject(`ollama stderr: ${stderr}`);
            return;
          }

          reject(`ollama stdout: ${stdout}`);
        }
      );

      // Once the process is started, try to ping Ollama server.
      this.waitForPing()
        .then(() => {
          resolve();
        })
        .catch((pingError) => {
          if (this.childProcess && !this.childProcess.killed) {
            this.childProcess.kill();
          }
          reject(pingError);
        });
    });
  }

  /**
   * Streams a model pull, invoking `fn` with each progress part.
   * @param {string} model Model name, e.g. 'llama2'.
   * @param {function} fn Progress callback.
   */
  async pull(model, fn) {
    logInfo("pulling model: " + model);
    const stream = await this.ollama.pull({model: model, stream: true});
    for await (const part of stream) {
      fn(part);
    }
  }

  /**
   * Pulls (best-effort) and then loads a model, reporting via `fn`.
   * Pull failures other than a missing manifest (offline) are rethrown.
   */
  async run(model, fn) {
    try {
      await this.pull(model, fn);
    } catch (err) {
      logErr('failed to pull before run: ' + err);
      // Guard against non-Error rejections that have no `.message`.
      const message = err instanceof Error ? err.message : String(err);
      if (!message.includes("pull model manifest")) {
        throw err;
      }
      logInfo('chatd is running offline, failed to pull');
    }
    // load the model
    const loaded = await this.ollama.chat({model: model});
    // all done, return the loaded event to the callback
    fn(loaded);
  }

  /**
   * Stops the packaged ollama server process, if one was started.
   */
  stop() {
    if (!this.childProcess) {
      return;
    }

    // Capture the pid now: by the time the taskkill callback runs,
    // this.childProcess has already been cleared below (the old code
    // dereferenced null there).
    const { pid } = this.childProcess;
    if (os.platform() === "win32") {
      // Windows: Use taskkill to force kill the process tree
      // This makes sure the child process isn't left running
      exec(`taskkill /pid ${pid} /f /t`, (err) => {
        if (err) {
          logErr(
            `Failed to kill process ${pid}: ${err}`
          );
        }
      });
    } else {
      this.childProcess.kill();
    }

    this.childProcess = null;
  }

  /**
   * Sends a ping to the LLM to see if it is running.
   * @throws {Error}
   * @return {Promise} True if the server is running.
   */
  async ping() {
    const response = await fetch(this.host, {
      method: "GET",
      cache: "no-store",
    });

    if (response.status !== 200) {
      throw new Error(`failed to ping ollama server: ${response.status}`);
    }

    logInfo("ollama server is running");

    return true;
  }

  /**
   * Waits for the Ollama server to respond to ping.
   * @param {number} delay Time in ms to wait between retries.
   * @param {number} retries Maximum number of retries.
   * @return {Promise}
   */
  async waitForPing(delay = 1000, retries = 5) {
    for (let i = 0; i < retries; i++) {
      try {
        await this.ping();
        return;
      } catch (err) {
        logInfo("waiting for ollama server...");
        logInfo(err);
        await new Promise((resolve) => setTimeout(resolve, delay));
      }
    }
    logErr("max retries reached. Ollama server didn't respond.");
    throw new Error("Max retries reached. Ollama server didn't respond.");
  }

  /**
   * Sends a chat to the LLM and runs a callback.
   *
   * @param {string} model One of the installed models to use, e.g: 'llama2'.
   * @param {string} prompt The user message for the LLM.
   * @param {function} fn The callback to run on each line of the response.
   *
   * @throws {Error|AbortError}
   *
   * @return {Promise}
   */
  async chat(model, prompt, fn) {
    this.messages.push({
      "role": "user",
      "content": prompt
    });

    let assistant = {
      "role": "assistant",
      "content": ""
    }
    try {
      const stream = await this.ollama.chat({model: model, messages: this.messages, stream: true});
      for await (const part of stream) {
        assistant.content += part.message.content;
        fn(part);
      }
    } catch (error) {
      if (error instanceof Error && error.name === 'AbortError') {
        // this is expected if user presses the stop button, continue
      } else {
        // rethrow
        throw error;
      }
    }
    // Record whatever the assistant produced (possibly partial on abort)
    // so the session history stays consistent.
    this.messages.push(assistant);
  }

  /**
   * Aborts the current request.
   */
  abortRequest() {
    this.ollama.abort();
  }
}
266 |
// Pulls (when possible) and loads `model`, reporting progress via `fn`.
async function run(model, fn) {
  const orchestrator = await OllamaOrchestrator.getOllama();
  return orchestrator.run(model, fn);
}
271 |
// Sends `prompt` to `model`, streaming response parts into `fn`.
async function chat(model, prompt, fn) {
  const orchestrator = await OllamaOrchestrator.getOllama();
  return orchestrator.chat(model, prompt, fn);
}
276 |
// Cancels the in-flight Ollama request, if any.
async function abort() {
  const orchestrator = await OllamaOrchestrator.getOllama();
  return orchestrator.abortRequest();
}
281 |
// Shuts down the packaged ollama server process, if one was started.
async function stop() {
  const orchestrator = await OllamaOrchestrator.getOllama();
  return orchestrator.stop();
}
286 |
287 | async function serve() {
288 | const ollama = await OllamaOrchestrator.getOllama();
289 | return ollama.serve();
290 | }
291 |
292 | module.exports = {
293 | run,
294 | chat,
295 | abort,
296 | stop,
297 | serve,
298 | };
299 |
--------------------------------------------------------------------------------
/src/service/ollama/runners/README.md:
--------------------------------------------------------------------------------
1 | Runners in this directory are packaged with chatd at build time.
2 |
macOS: `ollama-darwin`
4 | Linux: `ollama-linux`
5 | Windows: `ollama.exe`
6 |
--------------------------------------------------------------------------------
/src/service/vector.js:
--------------------------------------------------------------------------------
1 | const { create, count, insertMultiple, search: oramaSearch } = require("@orama/orama")
2 |
class VectorStore {
  static instance = null; // singleton — obtain via getVectorStore()

  /**
   * @param {object} db An initialized Orama database.
   */
  constructor(db) {
    this.db = db;
  }

  /**
   * Returns the shared in-memory vector store, creating it on first use.
   * @return {Promise<VectorStore>}
   */
  static async getVectorStore() {
    if (this.instance === null) {
      const db = await create({
        schema: {
          text: 'string',
          embedding: 'vector[384]', // vector size must be expressed during schema initialization, all-MiniLM-L6-v2 is 384
          // TODO: add meta data to schema
        },
      });
      this.instance = new this(db);
    }
    return this.instance;
  }

  /**
   * Inserts a batch of embeddings into the store.
   * @param {Array<{content: string, embedding: number[]}>} embeddings
   */
  async addEmbeddings(embeddings) {
    const items = embeddings.map(embedding => ({
      content: embedding.content,
      embedding: embedding.embedding,
      // ...meta data...
    }));

    await insertMultiple(this.db, items);
  }

  /**
   * Vector-searches the store and returns the matching text contents.
   * @param {number[]} embedding Query vector (384 dims).
   * @param {number} limit Maximum number of hits.
   * @return {Promise<string[]>}
   */
  async search(embedding, limit) {
    const searchResult = await oramaSearch(this.db, {
      mode: 'vector',
      vector: {
        value: embedding,
        property: 'embedding',
      },
      similarity: 0.1, // get as many results as possible
      limit: limit,
    });
    // parse the search result to a text array
    let results = [];
    for (const hit of searchResult.hits) {
      results.push(hit.document.content);
    }
    return results;
  }

  /**
   * Discards the store so the next getVectorStore() builds a fresh database.
   */
  clear() {
    // Fix: `this.instance = null` only shadowed the static singleton on the
    // instance, so the store was never actually reset. Reset the static field.
    VectorStore.instance = null;
  }

  /**
   * @return {Promise<number>} Number of documents currently stored.
   */
  async size() {
    return await count(this.db);
  }
}
60 |
// Resets the vector store (drops all stored embeddings).
async function clearVectorStore() {
  const vectorStore = await VectorStore.getVectorStore();
  vectorStore.clear();
}
65 |
// Inserts a batch of embeddings into the shared vector store.
async function store(embeddings) {
  const vs = await VectorStore.getVectorStore();
  await vs.addEmbeddings(embeddings);
}
70 |
// Runs a vector search and returns up to `limit` matching text snippets.
async function search(embedding, limit) {
  const vs = await VectorStore.getVectorStore();
  return vs.search(embedding, limit);
}
76 |
77 | async function vectorStoreSize() {
78 | const vectorStore = await VectorStore.getVectorStore();
79 | const size = await vectorStore.size();
80 | return size;
81 | }
82 |
83 | module.exports = {
84 | clearVectorStore,
85 | vectorStoreSize,
86 | store,
87 | search,
88 | };
89 |
--------------------------------------------------------------------------------
/src/service/worker.js:
--------------------------------------------------------------------------------
1 | const { parentPort } = require('worker_threads');
2 | const { loadFile } = require("./document/reader.js");
3 | const { embed } = require("./embedding.js");
4 |
// Worker entry point: for each file path received, parse the document and
// compute embeddings, then report the result back to the main process.
parentPort.on('message', async (filePath) => {
  console.log('worker received:', filePath);
  try {
    // open the file and read the contents
    const doc = await loadFile(filePath); // TODO: batch read the file in chunks to avoid loading the entire file into memory
    // generate embeddings for each section
    const embeddings = await embed(doc);

    // Respond back to the main process
    parentPort.postMessage({ success: true, embeddings: embeddings });
  } catch (err) {
    // Previously a parse/embed failure left an unhandled rejection and the
    // main process never got a reply. Report the failure instead.
    // NOTE(review): assumes the main-process handler checks `success` — confirm.
    parentPort.postMessage({ success: false, error: String(err) });
  }
});
15 |
--------------------------------------------------------------------------------