├── src
├── __init__.py
├── integrations
│ ├── __init__.py
│ └── website_reader.py
├── utility
│ ├── __init__.py
│ ├── system.py
│ ├── profile_settings.py
│ ├── audio_recorder.py
│ ├── pip.py
│ └── website_scraper.py
├── handlers
│ ├── rag
│ │ └── __init__.py
│ ├── websearch
│ │ ├── __init__.py
│ │ ├── websearch.py
│ │ └── duckduckgo_handler.py
│ ├── memory
│ │ ├── __init__.py
│ │ ├── memory_handler.py
│ │ ├── summary_memoripy_handler.py
│ │ └── user_summary_handler.py
│ ├── __init__.py
│ ├── embeddings
│ │ ├── __init__.py
│ │ ├── gemini_handler.py
│ │ ├── embedding.py
│ │ ├── wordllama_handler.py
│ │ └── ollama_handler.py
│ ├── llm
│ │ ├── mistral_handler.py
│ │ ├── deepseek_handler.py
│ │ ├── __init__.py
│ │ ├── groq_handler.py
│ │ ├── openrouter_handler.py
│ │ ├── gpt3any_handler.py
│ │ ├── custom_handler.py
│ │ └── newelle_handler.py
│ ├── stt
│ │ ├── stt.py
│ │ ├── sphinx_handler.py
│ │ ├── __init__.py
│ │ ├── witai_handler.py
│ │ ├── custom_handler.py
│ │ ├── vosk_handler.py
│ │ ├── groqsr_handler.py
│ │ ├── googlesr_handler.py
│ │ ├── openaisr_handler.py
│ │ └── whisper_handler.py
│ ├── tts
│ │ ├── __init__.py
│ │ ├── gtts_handler.py
│ │ ├── custom_handler.py
│ │ ├── groq_tts_handler.py
│ │ ├── espeak_handler.py
│ │ ├── custom_openai_tts.py
│ │ ├── openai_tts_handler.py
│ │ ├── kokoro_handler.py
│ │ ├── elevenlabs_handler.py
│ │ └── tts.py
│ └── descriptors.py
├── newelle.gresource.xml
├── ui
│ ├── widgets
│ │ ├── __init__.py
│ │ ├── barchart.py
│ │ ├── comborow.py
│ │ ├── website.py
│ │ ├── profilerow.py
│ │ ├── markuptextview.py
│ │ └── terminal_dialog.py
│ ├── shortcuts.py
│ ├── __init__.py
│ ├── screenrecorder.py
│ ├── thread_editing.py
│ └── mini_window.py
├── newelle.in
└── ui_controller.py
├── .gitignore
├── po
├── meson.build
├── LINGUAS
└── POTFILES
├── screenshots
├── 1b.png
├── 1w.png
├── 2b.png
├── 2w.png
├── 3b.png
├── 3w.png
├── 4b.png
└── 4w.png
├── data
├── screenshots
│ ├── screenshot1.png
│ ├── screenshot1b.png
│ ├── screenshot2.png
│ ├── screenshot2b.png
│ ├── screenshot3.png
│ └── screenshot3b.png
├── icons
│ ├── plus-symbolic.svg
│ ├── detach-symbolic.svg
│ ├── document-edit-symbolic.svg
│ ├── attach-symbolic.svg
│ ├── emblem-default-symbolic.svg
│ ├── info-outline-symbolic.svg
│ ├── avatar-symbolic.svg
│ ├── user-trash-symbolic.svg
│ ├── go-home-symbolic.svg
│ ├── sidebar-show-left-symbolic.svg
│ ├── sidebar-show-right-symbolic.svg
│ ├── circle-crossed-symbolic.svg
│ ├── zoom-out-symbolic.svg
│ ├── check-plain-symbolic.svg
│ ├── zoom-in-symbolic.svg
│ ├── magic-wand-symbolic.svg
│ ├── controls-big-symbolic.svg
│ ├── hicolor
│ │ ├── symbolic
│ │ │ └── apps
│ │ │ │ └── io.github.qwersyk.Newelle-symbolic.svg
│ │ └── scalable
│ │ │ └── apps
│ │ │ └── io.github.qwersyk.Newelle.svg
│ ├── warning-outline-symbolic.svg
│ ├── search-folder-symbolic.svg
│ ├── question-round-outline-symbolic.svg
│ ├── update-symbolic.svg
│ ├── view-show-symbolic.svg
│ ├── gnome-terminal-symbolic.svg
│ ├── settings-symbolic.svg
│ ├── vcard-symbolic.svg
│ ├── meson.build
│ ├── star-filled-rounded-symbolic.svg
│ ├── brain-augemnted-symbolic.svg
│ ├── right-large-symbolic.svg
│ └── left-large-symbolic.svg
├── io.github.qwersyk.Newelle.desktop.in
├── meson.build
└── images
│ └── extension.svg
├── meson_options.txt
├── modules
├── requirements.txt
├── vte.json
├── portaudio.json
├── python3-pillow.json
├── python3-pygame.json
├── python3-pyaudio.json
├── python3-lxml.json
├── python3-pydub.json
├── python3-pylatexenc.json
├── python3-expandvars.json
├── python3-six.json
├── git.json
├── python3-lxml_html_clean.json
├── python3-gpt4all.json
├── python3-markdownify.json
├── python3-requests.json
├── python3-speechrecognition.json
└── python3-gtts.json
├── install.sh
├── meson.build
├── .github
└── workflows
│ ├── flatpak-builder.yml
│ └── build-locales.yml
├── flake.lock
├── flake.nix
├── overlay.nix
├── package.nix
└── io.github.qwersyk.Newelle.json
/src/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/integrations/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .flatpak-builder
2 | flatpak-pip-generator
--------------------------------------------------------------------------------
/po/meson.build:
--------------------------------------------------------------------------------
1 | i18n.gettext('newelle', preset: 'glib')
2 |
--------------------------------------------------------------------------------
/screenshots/1b.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qwersyk/Newelle/HEAD/screenshots/1b.png
--------------------------------------------------------------------------------
/screenshots/1w.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qwersyk/Newelle/HEAD/screenshots/1w.png
--------------------------------------------------------------------------------
/screenshots/2b.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qwersyk/Newelle/HEAD/screenshots/2b.png
--------------------------------------------------------------------------------
/screenshots/2w.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qwersyk/Newelle/HEAD/screenshots/2w.png
--------------------------------------------------------------------------------
/screenshots/3b.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qwersyk/Newelle/HEAD/screenshots/3b.png
--------------------------------------------------------------------------------
/screenshots/3w.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qwersyk/Newelle/HEAD/screenshots/3w.png
--------------------------------------------------------------------------------
/screenshots/4b.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qwersyk/Newelle/HEAD/screenshots/4b.png
--------------------------------------------------------------------------------
/screenshots/4w.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qwersyk/Newelle/HEAD/screenshots/4w.png
--------------------------------------------------------------------------------
/data/screenshots/screenshot1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qwersyk/Newelle/HEAD/data/screenshots/screenshot1.png
--------------------------------------------------------------------------------
/data/screenshots/screenshot1b.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qwersyk/Newelle/HEAD/data/screenshots/screenshot1b.png
--------------------------------------------------------------------------------
/data/screenshots/screenshot2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qwersyk/Newelle/HEAD/data/screenshots/screenshot2.png
--------------------------------------------------------------------------------
/data/screenshots/screenshot2b.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qwersyk/Newelle/HEAD/data/screenshots/screenshot2b.png
--------------------------------------------------------------------------------
/data/screenshots/screenshot3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qwersyk/Newelle/HEAD/data/screenshots/screenshot3.png
--------------------------------------------------------------------------------
/data/screenshots/screenshot3b.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qwersyk/Newelle/HEAD/data/screenshots/screenshot3b.png
--------------------------------------------------------------------------------
/src/utility/__init__.py:
--------------------------------------------------------------------------------
1 | from .util import convert_history_openai, get_streaming_extra_setting, override_prompts
2 |
--------------------------------------------------------------------------------
/meson_options.txt:
--------------------------------------------------------------------------------
1 | option('profile',
2 | type: 'combo',
3 | description: 'Build profile',
4 | choices: ['development', 'release'],
5 | value: 'release'
6 | )
7 |
--------------------------------------------------------------------------------
/src/handlers/rag/__init__.py:
--------------------------------------------------------------------------------
1 | from .rag_handler import RAGHandler
2 | from .llamaindex_handler import LlamaIndexHanlder
3 | __all__ = [
4 | "RAGHandler",
5 | "LlamaIndexHanlder",
6 | ]
7 |
--------------------------------------------------------------------------------
/po/LINGUAS:
--------------------------------------------------------------------------------
1 | ar
2 | bn
3 | cs
4 | de
5 | en
6 | es
7 | fr
8 | hi
9 | hu
10 | id
11 | it
12 | ja
13 | ko
14 | nl
15 | pl
16 | pt
17 | pt_BR
18 | ro
19 | ru
20 | sv
21 | tr
22 | uk
23 | zh_CN
24 | zh_TW
--------------------------------------------------------------------------------
/data/icons/plus-symbolic.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
--------------------------------------------------------------------------------
/modules/requirements.txt:
--------------------------------------------------------------------------------
1 | requests
2 | expandvars
3 | curl_cffi
4 | pillow
5 | gtts
6 | pyaudio
7 | speechrecognition
8 | openai
9 | pygame
10 | tiktoken
11 | newspaper3k
12 | lxml
13 | lxml_html_clean
14 | pylatexenc
15 | matplotlib
16 | gpt4all # Optional
17 |
--------------------------------------------------------------------------------
/install.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Build and install the Newelle flatpak into the current user's installation.
# Usage: ./install.sh [bundle]
#   bundle  — additionally export a single-file bundle (newelle.flatpak).
#
# Fail fast: without these flags a failed flatpak-builder run would still
# fall through to the bundling step and export a stale build.
set -euo pipefail

APPID="io.github.qwersyk.Newelle"
BUNDLENAME="newelle.flatpak"

# Build from the manifest and install into the per-user flatpak repo.
flatpak-builder --install --user --force-clean flatpak-app "$APPID".json

# "${1:-}" guards against "unbound variable" under `set -u` when no
# argument is given.
if [ "${1:-}" = "bundle" ]; then
	flatpak build-bundle ~/.local/share/flatpak/repo "$BUNDLENAME" "$APPID"
fi
--------------------------------------------------------------------------------
/data/icons/detach-symbolic.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/data/io.github.qwersyk.Newelle.desktop.in:
--------------------------------------------------------------------------------
1 | [Desktop Entry]
2 | Name=Newelle
3 | Comment=Newelle: Your advanced chat bot
4 | Exec=newelle
5 | Icon=io.github.qwersyk.Newelle
6 | Terminal=false
7 | Type=Application
8 | Categories=Utility;
9 | StartupNotify=true
10 | Keywords=ai;assistant;chat;chatgpt;gpt;llm;ollama;
11 |
--------------------------------------------------------------------------------
/data/icons/document-edit-symbolic.svg:
--------------------------------------------------------------------------------
1 |
2 |
5 |
--------------------------------------------------------------------------------
/src/handlers/websearch/__init__.py:
--------------------------------------------------------------------------------
1 | from .websearch import WebSearchHandler
2 | from .searxng import SearXNGHandler
3 | from .duckduckgo_handler import DDGSeachHandler
4 | from .tavily import TavilyHandler
5 | __all__ = [
6 | "WebSearchHandler",
7 | "SearXNGHandler",
8 | "DDGSeachHandler",
9 | "TavilyHandler",
10 | ]
11 |
--------------------------------------------------------------------------------
/src/handlers/memory/__init__.py:
--------------------------------------------------------------------------------
1 | from .memory_handler import MemoryHandler
2 | from .memoripy_handler import MemoripyHandler
3 | from .user_summary_handler import UserSummaryHandler
4 | from .summary_memoripy_handler import SummaryMemoripyHanlder
5 |
6 | __all__ = ["MemoryHandler", "MemoripyHandler", "UserSummaryHandler", "SummaryMemoripyHanlder"]
7 |
--------------------------------------------------------------------------------
/data/icons/attach-symbolic.svg:
--------------------------------------------------------------------------------
1 |
2 |
5 |
--------------------------------------------------------------------------------
/src/handlers/__init__.py:
--------------------------------------------------------------------------------
1 | from .handler import Handler, ErrorSeverity
2 | from .extra_settings import ExtraSettings
3 | from .descriptors import HandlerDescription, PromptDescription, TabButtonDescription
4 |
5 | __all__ = [
6 | "Handler",
7 | "ExtraSettings",
8 | "ErrorSeverity",
9 | "HandlerDescription",
10 | "PromptDescription",
11 | "TabButtonDescription",
12 | ]
13 |
--------------------------------------------------------------------------------
/src/newelle.gresource.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | ../data/images/illustration.svg
5 | ../data/images/extension.svg
6 | ../data/images/error.svg
7 |
8 |
9 |
--------------------------------------------------------------------------------
/data/icons/emblem-default-symbolic.svg:
--------------------------------------------------------------------------------
1 |
5 |
--------------------------------------------------------------------------------
/src/handlers/embeddings/__init__.py:
--------------------------------------------------------------------------------
from .embedding import EmbeddingHandler
from .wordllama_handler import WordLlamaHandler
from .openai_handler import OpenAIEmbeddingHandler
from .gemini_handler import GeminiEmbeddingHanlder
from .ollama_handler import OllamaEmbeddingHandler

# Fixed: this was previously spelled "__ALL__", which Python ignores entirely —
# only the lowercase "__all__" name controls `from package import *` exports.
# (Class names are kept exactly as defined, including the "Hanlder" misspelling,
# because other modules import them under these names.)
__all__ = ["EmbeddingHandler", "WordLlamaHandler", "OpenAIEmbeddingHandler", "GeminiEmbeddingHanlder", "OllamaEmbeddingHandler"]
--------------------------------------------------------------------------------
/meson.build:
--------------------------------------------------------------------------------
1 | project('newelle',
2 | version: '1.0.2',
3 | meson_version: '>= 0.62.0',
4 | default_options: [ 'warning_level=2', 'werror=false', ],
5 | )
6 |
7 | i18n = import('i18n')
8 | gnome = import('gnome')
9 |
10 | subdir('data')
11 | subdir('src')
12 | subdir('po')
13 |
14 | gnome.post_install(
15 | glib_compile_schemas: true,
16 | gtk_update_icon_cache: true,
17 | update_desktop_database: true,
18 | )
--------------------------------------------------------------------------------
/modules/vte.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "vte",
3 | "buildsystem": "meson",
4 | "config-opts": ["-Dvapi=false"],
5 | "sources": [
6 | {
7 | "type": "archive",
8 | "url": "https://gitlab.gnome.org/GNOME/vte/-/archive/0.78.0/vte-0.78.0.tar.gz",
9 | "sha256": "82e19d11780fed4b66400f000829ce5ca113efbbfb7975815f26ed93e4c05f2d"
10 | }
11 | ]
12 | }
13 |
--------------------------------------------------------------------------------
/data/icons/info-outline-symbolic.svg:
--------------------------------------------------------------------------------
1 |
2 |
7 |
--------------------------------------------------------------------------------
/data/icons/avatar-symbolic.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/modules/portaudio.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "libportaudio",
3 | "sources": [
4 | {
5 | "type": "archive",
6 | "url": "http://www.portaudio.com/archives/pa_stable_v190600_20161030.tgz",
7 | "sha256": "f5a21d7dcd6ee84397446fa1fa1a0675bb2e8a4a6dceb4305a8404698d8d1513"
8 | }
9 | ],
10 | "cleanup": [
11 | "/include",
12 | "/lib/*.a",
13 | "/lib/*.la",
14 | "/lib/pkgconfig",
15 | "/man",
16 | "/share/aclocal",
17 | "/share/doc",
18 | "/share/gtk-doc",
19 | "/share/man",
20 | "/share/pkgconfig"
21 | ]
22 | }
23 |
--------------------------------------------------------------------------------
/data/icons/user-trash-symbolic.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/data/icons/go-home-symbolic.svg:
--------------------------------------------------------------------------------
1 |
4 |
--------------------------------------------------------------------------------
/data/icons/sidebar-show-left-symbolic.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
--------------------------------------------------------------------------------
/data/icons/sidebar-show-right-symbolic.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
--------------------------------------------------------------------------------
/modules/python3-pillow.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "python3-pillow",
3 | "buildsystem": "simple",
4 | "build-commands": [
5 | "pip3 install --verbose --exists-action=i --no-index --find-links=\"file://${PWD}\" --prefix=${FLATPAK_DEST} \"pillow\" --no-build-isolation"
6 | ],
7 | "sources": [
8 | {
9 | "type": "file",
10 | "url": "https://files.pythonhosted.org/packages/f3/0d/d0d6dea55cd152ce3d6767bb38a8fc10e33796ba4ba210cbab9354b6d238/pillow-11.3.0.tar.gz",
11 | "sha256": "3828ee7586cd0b2091b6209e5ad53e20d0649bbe87164a459d0676e035e8f523"
12 | }
13 | ]
14 | }
--------------------------------------------------------------------------------
/modules/python3-pygame.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "python3-pygame",
3 | "buildsystem": "simple",
4 | "build-commands": [
5 | "pip install --verbose --exists-action=i --no-index --find-links=\"file://${PWD}\" --prefix=${FLATPAK_DEST} \"pygame\" --no-build-isolation"
6 | ],
7 | "sources": [
8 | {
9 | "type": "file",
10 | "url": "https://files.pythonhosted.org/packages/49/cc/08bba60f00541f62aaa252ce0cfbd60aebd04616c0b9574f755b583e45ae/pygame-2.6.1.tar.gz",
11 | "sha256": "56fb02ead529cee00d415c3e007f75e0780c655909aaa8e8bf616ee09c9feb1f"
12 | }
13 | ]
14 | }
--------------------------------------------------------------------------------
/modules/python3-pyaudio.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "python3-pyaudio",
3 | "buildsystem": "simple",
4 | "build-commands": [
5 | "pip3 install --verbose --exists-action=i --no-index --find-links=\"file://${PWD}\" --prefix=${FLATPAK_DEST} \"pyaudio\" --no-build-isolation"
6 | ],
7 | "sources": [
8 | {
9 | "type": "file",
10 | "url": "https://files.pythonhosted.org/packages/26/1d/8878c7752febb0f6716a7e1a52cb92ac98871c5aa522cba181878091607c/PyAudio-0.2.14.tar.gz",
11 | "sha256": "78dfff3879b4994d1f4fc6485646a57755c6ee3c19647a491f790a0895bd2f87"
12 | }
13 | ]
14 | }
--------------------------------------------------------------------------------
/data/icons/circle-crossed-symbolic.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
--------------------------------------------------------------------------------
/modules/python3-lxml.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "python3-lxml",
3 | "buildsystem": "simple",
4 | "build-commands": [
5 | "pip install --verbose --exists-action=i --no-index --find-links=\"file://${PWD}\" --prefix=${FLATPAK_DEST} \"lxml\" --no-build-isolation --ignore-installed"
6 | ],
7 | "sources": [
8 | {
9 | "type": "file",
10 | "url": "https://files.pythonhosted.org/packages/80/61/d3dc048cd6c7be6fe45b80cedcbdd4326ba4d550375f266d9f4246d0f4bc/lxml-5.3.2.tar.gz",
11 | "sha256": "773947d0ed809ddad824b7b14467e1a481b8976e87278ac4a730c2f7c7fcddc1"
12 | }
13 | ]
14 | }
--------------------------------------------------------------------------------
/modules/python3-pydub.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "python3-pydub",
3 | "buildsystem": "simple",
4 | "build-commands": [
5 | "pip install --verbose --exists-action=i --no-index --find-links=\"file://${PWD}\" --prefix=${FLATPAK_DEST} \"pydub\" --no-build-isolation"
6 | ],
7 | "sources": [
8 | {
9 | "type": "file",
10 | "url": "https://files.pythonhosted.org/packages/a6/53/d78dc063216e62fc55f6b2eebb447f6a4b0a59f55c8406376f76bf959b08/pydub-0.25.1-py2.py3-none-any.whl",
11 | "sha256": "65617e33033874b59d87db603aa1ed450633288aefead953b30bded59cb599a6"
12 | }
13 | ]
14 | }
--------------------------------------------------------------------------------
/modules/python3-pylatexenc.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "python3-pylatexenc",
3 | "buildsystem": "simple",
4 | "build-commands": [
5 | "pip3 install --verbose --exists-action=i --no-index --find-links=\"file://${PWD}\" --prefix=${FLATPAK_DEST} \"pylatexenc\" --no-build-isolation"
6 | ],
7 | "sources": [
8 | {
9 | "type": "file",
10 | "url": "https://files.pythonhosted.org/packages/5d/ab/34ec41718af73c00119d0351b7a2531d2ebddb51833a36448fc7b862be60/pylatexenc-2.10.tar.gz",
11 | "sha256": "3dd8fd84eb46dc30bee1e23eaab8d8fb5a7f507347b23e5f38ad9675c84f40d3"
12 | }
13 | ]
14 | }
--------------------------------------------------------------------------------
/modules/python3-expandvars.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "python3-expandvars",
3 | "buildsystem": "simple",
4 | "build-commands": [
5 | "pip3 install --verbose --exists-action=i --no-index --find-links=\"file://${PWD}\" --prefix=${FLATPAK_DEST} \"expandvars\" --no-build-isolation"
6 | ],
7 | "sources": [
8 | {
9 | "type": "file",
10 | "url": "https://files.pythonhosted.org/packages/df/b3/072c28eace372ba7630ea187b7efd7f09cc8bcebf847a96b5e03e9cc0828/expandvars-0.12.0-py3-none-any.whl",
11 | "sha256": "7432c1c2ae50c671a8146583177d60020dd210ada7d940e52af91f1f84f753b2"
12 | }
13 | ]
14 | }
--------------------------------------------------------------------------------
/modules/python3-six.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "python3-six",
3 | "buildsystem": "simple",
4 | "build-commands": [
5 | "pip3 install --verbose --exists-action=i --no-index --find-links=\"file://${PWD}\" --prefix=${FLATPAK_DEST} \"six\" --no-build-isolation"
6 | ],
7 | "sources": [
8 | {
9 | "type": "file",
10 | "url": "https://files.pythonhosted.org/packages/d9/5a/e7c31adbe875f2abbb91bd84cf2dc52d792b5a01506781dbcf25c91daf11/six-1.16.0-py2.py3-none-any.whl",
11 | "sha256": "8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"
12 | }
13 | ]
14 | }
--------------------------------------------------------------------------------
/src/handlers/llm/mistral_handler.py:
--------------------------------------------------------------------------------
1 | from .openai_handler import OpenAIHandler
2 |
class MistralHandler(OpenAIHandler):
    """OpenAI-compatible LLM handler preconfigured for the Mistral API."""

    key = "mistral"
    default_models = (("open-mixtral-8x7b", "open-mixtral-8x7b"), )

    def __init__(self, settings, path):
        super().__init__(settings, path)
        # Pin the provider endpoint and switch off the advanced-parameters
        # settings that this backend does not expose.
        for setting_name, setting_value in (
            ("endpoint", "https://api.mistral.ai/v1/"),
            ("advanced_params", False),
        ):
            self.set_setting(setting_name, setting_value)

    def get_extra_settings(self) -> list:
        """Build the standard OpenAI-style extra-settings list for Mistral."""
        model_docs = "https://docs.mistral.ai/getting-started/models/models_overview/"
        return self.build_extra_settings(
            "Mistral", True, True, False, True, True, None, model_docs, False, True
        )
13 |
14 |
--------------------------------------------------------------------------------
/.github/workflows/flatpak-builder.yml:
--------------------------------------------------------------------------------
1 | on:
2 | push:
3 | branches: [ master ]
4 | name: Flatpak Build
5 | jobs:
6 | flatpak:
7 | name: "Flatpak"
8 | runs-on: ubuntu-latest
9 | container:
10 | image: ghcr.io/flathub-infra/flatpak-github-actions:gnome-48
11 | options: --privileged
12 | steps:
13 | - uses: actions/checkout@v4
14 | with:
15 | fetch-depth: 0
16 | - name: Flatpak build
17 | uses: flatpak/flatpak-github-actions/flatpak-builder@v6
18 | with:
19 | bundle: newelle.flatpak
20 | manifest-path: io.github.qwersyk.Newelle.json
21 | cache-key: flatpak-builder-${{ github.sha }}
--------------------------------------------------------------------------------
/flake.lock:
--------------------------------------------------------------------------------
1 | {
2 | "nodes": {
3 | "nixpkgs": {
4 | "locked": {
5 | "lastModified": 1747179050,
6 | "narHash": "sha256-qhFMmDkeJX9KJwr5H32f1r7Prs7XbQWtO0h3V0a0rFY=",
7 | "owner": "NixOS",
8 | "repo": "nixpkgs",
9 | "rev": "adaa24fbf46737f3f1b5497bf64bae750f82942e",
10 | "type": "github"
11 | },
12 | "original": {
13 | "owner": "NixOS",
14 | "ref": "nixos-unstable",
15 | "repo": "nixpkgs",
16 | "type": "github"
17 | }
18 | },
19 | "root": {
20 | "inputs": {
21 | "nixpkgs": "nixpkgs"
22 | }
23 | }
24 | },
25 | "root": "root",
26 | "version": 7
27 | }
28 |
--------------------------------------------------------------------------------
/src/handlers/stt/stt.py:
--------------------------------------------------------------------------------
1 | from abc import abstractmethod
2 | from ...utility.pip import find_module
3 | from ..handler import Handler
4 |
5 |
class STTHandler(Handler):
    """Base class for speech-to-text backends; every STT handler extends it."""

    # Unique identifier of the handler; subclasses override this.
    key = ""
    # GSettings key under which this handler family stores its settings.
    schema_key = "stt-settings"

    def is_installed(self) -> bool:
        """Return True when every extra pip requirement is importable."""
        return all(
            find_module(module) is not None
            for module in self.get_extra_requirements()
        )

    @abstractmethod
    def recognize_file(self, path) -> str | None:
        """Transcribe the audio file at *path*; None signals failure."""
        pass
22 |
23 |
24 |
--------------------------------------------------------------------------------
/src/handlers/tts/__init__.py:
--------------------------------------------------------------------------------
1 | from .tts import TTSHandler
2 | from .custom_handler import CustomTTSHandler
3 | from .espeak_handler import EspeakHandler
4 | from .gtts_handler import gTTSHandler
5 | from .elevenlabs_handler import ElevenLabs
6 | from .kokoro_handler import KokoroTTSHandler
7 | from .openai_tts_handler import OpenAITTSHandler
8 | from .custom_openai_tts import CustomOpenAITTSHandler
9 | from .groq_tts_handler import GroqTTSHandler
10 |
11 | __all__ = [
12 | "TTSHandler",
13 | "CustomTTSHandler",
14 | "EspeakHandler",
15 | "gTTSHandler",
16 | "ElevenLabs",
17 | "KokoroTTSHandler",
18 | "OpenAITTSHandler",
19 | "CustomOpenAITTSHandler",
20 | "GroqTTSHandler"
21 | ]
22 |
23 |
--------------------------------------------------------------------------------
/src/handlers/embeddings/gemini_handler.py:
--------------------------------------------------------------------------------
1 | from .openai_handler import OpenAIEmbeddingHandler
2 | import numpy as np
3 | from ...utility.pip import find_module
4 |
5 |
class GeminiEmbeddingHanlder(OpenAIEmbeddingHandler):
    """Embedding handler for Gemini through its OpenAI-compatible endpoint.

    NOTE(review): the class name misspells "Handler"; it is left unchanged
    because other modules import it under this exact name.
    """

    key = "geminiembedding"
    models = (("text-embedding-004", "text-embedding-004"), )

    def __init__(self, settings, path):
        super().__init__(settings, path)
        endpoint = "https://generativelanguage.googleapis.com/v1beta/openai/"
        self.set_setting("endpoint", endpoint)

    def get_extra_settings(self) -> list:
        """Build the OpenAI-style extra-settings list branded for Gemini."""
        return self.build_extra_settings("Gemini", True, False, True, None, True)

    def get_embedding_size(self) -> int:
        # text-embedding-004 yields 768-dimensional vectors.
        return 768
20 |
--------------------------------------------------------------------------------
/src/handlers/stt/sphinx_handler.py:
--------------------------------------------------------------------------------
1 | import speech_recognition as sr
2 | from .stt import STTHandler
3 |
class SphinxHandler(STTHandler):
    """Offline speech recognition backed by CMU PocketSphinx."""

    key = "sphinx"

    @staticmethod
    def get_extra_requirements() -> list:
        """Extra pip modules required by this handler."""
        return ["pocketsphinx"]

    def recognize_file(self, path):
        """Transcribe the audio file at *path*.

        Returns the recognized text, a translated notice when the audio
        could not be understood, or None on any unexpected error.
        """
        recognizer = sr.Recognizer()
        with sr.AudioFile(path) as source:
            recorded = recognizer.record(source)
        try:
            return recognizer.recognize_sphinx(recorded)
        except sr.UnknownValueError:
            # Speech was detected but could not be decoded.
            return _("Could not understand the audio")
        except Exception as e:
            print(e)
            return None
24 |
25 |
--------------------------------------------------------------------------------
/data/icons/zoom-out-symbolic.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
--------------------------------------------------------------------------------
/src/handlers/tts/gtts_handler.py:
--------------------------------------------------------------------------------
1 | from gtts import gTTS, lang
2 |
3 | from .tts import TTSHandler
4 |
class gTTSHandler(TTSHandler):
    """Text-to-speech via Google Translate's TTS service (gTTS)."""

    key = "gtts"

    def get_voices(self):
        """Return the supported languages as ((display_name, code), ...).

        The result is cached in self.voices after the first lookup so the
        language table is fetched from gTTS only once.
        """
        if len(self.voices) > 0:
            return self.voices
        langs = lang.tts_langs()
        # Build the tuple in one pass; the previous `res += (t,)` loop
        # re-allocated the tuple on every iteration (quadratic).
        self.voices = tuple((name, code) for code, name in langs.items())
        return self.voices

    def save_audio(self, message, file):
        """Synthesize *message* and write the resulting audio to *file*.

        Falls back to the first available voice when the configured one is
        not supported.
        """
        voice = self.get_current_voice()
        if not self.voice_available(voice):
            voice = self.get_voices()[0][1]
        tts = gTTS(message, lang=voice)
        tts.save(file)
25 |
26 |
--------------------------------------------------------------------------------
/data/icons/check-plain-symbolic.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
--------------------------------------------------------------------------------
/src/handlers/llm/deepseek_handler.py:
--------------------------------------------------------------------------------
1 | from .openai_handler import OpenAIHandler
2 |
class DeepseekHandler(OpenAIHandler):
    """OpenAI-compatible LLM handler preconfigured for the DeepSeek API."""

    key = "deepseek"
    default_models = (
        ("deepseek-chat", "deepseek-chat"),
        ("deepseek-reasoner", "deepseek-reasoner"),
    )

    def __init__(self, settings, path):
        super().__init__(settings, path)
        # Fixed provider endpoint; advanced parameters are not exposed here.
        self.set_setting("endpoint", "https://api.deepseek.com")
        self.set_setting("advanced_params", False)

    def supports_vision(self) -> bool:
        """Image input is not supported by this backend."""
        return False

    def get_extra_settings(self) -> list:
        """Build the standard OpenAI-style extra-settings list for DeepSeek."""
        pricing_docs = "https://api-docs.deepseek.com/quick_start/pricing"
        return self.build_extra_settings(
            "Deepseek", True, True, False, True, True, None, pricing_docs, False, True
        )
17 |
--------------------------------------------------------------------------------
/data/icons/zoom-in-symbolic.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
--------------------------------------------------------------------------------
/src/handlers/stt/__init__.py:
--------------------------------------------------------------------------------
1 | from .stt import STTHandler
2 | from .custom_handler import CustomSRHandler
3 | from .sphinx_handler import SphinxHandler
4 | from .witai_handler import WitAIHandler
5 | from .googlesr_handler import GoogleSRHandler
6 | from .vosk_handler import VoskHandler
7 | from .whisper_handler import WhisperHandler
8 | from .groqsr_handler import GroqSRHandler
9 | from .openaisr_handler import OpenAISRHandler
10 | from .whispercpp_handler import WhisperCPPHandler
11 |
12 | __all__ = [
13 | "STTHandler",
14 | "CustomSRHandler",
15 | "SphinxHandler",
16 | "WitAIHandler",
17 | "GoogleSRHandler",
18 | "VoskHandler",
19 | "WhisperHandler",
20 | "GroqSRHandler",
21 | "OpenAISRHandler",
22 | "WhisperCPPHandler"
23 | ]
24 |
--------------------------------------------------------------------------------
/data/icons/magic-wand-symbolic.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
--------------------------------------------------------------------------------
/data/icons/controls-big-symbolic.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
--------------------------------------------------------------------------------
/modules/git.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "git",
3 | "make-args": [
4 | "INSTALL_SYMLINKS=1"
5 | ],
6 | "make-install-args": [
7 | "INSTALL_SYMLINKS=1"
8 | ],
9 | "cleanup": [
10 | "/lib/pkgconfig",
11 | "/man"
12 | ],
13 | "sources": [
14 | {
15 | "type": "archive",
16 | "url": "https://www.kernel.org/pub/software/scm/git/git-2.49.1.tar.gz",
17 | "sha256": "84a8383ffc77146133bc128a544450cf8ce5166cbea5056c98033d2f0c454794",
18 | "x-checker-data": {
19 | "type": "anitya",
20 | "project-id": 5350,
21 | "stable-only": true,
22 | "url-template": "https://www.kernel.org/pub/software/scm/git/git-$version.tar.xz"
23 | }
24 | }
25 | ]
26 | }
27 |
--------------------------------------------------------------------------------
/src/handlers/memory/memory_handler.py:
--------------------------------------------------------------------------------
1 | from abc import abstractmethod
2 |
3 | from ...handlers.embeddings.embedding import EmbeddingHandler
4 | from ...handlers.llm.llm import LLMHandler
5 | from ...handlers import Handler
6 |
class MemoryHandler(Handler):
    """Base class for long-term memory providers.

    Implementations supply extra context for a prompt and record bot
    responses so they can be recalled in later conversations.
    """

    key = ""
    schema_key = "memory-settings"
    memory_size = 0  # how much history the memory is allowed to consider

    def set_memory_size(self, length: int):
        """Set the amount of history the memory should consider."""
        self.memory_size = length

    def set_handlers(self, llm: LLMHandler, embedding: EmbeddingHandler):
        """Attach the LLM and embedding handlers this memory may use."""
        self.llm = llm
        self.embedding = embedding

    @abstractmethod
    def get_context(self, prompt: str, history: list[dict[str, str]]) -> list[str]:
        """Return extra context strings relevant to the given prompt."""
        return []

    @abstractmethod
    def register_response(self, bot_response: str, history: list[dict[str, str]]):
        """Record a bot response so it can be remembered later."""
        pass

    @abstractmethod
    def reset_memory(self):
        """Erase everything stored by this memory handler."""
        pass
32 |
--------------------------------------------------------------------------------
/data/icons/hicolor/symbolic/apps/io.github.qwersyk.Newelle-symbolic.svg:
--------------------------------------------------------------------------------
1 |
4 |
--------------------------------------------------------------------------------
/modules/python3-lxml_html_clean.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "python3-lxml_html_clean",
3 | "buildsystem": "simple",
4 | "build-commands": [
5 | "pip3 install --verbose --exists-action=i --no-index --find-links=\"file://${PWD}\" --prefix=${FLATPAK_DEST} \"lxml_html_clean\" --no-build-isolation"
6 | ],
7 | "sources": [
8 | {
9 | "type": "file",
10 | "url": "https://files.pythonhosted.org/packages/80/61/d3dc048cd6c7be6fe45b80cedcbdd4326ba4d550375f266d9f4246d0f4bc/lxml-5.3.2.tar.gz",
11 | "sha256": "773947d0ed809ddad824b7b14467e1a481b8976e87278ac4a730c2f7c7fcddc1"
12 | },
13 | {
14 | "type": "file",
15 | "url": "https://files.pythonhosted.org/packages/4e/0b/942cb7278d6caad79343ad2ddd636ed204a47909b969d19114a3097f5aa3/lxml_html_clean-0.4.2-py3-none-any.whl",
16 | "sha256": "74ccfba277adcfea87a1e9294f47dd86b05d65b4da7c5b07966e3d5f3be8a505"
17 | }
18 | ]
19 | }
--------------------------------------------------------------------------------
/modules/python3-gpt4all.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "python3-gpt4all",
3 | "buildsystem": "simple",
4 | "build-commands": [
5 | "pip3 install --verbose --exists-action=i --no-index --find-links=\"file://${PWD}\" --prefix=${FLATPAK_DEST} \"gpt4all\" --no-build-isolation"
6 | ],
7 | "sources": [
8 | {
9 | "type": "file",
10 | "url": "https://files.pythonhosted.org/packages/93/60/ef1efd83c5757a69e8ed842b8a12b091e94a9bb2ceacc8b6e1c2e6487a77/gpt4all-2.8.2-py3-none-manylinux1_x86_64.whl",
11 | "sha256": "c61e977afa72475c076211fd9b59a88334fc0062615be8c2f7716363a21fbafa"
12 | },
13 | {
14 | "type": "file",
15 | "url": "https://files.pythonhosted.org/packages/18/eb/fdb7eb9e48b7b02554e1664afd3bd3f117f6b6d6c5881438a0b055554f9b/tqdm-4.66.4-py3-none-any.whl",
16 | "sha256": "b75ca56b413b030bc3f00af51fd2c1a1a5eac6a0c1cca83cbb37a5c52abce644"
17 | }
18 | ]
19 | }
20 |
--------------------------------------------------------------------------------
/src/handlers/embeddings/embedding.py:
--------------------------------------------------------------------------------
1 | from ..handler import Handler
2 | from abc import abstractmethod
3 | from numpy import ndarray
4 |
class EmbeddingHandler(Handler):
    """Base class for text-embedding providers."""

    key = ""
    schema_key = "embedding-settings"

    def __init__(self, settings, path):
        super().__init__(settings, path)
        # Embedding dimensionality, resolved lazily on first request
        self.dim = None

    def load_model(self):
        """Load embedding model, called at every settings reload"""
        pass

    @abstractmethod
    def get_embedding(self, text: list[str]) -> ndarray:
        """
        Get the embedding for the given text

        Args:
            text: text to embed

        Returns:
            ndarray: embedding
        """
        pass

    def get_embedding_size(self) -> int:
        """Return the embedding width, probing the model once and caching it."""
        if self.dim is None:
            # Embed a dummy string to discover the vector dimensionality
            self.dim = self.get_embedding(["test"]).shape[1]
        return self.dim
35 |
--------------------------------------------------------------------------------
/src/handlers/stt/witai_handler.py:
--------------------------------------------------------------------------------
1 | import speech_recognition as sr
2 | from .stt import STTHandler
3 |
class WitAIHandler(STTHandler):
    """Speech-to-text through the wit.ai cloud service."""

    key = "witai"

    def get_extra_settings(self) -> list:
        return [
            {
                "key": "api",
                "title": _("API Key"),
                "description": _("Server Access Token for wit.ai"),
                "type": "entry",
                "default": "",
                "password": True,
            },
        ]

    def recognize_file(self, path):
        """Transcribe the audio file at *path*; return text or None on failure."""
        recognizer = sr.Recognizer()
        with sr.AudioFile(path) as source:
            audio = recognizer.record(source)
        token = self.get_setting("api")
        try:
            return recognizer.recognize_wit(audio, key=token)
        except sr.UnknownValueError:
            # Speech was unintelligible
            return None
        except Exception as e:
            print(e)
            return None
33 |
34 |
--------------------------------------------------------------------------------
/src/ui/widgets/__init__.py:
--------------------------------------------------------------------------------
1 | from .profilerow import ProfileRow
2 | from .multiline import MultilineEntry
3 | from .barchart import BarChartBox
4 | from .comborow import ComboRowHelper
5 | from .copybox import CopyBox
6 | from .file import File
7 | from .latex import DisplayLatex, LatexCanvas, InlineLatex
8 | from .markuptextview import MarkupTextView
9 | from .website import WebsiteButton
10 | from .websearch import WebSearchWidget
11 | from .thinking import ThinkingWidget
12 | from .documents_reader import DocumentReaderWidget
13 | from .tipscarousel import TipsCarousel
14 | from .browser import BrowserWidget
15 | from .terminal_dialog import Terminal, TerminalDialog
16 | from .code_editor import CodeEditorWidget
17 |
18 | __all__ = ["ProfileRow", "MultilineEntry", "BarChartBox", "ComboRowHelper", "CopyBox", "File", "DisplayLatex", "LatexCanvas", "MarkupTextView", "InlineLatex", "WebsiteButton", "WebSearchWidget", "ThinkingWidget", "DocumentReaderWidget", "TipsCarousel", "BrowserWidget", "Terminal", "TerminalDialog", "CodeEditorWidget"]
19 |
--------------------------------------------------------------------------------
/data/icons/warning-outline-symbolic.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
--------------------------------------------------------------------------------
/src/handlers/llm/__init__.py:
--------------------------------------------------------------------------------
1 | from .llm import LLMHandler
2 | from .claude_handler import ClaudeHandler
3 | from .custom_handler import CustomLLMHandler
4 | from .g4f_handler import G4FHandler
5 | from .gemini_handler import GeminiHandler
6 | from .gpt3any_handler import GPT3AnyHandler
7 | from .groq_handler import GroqHandler
8 | from .gpt4all_handler import GPT4AllHandler
9 | from .mistral_handler import MistralHandler
10 | from .ollama_handler import OllamaHandler
11 | from .openai_handler import OpenAIHandler
12 | from .openrouter_handler import OpenRouterHandler
13 | from .newelle_handler import NewelleAPIHandler
14 | from .deepseek_handler import DeepseekHandler
15 |
16 | __all__ = [
17 | "LLMHandler",
18 | "ClaudeHandler",
19 | "CustomLLMHandler",
20 | "G4FHandler",
21 | "GeminiHandler",
22 | "GPT3AnyHandler",
23 | "GPT4AllHandler",
24 | "GroqHandler",
25 | "MistralHandler",
26 | "OllamaHandler",
27 | "OpenAIHandler",
28 | "OpenRouterHandler",
29 | "NewelleAPIHandler",
30 | "DeepseekHandler",
31 | ]
32 |
--------------------------------------------------------------------------------
/src/handlers/stt/custom_handler.py:
--------------------------------------------------------------------------------
1 | from subprocess import check_output
2 | from .stt import STTHandler
3 | from ...utility.system import get_spawn_command
4 |
class CustomSRHandler(STTHandler):
    """Speech-to-text that delegates to a user-supplied shell command."""

    key = "custom_command"

    def get_extra_settings(self) -> list:
        return [
            {
                "key": "command",
                "title": _("Command to execute"),
                "description": _("{0} will be replaced with the model fullpath"),
                "type": "entry",
                "default": ""
            },
        ]

    @staticmethod
    def requires_sandbox_escape() -> bool:
        """If the handler requires to run commands on the user host system"""
        return True

    def recognize_file(self, path):
        """Run the configured command on *path* and return its stdout."""
        command = self.get_setting("command")
        if command is None:
            return None
        # Substitute the audio path and execute on the host system
        output = check_output(
            get_spawn_command() + ["bash", "-c", command.replace("{0}", path)]
        ).decode("utf-8")
        return str(output)
31 |
32 |
--------------------------------------------------------------------------------
/src/ui/widgets/barchart.py:
--------------------------------------------------------------------------------
1 | from gi.repository import Gtk, Pango
2 |
class BarChartBox(Gtk.Box):
    """Vertical box rendering a simple bar chart from a label->value mapping.

    Args:
        data_dict: mapping of bar label to numeric value
        percentages: if True and all values fit in 0-100, bars scale to 100
    """
    def __init__(self, data_dict, percentages):
        Gtk.Box.__init__(self, orientation=Gtk.Orientation.VERTICAL, margin_top=10, margin_start=10,
                         margin_bottom=10, margin_end=10, css_classes=["card", "chart"])

        self.data_dict = data_dict
        # default=0 avoids ValueError when the dict is empty
        max_value = max(self.data_dict.values(), default=0)
        if percentages and max_value <= 100:
            max_value = 100
        for label, value in self.data_dict.items():
            bar_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, margin_top=10, margin_start=10,
                              margin_bottom=10, margin_end=10)

            bar = Gtk.ProgressBar()
            # Guard against division by zero when every value is 0
            bar.set_fraction(value / max_value if max_value else 0)

            title = Gtk.Label(label=label, wrap=True, wrap_mode=Pango.WrapMode.WORD_CHAR)
            title.set_halign(Gtk.Align.CENTER)
            bar_box.append(title)
            bar_box.append(bar)
            self.append(bar_box)
24 |
25 |
--------------------------------------------------------------------------------
/src/handlers/tts/custom_handler.py:
--------------------------------------------------------------------------------
1 | from subprocess import check_output
2 | from ...handlers import ExtraSettings
3 | from .tts import TTSHandler
4 | from ...utility.system import get_spawn_command
5 |
6 |
class CustomTTSHandler(TTSHandler):
    """Text-to-speech that delegates synthesis to a user-supplied shell command."""

    key = "custom_command"

    @staticmethod
    def requires_sandbox_escape() -> bool:
        """If the handler requires to run commands on the user host system"""
        return True

    def get_extra_settings(self) -> list:
        return [
            ExtraSettings.EntrySetting("command", _("Command to execute"), _("{0} will be replaced with the file fullpath, {1} with the text"), "")
        ]

    def is_installed(self):
        # No local dependency: the user provides the command themselves
        return True

    def save_audio(self, message, file):
        """Run the configured command, substituting output path and text."""
        command = self.get_setting("command")
        if command is not None:
            # Execute on the host; {0} -> output file, {1} -> text to speak
            # (debug print of the command removed)
            check_output(get_spawn_command() + ["bash", "-c", command.replace("{0}", file).replace("{1}", message)])
        return
29 |
--------------------------------------------------------------------------------
/flake.nix:
--------------------------------------------------------------------------------
{
  description = "Newelle";

  # Track the rolling nixpkgs unstable channel
  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";

  outputs = { self, nixpkgs, }:
    let
      overlay = import ./overlay.nix;
      # Platforms this flake builds for
      supportedSystems = [ "x86_64-linux" "x86_64-darwin" "aarch64-linux" "aarch64-darwin" ];
      forAllSystems = nixpkgs.lib.genAttrs supportedSystems;
      # Per-system nixpkgs with the local Python overlay applied
      nixpkgsFor = forAllSystems (system: import nixpkgs { inherit system; config.allowUnfree = true; overlays = [ overlay ]; });
    in {
      packages = forAllSystems (system:
        let
          pkgs = nixpkgsFor.${system};
        in
        {
          newelle = pkgs.callPackage ./package.nix { };
        });

      defaultPackage = forAllSystems (system: self.packages.${system}.newelle);

      # Development shell exposing the built package
      devShells = forAllSystems (system:
        let
          pkgs = nixpkgsFor.${system};
        in
        {
          default = pkgs.mkShell {
            buildInputs = [
              self.packages.${system}.newelle
            ];
          };
        });
    };
}
36 |
--------------------------------------------------------------------------------
/src/handlers/websearch/websearch.py:
--------------------------------------------------------------------------------
1 | from abc import abstractmethod
2 | from collections.abc import Callable
3 | from ...handlers import Handler
4 |
5 |
class WebSearchHandler(Handler):
    """Base class for web-search providers."""

    schema_key = "websearch-settings"

    @abstractmethod
    def query(self, keywords: str) -> tuple[str, list]:
        """Run a search and return its result plus the sources.

        Args:
            keywords: the query

        Returns:
            - str: the text to send to the LLM
            - list: the list of sources (URL)
        """
        return "", []

    def supports_streaming_query(self) -> bool:
        """Whether query_streaming is implemented by this handler."""
        return False

    @abstractmethod
    def query_streaming(self, keywords: str, add_website: Callable) -> tuple[str, list]:
        """Run a search in streaming mode, reporting websites as found.

        Args:
            keywords: the query
            add_website: callback to add a website, takes (title, link, favicon_path)

        Returns:
            - str: the text to send to the LLM
            - list: the list of sources (URL)
        """
        return "", []
38 |
--------------------------------------------------------------------------------
/src/handlers/stt/vosk_handler.py:
--------------------------------------------------------------------------------
1 | import speech_recognition as sr
2 | import json
3 | from .stt import STTHandler
4 |
5 |
class VoskHandler(STTHandler):
    """Offline speech-to-text using a locally stored VOSK model."""

    key = "vosk"

    @staticmethod
    def get_extra_requirements() -> list:
        return ["vosk"]

    def get_extra_settings(self) -> list:
        return [
            {
                "key": "path",
                "title": _("Model Path"),
                "description": _("Absolute path to the VOSK model (unzipped)"),
                "type": "entry",
                "website": "https://alphacephei.com/vosk/models",
                "default": ""
            },
        ]

    def recognize_file(self, path):
        """Transcribe the audio file at *path*; return text or None on failure."""
        from vosk import Model
        recognizer = sr.Recognizer()
        with sr.AudioFile(path) as source:
            audio = recognizer.record(source)
        # Separate name avoids clobbering the audio-file path parameter
        model_path = self.get_setting("path")
        recognizer.vosk_model = Model(model_path)
        try:
            return json.loads(recognizer.recognize_vosk(audio))["text"]
        except sr.UnknownValueError:
            return None
        except Exception as e:
            print(e)
            return None
40 |
41 |
--------------------------------------------------------------------------------
/data/icons/search-folder-symbolic.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/data/icons/question-round-outline-symbolic.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
--------------------------------------------------------------------------------
/.github/workflows/build-locales.yml:
--------------------------------------------------------------------------------
name: Rebuild Locales

on:
  # Manual trigger only
  workflow_dispatch:

jobs:
  run-script:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
        with:
          ref: ${{ github.head_ref }}
          # Full history so the commit/push step works on any branch
          fetch-depth: 0

      - name: Rebuild locales
        run: |
          sudo apt update
          sudo apt install gettext
          # Collect translatable sources: python files using gettext markers
          # plus XML/.in data files
          (
            find src -type f -name "*.py" -exec grep -lE '_\(|N\(_' {} \;
            find data -type f \( -name "*.xml" -o -name "*.in" \)
          ) >po/POTFILES
          xgettext -o po/newelle.pot $(cat po/POTFILES)
          cd po
          # Merge the refreshed template into every existing translation
          for file in $(ls *.po); do
            msgmerge -U "$file" newelle.pot
          done
          rm -f *~
          cd ..

      - name: Commit changes
        run: |
          # Only commit when msgmerge actually changed something
          if [ -n "$(git status --porcelain)" ]; then
            git config --local user.name "github-actions[bot]"
            git config --local user.email "github-actions[bot]@users.noreply.github.com"
            git add .
            git commit -m "Automated changes by github actions"
            git push origin ${GITHUB_REF##*/}
          fi
41 |
--------------------------------------------------------------------------------
/data/icons/update-symbolic.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/src/handlers/embeddings/wordllama_handler.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from .embedding import EmbeddingHandler
3 | from ...handlers import ExtraSettings
4 |
5 | class WordLlamaHandler(EmbeddingHandler):
6 | key = "wordllama"
7 |
8 |
9 | def __init__(self, settings, path):
10 | super().__init__(settings, path)
11 | self.wl = None
12 |
13 | @staticmethod
14 | def get_extra_requirements() -> list:
15 | return ["wordllama"]
16 |
17 | def get_extra_settings(self) -> list:
18 | return [
19 | ExtraSettings.ComboSetting("model_size", "Model Size", "Size of the embedding", ["128", "256", "512", "1024"],"512"),
20 | ]
21 |
22 | def load_model(self):
23 | if not self.is_installed():
24 | return
25 | if self.wl is not None:
26 | return
27 | from wordllama import WordLlama
28 | size = self.get_embedding_size()
29 | if (size >= 256):
30 | self.wl = WordLlama.load(dim=size)
31 | else:
32 | self.wl = WordLlama.load(truncate_dim=size)
33 |
34 | def get_embedding(self, text: list[str]) -> np.ndarray:
35 | if self.wl is not None:
36 | return self.wl.embed(text)
37 | else:
38 | return np.array([])
39 |
40 | def get_embedding_size(self) -> int:
41 | return int(self.get_setting("model_size"))
42 |
--------------------------------------------------------------------------------
/data/meson.build:
--------------------------------------------------------------------------------
# Translate and install the .desktop launcher entry
desktop_file = i18n.merge_file(
  input: 'io.github.qwersyk.Newelle.desktop.in',
  output: 'io.github.qwersyk.Newelle.desktop',
  type: 'desktop',
  po_dir: '../po',
  install: true,
  install_dir: join_paths(get_option('datadir'), 'applications')
)

# Validate the generated desktop file when the tool is available
desktop_utils = find_program('desktop-file-validate', required: false)
if desktop_utils.found()
  test('Validate desktop file', desktop_utils, args: [desktop_file])
endif

# Translate and install the AppStream metadata
appstream_file = i18n.merge_file(
  input: 'io.github.qwersyk.Newelle.appdata.xml.in',
  output: 'io.github.qwersyk.Newelle.appdata.xml',
  po_dir: '../po',
  install: true,
  install_dir: join_paths(get_option('datadir'), 'appdata')
)

appstream_util = find_program('appstream-util', required: false)
if appstream_util.found()
  test('Validate appstream file', appstream_util, args: ['validate', appstream_file])
endif

# Install the GSettings schema
install_data('io.github.qwersyk.Newelle.gschema.xml',
  install_dir: join_paths(get_option('datadir'), 'glib-2.0/schemas')
)

compile_schemas = find_program('glib-compile-schemas', required: false)
if compile_schemas.found()
  test('Validate schema file',
       compile_schemas,
       args: ['--strict', '--dry-run', meson.current_source_dir()])
endif
subdir('icons')
39 |
--------------------------------------------------------------------------------
/src/handlers/llm/groq_handler.py:
--------------------------------------------------------------------------------
1 | from .openai_handler import OpenAIHandler
2 |
3 |
class GroqHandler(OpenAIHandler):
    """OpenAI-compatible LLM handler pinned to the Groq API endpoint."""

    key = "groq"
    default_models = (("llama-3.3-70B-versatile", "llama-3.3-70B-versatile"), )

    def __init__(self, settings, path):
        super().__init__(settings, path)
        self.set_setting("endpoint", "https://api.groq.com/openai/v1/")

    def supports_vision(self) -> bool:
        # Only vision-capable model families accept image input
        model = self.get_setting("model")
        return "llama-4" in model or "vision" in model

    def get_extra_settings(self) -> list:
        return self.build_extra_settings("Groq", True, True, False, False, True, "https://groq.com/privacy-policy/", "https://console.groq.com/docs/models", False, True)

    def convert_history(self, history: list, prompts: list | None = None) -> list:
        """Convert history, dropping the system prompt when images are present.

        Groq does not accept a system prompt alongside image content.
        """
        h = super().convert_history(history, prompts)
        contains_image = any(
            isinstance(message["content"], list)
            and any(part["type"] == "image_url" for part in message["content"])
            for message in h
        )
        if contains_image and (prompts is None or len(prompts) > 0):
            # First entry is assumed to be the system prompt from the base class
            h.pop(0)
        return h
31 |
32 |
--------------------------------------------------------------------------------
/src/handlers/stt/groqsr_handler.py:
--------------------------------------------------------------------------------
1 | from .openaisr_handler import OpenAISRHandler
2 |
class GroqSRHandler(OpenAISRHandler):
    """Whisper transcription through Groq's OpenAI-compatible endpoint."""

    key = "groq_sr"

    def __init__(self, settings, path):
        super().__init__(settings, path)
        # Endpoint is fixed for this provider
        self.set_setting("endpoint", "https://api.groq.com/openai/v1/")

    def get_extra_settings(self) -> list:
        return [
            {
                "key": "api",
                "title": _("API Key"),
                "description": _("API Key for Groq SR, write 'default' to use the default one"),
                "type": "entry",
                "default": "default",
                "password": True,
            },
            {
                "key": "model",
                "title": _("Groq Model"),
                "description": _("Name of the Groq Model"),
                "type": "entry",
                "default": "whisper-large-v3-turbo",
                "website": "https://console.groq.com/docs/models",
            },
            {
                "key": "language",
                "title": _("Language"),
                "description": _("Specify the language for transcription. Use ISO 639-1 language codes (e.g. \"en\" for English, \"fr\" for French, etc.). "),
                "type": "entry",
                "default": "",
            }
        ]
35 |
--------------------------------------------------------------------------------
/data/icons/view-show-symbolic.svg:
--------------------------------------------------------------------------------
1 |
2 |
12 |
--------------------------------------------------------------------------------
/src/newelle.in:
--------------------------------------------------------------------------------
1 | #!@PYTHON@
2 |
3 | # newelle.in
4 | #
5 | # Copyright 2023 Yehor Qwersyk
6 | #
7 | # This program is free software: you can redistribute it and/or modify
8 | # it under the terms of the GNU General Public License as published by
9 | # the Free Software Foundation, either version 3 of the License, or
10 | # (at your option) any later version.
11 | #
12 | # This program is distributed in the hope that it will be useful,
13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | # GNU General Public License for more details.
16 | #
17 | # You should have received a copy of the GNU General Public License
18 | # along with this program. If not, see .
19 | #
20 | # SPDX-License-Identifier: GPL-3.0-or-later
21 |
import os
import sys
import signal
import locale
import gettext

# Build-time substitutions filled in by the build system from this template
VERSION = '@VERSION@'
pkgdatadir = '@pkgdatadir@'
localedir = '@localedir@'

# Make the installed package importable
sys.path.insert(1, pkgdatadir)
# Restore default Ctrl+C behavior so the app can be interrupted
signal.signal(signal.SIGINT, signal.SIG_DFL)
locale.bindtextdomain('newelle', localedir)
locale.textdomain('newelle')
# Installs _() as a builtin for the whole application
gettext.install('newelle', localedir)

if __name__ == '__main__':
    import gi

    from gi.repository import Gio
    # Load and register the compiled GResource bundle before importing the app
    resource = Gio.Resource.load(os.path.join(pkgdatadir, 'newelle.gresource'))
    resource._register()

    from newelle import main
    sys.exit(main.main(VERSION))
47 |
--------------------------------------------------------------------------------
/data/icons/gnome-terminal-symbolic.svg:
--------------------------------------------------------------------------------
1 |
8 |
--------------------------------------------------------------------------------
/src/ui/shortcuts.py:
--------------------------------------------------------------------------------
1 | from gi.repository import Gtk, Adw
2 |
3 |
class Shortcuts(Gtk.Window):
    """Modal window listing the application's keyboard shortcuts."""

    def __init__(self, app, *args, **kwargs):
        super().__init__(*args, **kwargs, title=_('Help'))
        self.set_transient_for(app.win)
        self.set_modal(True)
        self.set_titlebar(Adw.HeaderBar(css_classes=["flat"]))

        container = Gtk.Box(margin_top=10, margin_start=10, margin_bottom=10, margin_end=10, valign=Gtk.Align.START, halign=Gtk.Align.CENTER)
        group = Gtk.ShortcutsGroup(title=_("Shortcuts"))
        # (title, accelerator) pairs shown to the user, in display order
        bindings = (
            (_("Reload chat"), 'r'),
            (_("Reload folder"), 'r'),
            (_("New tab"), 't'),
            (_("Paste Image"), 'v'),
            (_("Focus message box"), 'l'),
            (_("Start/stop recording"), 'g'),
            (_("Save"), 's'),
            (_("Stop TTS"), 'k'),
            (_("Zoom in"), 'plus'),
            (_("Zoom out"), 'minus'),
        )
        for title, accel in bindings:
            group.append(Gtk.ShortcutsShortcut(title=title, accelerator=accel))

        container.append(group)
        self.set_child(container)
26 |
--------------------------------------------------------------------------------
/modules/python3-markdownify.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "python3-markdownify",
3 | "buildsystem": "simple",
4 | "build-commands": [
5 | "pip3 install --verbose --exists-action=i --no-index --find-links=\"file://${PWD}\" --prefix=${FLATPAK_DEST} \"markdownify\" --no-build-isolation"
6 | ],
7 | "sources": [
8 | {
9 | "type": "file",
10 | "url": "https://files.pythonhosted.org/packages/50/cd/30110dc0ffcf3b131156077b90e9f60ed75711223f306da4db08eff8403b/beautifulsoup4-4.13.4-py3-none-any.whl",
11 | "sha256": "9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b"
12 | },
13 | {
14 | "type": "file",
15 | "url": "https://files.pythonhosted.org/packages/64/11/b751af7ad41b254a802cf52f7bc1fca7cabe2388132f2ce60a1a6b9b9622/markdownify-1.1.0-py3-none-any.whl",
16 | "sha256": "32a5a08e9af02c8a6528942224c91b933b4bd2c7d078f9012943776fc313eeef"
17 | },
18 | {
19 | "type": "file",
20 | "url": "https://files.pythonhosted.org/packages/e7/9c/0e6afc12c269578be5c0c1c9f4b49a8d32770a080260c333ac04cc1c832d/soupsieve-2.7-py3-none-any.whl",
21 | "sha256": "6e60cc5c1ffaf1cebcc12e8188320b72071e922c2e897f737cadce79ad5d30c4"
22 | },
23 | {
24 | "type": "file",
25 | "url": "https://files.pythonhosted.org/packages/69/e0/552843e0d356fbb5256d21449fa957fa4eff3bbc135a74a691ee70c7c5da/typing_extensions-4.14.0-py3-none-any.whl",
26 | "sha256": "a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af"
27 | }
28 | ]
29 | }
--------------------------------------------------------------------------------
/src/handlers/stt/googlesr_handler.py:
--------------------------------------------------------------------------------
1 | import speech_recognition as sr
2 | from .stt import STTHandler
3 |
class GoogleSRHandler(STTHandler):
    """Speech-to-text through the Google Speech Recognition API."""

    key = "google_sr"

    def get_extra_settings(self) -> list:
        return [
            {
                "key": "api",
                "title": _("API Key"),
                "description": _("API Key for Google SR, write 'default' to use the default one"),
                "type": "entry",
                "default": "default",
                "password": True,
            },
            {
                "key": "language",
                "title": _("Language"),
                # Typo fix: "recgnize" -> "recognize"
                "description": _("The language of the text to recognize in IETF"),
                "type": "entry",
                "default": "en-US",
                "website": "https://stackoverflow.com/questions/14257598/what-are-language-codes-in-chromes-implementation-of-the-html5-speech-recogniti"
            }
        ]

    def recognize_file(self, path):
        """Transcribe the audio file at *path*; return text or None on failure."""
        r = sr.Recognizer()
        with sr.AudioFile(path) as source:
            audio = r.record(source)
        key = self.get_setting("api")
        language = self.get_setting("language")
        try:
            # 'default' means: use the library's built-in public API key
            if key == "default":
                res = r.recognize_google(audio, language=language)
            else:
                res = r.recognize_google(audio, key=key, language=language)
        except sr.UnknownValueError:
            # Audio could not be understood
            return None
        except Exception as e:
            print(e)
            return None
        return res
46 |
47 |
--------------------------------------------------------------------------------
/overlay.nix:
--------------------------------------------------------------------------------
# Python overlay: packages wheels that are not available in nixpkgs.
(self0: super0:
  let
    myOverride = {
      packageOverrides = self: super: {

        # Minimal wheel; presumably used to verify pip installs work — TODO confirm
        "pip-install-test" = super.buildPythonPackage rec {
          pname = "pip-install-test";
          version = "0.5";
          src = super0.fetchurl {
            url = "https://files.pythonhosted.org/packages/15/8e/4fbc92846184e1080af77da38d55928a5486e0bc5e2ec8342c7db378d7f1/pip_install_test-0.5-py3-none-any.whl";
            sha256 =
              "623887f5ce0b4695ec3c0503aa4f394253a403e2bb952417b3a778f0802dbe0b";
          };
          # Pre-built wheel: nothing to compile or test
          format = "wheel";
          doCheck = false;
          buildInputs = [ ];
          checkInputs = [ ];
          nativeBuildInputs = [ ];
          propagatedBuildInputs = [
          ];
        };

        # newspaper3k wheel with its runtime Python dependencies
        "newspaper3k" = super.buildPythonPackage rec {
          pname = "newspaper3k";
          version = "0.2.8";
          src = super0.fetchurl {
            url =
              "https://files.pythonhosted.org/packages/d7/b9/51afecb35bb61b188a4b44868001de348a0e8134b4dfa00ffc191567c4b9/newspaper3k-0.2.8-py3-none-any.whl";
            sha256 =
              "44a864222633d3081113d1030615991c3dbba87239f6bbf59d91240f71a22e3e";
          };
          format = "wheel";
          doCheck = false;
          buildInputs = [ ];
          checkInputs = [ ];
          nativeBuildInputs = [ ];
          propagatedBuildInputs = [
            super.feedparser
            super.tldextract
          ];
        };

      };
    };
  in {
    # Replace python3 with a version that knows about the extra packages
    python3 = super0.python3.override myOverride;
  }
)
49 |
--------------------------------------------------------------------------------
/src/handlers/tts/groq_tts_handler.py:
--------------------------------------------------------------------------------
1 | from .custom_openai_tts import CustomOpenAITTSHandler
2 | from ...handlers import ExtraSettings
3 |
class GroqTTSHandler(CustomOpenAITTSHandler):
    """TTS handler for Groq's OpenAI-compatible PlayAI speech endpoint."""

    key = "groq_tts"

    def __init__(self, a, b) -> None:
        super().__init__(a, b)
        # Groq exposes an OpenAI-compatible API; pin the endpoint and
        # clear "instructions", which this backend does not support.
        self.set_setting("endpoint", "https://api.groq.com/openai/v1/")
        self.set_setting("instructions", "")

    def get_models(self):
        """Return the available models as (display, value) tuples."""
        models = ["playai-tts", "playai-tts-arabic"]
        return tuple((model, model) for model in models)

    def get_voices(self):
        """Return the voices for the selected model as (display, value) tuples."""
        if self.get_setting("model", False, "playai-tts") == "playai-tts":
            voices = "Arista-PlayAI, Atlas-PlayAI, Basil-PlayAI, Briggs-PlayAI, Calum-PlayAI, Celeste-PlayAI, Cheyenne-PlayAI, Chip-PlayAI, Cillian-PlayAI, Deedee-PlayAI, Fritz-PlayAI, Gail-PlayAI, Indigo-PlayAI, Mamaw-PlayAI, Mason-PlayAI, Mikail-PlayAI, Mitch-PlayAI, Quinn-PlayAI, Thunder-PlayAI".split(", ")
        else:
            voices = "Ahmad-PlayAI, Amira-PlayAI, Khalid-PlayAI, Nasser-PlayAI".split(", ")
        # Fix: the voice names are already correctly cased; the previous
        # str.capitalize() lowercased everything after the first letter
        # (e.g. "Arista-PlayAI" -> "Arista-playai") in the displayed label.
        return tuple((voice, voice) for voice in voices)

    def get_extra_settings(self) -> list:
        return [
            ExtraSettings.EntrySetting("api_key", _("API Key"), _("The API key to use"), "", password=True),
            ExtraSettings.ComboSetting("voice", _("Voice"), _("The voice to use"), self.get_voices(), "Arista-PlayAI"),
            ExtraSettings.ComboSetting("model", _("Model"), _("The model to use"), self.get_models(), "playai-tts", update_settings=True),
        ]
36 |
--------------------------------------------------------------------------------
/src/utility/system.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | import os
3 |
def is_wayland() -> bool:
    """
    Check if we are in a Wayland environment

    Returns:
        bool: True if we are in a Wayland environment
    """
    # WAYLAND_DISPLAY is set (non-empty) by Wayland compositors
    return bool(os.getenv("WAYLAND_DISPLAY"))
14 |
def is_flatpak() -> bool:
    """
    Check if we are in a flatpak

    Returns:
        bool: True if we are in a flatpak
    """
    # Flatpak sets the "container" environment variable inside the sandbox
    return bool(os.getenv("container"))
25 |
def can_escape_sandbox() -> bool:
    """
    Check if we can escape the sandbox

    Returns:
        bool: True if we can escape the sandbox (host commands can be run)
    """
    if not is_flatpak():
        return True
    try:
        # Probe the host with a harmless command; failure means the
        # Flatpak portal does not allow spawning host processes.
        subprocess.check_output(["flatpak-spawn", "--host", "echo", "test"])
    except (subprocess.CalledProcessError, FileNotFoundError):
        # CalledProcessError: the portal refused the command.
        # FileNotFoundError: flatpak-spawn itself is missing (previously
        # uncaught, crashing the caller instead of returning False).
        return False
    return True
40 |
def get_spawn_command() -> list:
    """
    Get the spawn command prefix used to run commands on the user system.

    Returns:
        list: command prefix; empty when not running inside flatpak
    """
    return ["flatpak-spawn", "--host"] if is_flatpak() else []
52 |
def open_website(website):
    """Opens a website using xdg-open

    Args:
        website (): url of the website
    """
    command = get_spawn_command() + ["xdg-open", website]
    subprocess.Popen(command)
60 |
def open_folder(folder):
    """Opens a folder using xdg-open

    Args:
        folder (): location of the folder
    """
    # Fix: docstring previously said "website" (copy-paste from open_website)
    subprocess.Popen(get_spawn_command() + ["xdg-open", folder])
68 |
69 |
--------------------------------------------------------------------------------
/src/handlers/tts/espeak_handler.py:
--------------------------------------------------------------------------------
1 | from subprocess import check_output
2 |
3 | from .tts import TTSHandler
4 | from ...utility.system import can_escape_sandbox, get_spawn_command
5 |
class EspeakHandler(TTSHandler):
    """TTS handler that shells out to the espeak binary on the host system."""

    key = "espeak"

    @staticmethod
    def requires_sandbox_escape() -> bool:
        """If the handler requires to run commands on the user host system"""
        return True

    def get_voices(self):
        """Return cached (language, voice-name) tuples, querying `espeak --voices` once."""
        if len(self.voices) > 0:
            return self.voices
        if not self.is_installed():
            return self.voices
        output = check_output(get_spawn_command() + ["espeak", "--voices"]).decode("utf-8")
        # Extract the voice names from the output, skipping the header line
        lines = output.strip().split("\n")[1:]
        voices = tuple()
        for line in lines:
            spl = line.split()
            # Guard against short/malformed rows, which previously raised IndexError
            if len(spl) > 4:
                voices += ((spl[3], spl[4]),)
        self.voices = voices
        return voices

    def play_audio(self, message):
        """Speak the message synchronously, serialized by the playback lock."""
        self._play_lock.acquire()
        try:
            check_output(get_spawn_command() + ["espeak", "-v" + str(self.get_current_voice()), message])
        finally:
            # Always release, even if espeak fails, so playback never deadlocks
            self._play_lock.release()

    def save_audio(self, message, file):
        """Render the message with espeak --stdout and write the bytes to `file`."""
        r = check_output(get_spawn_command() + ["espeak", "-f", "-v" + str(self.get_current_voice()), message, "--stdout"])
        # Use a context manager so the file is flushed and closed (was leaked before)
        with open(file, "wb") as f:
            f.write(r)

    def is_installed(self):
        """Check that we can reach the host and that espeak is present there."""
        if not can_escape_sandbox():
            return False
        output = check_output(get_spawn_command() + ["whereis", "espeak"]).decode("utf-8")
        paths = []
        if ":" in output:
            paths = output.split(":")[1].split()
        return len(paths) > 0
50 |
51 |
--------------------------------------------------------------------------------
/src/handlers/llm/openrouter_handler.py:
--------------------------------------------------------------------------------
1 | from .openai_handler import OpenAIHandler
2 | from ...handlers import ExtraSettings
3 |
class OpenRouterHandler(OpenAIHandler):
    """LLM handler for the OpenRouter OpenAI-compatible API."""

    key = "openrouter"
    default_models = (("meta-llama/llama-3.1-70b-instruct:free", "meta-llama/llama-3.1-70b-instruct:free"), )

    def __init__(self, settings, path):
        super().__init__(settings, path)
        self.set_setting("endpoint", "https://openrouter.ai/api/v1/")

    def get_extra_settings(self) -> list:
        """Base OpenAI-style settings plus OpenRouter provider-routing options."""
        settings_list = self.build_extra_settings("OpenRouter", True, True, False, False, True, "https://openrouter.ai/privacy", "https://openrouter.ai/docs/models", False, True)
        settings_list.extend([
            ExtraSettings.ComboSetting("sorting", _("Provider Sorting"), _("Choose providers based on pricing/throughput or latency"), ((_("Price"), "price"), (_("Throughput"), "throughput"),(_("Latency"), "latency")), "price"),
            ExtraSettings.EntrySetting("order", _("Providers Order"), _("Add order of providers to use, names separated by a comma.\nEmpty to not specify"), ""),
            ExtraSettings.ToggleSetting("fallback", _("Allow Fallbacks"), _("Allow fallbacks to other providers"), True),
        ])
        return settings_list

    def get_extra_headers(self):
        """Attribution headers recommended by OpenRouter."""
        return {
            "HTTP-Referer": "https://github.com/qwersyk/Newelle",
            "X-Title": "Newelle"
        }

    def get_extra_body(self):
        """Build the OpenRouter-specific request body (provider routing + reasoning)."""
        provider = {"sort": self.get_setting("sorting")}
        order = self.get_setting("order")
        if order and order != "":
            provider["order"] = order.split(",")
        provider["allow_fallbacks"] = self.get_setting("fallback")
        return {"provider": provider, "include_reasoning": True}
36 |
--------------------------------------------------------------------------------
/data/icons/settings-symbolic.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
--------------------------------------------------------------------------------
/modules/python3-requests.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "python3-requests",
3 | "buildsystem": "simple",
4 | "build-commands": [
5 | "pip3 install --verbose --exists-action=i --no-index --find-links=\"file://${PWD}\" --prefix=${FLATPAK_DEST} \"requests\" --no-build-isolation"
6 | ],
7 | "sources": [
8 | {
9 | "type": "file",
10 | "url": "https://files.pythonhosted.org/packages/1c/d5/c84e1a17bf61d4df64ca866a1c9a913874b4e9bdc131ec689a0ad013fb36/certifi-2024.7.4-py3-none-any.whl",
11 | "sha256": "c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"
12 | },
13 | {
14 | "type": "file",
15 | "url": "https://files.pythonhosted.org/packages/63/09/c1bc53dab74b1816a00d8d030de5bf98f724c52c1635e07681d312f20be8/charset-normalizer-3.3.2.tar.gz",
16 | "sha256": "f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"
17 | },
18 | {
19 | "type": "file",
20 | "url": "https://files.pythonhosted.org/packages/e5/3e/741d8c82801c347547f8a2a06aa57dbb1992be9e948df2ea0eda2c8b79e8/idna-3.7-py3-none-any.whl",
21 | "sha256": "82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"
22 | },
23 | {
24 | "type": "file",
25 | "url": "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl",
26 | "sha256": "70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"
27 | },
28 | {
29 | "type": "file",
30 | "url": "https://files.pythonhosted.org/packages/ca/1c/89ffc63a9605b583d5df2be791a27bc1a42b7c32bab68d3c8f2f73a98cd4/urllib3-2.2.2-py3-none-any.whl",
31 | "sha256": "a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"
32 | }
33 | ]
34 | }
--------------------------------------------------------------------------------
/src/ui/__init__.py:
--------------------------------------------------------------------------------
1 | from gi.repository import Gtk, GdkPixbuf, GLib
2 | import requests
3 |
def apply_css_to_widget(widget, css_string):
    """Attach the given CSS string to a single widget's style context."""
    css_provider = Gtk.CssProvider()
    # Load the CSS from the string
    css_provider.load_from_data(css_string.encode())
    # Register the provider on this widget only, at user priority
    widget.get_style_context().add_provider(css_provider, Gtk.STYLE_PROVIDER_PRIORITY_USER)
13 |
def load_image_with_callback(url, callback, error_callback=None):
    """
    Load an image from URL and call the callback with the pixbuf loader when complete.

    Args:
        url (str): The URL of the image to load
        callback (callable): Function to call when image is loaded successfully.
                             Should accept (pixbuf_loader) as argument
        error_callback (callable, optional): Function to call on error.
                                             Should accept (exception) as argument
    """
    import threading

    def _worker():
        loader = GdkPixbuf.PixbufLoader()
        try:
            response = requests.get(url, stream=True)
            response.raise_for_status()
            # Feed the image bytes to the loader as they stream in
            for chunk in response.iter_content(chunk_size=1024):
                loader.write(chunk)
            loader.close()
            # Hand the finished loader back on the GTK main thread
            GLib.idle_add(callback, loader)
        except Exception as e:
            print(f"Exception loading image: {e}")
            if error_callback:
                GLib.idle_add(error_callback, e)

    # Daemon thread so a pending download never blocks application exit
    threading.Thread(target=_worker, daemon=True).start()
49 |
--------------------------------------------------------------------------------
/modules/python3-speechrecognition.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "python3-speechrecognition",
3 | "buildsystem": "simple",
4 | "build-commands": [
5 | "pip3 install --verbose --exists-action=i --no-index --find-links=\"file://${PWD}\" --prefix=${FLATPAK_DEST} \"speechrecognition\" --no-build-isolation"
6 | ],
7 | "sources": [
8 | {
9 | "type": "file",
10 | "url": "https://files.pythonhosted.org/packages/38/53/946db57842a50b2da2e0c1e34bd37f36f5aadba1a929a3971c5d7841dbca/audioop_lts-0.2.2.tar.gz",
11 | "sha256": "64d0c62d88e67b98a1a5e71987b7aa7b5bcffc7dcee65b635823dbdd0a8dbbd0"
12 | },
13 | {
14 | "type": "file",
15 | "url": "https://files.pythonhosted.org/packages/aa/cd/4b5f5d04c8a4e25c376858d0ad28c325f079f17c82bf379185abf45e41bf/speechrecognition-3.14.3-py3-none-any.whl",
16 | "sha256": "1859fbb09ae23fa759200f5b0677307f1fb16e2c5c798f4259fcc41dd5399fe6"
17 | },
18 | {
19 | "type": "file",
20 | "url": "https://files.pythonhosted.org/packages/c3/52/5fbb203394cc852334d1575cc020f6bcec768d2265355984dfd361968f36/standard_aifc-3.13.0-py3-none-any.whl",
21 | "sha256": "f7ae09cc57de1224a0dd8e3eb8f73830be7c3d0bc485de4c1f82b4a7f645ac66"
22 | },
23 | {
24 | "type": "file",
25 | "url": "https://files.pythonhosted.org/packages/7a/90/a5c1084d87767d787a6caba615aa50dc587229646308d9420c960cb5e4c0/standard_chunk-3.13.0-py3-none-any.whl",
26 | "sha256": "17880a26c285189c644bd5bd8f8ed2bdb795d216e3293e6dbe55bbd848e2982c"
27 | },
28 | {
29 | "type": "file",
30 | "url": "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl",
31 | "sha256": "f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548"
32 | }
33 | ]
34 | }
--------------------------------------------------------------------------------
/data/icons/vcard-symbolic.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
--------------------------------------------------------------------------------
/src/handlers/stt/openaisr_handler.py:
--------------------------------------------------------------------------------
1 | from .stt import STTHandler
2 |
class OpenAISRHandler(STTHandler):
    """Speech-to-text via an OpenAI-compatible transcription endpoint."""

    key = "openai_sr"

    def get_extra_settings(self) -> list:
        """Settings: endpoint, API key, model name and optional language hint."""
        return [
            {
                "key": "endpoint",
                "title": _("API Endpoint"),
                "description": _("Endpoint for OpenAI requests"),
                "type": "entry",
                "default": "https://api.openai.com/v1/"
            },
            {
                "key": "api",
                "title": _("API Key"),
                "description": _("API Key for OpenAI"),
                "type": "entry",
                "default": "",
                "password": True,
            },
            {
                "key": "model",
                "title": _("Whisper Model"),
                "description": _("Name of the OpenAI model"),
                "type": "entry",
                "default": "whisper-1",
            },
            {
                "key": "language",
                "title": _("Language"),
                "description": _("Optional: Specify the language for transcription. Use ISO 639-1 language codes (e.g. \"en\" for English, \"fr\" for French, etc.). "),
                "type": "entry",
                "default": "",
            }
        ]

    def recognize_file(self, path) -> str | None:
        """Upload the audio file and return the transcribed text."""
        import openai
        api_key = self.get_setting("api")
        model = self.get_setting("model")
        # NOTE(review): a None setting would become the string "None" here —
        # presumably the setting always returns a str; verify against get_setting
        language = str(self.get_setting("language"))
        if language == "":
            # Omit the parameter entirely so the server auto-detects the language
            language = openai.NOT_GIVEN
        client = openai.Client(api_key=api_key, base_url=self.get_setting("endpoint"))
        with open(path, "rb") as audio_file:
            transcription = client.audio.transcriptions.create(
                file=(path, audio_file.read()),
                model=model,
                language=language
            )
        return transcription.text
54 |
--------------------------------------------------------------------------------
/data/icons/meson.build:
--------------------------------------------------------------------------------
application_id = 'io.github.qwersyk.Newelle'

# Install the scalable application icon
scalable_dir = join_paths('hicolor', 'scalable', 'apps')
install_data(
  join_paths(scalable_dir, ('@0@.svg').format(application_id)),
  install_dir: join_paths(get_option('datadir'), 'icons', scalable_dir)
)

# Install the symbolic application icon
symbolic_dir = join_paths('hicolor', 'symbolic', 'apps')
install_data(
  join_paths(symbolic_dir, ('@0@-symbolic.svg').format(application_id)),
  install_dir: join_paths(get_option('datadir'), 'icons', symbolic_dir)
)

# Reuse the variable as the absolute symbolic-icon install dir for UI icons
symbolic_dir = join_paths(get_option('datadir'), 'icons/hicolor/symbolic/apps')
install_data (
  'internet-symbolic.svg',
  'view-show-symbolic.svg',
  'plus-symbolic.svg',
  'attach-symbolic.svg',
  'circle-crossed-symbolic.svg',
  'check-plain-symbolic.svg',
  'user-trash-symbolic.svg',
  'info-outline-symbolic.svg',
  'document-edit-symbolic.svg',
  'zoom-in-symbolic.svg',
  'zoom-out-symbolic.svg',
  'brain-augemnted-symbolic.svg',
  'question-round-outline-symbolic.svg',
  'controls-big-symbolic.svg',
  'vcard-symbolic.svg',
  'settings-symbolic.svg',
  'sidebar-show-right-symbolic.svg',
  'go-home-symbolic.svg',
  'detach-symbolic.svg',
  'sidebar-show-left-symbolic.svg',
  'magic-wand-symbolic.svg',
  'emblem-default-symbolic.svg',
  install_dir: symbolic_dir
)
# Icons below were added individually in later revisions; they could be
# merged into the list above, kept separate here to preserve history/behavior.
install_data (
  'warning-outline-symbolic.svg',
  install_dir: symbolic_dir
)
install_data (
  'star-filled-rounded-symbolic.svg',
  install_dir: symbolic_dir
)
install_data (
  'update-symbolic.svg',
  install_dir: symbolic_dir
)
install_data (
  'search-folder-symbolic.svg',
  install_dir: symbolic_dir
)
install_data (
  'right-large-symbolic.svg',
  install_dir: symbolic_dir
)
install_data (
  'left-large-symbolic.svg',
  install_dir: symbolic_dir
)
install_data (
  'gnome-terminal-symbolic.svg',
  install_dir: symbolic_dir
)
--------------------------------------------------------------------------------
/src/handlers/stt/whisper_handler.py:
--------------------------------------------------------------------------------
1 | from .stt import STTHandler
2 | from ...utility.pip import find_module
3 | from ..handler import ErrorSeverity
4 |
class WhisperHandler(STTHandler):
    """Local speech-to-text using OpenAI's Whisper models."""

    key = "whisper"

    def __init__(self, settings, path):
        super().__init__(settings, path)
        # Lazily-loaded whisper model; created on first transcription
        self.model = None

    def get_extra_settings(self) -> list:
        return [
            {
                "key": "model",
                "title": _("Model"),
                "description": _("Name of the Whisper model"),
                "type": "combo",
                "values": self.get_models(),
                "default": "tiny",
                "website": "https://github.com/openai/whisper/blob/main/model-card.md#model-details",
            },
        ]

    def get_models(self):
        """Return the known whisper model names as (display, value) tuples."""
        if self.is_installed():
            import whisper
            # _MODELS is whisper's private name -> download-URL mapping
            models = whisper._MODELS.keys()
            result = tuple()
            for model in models:
                result = result + ((model, model),)
            return result
        else:
            return (("tiny", "tiny"), )

    @staticmethod
    def get_extra_requirements() -> list:
        return ["openai-whisper"]

    def is_installed(self) -> bool:
        return find_module("whisper") is not None

    def install(self):
        """Install the whisper package, then pre-download the tiny model."""
        print("Installing whisper...")
        super().install()
        try:
            import whisper
            print("Whisper installed, installing tiny model...")
            whisper.load_model("tiny")
        except Exception as e:
            # Fix: this throw previously sat AFTER the except block, so a
            # failed install returned silently and a successful one raised
            # NameError on the undefined `e`.
            self.throw("Error installing Whisper: " + str(e), ErrorSeverity.ERROR)

    def recognize_file(self, path):
        """Transcribe an audio file, loading the configured model on first use."""
        import whisper
        if self.model is None:
            self.model = whisper.load_model(self.get_setting("model"))
        res = self.model.transcribe(path)
        if res["text"] is None:
            return ""
        return res["text"]
61 |
--------------------------------------------------------------------------------
/src/handlers/tts/custom_openai_tts.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from .tts import TTSHandler
3 | from ...utility.pip import install_module, find_module
4 | from ...handlers import ErrorSeverity, ExtraSettings
5 |
class CustomOpenAITTSHandler(TTSHandler):
    """TTS handler that talks to any OpenAI-compatible speech endpoint."""

    key = "custom_openai_tts"

    def install(self):
        """Install the openai python package into the handler's pip path."""
        # Assuming pip_path is available from the base class or context
        install_module("openai",self.pip_path)
        if not self.is_installed():
            self.throw("OpenAI installation failed", ErrorSeverity.ERROR)

    def get_extra_settings(self) -> list:
        """Settings: endpoint, key, voice, model and optional instructions."""
        return [
            ExtraSettings.EntrySetting("endpoint", _("Endpoint"), _("Custom endpoint of the service to use"), "https://api.openai.com/v1/"),
            ExtraSettings.EntrySetting("api_key", _("API Key"), _("The API key to use"), "", password=True),
            ExtraSettings.EntrySetting("voice", _("Voice"), _("The voice to use"), "alloy"),
            ExtraSettings.EntrySetting("model", _("Model"), _("The model to use"), "tts-1"),
            ExtraSettings.EntrySetting("instructions", _("Instructions"), _("Instructions for the voice generation. Leave it blank to avoid this field"), "")
        ]

    def is_installed(self) -> bool:
        return find_module("openai") is not None

    def save_audio(self, message, file):
        """Synthesize `message` and write the resulting mp3 stream to `file`."""
        from openai import OpenAI
        from openai import NOT_GIVEN
        try:
            client = OpenAI(api_key=self.get_setting("api_key"), base_url=self.get_setting("endpoint"))
            instructions = self.get_setting("instructions")
            response = client.audio.speech.create(
                model=self.get_setting("model"),
                voice=self.get_setting("voice"),
                input=message,
                response_format="mp3",
                # Empty instructions means: omit the field entirely
                instructions=instructions if instructions != "" else NOT_GIVEN
            )
            response.write_to_file(file)
        except Exception as e:
            self.throw(f"TTS error: {e}", ErrorSeverity.ERROR)
43 |
--------------------------------------------------------------------------------
/package.nix:
--------------------------------------------------------------------------------
# Nix derivation for Newelle (GTK4/libadwaita GNOME assistant).
{
  stdenv,
  libadwaita,
  lib,
  python3,
  meson,
  ninja,
  pkg-config,
  wrapGAppsHook4,
  docutils,
  desktopToDarwinBundle,
  vte-gtk4,
  dconf,
  gobject-introspection,
  glib-networking,
  gsettings-desktop-schemas,
  adwaita-icon-theme,
  gtksourceview5,
  desktop-file-utils,
  lsb-release,
  webkitgtk_6_0
}:

let
  # Python runtime dependencies injected via PYTHONPATH in preFixup below.
  pythonDependencies = with python3.pkgs; [
    pygobject3
    libxml2
    requests
    pydub
    gtts
    speechrecognition
    numpy
    matplotlib
    newspaper3k
    lxml
    lxml-html-clean
    pylatexenc
    pyaudio
    tiktoken
    openai
    ollama
    llama-index-core
    llama-index-readers-file
    pip-install-test
    cssselect
    markdownify
  ];
in
stdenv.mkDerivation rec {
  pname = "newelle";
  version = "0.9.6";

  format = "other";

  src = ./.;

  strictDeps = true;

  nativeBuildInputs = [
    meson
    ninja
    gobject-introspection # for setup hook populating GI_TYPELIB_PATH
    docutils
    wrapGAppsHook4
    desktop-file-utils
    pkg-config
  ] ++ lib.optional stdenv.hostPlatform.isDarwin desktopToDarwinBundle;

  buildInputs =
    [
      libadwaita
      python3
      gobject-introspection
      vte-gtk4
      dconf
      adwaita-icon-theme
      gsettings-desktop-schemas
      gtksourceview5
      desktop-file-utils
      lsb-release
      webkitgtk_6_0
      glib-networking
    ];

  # Compile GSettings schemas and expose the Python deps to the wrapped binary.
  preFixup = ''
    glib-compile-schemas $out/share/gsettings-schemas/${pname}-${version}/glib-2.0/schemas
    gappsWrapperArgs+=(--set PYTHONPATH "${python3.pkgs.makePythonPath pythonDependencies}")
    patchShebangs $out/bin
  '';

  meta = with lib; {
    homepage = "https://github.com/qwersyk/Newelle";
    description = "Newelle - Your Ultimate Virtual Assistant ";
    mainProgram = "newelle";
    license = licenses.gpl3;
    platforms = platforms.unix;
  };

}
100 |
--------------------------------------------------------------------------------
/src/ui/widgets/comborow.py:
--------------------------------------------------------------------------------
1 | from gi.repository import GObject, Adw, Gio, Gtk
2 |
class ComboRowHelper(GObject.Object):
    """Populate an Adw.ComboRow from (label, value) pairs.

    Emits "changed" with the selected option's value whenever the
    selection changes.
    """

    __gsignals__ = {
        "changed": (GObject.SignalFlags.RUN_FIRST, None, (str,)),
    }

    class ItemWrapper(GObject.Object):
        # GObject properties so the combo's expression search can match on name
        name = GObject.Property(type=str)
        value = GObject.Property(type=str)

        def __init__(self, name: str, value: str):
            super().__init__()
            self.name = name
            self.value = value

    def __init__(
        self,
        combo: Adw.ComboRow,
        options: tuple[tuple[str, str]],
        selected_value: str,
    ):
        super().__init__()
        self.combo = combo
        self.__combo = combo
        self.__factory = Gtk.SignalListItemFactory()
        self.__factory.connect("setup", self.__on_setup_listitem)
        self.__factory.connect("bind", self.__on_bind_listitem)
        combo.set_factory(self.__factory)
        self.__store = Gio.ListStore(item_type=self.ItemWrapper)
        selected_index = 0
        for index, (label, value) in enumerate(options):
            if value == selected_value:
                selected_index = index
            self.__store.append(self.ItemWrapper(label, value))
        combo.set_model(self.__store)
        combo.set_selected(selected_index)
        combo.connect("notify::selected-item", self.__on_selected)

        # Enable type-to-search for long option lists
        if len(options) > 10:
            expression = Gtk.PropertyExpression.new(self.ItemWrapper, None, "name")
            combo.set_expression(expression)
            combo.set_enable_search(True)

    def __on_selected(self, combo: Adw.ComboRow, selected_item: GObject.ParamSpec) -> None:
        # Forward the selected item's value through our "changed" signal
        self.emit("changed", self.__combo.get_selected_item().value)

    def __on_setup_listitem(self, factory: Gtk.ListItemFactory, list_item: Gtk.ListItem) -> None:
        label = Gtk.Label()
        list_item.set_child(label)
        # Keep a handle on the label for later rebinding
        list_item.row_w = label

    def __on_bind_listitem(self, factory: Gtk.ListItemFactory, list_item: Gtk.ListItem) -> None:
        list_item.get_child().set_text(list_item.get_item().name)
58 |
--------------------------------------------------------------------------------
/io.github.qwersyk.Newelle.json:
--------------------------------------------------------------------------------
1 | {
2 | "app-id" : "io.github.qwersyk.Newelle",
3 | "runtime" : "org.gnome.Platform",
4 | "runtime-version" : "49",
5 | "sdk" : "org.gnome.Sdk",
6 | "command" : "newelle",
7 | "finish-args" : [
8 | "--share=network",
9 | "--share=ipc",
10 | "--socket=fallback-x11",
11 | "--device=dri",
12 | "--socket=wayland",
13 | "--talk-name=org.freedesktop.Flatpak",
14 | "--filesystem=home",
15 | "--socket=pulseaudio",
16 | "--talk-name=org.gnome.Shell.Screencast"
17 | ],
18 | "cleanup" : [
19 | "/include",
20 | "/lib/pkgconfig",
21 | "/man",
22 | "/share/doc",
23 | "/share/gtk-doc",
24 | "/share/man",
25 | "/share/pkgconfig",
26 | "*.la",
27 | "*.a"
28 | ],
29 | "modules" : [
30 | "modules/git.json",
31 | "modules/vte.json",
32 | "modules/python3-requests.json",
33 | "modules/python3-expandvars.json",
34 | "modules/python3-gpt4all.json",
35 | "modules/python3-gtts.json",
36 | "modules/portaudio.json",
37 | "modules/python3-pyaudio.json",
38 | "modules/python3-speechrecognition.json",
39 | "modules/python3-pydub.json",
40 | "modules/python3-six.json",
41 | "modules/python3-pillow.json",
42 | "modules/python3-matplotlib.json",
43 | "modules/python3-pylatexenc.json",
44 | "modules/python3-lxml.json",
45 | "modules/python3-lxml_html_clean.json",
46 | "modules/python3-newspaper3k.json",
47 | "modules/python3-markdownify.json",
48 | "modules/python3-openai.json",
49 | {
50 | "name" : "newelle",
51 | "builddir" : true,
52 | "buildsystem" : "meson",
53 | "config-opts" : [
54 | "-Dprofile=development",
55 | "--libdir=lib"
56 | ],
57 | "build-options" : {
58 | "append-path" : "/usr/lib/sdk/llvm19/bin",
59 | "prepend-ld-library-path" : "/usr/lib/sdk/llvm19/lib"
60 | },
61 | "sources" : [
62 | {
63 | "type" : "git",
64 | "url" : "."
65 | }
66 | ]
67 | }
68 | ],
69 | "build-options" : {
70 | "env" : { }
71 | }
72 | }
73 |
--------------------------------------------------------------------------------
/src/handlers/descriptors.py:
--------------------------------------------------------------------------------
1 | from gi.repository import GdkPixbuf, Gtk
2 | from .handler import Handler
3 |
4 |
def HandlerDescription(key: str, title: str, description: str, handler_class: Handler, website:str|None=None):
    """Generate Handler description, used by Newelle to generate settings and use handlers

    Args:
        key: unique key of the handler
        title: Name of the handler
        description: Small description about the handler
        handler_class: Handler class
        website (optional): website of the handler; omitted from the dict when None

    Returns:
        dict that contains the description
    """
    desc = {
        "key": key,
        "title": title,
        "description": description,
        "class": handler_class
    }
    if website is not None:
        desc["website"] = website
    return desc
26 |
def PromptDescription(key: str, title: str, description: str, text:str, setting_name:str|None=None, editable:bool=True, default:bool=True, show_in_settings:bool=True):
    """Generate a Prompt description, used by Newelle to generate settings of the prompt and add it

    Args:
        key: unique key of the prompt
        title: Title of the prompt
        description: Small description of the prompt
        text: Actual text of the prompt
        setting_name (optional): Setting name, in case on/off depends on another prompt, by default equal to the key
        editable: if the prompt is editable by the user, defaults to true
        default: if the prompt is enabled by default
        show_in_settings: if the prompt is shown in the settings

    Returns:
        dict that contains the description
    """
    return {
        "key": key,
        "title": title,
        "description": description,
        "text": text,
        # Fall back to the key so toggling works without an explicit setting name
        "setting_name": setting_name if setting_name is not None else key,
        "editable": editable,
        "default": default,
        "show_in_settings": show_in_settings
    }
53 |
def TabButtonDescription(title: str, icon: GdkPixbuf.Pixbuf | str | Gtk.IconPaintable, callback):
    """Build the (title, icon, callback) triple describing a "new tab button".

    Args:
        title: Title of the button
        icon: Icon of the button
        callback: Callback of the button

    Returns:
        tuple of (title, icon, callback)
    """
    return (title, icon, callback)
63 |
--------------------------------------------------------------------------------
/data/icons/star-filled-rounded-symbolic.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
--------------------------------------------------------------------------------
/src/handlers/memory/summary_memoripy_handler.py:
--------------------------------------------------------------------------------
1 | from .memory_handler import MemoryHandler
2 | from .memoripy_handler import MemoripyHandler
3 | from .user_summary_handler import UserSummaryHandler
4 | from threading import Thread
5 |
class SummaryMemoripyHanlder(MemoryHandler):
    """Memory handler that combines Memoripy context with the user-summary memory.

    NOTE(review): the class name keeps the original "Hanlder" spelling because
    external code may reference it by name.
    """

    key = "summary-memoripy"

    def __init__(self, settings, path):
        super().__init__(settings, path)
        # Sub-handlers are created lazily in initialize_handlers()
        self.memoripy = None
        self.user_summary = None
        self.llm = None
        self.embedding = None

    def is_installed(self) -> bool:
        memoripy, user_summary = self.initialize_handlers()
        return memoripy.is_installed() and user_summary.is_installed()

    def install(self) -> None:
        memoripy, user_summary = self.initialize_handlers()
        memoripy.install()
        user_summary.install()

    def initialize_handlers(self) -> tuple[MemoryHandler, MemoryHandler]:
        """Create and configure both sub-handlers on first use."""
        if self.memoripy is None or self.user_summary is None:
            self.memoripy = MemoripyHandler(self.settings, self.path)
            self.user_summary = UserSummaryHandler(self.settings, self.path)
            self.memoripy.set_handlers(self.llm, self.embedding)
            self.user_summary.set_handlers(self.llm, self.embedding)
            self.user_summary.set_memory_size(self.memory_size)
            self.memoripy.set_memory_size(self.memory_size)
        return self.memoripy, self.user_summary

    def get_context(self, prompt: str, history: list[dict[str, str]]) -> list[str]:
        """Query both memories in parallel and return the combined context."""
        memoripy, user_summary = self.initialize_handlers()
        r = []
        # list.extend from two threads is safe enough here under the GIL
        def run_memoripy():
            r.extend(memoripy.get_context(prompt, history))
        def run_user_summary():
            r.extend(user_summary.get_context(prompt, history))
        t1 = Thread(target=run_memoripy)
        t2 = Thread(target=run_user_summary)
        t1.start()
        t2.start()
        t1.join()
        t2.join()
        # Removed leftover debug print(r)
        return r

    def register_response(self, bot_response: str, history: list[dict[str, str]]):
        memoripy, user_summary = self.initialize_handlers()
        memoripy.register_response(bot_response, history)
        # Fix: user_summary.register_response was previously called twice,
        # registering the same response into the summary memory two times.
        user_summary.register_response(bot_response, history)
56 |
--------------------------------------------------------------------------------
/modules/python3-gtts.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "python3-gtts",
3 | "buildsystem": "simple",
4 | "build-commands": [
5 | "pip3 install --verbose --exists-action=i --no-index --find-links=\"file://${PWD}\" --prefix=${FLATPAK_DEST} \"gtts\" --no-build-isolation"
6 | ],
7 | "sources": [
8 | {
9 | "type": "file",
10 | "url": "https://files.pythonhosted.org/packages/84/ae/320161bd181fc06471eed047ecce67b693fd7515b16d495d8932db763426/certifi-2025.6.15-py3-none-any.whl",
11 | "sha256": "2e0c7ce7cb5d8f8634ca55d2ba7e6ec2689a2fd6537d8dec1296a477a4910057"
12 | },
13 | {
14 | "type": "file",
15 | "url": "https://files.pythonhosted.org/packages/e4/33/89c2ced2b67d1c2a61c19c6751aa8902d46ce3dacb23600a283619f5a12d/charset_normalizer-3.4.2.tar.gz",
16 | "sha256": "5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63"
17 | },
18 | {
19 | "type": "file",
20 | "url": "https://files.pythonhosted.org/packages/7e/d4/7ebdbd03970677812aac39c869717059dbb71a4cfc033ca6e5221787892c/click-8.1.8-py3-none-any.whl",
21 | "sha256": "63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"
22 | },
23 | {
24 | "type": "file",
25 | "url": "https://files.pythonhosted.org/packages/e3/6c/8b8b1fdcaee7e268536f1bb00183a5894627726b54a9ddc6fc9909888447/gTTS-2.5.4-py3-none-any.whl",
26 | "sha256": "5dd579377f9f5546893bc26315ab1f846933dc27a054764b168f141065ca8436"
27 | },
28 | {
29 | "type": "file",
30 | "url": "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl",
31 | "sha256": "946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"
32 | },
33 | {
34 | "type": "file",
35 | "url": "https://files.pythonhosted.org/packages/7c/e4/56027c4a6b4ae70ca9de302488c5ca95ad4a39e190093d6c1a8ace08341b/requests-2.32.4-py3-none-any.whl",
36 | "sha256": "27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c"
37 | },
38 | {
39 | "type": "file",
40 | "url": "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl",
41 | "sha256": "e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"
42 | }
43 | ]
44 | }
--------------------------------------------------------------------------------
/src/handlers/tts/openai_tts_handler.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | from .tts import TTSHandler
4 | from ...utility.pip import install_module, find_module
5 | from ...handlers import ErrorSeverity, ExtraSettings
6 |
class OpenAITTSHandler(TTSHandler):
    """Text-to-speech handler backed by the OpenAI speech API."""
    key = "openai_tts"

    def install(self):
        """Install the openai package into the handler's pip prefix."""
        install_module("openai", self.pip_path)
        if not self.is_installed():
            self.throw("OpenAI installation failed", ErrorSeverity.ERROR)

    def get_extra_settings(self) -> list:
        """Return the settings rows shown in the UI for this handler."""
        return [
            ExtraSettings.EntrySetting("api_key", _("API Key"), _("The API key to use"), "", password=True),
            ExtraSettings.ComboSetting("voice", _("Voice"), _("The voice to use"), self.get_voices(), "alloy"),
            ExtraSettings.ComboSetting("model", _("Model"), _("The model to use"), self.get_models(), "tts-1"),
            ExtraSettings.EntrySetting("instructions", _("Instructions"), _("Instructions for the voice generation. Leave it blank to avoid this field"), "")
        ]

    def is_installed(self) -> bool:
        """Check whether the openai module is importable."""
        return find_module("openai") is not None

    def get_models(self):
        """Return (label, value) pairs for the supported TTS models."""
        # Tuple comprehension instead of quadratic tuple += concatenation.
        return tuple((model, model) for model in ("tts-1", "tts-1-hd"))

    def get_voices(self):
        """Return (label, value) pairs for the available OpenAI voices."""
        openai_voices = ("alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse")
        return tuple((voice.capitalize(), voice) for voice in openai_voices)

    def save_audio(self, message, file):
        """Synthesize *message* as mp3 audio and write it to *file*.

        Errors are reported through self.throw instead of being raised.
        """
        from openai import OpenAI
        from openai import NOT_GIVEN
        try:
            client = OpenAI(api_key=self.get_setting("api_key"))
            # "instructions" is optional: send NOT_GIVEN when left blank so
            # the API does not receive an empty string.
            instructions = self.get_setting("instructions")
            response = client.audio.speech.create(
                model=self.get_setting("model"),
                voice=self.get_setting("voice"),
                input=message,
                response_format="mp3",
                instructions=instructions if instructions != "" else NOT_GIVEN
            )
            response.write_to_file(file)
        except Exception as e:
            self.throw(f"TTS error: {e}", ErrorSeverity.ERROR)
57 |
--------------------------------------------------------------------------------
/src/ui/widgets/website.py:
--------------------------------------------------------------------------------
1 | from gi.repository import Gtk, Pango
2 |
class WebsiteButton(Gtk.Button):
    """
    A flat GTK button that presents a website: an icon next to a vertical
    stack of title, description and URL labels.
    """

    def __init__(self, url, **kwargs):
        """
        Initializes the WebsiteButton.

        Args:
            url (str): The website URL this button represents.
            **kwargs: Keyword arguments passed to the Gtk.Button constructor.
        """
        super().__init__(**kwargs)
        self.url = url
        self.set_css_classes(["flat"])
        # Uniform 5px margin on all four sides
        for apply_margin in (self.set_margin_top, self.set_margin_start,
                             self.set_margin_bottom, self.set_margin_end):
            apply_margin(5)

        self.set_name(self.url)  # Expose the URL as the widget name
        self._build_ui()

    def _build_ui(self):
        """
        Constructs the user interface elements within the button.
        """
        root = Gtk.Box()
        label_column = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)

        self.icon = Gtk.Image(icon_name="internet-symbolic")
        self.icon.set_css_classes(["large"])

        root.append(self.icon)
        root.append(label_column)
        label_column.set_size_request(250, -1)
        label_column.set_margin_start(5)

        # Wrapping options shared by every label in the column
        wrap_opts = dict(
            halign=Gtk.Align.START,
            wrap=True,
            wrap_mode=Pango.WrapMode.WORD_CHAR,
        )

        self.title = Gtk.Label(label=self.url, css_classes=["title-3"], **wrap_opts)
        label_column.append(self.title)

        self.description = Gtk.Label(label="", css_classes=["body"], **wrap_opts)
        label_column.append(self.description)

        self.url_text = Gtk.Label(
            label=f"{self.url}",
            css_classes=["body"],
            use_markup=True,
            **wrap_opts,
        )
        label_column.append(self.url_text)

        self.set_child(root)
78 |
--------------------------------------------------------------------------------
/src/ui/screenrecorder.py:
--------------------------------------------------------------------------------
1 | from gi.repository import GLib, Gio, Adw
2 | import os
3 | import time
4 |
5 |
class ScreenRecorder:
    """Record the screen through the GNOME Shell Screencast D-Bus service."""

    def __init__(self, parent_window):
        """Create a recorder; error dialogs are attached to parent_window."""
        self.window = parent_window
        self.recording = False
        timestamp = time.strftime("%Y%m%d_%H%M%S")
        # Target path template without extension; start() assumes the service
        # appends ".mp4" — NOTE(review): confirm the actual container format
        # produced by org.gnome.Shell.Screencast.
        self.output_path = os.path.join(GLib.get_user_cache_dir(), "screen_records", f"{timestamp}")
        self.init_proxy()

    def init_proxy(self):
        """Create the D-Bus proxy for org.gnome.Shell.Screencast.

        On failure an error dialog is shown and self.proxy stays unset, so a
        later start()/stop() would raise AttributeError — NOTE(review):
        consider initializing self.proxy to None as a fallback.
        """
        try:
            self.proxy = Gio.DBusProxy.new_for_bus_sync(
                Gio.BusType.SESSION,
                Gio.DBusProxyFlags.NONE,
                None,
                'org.gnome.Shell.Screencast',
                '/org/gnome/Shell/Screencast',
                'org.gnome.Shell.Screencast',
                None
            )
        except GLib.Error as e:
            self.show_error(str(e))

    def start(self):
        """Start a screencast.

        Returns True on success, False on error; returns None when already
        recording or when the call reports failure without raising.
        """
        if not self.recording:
            try:
                # Remove a stale recording from a previous run so the service
                # can write to the same path again.
                old_file = self.output_path + ".mp4"
                if os.path.exists(old_file):
                    try:
                        os.remove(old_file)
                    except OSError as e:
                        self.show_error(str(e))
                        return False

                # Screencast(file_template, options) -> (success, filename)
                success, path = self.proxy.call_sync(
                    'Screencast',
                    GLib.Variant(
                        '(sa{sv})',
                        (self.output_path, {})
                    ),
                    Gio.DBusCallFlags.NONE,
                    -1,
                    None
                )
                if success:
                    self.recording = True
                    return True

            except GLib.Error as e:
                self.show_error(str(e))
                return False

    def stop(self, *args):
        """Stop the running screencast; no-op when not recording."""
        if self.recording:
            try:
                self.proxy.call_sync(
                    'StopScreencast',
                    None,
                    Gio.DBusCallFlags.NONE,
                    -1,
                    None
                )
                self.recording = False
            except GLib.Error as e:
                self.show_error(str(e))

    def show_error(self, message):
        """Present a modal Adwaita message dialog with the given error text."""
        dialog = Adw.MessageDialog.new(self.window)
        dialog.set_heading("Error")
        dialog.set_body(str(message))
        dialog.set_modal(True)
        dialog.add_response("ok", "OK")
        dialog.present()
78 |
--------------------------------------------------------------------------------
/po/POTFILES:
--------------------------------------------------------------------------------
1 | src/handlers/embeddings/embedding.py
2 | src/handlers/embeddings/gemini_handler.py
3 | src/handlers/embeddings/ollama_handler.py
4 | src/handlers/embeddings/openai_handler.py
5 | src/handlers/embeddings/wordllama_handler.py
6 | src/handlers/handler.py
7 | src/handlers/llm/claude_handler.py
8 | src/handlers/llm/custom_handler.py
9 | src/handlers/llm/gpt3any_handler.py
10 | src/handlers/llm/gpt4all_handler.py
11 | src/handlers/llm/groq_handler.py
12 | src/handlers/llm/llm.py
13 | src/handlers/llm/g4f_handler.py
14 | src/handlers/llm/newelle_handler.py
15 | src/handlers/llm/ollama_handler.py
16 | src/handlers/llm/deepseek_handler.py
17 | src/handlers/llm/gemini_handler.py
18 | src/handlers/llm/mistral_handler.py
19 | src/handlers/llm/openai_handler.py
20 | src/handlers/llm/openrouter_handler.py
21 | src/handlers/memory/memoripy_handler.py
22 | src/handlers/memory/summary_memoripy_handler.py
23 | src/handlers/memory/user_summary_handler.py
24 | src/handlers/rag/rag_handler.py
25 | src/handlers/rag/llamaindex_handler.py
26 | src/handlers/stt/custom_handler.py
27 | src/handlers/stt/googlesr_handler.py
28 | src/handlers/stt/groqsr_handler.py
29 | src/handlers/stt/openaisr_handler.py
30 | src/handlers/stt/vosk_handler.py
31 | src/handlers/stt/whisper_handler.py
32 | src/handlers/stt/witai_handler.py
33 | src/handlers/stt/sphinx_handler.py
34 | src/handlers/stt/whispercpp_handler.py
35 | src/handlers/tts/custom_openai_tts.py
36 | src/handlers/tts/groq_tts_handler.py
37 | src/handlers/tts/openai_tts_handler.py
38 | src/handlers/tts/custom_handler.py
39 | src/handlers/tts/elevenlabs_handler.py
40 | src/handlers/tts/tts.py
41 | src/handlers/websearch/tavily.py
42 | src/handlers/websearch/duckduckgo_handler.py
43 | src/integrations/website_reader.py
44 | src/integrations/websearch.py
45 | src/ui/profile.py
46 | src/ui/screenrecorder.py
47 | src/ui/thread_editing.py
48 | src/ui/widgets/barchart.py
49 | src/ui/widgets/comborow.py
50 | src/ui/widgets/documents_reader.py
51 | src/ui/widgets/latex.py
52 | src/ui/widgets/markuptextview.py
53 | src/ui/widgets/multiline.py
54 | src/ui/widgets/profilerow.py
55 | src/ui/widgets/terminal_dialog.py
56 | src/ui/widgets/browser.py
57 | src/ui/widgets/code_editor.py
58 | src/ui/widgets/copybox.py
59 | src/ui/widgets/website.py
60 | src/ui/widgets/websearch.py
61 | src/ui/widgets/thinking.py
62 | src/ui/widgets/file.py
63 | src/ui/widgets/tipscarousel.py
64 | src/ui/explorer.py
65 | src/ui/shortcuts.py
66 | src/ui/stdout_monitor.py
67 | src/ui/extension.py
68 | src/ui/mini_window.py
69 | src/ui/presentation.py
70 | src/ui/settings.py
71 | src/utility/audio_recorder.py
72 | src/utility/util.py
73 | src/utility/website_scraper.py
74 | src/utility/message_chunk.py
75 | src/utility/stdout_capture.py
76 | src/utility/replacehelper.py
77 | src/utility/strings.py
78 | src/ui_controller.py
79 | src/constants.py
80 | src/controller.py
81 | src/extensions.py
82 | src/main.py
83 | src/window.py
84 | data/io.github.qwersyk.Newelle.appdata.xml.in
85 | data/io.github.qwersyk.Newelle.desktop.in
86 | data/io.github.qwersyk.Newelle.gschema.xml
87 |
--------------------------------------------------------------------------------
/src/handlers/tts/kokoro_handler.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 | from .tts import TTSHandler
4 | from ...utility.pip import install_module, find_module
5 | from ..handler import ErrorSeverity
6 | from ...handlers import ExtraSettings
7 |
class KokoroTTSHandler(TTSHandler):
    """Local text-to-speech handler based on the Kokoro model."""
    key = "kokoro"

    def install(self):
        """Install kokoro, soundfile and language-support deps (CPU torch wheels)."""
        cache_dir = os.path.join(self.path, "kokoro_cache")
        os.makedirs(cache_dir, exist_ok=True)
        # Extra tokenization packages needed for Japanese voice support
        extra_deps = "fugashi jaconv mojimoji mecab-python3 unidic-lite"
        index_url = " --extra-index-url https://download.pytorch.org/whl/cpu --trusted-host download.pytorch.org"
        install_module("kokoro==0.9.4 soundfile espeakng-loader " + extra_deps + index_url, self.pip_path, update=False, cache_dir=cache_dir)
        install_module("transformers", self.pip_path)
        if not self.is_installed():
            self.throw("Kokoro installation failed", ErrorSeverity.ERROR)

    def is_installed(self) -> bool:
        """Both kokoro and soundfile must be importable."""
        return find_module("kokoro") is not None and find_module("soundfile") is not None

    def get_voices(self):
        """Return (label, voice_id) pairs for every bundled Kokoro voice.

        Voice ids are encoded as "<lang><gender>_<name>"; the label shows the
        language flag, a gender symbol and the capitalized name.
        """
        voices = "af_alloy, af_aoede, af_bella, af_heart, af_jessica, af_kore, af_nicole, af_nova, af_river, af_sarah, af_sky".split(", ")
        voices += "am_adam, am_echo, am_eric, am_fenrir, am_liam, am_michael, am_onyx, am_puck".split(", ")
        voices += "bf_alice, bf_emma, bf_isabella, bf_lily, bm_daniel, bm_fable, bm_george, bm_lewis".split(", ")
        # Espeak required for non english to work
        voices += ["ef_dora", "em_alex", "em_santa"]
        voices += ["hf_alpha", "hf_beta", "hm_omega", "hm_psi"]
        voices += ["ff_siwis"]
        voices += "if_sara, im_nicola".split(", ")
        voices += ["pf_dora", "pm_alex", "pm_santa"]
        voices += "jf_alpha, jf_gongitsune, jf_nezumi, jf_tebukuro, jm_kumo".split(", ")
        voices += "zf_xiaobei, zf_xiaoni, zf_xiaoxiao, zf_xiaoyi, zm_yunjian, zm_yunxi, zm_yunxia, zm_yunyang".split(", ")
        flags = {"a": "🇺🇸", "b": "🇬🇧","e": "🇪🇸", "f": "🇫🇷", "h": "🇮🇳", "p": "🇧🇷", "i": "🇮🇹", "j": "🇯🇵", "z": "🇨🇳"}
        genders = {"m": "🚹", "f": "🚺"}
        return tuple(
            (flags[voice[0]] + genders[voice[1]] + " " + voice[3:].capitalize(), voice)
            for voice in voices
        )

    def save_audio(self, message, file):
        """Generate speech for *message* and write a single audio file.

        Fix: the previous loop called sf.write for every generated segment on
        the same path, overwriting the file so only the last paragraph
        survived. All segments are now concatenated and written once.
        """
        from kokoro import KPipeline
        import soundfile as sf
        import numpy as np
        voice = self.get_current_voice()
        # The first character of the voice id encodes its language
        pipeline = KPipeline(lang_code=voice[0])
        generator = pipeline(
            message, voice=voice,
            speed=1, split_pattern=r'\n+'
        )
        # Segments are array-likes (numpy arrays / CPU tensors) — TODO confirm
        # np.concatenate handles the exact type kokoro yields.
        segments = [audio for _, _, audio in generator]
        if segments:
            sf.write(file, np.concatenate(segments), 24000)
58 |
--------------------------------------------------------------------------------
/src/utility/profile_settings.py:
--------------------------------------------------------------------------------
1 | from gi.repository import GLib
2 |
3 | def get_settings_dict(settings, blacklisted_keys:list = []):
4 | """
5 | Return a dictionary containing all settings from a Gio.Settings object.
6 | """
7 | settings_dict = {}
8 | for key in settings.list_keys():
9 | if key in blacklisted_keys:
10 | continue
11 | value = settings.get_value(key)
12 | settings_dict[key] = value.unpack()
13 | return settings_dict
14 |
def restore_settings_from_dict(settings, settings_dict):
    """
    Restore settings from a dictionary into a Gio.Settings object.
    """
    for key in settings_dict:
        # Reuse the key's current variant type so the new value matches it.
        type_string = settings.get_value(key).get_type_string()
        settings.set_value(key, GLib.Variant(type_string, settings_dict[key]))
23 |
24 | def get_settings_dict_by_groups(settings, groups: list, settings_groups: dict, blacklisted_keys: list = []):
25 | """
26 | Return a dictionary containing settings from specified groups from a Gio.Settings object.
27 |
28 | Args:
29 | settings: Gio.Settings object
30 | groups: List of group names to include (e.g. ["LLM", "TTS"])
31 | settings_groups: Dictionary mapping group names to their settings
32 | blacklisted_keys: List of keys to exclude
33 | """
34 | if len(groups) == 0:
35 | groups = list(settings_groups.keys())
36 | # Get all settings keys for the specified groups
37 | allowed_keys = set()
38 | for group in groups:
39 | if group in settings_groups:
40 | allowed_keys.update(settings_groups[group]["settings"])
41 |
42 | settings_dict = {}
43 | for key in settings.list_keys():
44 | if key in blacklisted_keys or key not in allowed_keys:
45 | continue
46 | value = settings.get_value(key)
47 | settings_dict[key] = value.unpack()
48 | return settings_dict
49 |
def restore_settings_from_dict_by_groups(settings, settings_dict, groups: list, settings_groups: dict):
    """
    Restore settings from a dictionary into a Gio.Settings object, but only for specified groups.

    Args:
        settings: Gio.Settings object
        settings_dict: Dictionary of settings to restore
        groups: List of group names to include (e.g. ["LLM", "TTS"]);
            an empty list means all groups
        settings_groups: Dictionary mapping group names to their settings
    """
    # An empty group list means "restore every group".
    selected = groups if len(groups) > 0 else list(settings_groups.keys())
    allowed_keys = set()
    for name in selected:
        group = settings_groups.get(name)
        if group is not None:
            allowed_keys.update(group["settings"])

    for key, value in settings_dict.items():
        if key in allowed_keys:
            # Reuse the key's current variant type for the new value.
            type_string = settings.get_value(key).get_type_string()
            settings.set_value(key, GLib.Variant(type_string, value))
74 |
--------------------------------------------------------------------------------
/data/icons/brain-augemnted-symbolic.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/src/ui/widgets/profilerow.py:
--------------------------------------------------------------------------------
1 | from collections.abc import Callable
2 | from gi.repository import Adw, Gtk, Gdk
3 |
class ProfileRow(Adw.ActionRow):
    """Row widget showing a user profile: avatar, optional selection checkmark
    and optional edit/delete suffix buttons."""

    def __init__(self, profile, picture, selected, add=False, allow_delete=False, allow_edit=False):
        """Build the row.

        Args:
            profile: profile name, used as row title and avatar initials
            picture: path to an avatar image file, or None for initials only
            selected: when True, overlay a checkmark on the avatar
            add: when True, render a "+" avatar (the "add profile" row)
            allow_delete: when True, show a trash suffix button
            allow_edit: when True, show an edit suffix button
        """
        super().__init__(height_request=50, width_request=250, use_markup=False, activatable=False)
        self.profile = profile
        self.add = add
        # Set properties
        # Callbacks are no-ops until set_on_forget()/set_on_edit() are called.
        self.on_forget_f = lambda _: None
        self.on_edit_f = lambda _: None
        self.set_name(profile)
        self.set_title(profile)
        # Create prefix widget (GtkOverlay)
        overlay = Gtk.Overlay(width_request=40)
        self.add_prefix(overlay)

        # Create avatar widget
        if add:
            avatar = Adw.Avatar(size=36, text=profile, icon_name="plus-symbolic")
        elif picture is not None:
            avatar = Adw.Avatar(custom_image=Gdk.Texture.new_from_filename(picture), text=profile, show_initials=True, size=36)
            # NOTE(review): reaches into Adw.Avatar's internal child hierarchy
            # to resize the image — fragile across libadwaita versions; verify.
            avatar.get_last_child().get_last_child().set_icon_size(Gtk.IconSize.NORMAL)
        else:
            avatar = Adw.Avatar(text=profile, show_initials=True, size=36)
        avatar.set_tooltip_text(_("Select profile"))
        # Signal handler for avatar clicked
        overlay.add_overlay(avatar)

        # Create checkmark widget
        if selected:
            checkmark = Gtk.Image(focusable=False, halign=Gtk.Align.END, valign=Gtk.Align.END)
            checkmark.set_from_icon_name("check-plain-symbolic")
            checkmark.set_pixel_size(11)
            # Apply style to checkmark
            checkmark.add_css_class("blue-checkmark")
            overlay.add_overlay(checkmark)

        if allow_edit:
            edit_button = Gtk.Button()
            edit_button.set_icon_name("document-edit-symbolic")
            edit_button.set_valign(Gtk.Align.CENTER)
            edit_button.set_tooltip_text(_("Edit Profile"))
            edit_button.connect("clicked", self.on_edit)
            edit_button.add_css_class("circular")
            self.add_suffix(edit_button)

        if allow_delete:
            # Create suffix widget (GtkButton)
            forget_button = Gtk.Button()
            forget_button.set_icon_name("user-trash-symbolic")
            forget_button.set_valign(Gtk.Align.CENTER)
            forget_button.set_tooltip_text(_("Delete Profile"))
            # Signal handler for forget button clicked
            forget_button.connect("clicked", self.on_forget)
            # Apply style to forget button
            forget_button.add_css_class("circular")
            self.add_suffix(forget_button)


    def set_on_forget(self, f : Callable):
        """Register the callback invoked with the profile name on delete."""
        self.on_forget_f = f

    def set_on_edit(self, f: Callable):
        """Register the callback invoked with the profile name on edit."""
        self.on_edit_f = f

    def on_forget(self, widget):
        """Forward the delete click to the registered callback."""
        self.on_forget_f(self.profile)

    def on_edit(self, widget):
        """Forward the edit click to the registered callback."""
        self.on_edit_f(self.profile)
72 |
--------------------------------------------------------------------------------
/src/utility/audio_recorder.py:
--------------------------------------------------------------------------------
1 | import pyaudio
2 | import wave
3 | import struct
4 | from typing import Callable
5 | import os
6 | import math
7 |
class AudioRecorder:
    """Record audio with optional auto-stop on silence detection."""

    def __init__(self, auto_stop: bool = False, stop_function: Callable = lambda: (), silence_threshold_percent: float = 0.01, silence_duration: int = 2):
        """Configure the recorder.

        Args:
            auto_stop: stop automatically after a stretch of silence
            stop_function: called (with no arguments) once the recording is
                saved. Fix: the old default ``lambda _: ()`` required one
                argument but was invoked with none, raising TypeError.
            silence_threshold_percent: fraction of max_rms below which a
                chunk counts as silent
            silence_duration: seconds of continuous silence before auto-stop
        """
        self.recording = False
        self.frames = []
        self.auto_stop = auto_stop
        self.stop_function = stop_function
        self.silence_threshold_percent = silence_threshold_percent
        self.silence_duration = silence_duration
        self.sample_format = pyaudio.paInt16
        self.channels = 1
        self.sample_rate = 44100
        self.chunk_size = 1024
        self.silent_chunks = 0
        self.max_rms = 1000  # Max reasonable value for rms

    def start_recording(self, output_file):
        """Record from the default input device until stopped, then save.

        Blocks the calling thread; stop via stop_recording() from another
        thread or by silence detection when auto_stop is enabled.
        """
        if os.path.exists(output_file):
            os.remove(output_file)
        self.recording = True
        self.frames = []
        self.silent_chunks = 0
        p = pyaudio.PyAudio()
        stream = p.open(format=self.sample_format,
                        channels=self.channels,
                        rate=self.sample_rate,
                        frames_per_buffer=self.chunk_size,
                        input=True)
        silence_threshold = self.max_rms * self.silence_threshold_percent
        # Number of consecutive silent chunks that make up silence_duration
        required_chunks = math.ceil(self.silence_duration * (self.sample_rate / self.chunk_size))
        while self.recording:
            data = stream.read(self.chunk_size)
            self.frames.append(data)
            if self.auto_stop:
                rms = self._calculate_rms(data)
                if rms < silence_threshold:
                    self.silent_chunks += 1
                else:
                    self.silent_chunks = 0
                if self.silent_chunks >= required_chunks:
                    self.recording = False
        stream.stop_stream()
        stream.close()
        p.terminate()
        self.save_recording(output_file)

    def stop_recording(self, output_file):
        """Signal the recording loop to stop; saving happens in start_recording."""
        self.recording = False

    def save_recording(self, output_file):
        """Write the captured frames to *output_file* as WAV, then notify."""
        p = pyaudio.PyAudio()
        wf = wave.open(output_file, 'wb')
        wf.setnchannels(self.channels)
        wf.setsampwidth(p.get_sample_size(self.sample_format))
        wf.setframerate(self.sample_rate)
        wf.writeframes(b''.join(self.frames))
        wf.close()
        p.terminate()
        self.stop_function()

    def _calculate_rms(self, data):
        """Return the root mean square of little-endian 16-bit PCM *data*."""
        count = len(data) // 2  # Each sample is 2 bytes (16-bit)
        if count == 0:
            return 0.0  # Avoid ZeroDivisionError on an empty chunk
        samples = struct.unpack("<" + str(count) + "h", data)
        mean = sum(samples) / count
        # Remove the DC offset before computing the RMS
        sum_squares = sum((sample - mean) ** 2 for sample in samples)
        return (sum_squares / count) ** 0.5
78 |
--------------------------------------------------------------------------------
/src/ui_controller.py:
--------------------------------------------------------------------------------
1 | from gi.repository import Adw, Gtk
2 |
3 | from .utility.system import open_website
4 |
class UIController:
    """Interface exposed to Extensions in order to modify the UI"""

    def __init__(self, window):
        self.window = window

    def add_tab(self, child: Gtk.Widget, focus=True) -> Adw.TabPage:
        """Append *child* to the canvas as a new tab.

        Args:
            child: widget to host in the tab
            focus: when True, reveal the sidebar and select the new tab
        """
        page = self.window.canvas_tabs.append(child)
        if focus:
            self.window.show_sidebar()
            self.window.canvas_tabs.set_selected_page(page)
        return page

    def new_browser_tab(self, url: str | None = None, new: bool = True) -> Adw.TabPage:
        """Open a browser tab.

        Args:
            url: address to load
            new: when False and a browser tab is focused, reuse that tab;
                otherwise create a new one
        """
        if not new:
            panel = self.window.get_current_browser_panel()
            if panel is not None:
                panel.navigate_to(url)
                return panel.get_parent()
        return self.window.add_browser_tab(url=url)

    def open_link(self, url: str | None, new: bool = False, use_integrated_browser: bool = True):
        """Open a link in the integrated browser or the system default one.

        Args:
            url: address to open
            new: when False, reuse a focused browser tab if any
            use_integrated_browser: when False, open in the default browser
        """
        if not use_integrated_browser:
            open_website(url)
            return None
        return self.new_browser_tab(url=url, new=new)

    def new_explorer_tab(self, path: str, new: bool = True) -> Adw.TabPage:
        """Open a file-explorer tab.

        Args:
            path: full path to show
            new: when False and an explorer tab is focused, reuse that tab;
                otherwise create a new one
        """
        if not new:
            panel = self.window.get_current_explorer_panel()
            if panel is not None:
                panel.go_to_path(path)
                return panel.get_parent()
        return self.window.add_explorer_tab(path=path)

    def new_editor_tab(self, file: str) -> Adw.TabPage:
        """Open an editor tab for *file* (None edits custom unsaved text)."""
        return self.window.add_editor_tab(file=file)

    def new_terminal_tab(self, command: str | None = None) -> Adw.TabPage:
        """Open a terminal tab, optionally running *command* in it."""
        return self.window.add_terminal_tab(command=command)

    def add_text_to_input(self, text: str, focus_input: bool = False):
        """Insert *text* into the chat input, optionally focusing the input."""
        self.window.add_text_to_input(text, focus_input)
87 |
--------------------------------------------------------------------------------
/src/handlers/tts/elevenlabs_handler.py:
--------------------------------------------------------------------------------
1 | from ...utility.pip import find_module, install_module
2 | from .tts import TTSHandler
3 |
class ElevenLabs(TTSHandler):
    """Text-to-speech handler backed by the ElevenLabs API."""
    key = "elevenlabs"

    def get_extra_settings(self) -> list:
        """Return the settings rows shown in the UI for this handler."""
        return [
            {
                "key": "api",
                "title": _("API Key"),
                "description": _("API Key for ElevenLabs"),
                "type": "entry",
                "default": "",
                "password": True,
            },
            {
                "key": "voice",
                "title": _("Voice"),
                "description": _("Voice ID to use"),
                "type": "entry",
                "default": "21m00Tcm4TlvDq8ikWAM"
            },
            {
                "key": "model",
                "title": _("Model"),
                "description": _("Name of the model to use"),
                "type": "combo",
                "values": (("eleven_turbo_v2_5", "eleven_turbo_v2_5"), ("eleven_multilingual_v2", "eleven_multilingual_v2"), ("eleven_flash_v2_5", "eleven_flash_v2_5"), ("eleven_v3", "eleven_v3"), ("eleven_ttv_v3", "eleven_ttv_v3")),
                "default": "eleven_turbo_v2_5"
            },
            {
                "key": "stability",
                "title": _("Stability"),
                "description": _("stability of the voice"),
                "type": "range",
                "min": 0,
                "max": 1,
                "round-digits": 2,
                "default": 0.50
            },
            {
                "key": "similarity",
                "title": _("Similarity boost"),
                "description": _("Boosts overall voice clarity and speaker similarity"),
                "type": "range",
                "min": 0,
                "max": 1,
                "round-digits": 2,
                "default": 0.75
            },
            {
                "key": "style_exaggeration",
                "title": _("Style exaggeration"),
                # Fix: user-facing typo "reccomended" -> "recommended"
                "description": _("High values are recommended if the style of the speech must be exaggerated"),
                "type": "range",
                "min": 0,
                "max": 1,
                "round-digits": 2,
                "default": 0
            },

        ]

    def install(self):
        """Install the pinned elevenlabs SDK into the handler's pip prefix."""
        install_module("elevenlabs==2.9.1", self.pip_path, True)

    def is_installed(self) -> bool:
        """Check whether the elevenlabs module is importable."""
        return find_module("elevenlabs") is not None

    def save_audio(self, message, file):
        """Synthesize *message* through the ElevenLabs API and save it to *file*."""
        from elevenlabs.client import ElevenLabs
        from elevenlabs import save
        from elevenlabs.types import VoiceSettings
        client = ElevenLabs(api_key=self.get_setting("api"))
        sett = VoiceSettings(stability=self.get_setting("stability"), similarity_boost=self.get_setting("similarity"), style=self.get_setting("style_exaggeration"))
        audio = client.text_to_speech.convert(text=message, voice_id=self.get_setting("voice"), model_id=self.get_setting("model"), output_format="mp3_44100_128", voice_settings=sett)
        save(audio, file)
78 |
--------------------------------------------------------------------------------
/src/handlers/llm/gpt3any_handler.py:
--------------------------------------------------------------------------------
1 | from typing import Callable, Any
2 |
3 | from .g4f_handler import G4FHandler
4 |
class GPT3AnyHandler(G4FHandler):
    """
    Use any GPT3.5-Turbo providers
    - History is supported by almost all of them
    - System prompts are not well supported, so the prompt is put on top of the message
    """
    key = "GPT3Any"

    def __init__(self, settings, path):
        super().__init__(settings, path)
        self.client = None
        if self.is_installed():
            self.init_client()

    def init_client(self):
        """Build a g4f client that retries across provider tiers in order."""
        import g4f
        from g4f.Provider import RetryProvider
        good_providers = [g4f.Provider.DDG, g4f.Provider.Pizzagpt, g4f.Provider.DarkAI, g4f.Provider.Koala, g4f.Provider.AmigoChat]
        good_nongpt_providers = [g4f.Provider.ReplicateHome, g4f.Provider.RubiksAI, g4f.Provider.TeachAnything, g4f.Provider.Free2GPT, g4f.Provider.DeepInfraChat, g4f.Provider.PerplexityLabs]
        # Fix: Upstage was listed twice in this fallback tier.
        acceptable_providers = [g4f.Provider.Blackbox, g4f.Provider.Upstage]
        self.client = g4f.client.Client(provider=RetryProvider([RetryProvider(good_providers), RetryProvider(good_nongpt_providers), RetryProvider(acceptable_providers)], shuffle=False))
        self.n = 0

    def generate_text(self, prompt: str, history: list[dict[str, str]] = [], system_prompt: list[str] = []) -> str:
        """Generate a complete response for *prompt* given chat history."""
        if self.client is None:
            self.init_client()
        # convert_history returns a fresh list, so appending below never
        # mutates the default argument.
        messages = self.convert_history(history, system_prompt)
        messages.append({"role": "user", "content": prompt})
        response = self.client.chat.completions.create(
            model="",
            messages=messages,
        )
        return response.choices[0].message.content

    def generate_text_stream(self, prompt: str, history: list[dict[str, str]] = [], system_prompt: list[str] = [], on_update: Callable[[str], Any] = lambda _: None, extra_args: list = []) -> str:
        """Stream a response, invoking on_update with the partial text.

        on_update receives the stripped partial message followed by
        *extra_args*; it is only called when at least 2 new characters
        arrived since the last call, to limit UI churn.
        """
        if self.client is None:
            self.init_client()
        messages = self.convert_history(history, system_prompt)
        messages.append({"role": "user", "content": prompt})
        response = self.client.chat.completions.create(
            model="",
            messages=messages,
            stream=True,
        )
        full_message = ""
        prev_message = ""
        for chunk in response:
            if chunk.choices[0].delta.content:
                full_message += chunk.choices[0].delta.content
                args = (full_message.strip(), ) + tuple(extra_args)
                if len(full_message) - len(prev_message) > 1:
                    on_update(*args)
                    prev_message = full_message
        return full_message.strip()

    def generate_chat_name(self, request_prompt: str = "") -> str:
        """Generate a chat name from the last few history messages."""
        history = ""
        # A [-4:] slice already copes with histories shorter than 4 entries,
        # so no explicit length check is needed.
        for message in self.history[-4:]:
            history += message["User"] + ": " + message["Message"] + "\n"
        return self.generate_text(history + "\n\n" + request_prompt)
70 |
--------------------------------------------------------------------------------
/src/utility/pip.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import subprocess
3 | import sys
4 | import os
5 | import threading
6 |
# Guards access to the LOCKS dict below while a per-path lock is looked up/created
LOCK_SEMAPHORE = threading.Semaphore(1)
# One semaphore per install target path: installs into the same path are serialized
LOCKS = {}
# Package specs that have been requested for install in this process (never cleared,
# so repeated install requests for the same spec are ignored)
INSTALLING_PACKAGES = []
# Whether pip has been bootstrapped via get-pip.py during this run
PIP_INSTALLED = False
11 |
def is_module_available(module_name: str) -> bool:
    """
    Checks if a module can be found by the import system without importing it.

    This is generally faster and safer than trying to import the module directly,
    as it avoids executing the module's initialization code and potential side effects.

    Args:
        module_name: The full name of the module (e.g., 'os', 'requests.api').

    Returns:
        True if the module specification can be found, False otherwise.
        Returns True immediately if the module is already imported.
    """
    # Bug fix: `importlib.util` is a submodule and is not guaranteed to be
    # bound by a plain `import importlib`; import it explicitly here.
    import importlib.util
    if module_name in sys.modules:
        return True
    try:
        # find_spec raises ModuleNotFoundError (a subclass of ImportError)
        # when a parent package does not exist.
        return importlib.util.find_spec(module_name) is not None
    except ImportError:
        return False
35 |
def find_module(full_module_name):
    """
    Returns module object if module `full_module_name` can be imported.

    Returns None if module does not exist.

    Exception is raised if (existing) module raises exception during its import.
    """
    # Bug fix: this previously returned the *boolean* from is_module_available()
    # instead of the module object promised by the docstring. Callers doing
    # `find_module(x) is None` keep working, and callers that expect the module
    # object now actually get it.
    import importlib.util
    if full_module_name in sys.modules:
        return sys.modules[full_module_name]
    try:
        spec = importlib.util.find_spec(full_module_name)
    except ImportError:
        return None
    if spec is None:
        return None
    return importlib.import_module(full_module_name)
45 |
def runtime_find_module(full_module_name):
    """Import and return the module, or None when the import fails for any reason."""
    try:
        module = importlib.import_module(full_module_name)
    except Exception:
        module = None
    return module
51 |
def install_module(module, path, update=True, cache_dir=None):
    """Install a pip package into `path` (pip --target), bootstrapping pip if needed.

    Args:
        module: space-separated package spec(s) to install
        path: target directory for pip --target
        update: pass --upgrade to pip
        cache_dir: directory to use as TMPDIR during the install; defaults to ./tmp

    Returns:
        The CompletedProcess from pip, or None on failure or duplicate request.
    """
    # Avoid reinstalling the same package multiple times
    if module in INSTALLING_PACKAGES:
        return
    INSTALLING_PACKAGES.append(module)
    # Manage pip path locking: one lock per target path, created under LOCK_SEMAPHORE
    global PIP_INSTALLED
    LOCK_SEMAPHORE.acquire()
    lock = LOCKS.get(path, None)
    if lock is None:
        lock = threading.Semaphore(1)
        LOCKS[path] = lock
    LOCK_SEMAPHORE.release()
    lock.acquire()

    # Point TMPDIR at a writable location for pip's build artifacts
    origTemp = os.environ.get("TMPDIR")
    os.environ["TMPDIR"] = cache_dir if cache_dir else os.path.join(os.getcwd(), "tmp")
    r = None
    try:
        if find_module("pip") is None and not PIP_INSTALLED:
            print("Downloading pip...")
            subprocess.check_output(["bash", "-c", "cd " + os.path.dirname(path) + " && wget https://bootstrap.pypa.io/get-pip.py && python get-pip.py"])
            subprocess.check_output(["bash", "-c", "cd " + os.path.dirname(path) + " && rm get-pip.py || true"])
            PIP_INSTALLED = True
        command = [sys.executable, "-m", "pip", "install", "--target", path]
        if update:
            command.append("--upgrade")
        r = subprocess.run(command + module.split(" "), capture_output=False)
        print(module + " installed")
    except Exception as e:
        PIP_INSTALLED = False
        print("Error installing " + module + " " + str(e))
        r = None
    finally:
        # Bug fix: previously TMPDIR stayed clobbered when it was originally
        # unset, and the lock was only released on the success path order.
        # Restore the environment and release the lock unconditionally.
        if origTemp is not None:
            os.environ["TMPDIR"] = origTemp
        else:
            os.environ.pop("TMPDIR", None)
        lock.release()
    return r
92 |
--------------------------------------------------------------------------------
/src/ui/widgets/markuptextview.py:
--------------------------------------------------------------------------------
1 | from gi.repository import Gtk, Pango, Gdk
2 | import xml.etree.ElementTree as ET
3 | from .. import apply_css_to_widget
4 |
class MarkupTextView(Gtk.TextView):
    """Read-only Gtk.TextView that renders a small subset of markup tags
    (b, i, tt, sub, sup, a) by parsing the text with ElementTree and
    applying the matching Gtk.TextTags."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.set_wrap_mode(Gtk.WrapMode.WORD)
        self.set_editable(False)
        self.set_cursor_visible(False)

        self.buffer = self.get_buffer()
        self.create_tags()
        # Transparent background so the view blends into its parent widget
        self.add_css_class("scroll")
        apply_css_to_widget(
            self, ".scroll { background-color: rgba(0,0,0,0);}"
        )

    def update_textview_size(self, parent=None):
        """Resize the view to fit the current buffer text, wrapped at the
        parent's width (or 300px when no parent is given)."""
        if parent is not None:
            s = parent.get_size(Gtk.Orientation.HORIZONTAL)
        else:
            s = 300
        buffer = self.get_buffer()
        layout = self.create_pango_layout(buffer.get_text(buffer.get_start_iter(), buffer.get_end_iter(), True))
        layout.set_width(s * Pango.SCALE)
        layout.set_wrap(Pango.WrapMode.WORD)
        width, height = layout.get_pixel_size()
        self.set_size_request(width, height)

    def create_tags(self):
        """Create the TextTags used to render each supported markup element."""
        self.tags = {
            'b': self.buffer.create_tag("bold", weight=Pango.Weight.BOLD),
            'i': self.buffer.create_tag("italic", style=Pango.Style.ITALIC),
            'tt': self.buffer.create_tag("tt", family="monospace"),
            'sub': self.buffer.create_tag("sub", rise=-5000, size_points=8),
            'sup': self.buffer.create_tag("sup", rise=5000, size_points=8),
            'a': self.buffer.create_tag("link", foreground="blue", underline=Pango.Underline.SINGLE)
        }

    def add_markup_text(self, iter, text: str):
        """Insert markup `text` at `iter`; falls back to plain insertion on
        parse errors."""
        # Bug fix: the text must actually be wrapped in a root element —
        # ET.fromstring rejects mixed content / multiple top-level tags
        # otherwise.
        wrapped_markup = f"<markup>{text}</markup>"
        try:
            root = ET.fromstring(wrapped_markup)
        except ET.ParseError as e:
            print("Parse error:", e)
            self.buffer.insert(iter, text)
            return
        self._insert_markup_recursive(root, iter, [])

    def set_markup(self, markup: str):
        """Replace the buffer content with the rendered `markup`.
        On a parse error the buffer is left unchanged."""
        # Wrap in a root tag for parsing (bug fix: the wrapper element was missing)
        wrapped_markup = f"<markup>{markup}</markup>"
        try:
            root = ET.fromstring(wrapped_markup)
        except ET.ParseError as e:
            print("Parse error:", e)
            return

        self.buffer.set_text("")
        self._insert_markup_recursive(root, self.buffer.get_start_iter(), [])

    def _insert_markup_recursive(self, elem, iter, active_tags):
        """Walk the element tree depth-first, inserting text with the tag
        stack accumulated from enclosing elements."""
        # Add text before children
        if elem.text:
            self.buffer.insert_with_tags(iter, elem.text, *active_tags)

        # Process children recursively
        for child in elem:
            tag_name = child.tag.lower()
            tags_to_apply = list(active_tags)

            if tag_name == "a":
                tags_to_apply.append(self.tags["a"])
            elif tag_name in self.tags:
                tags_to_apply.append(self.tags[tag_name])

            self._insert_markup_recursive(child, iter, tags_to_apply)

            # Tail text after this child
            if child.tail:
                self.buffer.insert_with_tags(iter, child.tail, *active_tags)
84 |
--------------------------------------------------------------------------------
/src/handlers/llm/custom_handler.py:
--------------------------------------------------------------------------------
1 | import json
2 | from subprocess import PIPE, Popen, check_output
3 | from typing import Any, Callable
4 |
5 | from .llm import LLMHandler
6 | from ...utility.strings import quote_string
7 | from ...utility.system import get_spawn_command
8 | from ...handlers import ExtraSettings
9 |
class CustomLLMHandler(LLMHandler):
    """LLM handler that produces responses by running a user-configured shell
    command; the chat and prompts are passed to the command as JSON."""
    key = "custom_command"

    @staticmethod
    def requires_sandbox_escape() -> bool:
        """If the handler requires to run commands on the user host system"""
        return True

    def get_extra_settings(self):
        return [
            ExtraSettings.ToggleSetting("streaming", _("Message Streaming"), _("Gradually stream message output"), True),
            ExtraSettings.EntrySetting("command", _("Command to execute to get bot output"), _("Command to execute to get bot response, {0} will be replaced with a JSON file containing the chat, {1} with the system prompt"), ""),
            ExtraSettings.EntrySetting("suggestion", _("Command to execute to get bot's suggestions"), _("Command to execute to get chat suggestions, {0} will be replaced with a JSON file containing the chat, {1} with the extra prompts, {2} with the numer of suggestions to generate. Must return a JSON array containing the suggestions as strings"), "")
        ]

    def generate_text(self, prompt: str, history: list[dict[str, str]] = [], system_prompt: list[str] = []) -> str:
        """Run the configured command once and return its full stdout."""
        command = self.get_setting("command")
        # Bug fix: operate on a copy — appending to `history` mutated the
        # caller's list and the shared mutable default argument.
        messages = history + [{"User": "User", "Message": prompt}]
        command = command.replace("{0}", quote_string(json.dumps(messages)))
        command = command.replace("{1}", quote_string(json.dumps(system_prompt)))
        out = check_output(get_spawn_command() + ["bash", "-c", command])
        return out.decode("utf-8")

    def get_suggestions(self, request_prompt: str = "", amount: int = 1) -> list[str]:
        """Run the suggestion command; expects a JSON array of strings on stdout."""
        command = self.get_setting("suggestion")
        if command == "":
            return []
        # Bug fix: previously the request prompt was appended to self.history,
        # permanently polluting the handler's chat history.
        messages = self.history + [{"User": "User", "Message": request_prompt}]
        command = command.replace("{0}", quote_string(json.dumps(messages)))
        command = command.replace("{1}", quote_string(json.dumps(self.prompts)))
        command = command.replace("{2}", str(amount))
        out = check_output(get_spawn_command() + ["bash", "-c", command])
        return json.loads(out.decode("utf-8"))

    def generate_text_stream(self, prompt: str, history: list[dict[str, str]] = [], system_prompt: list[str] = [], on_update: Callable[[str], Any] = lambda _: None, extra_args: list = []) -> str:
        """Run the configured command and stream its stdout line by line to
        `on_update` (throttled to growth of more than one character)."""
        command = self.get_setting("command")
        # Bug fix: copy instead of mutating the caller's list / mutable default.
        messages = history + [{"User": "User", "Message": prompt}]
        command = command.replace("{0}", quote_string(json.dumps(messages)))
        command = command.replace("{1}", quote_string(json.dumps(system_prompt)))
        process = Popen(get_spawn_command() + ["bash", "-c", command], stdout=PIPE)
        full_message = ""
        prev_message = ""
        while True:
            if process.stdout is None:
                break
            chunk = process.stdout.readline()
            if not chunk:
                break
            full_message += chunk.decode("utf-8")
            args = (full_message.strip(), ) + tuple(extra_args)
            if len(full_message) - len(prev_message) > 1:
                on_update(*args)
                prev_message = full_message

        process.wait()
        return full_message.strip()
66 |
67 |
--------------------------------------------------------------------------------
/data/icons/right-large-symbolic.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/data/icons/left-large-symbolic.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/data/icons/hicolor/scalable/apps/io.github.qwersyk.Newelle.svg:
--------------------------------------------------------------------------------
1 |
30 |
--------------------------------------------------------------------------------
/src/handlers/memory/user_summary_handler.py:
--------------------------------------------------------------------------------
1 | from ...handlers.llm import LLMHandler
2 | from ...handlers.embeddings import EmbeddingHandler
3 | from .memory_handler import MemoryHandler
4 | from ...handlers import ExtraSettings
5 | from ...utility.strings import remove_thinking_blocks
6 |
class UserSummaryHandler(MemoryHandler):
    """Memory handler that maintains a rolling, LLM-generated summary of the
    user and injects it into every conversation as extra context."""
    key = "user-summary"

    def __init__(self, settings, path):
        super().__init__(settings, path)
        stored = self.get_setting("seen_messages", False)
        self.seen_messages = [] if stored is None else stored
        self.DEFAULT_PROMPT = """For the duration of this conversation, please keep the following long-term memory summary in mind. This summary includes important details about the user's preferences, interests, and previous interactions. Use this context to ensure your responses are consistent and personalized.
User Long-Term Memory Summary:
{prompt}
Continue with the conversation while considering the above context.
"""

    def restore_prompt(self, button=None):
        """Reset the context-injection prompt back to its default wording."""
        self.set_setting("prompt", self.DEFAULT_PROMPT)
        self.settings_update()

    def get_extra_settings(self) -> list:
        return [
            ExtraSettings.ButtonSetting("reset_memory", "Reset Memory", "Reset the memory", lambda x: self.reset_memory(), "Reset Memory"),
            ExtraSettings.ScaleSetting("update_freq", "Update Summary Frequency", "How often to update the summary", 5, 1, 10, 0),
            ExtraSettings.MultilineEntrySetting("user_summary", "User Summary", "Current summary of the interactions with the assistant", ""),
            ExtraSettings.MultilineEntrySetting("prompt", "Summary prompt", "Prompt to get the summary. {prompt} will be replaced with the user summary.", self.DEFAULT_PROMPT, refresh=self.restore_prompt, refresh_icon="star-filled-rounded-symbolic"),
        ]

    def reset_memory(self):
        """Clear the stored user summary."""
        self.set_setting("user_summary", "")
        self.settings_update()

    def get_context(self, prompt: str, history: list[dict[str, str]]) -> list[str]:
        """Record the incoming prompt and return the summary context string."""
        self.seen_messages.append(prompt)
        template = self.get_setting("prompt")
        return ["---" + template.format(prompt=self.get_setting("user_summary"))]

    def register_response(self, bot_response, history):
        """Record the bot response; every `update_freq` messages, ask the LLM
        to merge the recent chat into the stored summary."""
        self.seen_messages.append(bot_response)
        frequency = min(int(self.get_setting("update_freq")), self.memory_size)
        if frequency == 0:
            # Fall back to the raw setting when min() clamped to zero
            frequency = int(self.get_setting("update_freq"))
        UPDATE_PROMPT = """
You are tasked with updating the user's long-term memory summary based on the latest chat history. The goal is to capture everything useful about the user that will improve future interactions. Retain all relevant details from the existing summary and incorporate new information from the provided chat history. Be sure to include the user's preferences, interests, recurring topics, and any personal context that could help tailor responses in the future.

Chat History:
{history}

Existing Summary:
{summary}

Please generate an updated long-term memory summary that is clear, concise, and organized.
Only output the summary with no other details.
"""
        if len(self.seen_messages) % frequency == 0:
            chat_log = "\n".join(m["User"] + ": " + m["Message"] for m in history[-frequency-2:])
            request = UPDATE_PROMPT.format(history=chat_log, summary=self.get_setting("user_summary"))
            updated = remove_thinking_blocks(self.llm.generate_text(request))
            self.set_setting("user_summary", updated)
        self.set_setting("seen_messages", self.seen_messages)
65 |
--------------------------------------------------------------------------------
/src/handlers/tts/tts.py:
--------------------------------------------------------------------------------
1 | from abc import abstractmethod
2 | from typing import Callable
3 |
4 | from subprocess import Popen
5 | import threading
6 | import time
7 | import os
8 | from ..handler import Handler
9 |
class TTSHandler(Handler):
    """Every TTS handler should extend this class."""
    key = ""
    schema_key = "tts-voice"
    voices : tuple
    # Class-level semaphore: serializes playback across all handler instances
    _play_lock : threading.Semaphore = threading.Semaphore(1)
    def __init__(self, settings, path):
        super().__init__(settings, path)
        self.settings = settings
        self.path = path  # directory used for temporary audio files
        self.voices = tuple()
        # Playback callbacks, replaced via connect("start"/"stop", ...)
        self.on_start = lambda : None
        self.on_stop = lambda : None
        self.play_process = None  # Popen of the currently playing ffplay, if any

    def get_extra_settings(self) -> list:
        """Get extra settings for the TTS"""
        voices = self.get_voices()
        # Default to the first available voice's value, or "" when none exist
        default = "" if len(voices) == 0 else voices[0][1]
        return [
            {
                "key": "voice",
                "type": "combo",
                "title": _("Voice"),
                "description": _("Choose the preferred voice"),
                "default": default,
                "values": voices
            }
        ]

    def get_voices(self):
        """Return a tuple containing the available voices"""
        return tuple()

    def voice_available(self, voice):
        """Check if a voice is available (matched against the voice value, index 1)"""
        for l in self.get_voices():
            if l[1] == voice:
                return True
        return False

    @abstractmethod
    def save_audio(self, message, file):
        """Save an audio in a certain file path"""
        pass

    def play_audio(self, message):
        """Play an audio from the given message"""
        # Generate random name (timestamp + random hex) to avoid collisions
        timestamp = str(int(time.time()))
        random_part = str(os.urandom(8).hex())
        file_name = f"{timestamp}_{random_part}.mp3"
        path = os.path.join(self.path, file_name)
        self.save_audio(message, path)
        # playsound blocks until playback ends, then the temp file is removed
        self.playsound(path)
        try:
            os.remove(path)
        except Exception as e:
            print("Could not delete file: " + str(e))

    def connect(self, signal: str, callback: Callable):
        """Register a callback for the "start" or "stop" playback signal."""
        if signal == "start":
            self.on_start = callback
        elif signal == "stop":
            self.on_stop = callback

    def playsound(self, path):
        """Play an audio from the given path"""
        # Stop any current playback first so the lock below is released by
        # the previous playsound call before we acquire it.
        self.stop()
        self._play_lock.acquire()
        self.on_start()
        try:
            p = Popen(["ffplay", "-nodisp", "-autoexit", "-hide_banner", path])
            self.play_process = p
            p.wait()
            p.terminate()
        except Exception as e:
            print("Error playing the audio: " + str(e))
            pass
        # on_stop fires even when playback failed
        self.on_stop()
        self.play_process = None
        self._play_lock.release()

    def stop(self):
        """Terminate the currently playing audio process, if any."""
        if self.play_process is not None:
            self.play_process.terminate()

    def get_current_voice(self):
        """Get the current selected voice"""
        voice = self.get_setting("voice")
        if voice is None:
            # No setting stored: fall back to the first known voice, if any
            if self.voices == ():
                return None
            return self.voices[0][1]
        else:
            return voice

    def set_voice(self, voice):
        """Set the given voice"""
        self.set_setting("voice", voice)
110 |
111 |
112 |
--------------------------------------------------------------------------------
/src/handlers/websearch/duckduckgo_handler.py:
--------------------------------------------------------------------------------
1 | from ...utility.website_scraper import WebsiteScraper
2 | from .websearch import WebSearchHandler
3 | from ...handlers import ExtraSettings, ErrorSeverity
4 |
class DDGSeachHandler(WebSearchHandler):
    """Web search handler backed by DuckDuckGo (ddgs package)."""
    key="ddgsearch"

    @staticmethod
    def get_extra_requirements() -> list:
        return ["ddgs"]

    def get_extra_settings(self) -> list:
        return [
            ExtraSettings.ScaleSetting("results", _("Max Results"), _("Number of results to consider"), 2, 1, 10, 0),
            ExtraSettings.EntrySetting("region", _("Region"), _("Region for the search results"), "us-en"),
        ]

    def query(self, keywords: str) -> tuple[str, list]:
        """Non-streaming query: same as query_streaming with a no-op callback."""
        return self.query_streaming(keywords, lambda title, link, favicon: None)

    def query_streaming(self, keywords: str, add_website) -> tuple[str, list]:
        """Search DDG for `keywords`, scrape the result pages and return
        (concatenated page text, list of result URLs). `add_website` is called
        with (title, url, favicon) for each page as it is processed."""
        # The PyPI package was renamed duckduckgo_search -> ddgs (matching
        # get_extra_requirements); fall back to the old import name if needed.
        try:
            from ddgs import DDGS
        except ImportError:
            from duckduckgo_search import DDGS
        ddg = DDGS()
        error = ""
        try:
            results = ddg.text(keywords, max_results=self.get_setting("results"), region=self.get_setting("region"))
        except Exception as e:
            error = str(e)
            results = []
        if len(results) == 0:
            # Bug fix: `e` was referenced here even when no exception had been
            # raised (search simply returned zero hits), causing a NameError.
            self.throw("Failed to query DDG: " + error, ErrorSeverity.WARNING)
            return "No results found", []
        results = [(result['href'], result['title']) for result in results]
        print(results)
        content, urls = self.scrape_websites(results, add_website)
        text = ""
        for result in content:
            text += "\nSource: " + result["url"]
            text += f"## {result['title']}\n{result['text'][:3000]}\n\n"
        return text, urls

    def scrape_websites(self, result_links, update):
        """Download and extract up to `results` pages from (url, title) pairs.

        Returns (extracted_content, urls): dicts with url/title/text, and the
        list of all URLs that were looked at (including failed/ skipped ones).
        """
        max_results = self.get_setting("results")
        if not result_links:
            print("No result links found on the DDG page.")
            return [],[]
        urls = []
        extracted_content = []
        processed_count = 0

        for url, initial_title in result_links:
            # NOTE: appended before the limit check, so `urls` can include one
            # URL beyond the processed limit — kept for backward compatibility.
            urls.append(url)
            if processed_count >= max_results:
                print(f"Reached maximum results limit ({max_results}).")
                break

            print(f"\nProcessing URL ({processed_count + 1}/{min(len(result_links), max_results)}): {url}")
            article_data = {'url': url, 'title': initial_title, 'text': ''} # Pre-populate with URL and initial title

            try:
                # Configure Article object
                article = WebsiteScraper(url)

                # Download and parse
                article.parse_article()
                update(article.get_title(), url, article.get_favicon())
                # Check if parsing was successful and text was extracted
                text = article.get_text()
                if text:
                    article_data['title'] = article.get_title() or initial_title # Prefer newspaper's title if available
                    article_data['text'] = text
                    extracted_content.append(article_data)
                    print(f"  Successfully extracted content. Title: '{article_data['title']}'")
                    processed_count += 1
                else:
                    print("  Could not extract main text content from the page.")
            except Exception as e:
                # Catch other potential errors during download/parse
                print(f"  An unexpected error occurred processing {url}: {e}")

        print(f"\nFinished processing. Successfully extracted content from {len(extracted_content)} URLs.")
        return extracted_content, urls
81 |
--------------------------------------------------------------------------------
/src/ui/thread_editing.py:
--------------------------------------------------------------------------------
1 | from gi.repository import Gtk, Adw, Gio, Pango
2 |
3 |
class ThreadEditing(Gtk.Window):
    """Modal window listing the running subprocess streams (app.win.streams),
    letting the user stop each one and inspect finished output."""
    def __init__(self, app, *args, **kwargs):
        super().__init__(*args, **kwargs,title=_('Thread editing'))
        self.set_default_size(400, 400)
        self.set_transient_for(app.win)
        self.set_modal(True)
        header = Adw.HeaderBar(css_classes=["flat"])
        self.set_titlebar(header)

        # Manual refresh button in the header bar
        button_reload = Gtk.Button(css_classes=["flat"])
        icon = Gtk.Image.new_from_gicon(Gio.ThemedIcon(name="view-refresh-symbolic"))
        icon.set_icon_size(Gtk.IconSize.INHERIT)
        button_reload.set_child(icon)
        button_reload.connect("clicked", self.update_window)

        header.pack_end(button_reload)
        self.app = app
        self.update_window()
    def update_window(self,*a):
        """Rebuild the whole window content from the current stream list.

        Called both on construction and as a 'clicked' handler (extra args
        are ignored via *a).
        """
        scrolled_window = Gtk.ScrolledWindow()
        scrolled_window.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)

        main = Gtk.Box(margin_top=10,margin_start=10,margin_bottom=10,margin_end=10,valign=Gtk.Align.START,halign=Gtk.Align.CENTER,orientation=Gtk.Orientation.VERTICAL)
        if len(self.app.win.streams)==0:
            # Empty state: dimmed icon + label, centered
            main.set_opacity(0.4)
            main.set_vexpand(True)
            main.set_valign(Gtk.Align.CENTER)
            icon = Gtk.Image.new_from_gicon(Gio.ThemedIcon(name="network-offline-symbolic"))
            icon.set_css_classes(["empty-folder"])
            icon.set_valign(Gtk.Align.END)
            icon.set_vexpand(True)
            main.append(icon)
            main.append(Gtk.Label(label=_("No threads are running"), vexpand=True,valign=Gtk.Align.START,css_classes=["empty-folder", "heading"]))
        else:
            # One card per stream, with a stop button named after its index
            for i in range(len(self.app.win.streams)):
                stream_menu = Gtk.Box(orientation=Gtk.Orientation.VERTICAL,css_classes=["card"],margin_top=10,margin_start=10,margin_end=10,margin_bottom=10)
                stream_menu.set_size_request(300, -1)
                box = Gtk.Box(margin_top=10,margin_start=10,margin_end=10,margin_bottom=10)
                box.append(Gtk.Label(label=_("Thread number: ")+str(i+1)))
                button = Gtk.Button(margin_start=5, margin_end=5,
                                    valign=Gtk.Align.CENTER,halign=Gtk.Align.END, hexpand= True)
                button.connect("clicked", self.stop_flow)
                # The widget name carries the stream index for stop_flow
                button.set_name(str(i))
                box.append(button)
                stream_menu.append(box)
                main.append(stream_menu)
                icon_name="media-playback-stop-symbolic"
                if self.app.win.streams[i].poll() != None:
                    # Process has finished: show its output in an expander and
                    # disable the (now pointless) stop button
                    try:
                        code = str(self.app.win.streams[i].communicate()[0].decode())
                    except Exception:
                        code = None

                    icon_name = "emblem-ok-symbolic"
                    button.set_sensitive(False)
                    text_expander = Gtk.Expander(
                        label="Console", css_classes=["toolbar", "osd"], margin_start=10, margin_bottom=10,
                        margin_end=10
                    )
                    text_expander.set_child(Gtk.Label(wrap=True, wrap_mode=Pango.WrapMode.WORD_CHAR, label=code, selectable=True))
                    stream_menu.append(text_expander)
                icon = Gtk.Image.new_from_gicon(Gio.ThemedIcon(name=icon_name))
                icon.set_icon_size(Gtk.IconSize.INHERIT)
                button.set_child(icon)
        scrolled_window.set_child(main)
        self.set_child(scrolled_window)
    def stop_flow(self,widget):
        """Terminate the stream whose index is stored in the button's name."""
        self.app.win.streams[int(widget.get_name())].terminate()
        self.update_window()
73 |
--------------------------------------------------------------------------------
/src/handlers/embeddings/ollama_handler.py:
--------------------------------------------------------------------------------
1 | from .embedding import EmbeddingHandler
2 | from ...handlers import ExtraSettings
3 | from ...utility.system import get_spawn_command, can_escape_sandbox
4 | import threading
5 | import json
6 | import numpy as np
7 | import time
8 | from subprocess import Popen
9 |
class OllamaEmbeddingHandler(EmbeddingHandler):
    """Embedding handler backed by a (possibly auto-started) Ollama server."""
    key = "ollamaembedding"

    def __init__(self, settings, path):
        super().__init__(settings, path)
        models = self.get_setting("models", False)
        if models is None or len(models) == 0:
            # No cached model list: fetch it in the background, but at most
            # once per hour (rate-limited via the last_request timestamp)
            self.models = ()
            lr = self.get_setting("last_request", False)
            if lr is None or time.time() - lr > 3600:
                self.set_setting("last_request", time.time())
                threading.Thread(target=self.get_models, args=()).start()
        else:
            # Cached list is stored as JSON (see get_models)
            self.models = json.loads(models)

    @staticmethod
    def get_extra_requirements() -> list:
        return ["ollama"]

    def get_extra_settings(self) -> list:
        default = self.models[0][1] if len(self.models) > 0 else ""
        settings = [
            ExtraSettings.EntrySetting("endpoint", _("API Endpoint"), _("API base url, change this to use interference APIs"), "http://localhost:11434"),
            ExtraSettings.ToggleSetting("serve", _("Automatically Serve"), _("Automatically run ollama serve in background when needed if it's not running. You can kill it with killall ollama"), False),
            ExtraSettings.ToggleSetting("custom_model", _("Custom Model"), _("Use a custom model"), False, update_settings=True),
        ]
        # Either a combo of discovered models or a free-form entry,
        # depending on the custom_model toggle
        if not self.get_setting("custom_model", False):
            settings.append(
                ExtraSettings.ComboSetting(
                    "model",
                    _("Ollama Model"),
                    _("Name of the Ollama Model"),
                    self.models,
                    default,
                    refresh= lambda x: self.get_models(),
                )
            )
        else:
            settings.append(
                ExtraSettings.EntrySetting("model", _("Ollama Model"), _("Name of the Ollama Model"), default)
            )
        return settings

    def get_models(self):
        """Get the list of installed models in ollama"""
        if not self.is_installed():
            return
        from ollama import Client
        client = Client(
            host=self.get_setting("endpoint")
        )
        self.auto_serve(client)
        try:
            models = client.list()["models"]
        except Exception as e:
            print("Can't get Ollama models: ", e)
            return
        # Stored as ((label, value), ...) pairs for the combo setting
        res = tuple()
        for model in models:
            res += ((model.model, model.model), )
        self.models = res
        self.set_setting("models", json.dumps(self.models))
        self.settings_update()

    def get_embedding(self, text: list[str]) -> np.ndarray:
        """Embed the given texts with the configured Ollama model."""
        from ollama import Client
        client = Client(
            host=self.get_setting("endpoint")
        )
        self.auto_serve(client)
        arr = client.embed(model=self.get_setting("model"), input=text)
        return np.array(arr.embeddings)

    def auto_serve(self, client):
        """Automatically runs ollama serve on the user system if it's not running and the setting is toggled

        Args:
            client (): ollama client
        """
        if self.get_setting("serve") and can_escape_sandbox():
            try:
                # ps() succeeds only when a server is already reachable
                client.ps()
            except Exception as e:
                Popen(get_spawn_command() + ["ollama", "serve"])
                # Give the server a moment to come up before the caller uses it
                time.sleep(1)
95 |
96 |
--------------------------------------------------------------------------------
/src/utility/website_scraper.py:
--------------------------------------------------------------------------------
1 | from os import wait
2 | import requests
3 | from newspaper import Article
4 |
class WebsiteScraper:
    """Fetches a web page and extracts title/description/favicon/text via
    newspaper.Article, falling back to a Markdown conversion of the raw HTML
    when the article extraction yields too little text."""

    def __init__(self, url, fallback_word_threshold=100) -> None:
        self.url = url
        self.html = None
        self.article = None
        # Minimum extracted word count below which get_text() falls back to
        # converting the raw HTML to Markdown
        self.fallback_word_threshold = fallback_word_threshold

    def get_page_source(self):
        """Download and cache the raw page HTML."""
        self.html = requests.get(self.url).text
        return self.html

    def set_html(self, html):
        """Provide pre-fetched HTML so parse_article() skips downloading."""
        self.html = html

    def parse_article(self):
        """Parse the page with newspaper.Article. Idempotent: subsequent calls
        are no-ops once the article exists."""
        if self.article:
            return
        self.article = Article(url=self.url)
        if self.html is not None:
            self.article.set_html(self.html)
        else:
            self.article.download()
            self.html = self.article.html

        self.article.parse()

    def get_favicon(self):
        """Return an absolute favicon URL, or "" when none is declared."""
        self.parse_article()
        if self.article is None:
            return ""
        favicon = self.article.meta_favicon
        # Bug fix: an empty favicon used to be urljoin()ed into the bare base
        # URL; return "" so callers can tell there is no favicon at all.
        if not favicon:
            return ""
        # Resolve relative (or protocol-relative) favicon paths against the
        # page's origin. The old check also tested startswith("https"), which
        # was redundant ("https..." already starts with "http").
        if not favicon.startswith(("http://", "https://")):
            from urllib.parse import urlparse, urljoin
            parsed = urlparse(self.url)
            base_url = parsed.scheme + "://" + parsed.netloc
            favicon = urljoin(base_url, favicon)
        return favicon

    def get_description(self):
        """Return the page's meta description ("" when unparseable)."""
        self.parse_article()
        if self.article is None:
            return ""
        return self.article.meta_description

    def get_title(self):
        """Return the page title ("" when unparseable)."""
        self.parse_article()
        if self.article is None:
            return ""
        return self.article.title

    def get_text(self):
        """Return the main article text, or a Markdown rendering of the whole
        page when the extraction produced fewer than fallback_word_threshold
        words."""
        self.parse_article()
        if self.article is None:
            return ""
        text = self.article.text
        if len(text.split()) > self.fallback_word_threshold:
            return text
        return self.clean_html_to_markdown(self.html)

    def clean_html_to_markdown(self, html_content, include_links=False):
        """Strip the HTML down to a whitelist of content tags and convert it to
        Markdown; optionally append a list of all links found on the page."""
        from bs4 import BeautifulSoup
        from markdownify import markdownify as md
        # Parse the HTML content
        soup = BeautifulSoup(html_content, 'html.parser')
        tags_whitelist = ['a', 'p', 'ul', 'ol', 'li', 'b', 'strong', 'i', 'em', 'table', 'tr', 'th', 'td', 'h1', 'h2', 'h3', 'h4', 'h5']
        if not include_links:
            tags_whitelist.remove("a")
        # Remove images
        for img in soup.find_all('img'):
            img.decompose()

        # Remove style and script tags
        for tag in soup(['style', 'script', 'iframe', 'meta', 'head']):
            tag.decompose()

        # Remove all tags except links, paragraphs, lists, bold, italic
        for tag in soup.find_all(True):
            if tag.name not in tags_whitelist:
                tag.unwrap()

        # Convert the cleaned HTML to Markdown
        markdown_content = md(str(soup))

        # Extract links and format them as a list
        links = []
        if include_links:
            for a_tag in soup.find_all('a', href=True):
                link_text = a_tag.get_text(strip=True)
                link_url = a_tag['href']
                links.append(f"- [{link_text}]({link_url})")

        # Combine the Markdown content and the links list
        final_content = markdown_content
        if len(links) > 0:
            final_content += "\n\n#### Link List: " + "\n".join(links)

        return final_content
105 |
--------------------------------------------------------------------------------
/src/integrations/website_reader.py:
--------------------------------------------------------------------------------
1 | from gi.repository import Gtk, GLib, GdkPixbuf
2 | from ..extensions import NewelleExtension
3 | from ..ui.widgets import WebsiteButton
4 | import threading
5 | from ..utility.message_chunk import get_message_chunks
6 | from ..ui import load_image_with_callback
7 | from ..utility.website_scraper import WebsiteScraper
8 |
# Chunk size passed to rag.build_index when website content exceeds MAX_CONTEXT
CHUNK_SIZE = 512
# Maximum total characters of scraped website text injected directly into the prompt
MAX_CONTEXT = 5000
11 |
class WebsiteReader(NewelleExtension):
    """Extension that lets the user reference websites in chat messages.

    Lines starting with "#https://..." or "#http://..." in the last user
    message are rewritten into ```website``` codeblocks. The content of every
    referenced website is scraped and injected into the prompts: directly when
    small enough, through the RAG index when available, or hard-truncated as a
    last resort.
    """
    id = "website-reader"
    name = "Website Reader"

    def __init__(self, pip_path: str, extension_path: str, settings):
        super().__init__(pip_path, extension_path, settings)
        # url -> WebsiteScraper cache so each site is fetched and parsed once
        self.caches = {}

    def get_replace_codeblocks_langs(self) -> list:
        """Codeblock languages this extension renders with a custom widget."""
        return ["website"]

    def preprocess_history(self, history: list, prompts: list) -> tuple[list, list]:
        """Rewrite #url lines into website codeblocks and inject site content.

        Args:
            history: chat history; the last entry's "Message" may be rewritten.
            prompts: system prompt list, extended with website content.

        Returns:
            The (possibly modified) history and prompts.
        """
        user_message = history[-1]["Message"]
        lines = []
        for line in user_message.split("\n"):
            if line.startswith("#https://") or line.startswith("#http://"):
                # Strip only the leading hashtag. Splitting on "#" would
                # truncate URLs that contain a fragment (e.g. /page#section).
                urlline = line[1:].split()
                url = urlline[0]
                lines += ["```website", url, "```"]
                lines += [" ".join(urlline[1:])]
            else:
                lines += [line]
        history[-1]["Message"] = "\n".join(lines)

        # Collect every website referenced anywhere in the conversation
        websites = []
        for message in history:
            for chunk in get_message_chunks(message["Message"]):
                if chunk.type == "codeblock" and chunk.lang == "website":
                    websites.append(chunk.text)
        docs = []
        for website in websites:
            article = self.get_article_content(website)
            docs.append("-----\nSource: " + website + "\n" + article.get_text())
        if sum(len(doc) for doc in docs) < MAX_CONTEXT:
            # Small enough: inject the full scraped text directly
            prompts += docs
        elif self.rag is not None:
            # Too large: index the documents and inject only relevant chunks
            print("Using RAG")
            rag_docs = ["text:" + doc for doc in docs]
            index = self.rag.build_index(rag_docs, CHUNK_SIZE)
            results = index.query(user_message)
            prompts += ["Content from previous websites:\n" + "\n".join(results)]
        else:
            # No RAG available: fall back to a hard character truncation
            prompts.append("Content from previous websites:\n" + "\n".join(docs)[:MAX_CONTEXT])
        return history, prompts

    def get_gtk_widget(self, codeblock: str, lang: str) -> Gtk.Widget | None:
        """Build a clickable website button; metadata is filled in a thread."""
        website_url = codeblock

        button = WebsiteButton(website_url)
        button.connect("clicked", self.open_website)
        # Fetch title/description/favicon off the main loop
        threading.Thread(target=self.get_article, args=(button,)).start()
        return button

    def open_website(self, button: WebsiteButton):
        """Open the button's URL, in-app or externally per user settings."""
        self.ui_controller.open_link(button.url, False, not self.settings.get_boolean("external-browser"))

    def restore_gtk_widget(self, codeblock: str, lang: str) -> Gtk.Widget | None:
        return super().restore_gtk_widget(codeblock, lang)

    def get_article_content(self, url: str):
        """Return a parsed WebsiteScraper for url, using the cache when possible."""
        if url in self.caches:
            return self.caches[url]
        else:
            scraper = WebsiteScraper(url)
            scraper.parse_article()
            self.caches[url] = scraper
            return scraper

    def get_article(self, button: WebsiteButton):
        """Populate the button's title, description and favicon (worker thread)."""
        article = self.get_article_content(button.url)
        title = article.get_title()
        favicon = article.get_favicon()
        description = article.get_description()[:100]
        def update_button():
            button.title.set_text(title)
            button.description.set_text(description)
        # Widget updates must happen on the GTK main loop
        GLib.idle_add(update_button)
        load_image_with_callback(favicon, lambda pixbuf_loader : button.icon.set_from_pixbuf(pixbuf_loader.get_pixbuf()))
91 |
92 |
93 |
--------------------------------------------------------------------------------
/src/ui/mini_window.py:
--------------------------------------------------------------------------------
1 | from gi.repository import Gtk, GLib, Adw, Gdk
2 |
class MiniWindow(Gtk.Window):
    """Compact floating window that temporarily hosts the main window's chat panel.

    The chat widget is reparented from the main window into this window and
    moved back when the window closes; meanwhile a placeholder is shown in the
    main window. The window height tracks the chat content size with a small
    eased animation.
    """

    def __init__(self, application, main_window, **kwargs):
        super().__init__(**kwargs)
        self.main_window = main_window
        self.set_application(application)
        self.set_default_size(500, 100)
        self.set_title(_("Newelle"))
        self.set_decorated(False)
        self.add_css_class("mini-window")

        self.main_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        self.set_child(self.main_box)

        # Placeholder shown in the main window while the chat lives here
        self.placeholder = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        self.placeholder.set_valign(Gtk.Align.CENTER)
        self.placeholder.set_halign(Gtk.Align.CENTER)
        self.placeholder.set_vexpand(True)
        placeholder_label = Gtk.Label(label=_("Chat is opened in mini window"))
        placeholder_label.add_css_class("dim-label")
        self.placeholder.append(placeholder_label)

        # Borrow the chat panel from the main window, if it exists
        self.chat_panel = None
        if hasattr(main_window, 'secondary_message_chat_block'):
            self.chat_panel = main_window.secondary_message_chat_block
            if self.chat_panel.get_parent():
                # Remember where to reattach the panel on close
                self.original_parent = self.chat_panel.get_parent()
                self.chat_panel.unparent()

                # NOTE(review): unparenting children while iterating the
                # container can skip siblings — confirm this is intentional
                for child in self.original_parent:
                    if isinstance(child, Gtk.Box) and child != self.chat_panel:
                        child.unparent()

                self.original_parent.append(self.placeholder)

            self.main_box.append(self.chat_panel)

        # Height-animation state (pixels)
        self.target_height = 100
        self.current_height = 100
        self.is_animating = False

        key_controller = Gtk.EventControllerKey()
        key_controller.connect('key-pressed', self._on_key_pressed)
        self.add_controller(key_controller)
        self.connect('close-request', self._on_close_request)
        # Poll the main window's content size to keep this window sized to fit
        GLib.timeout_add(100, self._check_size)

    def _on_key_pressed(self, controller, keyval, keycode, state):
        # Escape closes the mini window
        if keyval == Gdk.KEY_Escape:
            self.close()
            return True
        return False

    def _on_close_request(self, *args):
        # Return the chat panel to its original parent in the main window
        if self.chat_panel:
            if self.chat_panel.get_parent():
                self.chat_panel.unparent()

            if self.placeholder.get_parent():
                self.placeholder.unparent()

            if hasattr(self, 'original_parent'):
                self.original_parent.append(self.chat_panel)

            self.chat_panel = None
            self.original_parent = None

        # False lets the window actually close
        return False

    def _check_size(self):
        # Sum the heights of the chat-related blocks that exist on main_window
        total_height = sum(
            getattr(self.main_window, block).get_allocated_height()
            for block in [
                'chat_list_block',
                'input_box',
                'chat_controls_entry_block'
            ]
            if hasattr(self.main_window, block)
        )

        # Clamp the window height to [100, 500] pixels
        target = min(max(total_height, 100), 500)

        # Only (re)start the animation for noticeable changes (> 5 px)
        if abs(target - self.target_height) > 5:
            self.target_height = target
            if not self.is_animating:
                self.is_animating = True
                GLib.timeout_add(16, self._animate_size)
        return True

    def _animate_size(self):
        # Exponential approach towards target_height; snap when within 1 px
        diff = self.target_height - self.current_height
        if abs(diff) < 1:
            self.current_height = self.target_height
            self.is_animating = False
            self.set_default_size(500, int(self.current_height))
            return False

        self.current_height += diff * 0.3
        self.set_default_size(500, int(self.current_height))
        return True
102 |
--------------------------------------------------------------------------------
/src/ui/widgets/terminal_dialog.py:
--------------------------------------------------------------------------------
1 | import gi
2 | from gi.repository import Gtk, Adw, GLib, Pango, Gdk
3 | import sys
4 |
5 | from gi.repository.GObject import GObject
6 | if sys.platform != 'win32':
7 | gi.require_version('Vte', '3.91')
8 | from gi.repository import Vte
9 |
if sys.platform != 'win32':
    class Terminal(Vte.Terminal):
        """VTE-backed terminal widget that runs the given command."""

        def __init__(self, script: list):
            super().__init__(css_classes=["terminal"])
            self.set_font(Pango.FontDescription.from_string("Monospace 12"))
            self.set_clear_background(False)
            # Spawn the command on a fresh PTY in the current directory
            pty = Vte.Pty.new_sync(Vte.PtyFlags.DEFAULT, None)
            self.set_pty(pty)
            pty.spawn_async(
                GLib.get_current_dir(),
                script,
                None,
                GLib.SpawnFlags.DEFAULT,
                None,
                None,
                -1,
                None,
                None,
                None
            )
            key_controller = Gtk.EventControllerKey()
            key_controller.connect("key-pressed", self.on_key_press)
            self.add_controller(key_controller)

        def on_key_press(self, controller, keyval, keycode, state):
            """Handle clipboard shortcuts; return True to consume the event."""
            ctrl = state & Gdk.ModifierType.CONTROL_MASK
            shift = state & Gdk.ModifierType.SHIFT_MASK
            # NOTE(review): plain Ctrl+C is consumed for copy, so SIGINT never
            # reaches the child process; the unused `shift` suggests
            # Ctrl+Shift+C/V may have been intended — confirm before changing
            if ctrl and keyval == Gdk.KEY_c:
                self.copy_clipboard()
                return True
            elif ctrl and keyval == Gdk.KEY_v:
                self.paste_clipboard()
                return True
            return False

        def get_output(self):
            """Return the currently visible terminal text."""
            txt = self.get_text_format(Vte.Format.TEXT)
            return txt
else:
    class Terminal(Gtk.Box):
        """Fallback stub for platforms without VTE support (e.g. Windows)."""

        def __init__(self, script: list):
            # The Gtk.Box must be initialized before children can be appended;
            # the original skipped this, leaving the widget unconstructed
            super().__init__()
            self.append(Gtk.Label(label="Terminal not supported"))

        def get_output(self):
            """No terminal on this platform, so there is no output."""
            return ""
54 |
class TerminalDialog(Adw.Dialog):
    """Dialog hosting a Terminal widget.

    Closing is intercepted: the user is asked whether the visible terminal
    output should be handed to the registered output callback (for the LLM)
    or discarded.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.set_title("Terminal")
        self.connect("close-attempt", self.closing_terminal)
        # Block direct closing so closing_terminal can ask for confirmation
        self.set_can_close(False)
        # Receives the terminal text (or None) once the dialog is closed
        self.output_func = lambda x: x

        # Content layout: header bar on top, scrollable terminal below
        view = Adw.ToolbarView()
        view.add_css_class("osd")
        view.add_top_bar(Adw.HeaderBar())

        self.terminal_scroller = Gtk.ScrolledWindow(
            propagate_natural_height=True,
            propagate_natural_width=True
        )
        view.set_content(self.terminal_scroller)
        self.set_child(view)

    def load_terminal(self, command: list[str]):
        """Create a Terminal running `command` and embed it in the dialog."""
        self.set_terminal(Terminal(command))

    def save_output_func(self, output_func):
        """Register the callback that receives the terminal output on close."""
        self.output_func = output_func

    def set_terminal(self, terminal):
        """Embed an existing terminal widget in the dialog."""
        self.terminal = terminal
        self.terminal_scroller.set_child(terminal)

    def close_window(self, dialog, response):
        """React to the confirmation dialog: close, then deliver the output."""
        self.set_can_close(True)
        self.close()
        if response == "save":
            self.output_func(self.terminal.get_output())
        else:
            self.output_func(None)

    def closing_terminal(self, *args):
        """Intercept close attempts; ask whether to send the output to the LLM."""
        if self.get_can_close():
            return False
        dialog = Adw.AlertDialog(body="Do you want to send the output of the terminal to the LLM to get a response?\nNote: Only the visible text will be sent as response", title="Send output?")
        dialog.add_response("save", "Send output")
        dialog.add_response("close", "Discard output")
        dialog.set_response_appearance("close", Adw.ResponseAppearance.DESTRUCTIVE)
        dialog.connect("response", self.close_window)
        dialog.present()
        return True
108 |
--------------------------------------------------------------------------------
/data/images/extension.svg:
--------------------------------------------------------------------------------
1 |
34 |
--------------------------------------------------------------------------------
/src/handlers/llm/newelle_handler.py:
--------------------------------------------------------------------------------
1 | import json
2 | from typing import Any, Callable
3 |
4 | from .llm import LLMHandler
5 | from ...utility.system import open_website
6 | from ...utility import convert_history_openai
7 |
class NewelleAPIHandler(LLMHandler):
    """LLM handler for the demo Newelle API (OpenAI-compatible streaming endpoint)."""

    key = "newelle"
    url = "https://llm.nyarchlinux.moe"
    api_key = "newelle"
    error_message = """Error calling Newelle API. Please note that Newelle API is **just for demo purposes.**\n\nTo know how to use a more reliable LLM [read our guide to llms](https://github.com/qwersyk/newelle/wiki/User-guide-to-the-available-LLMs). \n\nError: """

    def get_extra_settings(self) -> list:
        """Describe the handler's settings rows shown in the preferences UI."""
        return [
            {
                "key": "privacy",
                "title": _("Privacy Policy"),
                "description": _("Open privacy policy website"),
                "type": "button",
                "icon": "internet-symbolic",
                "callback": lambda button: open_website("https://groq.com/privacy-policy/"),
                "default": True,
            },
            {
                "key": "streaming",
                "title": _("Message Streaming"),
                "description": _("Gradually stream message output"),
                "type": "toggle",
                "default": True,
            },
        ]

    def supports_vision(self) -> bool:
        """The demo API has a /vision endpoint for image-bearing prompts."""
        return True

    def generate_text(self, prompt: str, history: list[dict[str, str]] = [], system_prompt: list[str] = []) -> str:
        """Non-streaming generation: delegate to the streaming path and return the full text."""
        return self.generate_text_stream(prompt, history, system_prompt)

    def generate_text_stream(self, prompt: str, history: list[dict[str, str]] = [], system_prompt: list[str] = [], on_update: Callable[[str], Any] = lambda _: None, extra_args : list = []) -> str:
        """Stream a completion from the Newelle API.

        Args:
            prompt: the user message to send.
            history: prior chat messages ({"User": ..., "Message": ...} dicts).
            system_prompt: system prompt strings.
            on_update: called with the partial text (plus *extra_args) as chunks arrive.
            extra_args: extra positional arguments forwarded to on_update.

        Returns:
            The full generated text, stripped.

        Raises:
            Exception: on any network/HTTP/parse failure, wrapped with error_message.
        """
        import requests

        # Route to the endpoint matching the request type
        if prompt.startswith("```image") or any(message.get("Message", "").startswith("```image") and message["User"] == "User" for message in history):
            url = self.url + "/vision"
        elif prompt.startswith("/chatname"):
            prompt = prompt.replace("/chatname", "")
            url = self.url + "/small"
        else:
            url = self.url
        # Build a new list instead of appending: mutating the parameter would
        # leak the prompt into the caller's history (and into the shared
        # mutable default) on every call
        history = history + [{"User": "User", "Message": prompt}]
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }
        data = {
            "model": "llama",
            "messages": convert_history_openai(history, system_prompt, True),
            "stream": True
        }

        try:
            response = requests.post(url + "/chat/completions", headers=headers, json=data, stream=True)
            if response.status_code != 200:
                raise Exception("Rate limit reached or servers down")
            full_message = ""
            prev_message = ""
            # Server-sent events: each payload line is "data: {json}" and the
            # stream terminates with "data: [DONE]"
            for line in response.iter_lines():
                if line:
                    decoded_line = line.decode('utf-8')
                    if decoded_line.startswith("data: "):
                        if decoded_line == "data: [DONE]":
                            break
                        json_data = json.loads(decoded_line[6:])
                        if "choices" in json_data and len(json_data["choices"]) > 0:
                            delta = json_data["choices"][0]["delta"]
                            if "content" in delta:
                                full_message += delta["content"]
                                args = (full_message.strip(), ) + tuple(extra_args)
                                # Throttle callbacks: only fire when at least
                                # two new characters arrived since last chunk
                                if len(full_message) - len(prev_message) > 1:
                                    on_update(*args)
                                prev_message = full_message
            return full_message.strip()
        except Exception as e:
            raise Exception(self.error_message + " " + str(e)) from e


    def generate_chat_name(self, request_prompt:str = "") -> str | None:
        """Generate name of the current chat

        Args:
            request_prompt (str, optional): Extra prompt to generate the name. Defaults to None.

        Returns:
            str: name of the chat
        """
        # The /chatname prefix routes the request to the "/small" endpoint
        request_prompt = "/chatname" + request_prompt
        return super().generate_chat_name(request_prompt)
98 |
--------------------------------------------------------------------------------