├── tests
└── __init__.py
├── elia_chat
├── __init__.py
├── database
│ ├── __init__.py
│ ├── database.py
│ ├── converters.py
│ ├── import_chatgpt.py
│ └── models.py
├── constants.py
├── launch_args.py
├── runtime_config.py
├── time_display.py
├── screens
│ ├── rename_chat_screen.py
│ ├── chat_screen.py
│ ├── chat_details.py
│ ├── home_screen.py
│ └── help_screen.py
├── locations.py
├── widgets
│ ├── agent_is_typing.py
│ ├── welcome.py
│ ├── token_analysis.py
│ ├── app_header.py
│ ├── prompt_input.py
│ ├── chat_header.py
│ ├── chat_options.py
│ ├── chat_list.py
│ ├── chat.py
│ └── chatbox.py
├── models.py
├── __main__.py
├── chats_manager.py
├── app.py
├── themes.py
├── config.py
└── elia.scss
├── .python-version
├── .vscode
└── settings.json
├── .pre-commit-config.yaml
├── pyproject.toml
├── .gitignore
├── README.md
└── LICENSE
/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/elia_chat/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.python-version:
--------------------------------------------------------------------------------
1 | 3.11
2 |
--------------------------------------------------------------------------------
/elia_chat/database/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/elia_chat/constants.py:
--------------------------------------------------------------------------------
# How long, in seconds, error notifications remain visible on screen.
ERROR_NOTIFY_TIMEOUT_SECS = 15
2 |
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "python.analysis.include": [
    "elia_chat/**/*.py"
  ]
5 | }
6 |
--------------------------------------------------------------------------------
/elia_chat/launch_args.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseModel
2 |
3 |
class QuickLaunchArgs(BaseModel):
    """Arguments for launching straight into a one-shot prompt."""

    # The prompt text to send immediately on startup.
    launch_prompt: str
    # Name of the model the prompt should be sent to.
    launch_prompt_model_name: str
7 |
--------------------------------------------------------------------------------
/elia_chat/runtime_config.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseModel, ConfigDict
2 |
3 | from elia_chat.config import EliaChatModel
4 |
5 |
class RuntimeConfig(BaseModel):
    """Immutable snapshot of the settings adjustable while the app runs.

    Frozen so updates are made by replacing the whole object rather than
    mutating it in place.
    """

    model_config = ConfigDict(frozen=True)

    # The model currently used for new messages.
    selected_model: EliaChatModel
    # The system prompt prepended to new conversations.
    system_prompt: str
11 |
--------------------------------------------------------------------------------
/elia_chat/time_display.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timezone
2 |
3 |
def format_timestamp(dt: datetime) -> str:
    """Render *dt* in the local timezone as a human-readable string.

    Args:
        dt: The datetime object to format.

    Returns:
        The string timestamp in the format "%Y-%m-%d %H:%M:%S".
    """
    return convert_to_local(dt).strftime("%Y-%m-%d %H:%M:%S")
15 |
16 |
def convert_to_local(utc_dt: datetime) -> datetime:
    """Given a UTC datetime, return a datetime in the local timezone.

    Args:
        utc_dt: A datetime in UTC. Naive values are assumed to be UTC.

    Returns:
        The equivalent datetime expressed in the system's local timezone.
    """
    if utc_dt.tzinfo is None:
        # Per this function's contract a naive input is UTC; tag it
        # explicitly, because astimezone() would otherwise interpret a
        # naive datetime as *local* time and skew the conversion.
        utc_dt = utc_dt.replace(tzinfo=timezone.utc)
    local_tz = datetime.now().astimezone().tzinfo
    return utc_dt.astimezone(local_tz)
23 |
24 |
def get_local_timezone():
    """Return the tzinfo for the machine's local timezone."""
    now_utc = datetime.now(timezone.utc)
    return now_utc.astimezone().tzinfo
27 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | # See https://pre-commit.com for more information
2 | # See https://pre-commit.com/hooks.html for more hooks
3 | repos:
4 | - repo: https://github.com/pre-commit/pre-commit-hooks
5 | rev: v4.4.0
6 | hooks:
7 | - id: check-added-large-files
8 | - id: check-ast
9 | - id: check-builtin-literals
10 | - id: check-case-conflict
11 | - id: check-docstring-first
12 | - id: check-merge-conflict
13 | - id: check-toml
14 | - id: debug-statements
15 | - id: end-of-file-fixer
16 | - id: forbid-new-submodules
17 | - id: mixed-line-ending
18 | - id: trailing-whitespace
19 | exclude_types: [ svg ]
20 | - repo: https://github.com/pre-commit/pygrep-hooks
21 | rev: v1.10.0
22 | hooks:
23 | - id: python-check-mock-methods
24 | - id: python-no-eval
25 | - id: python-no-log-warn
26 | - id: python-use-type-annotations
27 |
--------------------------------------------------------------------------------
/elia_chat/screens/rename_chat_screen.py:
--------------------------------------------------------------------------------
1 | from textual import on
2 | from textual.app import ComposeResult
3 | from textual.binding import Binding
4 | from textual.containers import Vertical
5 | from textual.screen import ModalScreen
6 | from textual.widgets import Input
7 |
8 |
class RenameChat(ModalScreen[str]):
    """Modal prompting the user for a new chat title.

    Dismissed with the entered text, which is delivered to the
    ``push_screen`` callback supplied by the caller.
    """

    BINDINGS = [
        Binding("escape", "app.pop_screen", "Cancel", key_display="esc"),
        Binding("enter", "app.pop_screen", "Save"),
    ]

    def compose(self) -> ComposeResult:
        with Vertical():
            title_input = Input(placeholder="Enter a title...")
            # Key hints shown inside the input's border.
            title_input.border_subtitle = (
                "[[white]enter[/]] Save [[white]esc[/]] Cancel"
            )
            yield title_input

    @on(Input.Submitted)
    def close_screen(self, event: Input.Submitted) -> None:
        # NOTE(review): assumes Input.Submitted fires on "enter" before the
        # "Save" binding pops the screen — confirm against Textual's
        # binding/message ordering.
        self.dismiss(event.value)
26 |
--------------------------------------------------------------------------------
/elia_chat/locations.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | from xdg_base_dirs import xdg_config_home, xdg_data_home
4 |
5 |
def _elia_directory(root: Path) -> Path:
    """Return ``root/"elia"``, creating it (and any parents) if missing."""
    path = root / "elia"
    path.mkdir(parents=True, exist_ok=True)
    return path
10 |
11 |
def data_directory() -> Path:
    """Return (possibly creating) the application data directory.

    Typically ``$XDG_DATA_HOME/elia`` (e.g. ``~/.local/share/elia``).
    """
    return _elia_directory(xdg_data_home())
15 |
16 |
def config_directory() -> Path:
    """Return (possibly creating) the application config directory.

    Typically ``$XDG_CONFIG_HOME/elia`` (e.g. ``~/.config/elia``).
    """
    return _elia_directory(xdg_config_home())
20 |
21 |
def config_file() -> Path:
    """Return the path of the TOML config file (config dir is created)."""
    return config_directory() / "config.toml"
24 |
25 |
def theme_directory() -> Path:
    """Return (possibly creating) the themes directory."""
    themes = data_directory() / "themes"
    themes.mkdir(parents=True, exist_ok=True)
    return themes
31 |
--------------------------------------------------------------------------------
/elia_chat/widgets/agent_is_typing.py:
--------------------------------------------------------------------------------
1 | from textual.app import ComposeResult
2 | from textual.containers import Vertical
3 | from textual.reactive import Reactive, reactive
4 | from textual.widgets import LoadingIndicator, Label
5 |
6 |
class ResponseStatus(Vertical):
    """
    A widget that displays the status of the response from the agent.
    """

    # recompose=True rebuilds the children on change so the Label below
    # always shows the current message.
    message: Reactive[str] = reactive("Agent is responding", recompose=True)

    def compose(self) -> ComposeResult:
        yield Label(f" {self.message}")
        yield LoadingIndicator()

    def set_awaiting_response(self) -> None:
        """Switch to the 'request sent, nothing received yet' state."""
        self.message = "Awaiting response"
        self.add_class("-awaiting-response")
        self.remove_class("-agent-responding")

    def set_agent_responding(self) -> None:
        """Switch to the 'response is streaming in' state."""
        self.message = "Agent is responding"
        self.add_class("-agent-responding")
        self.remove_class("-awaiting-response")
27 |
--------------------------------------------------------------------------------
/elia_chat/database/database.py:
--------------------------------------------------------------------------------
1 | from contextlib import asynccontextmanager
2 | from typing import AsyncGenerator
3 | from sqlmodel import SQLModel
4 | from elia_chat.locations import data_directory
5 |
6 | from sqlmodel.ext.asyncio.session import AsyncSession
7 | from sqlalchemy.ext.asyncio import create_async_engine, async_sessionmaker
8 |
9 |
# The SQLite file lives in the per-user data directory.
sqlite_file_name = data_directory() / "elia.sqlite"
# Async engine via the aiosqlite driver.
sqlite_url = f"sqlite+aiosqlite:///{sqlite_file_name}"
engine = create_async_engine(sqlite_url)
13 |
14 |
async def create_database():
    """Create all SQLModel tables registered on the shared metadata."""
    async with engine.begin() as conn:
        # TODO - check if exists, use Alembic.
        await conn.run_sync(SQLModel.metadata.create_all)
19 |
20 |
@asynccontextmanager
async def get_session() -> AsyncGenerator[AsyncSession, None]:
    """Yield an AsyncSession bound to the module-level engine."""
    # expire_on_commit=False keeps ORM objects readable after commit.
    async_session = async_sessionmaker(
        engine, class_=AsyncSession, expire_on_commit=False
    )
    async with async_session() as session:
        yield session
28 |
--------------------------------------------------------------------------------
/elia_chat/widgets/welcome.py:
--------------------------------------------------------------------------------
1 | """Show a welcome box on the home page when the user has
2 | no chat history.
3 | """
4 |
5 | from rich.console import RenderableType
6 | from textual.widgets import Static
7 |
8 |
class Welcome(Static):
    """Static welcome panel shown on the home page for new users."""

    # Rendered verbatim as Rich markup; the [@click=...] spans target the
    # action methods defined below.
    MESSAGE = """
To get started, type a message in the box at the top of the
screen and press [b u]ctrl+j[/] or [b u]alt+enter[/] to send it.

Change the model and system prompt by pressing [b u]ctrl+o[/].

Make sure you've set any required API keys first (e.g. [b]OPENAI_API_KEY[/])!

If you have any issues or feedback, please let me know [@click='open_issues'][b r]on GitHub[/][/]!

Finally, please consider starring the repo and sharing it with your friends and colleagues!

[@click='open_repo'][b r]https://github.com/darrenburns/elia[/][/]
"""

    BORDER_TITLE = "Welcome to Elia!"

    def render(self) -> RenderableType:
        return self.MESSAGE

    def _action_open_repo(self) -> None:
        # Imported lazily: only needed if the user actually clicks.
        import webbrowser

        webbrowser.open("https://github.com/darrenburns/elia")

    def _action_open_issues(self) -> None:
        import webbrowser

        webbrowser.open("https://github.com/darrenburns/elia/issues")
39 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "elia_chat"
3 | version = "1.10.0"
4 | description = "A powerful terminal user interface for interacting with large language models."
5 | authors = [
6 | { name = "Darren Burns", email = "darrenb900@gmail.com" }
7 | ]
8 | dependencies = [
9 | "textual[syntax]==0.79.1",
10 | "sqlmodel>=0.0.9",
11 | "humanize>=4.6.0",
12 | "click>=8.1.6",
13 | "xdg-base-dirs>=6.0.1",
14 | "aiosqlite>=0.20.0",
15 | "click-default-group>=1.2.4",
16 | "greenlet>=3.0.3",
17 | "google-generativeai>=0.5.3",
18 | "pyperclip>=1.8.2",
19 | "litellm>=1.37.19",
20 | "pydantic>=2.9.0",
21 | ]
22 | readme = "README.md"
23 | requires-python = ">= 3.11"
24 |
25 | [project.scripts]
26 | elia = "elia_chat.__main__:cli"
27 |
28 | [build-system]
29 | requires = ["hatchling"]
30 | build-backend = "hatchling.build"
31 |
32 | [tool.uv]
33 | managed = true
34 | dev-dependencies = [
35 | "black>=23.3.0",
36 | "mypy>=1.3.0",
37 | "types-peewee>=3.16.0.0",
38 | "pre-commit>=3.3.2",
39 | "textual-dev>=1.0.1",
40 | "pyinstrument>=4.6.2",
41 | ]
42 |
[tool.uv.sources]
# NOTE: development-only override pointing at a local Textual checkout one
# directory up; resolution fails for anyone without ../textual. Remove (or
# comment out) before publishing a release.
textual = { path = "../textual", editable = true }
45 |
46 | [tool.mypy]
47 | ignore_missing_imports = true
48 |
49 | [tool.hatch.metadata]
50 | allow-direct-references = true
51 |
52 | [tool.hatch.build.targets.wheel]
53 | packages = ["elia_chat"]
54 |
--------------------------------------------------------------------------------
/elia_chat/widgets/token_analysis.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from dataclasses import dataclass
4 | from itertools import cycle
5 |
6 | from rich.console import RenderableType, Console, ConsoleOptions, RenderResult
7 | from rich.style import Style
8 | from rich.text import Text
9 | from textual.widget import Widget
10 | from tiktoken import Encoding
11 |
12 |
@dataclass
class TokenAnalysisRenderable:
    """Rich renderable that colour-codes each token of a token stream."""

    tokens: list[int]
    encoder: Encoding

    def __post_init__(self):
        # One bytes object per token, in order.
        self.parts = self.encoder.decode_tokens_bytes(self.tokens)

    def __rich_console__(
        self, console: Console, options: ConsoleOptions
    ) -> RenderResult:
        # Cycle three styles so adjacent tokens are visually distinguishable.
        colours = cycle([Style.parse("red"), Style.parse("green"), Style.parse("blue")])
        # errors="replace": a token boundary can fall inside a multi-byte
        # UTF-8 sequence, so strict decoding of an individual token's bytes
        # could raise UnicodeDecodeError mid-render.
        parts = [
            Text(part.decode("utf-8", errors="replace"), next(colours))
            for part in self.parts
        ]
        text = Text("").join(parts)
        lines = text.wrap(console, width=options.max_width)
        yield lines
29 |
30 |
class TokenAnalysis(Widget):
    """Widget wrapper that renders a TokenAnalysisRenderable."""

    def __init__(
        self,
        tokens: list[int],
        encoder: Encoding,
        name: str | None = None,
        id: str | None = None,
        classes: str | None = None,
        disabled: bool = False,
    ) -> None:
        super().__init__(
            name=name,
            id=id,
            classes=classes,
            disabled=disabled,
        )
        # Token ids and the encoder used to decode them back to bytes.
        self.tokens = tokens
        self.encoder = encoder

    def render(self) -> RenderableType:
        return TokenAnalysisRenderable(self.tokens, self.encoder)
52 |
--------------------------------------------------------------------------------
/elia_chat/database/converters.py:
--------------------------------------------------------------------------------
1 | from typing import TYPE_CHECKING, Any
2 |
3 |
4 | from elia_chat.database.models import ChatDao, MessageDao
5 | from elia_chat.models import ChatData, ChatMessage, get_model
6 |
7 | if TYPE_CHECKING:
8 | from litellm.types.completion import ChatCompletionUserMessageParam
9 |
10 |
def chat_message_to_message_dao(
    message: ChatMessage,
    chat_id: int,
) -> MessageDao:
    """Convert a ChatMessage to a SQLModel message."""
    # meta is currently always empty for messages created in-app.
    meta: dict[str, Any] = {}
    content = message.message.get("content", "")
    return MessageDao(
        chat_id=chat_id,
        role=message.message["role"],
        # Non-string content (e.g. tool calls) is stored as an empty string.
        content=content if isinstance(content, str) else "",
        timestamp=message.timestamp,
        model=message.model.lookup_key,
        meta=meta,
    )
26 |
27 |
def chat_dao_to_chat_data(chat_dao: ChatDao) -> ChatData:
    """Convert the SQLModel chat to a ChatData."""
    model = chat_dao.model
    return ChatData(
        id=chat_dao.id,
        title=chat_dao.title,
        model=get_model(model),
        # started_at is already Optional; the previous
        # `x if x else None` conditional was a no-op (datetimes are
        # always truthy).
        create_timestamp=chat_dao.started_at,
        messages=[
            message_dao_to_chat_message(message, model) for message in chat_dao.messages
        ],
    )
40 |
41 |
def message_dao_to_chat_message(message_dao: MessageDao, model: str) -> ChatMessage:
    """Convert the SQLModel message to a ChatMessage."""
    # Typed as a user message param, but the role is carried over as-is
    # from the database row (it may equally be "assistant" or "system").
    message: ChatCompletionUserMessageParam = {
        "content": message_dao.content,
        "role": message_dao.role,  # type: ignore
    }

    return ChatMessage(
        message=message,
        timestamp=message_dao.timestamp,
        model=get_model(model),
    )
54 |
--------------------------------------------------------------------------------
/elia_chat/widgets/app_header.py:
--------------------------------------------------------------------------------
1 | from typing import TYPE_CHECKING, cast
2 | from importlib.metadata import version
3 | from rich.markup import escape
4 | from textual.app import ComposeResult
5 | from textual.containers import Horizontal, Vertical
6 | from textual.signal import Signal
7 | from textual.widget import Widget
8 | from textual.widgets import Label
9 |
10 | from rich.text import Text
11 | from elia_chat.config import EliaChatModel
12 | from elia_chat.models import get_model
13 | from elia_chat.runtime_config import RuntimeConfig
14 |
15 |
16 | if TYPE_CHECKING:
17 | from elia_chat.app import Elia
18 |
19 |
class AppHeader(Widget):
    """Top-of-screen header: app title/version plus the selected model.

    Subscribes to the runtime-config signal so the model label stays in
    sync when the user switches models.
    """

    COMPONENT_CLASSES = {"app-title", "app-subtitle"}

    def __init__(
        self,
        config_signal: Signal[RuntimeConfig],
        name: str | None = None,
        id: str | None = None,
        classes: str | None = None,
        disabled: bool = False,
    ) -> None:
        super().__init__(name=name, id=id, classes=classes, disabled=disabled)
        self.config_signal: Signal[RuntimeConfig] = config_signal
        self.elia = cast("Elia", self.app)

    def on_mount(self) -> None:
        # Re-render the model label whenever the runtime config changes.
        def on_config_change(config: RuntimeConfig) -> None:
            self._update_selected_model(config.selected_model)

        self.config_signal.subscribe(self, on_config_change)

    def compose(self) -> ComposeResult:
        with Horizontal():
            with Vertical(id="cl-header-container"):
                yield Label(
                    Text("Elia") + Text(" v" + version("elia-chat"), style="dim"),
                    id="elia-title",
                )
                # Prefer the id; fall back to the name for lookup.
                model_name_or_id = (
                    self.elia.runtime_config.selected_model.id
                    or self.elia.runtime_config.selected_model.name
                )
                model = get_model(model_name_or_id, self.elia.launch_config)
                yield Label(self._get_selected_model_link_text(model), id="model-label")

    def _get_selected_model_link_text(self, model: EliaChatModel) -> str:
        """Markup linking the model name to the options screen."""
        return f"[@click=screen.options]{escape(model.display_name or model.name)}[/]"

    def _update_selected_model(self, model: EliaChatModel) -> None:
        # (Removed a leftover debug `print(...)` of the runtime config here:
        # writing to stdout corrupts the display of a running Textual app.)
        model_label = self.query_one("#model-label", Label)
        model_label.update(self._get_selected_model_link_text(model))
62 |
--------------------------------------------------------------------------------
/elia_chat/screens/chat_screen.py:
--------------------------------------------------------------------------------
1 | from textual import on, log
2 | from textual.app import ComposeResult
3 | from textual.binding import Binding
4 | from textual.screen import Screen
5 | from textual.widgets import Footer
6 |
7 | from elia_chat.chats_manager import ChatsManager
8 | from elia_chat.widgets.agent_is_typing import ResponseStatus
9 | from elia_chat.widgets.chat import Chat
10 | from elia_chat.models import ChatData
11 |
12 |
class ChatScreen(Screen[None]):
    """Screen hosting a single conversation.

    Mediates between the Chat widget and the ChatsManager: toggles the
    response-status indicator while the agent is replying, and persists
    completed agent messages to the database.
    """

    AUTO_FOCUS = "ChatPromptInput"
    BINDINGS = [
        Binding(
            key="escape",
            action="app.focus('prompt')",
            description="Focus prompt",
            key_display="esc",
            tooltip="Return focus to the prompt input.",
        ),
    ]

    def __init__(
        self,
        chat_data: ChatData,
    ):
        super().__init__()
        # The conversation this screen displays and appends to.
        self.chat_data = chat_data
        self.chats_manager = ChatsManager()

    def compose(self) -> ComposeResult:
        # NOTE(review): ResponseStatus is assumed to be mounted inside Chat;
        # the query_one calls below would raise otherwise.
        yield Chat(self.chat_data)
        yield Footer()

    @on(Chat.NewUserMessage)
    def new_user_message(self, event: Chat.NewUserMessage) -> None:
        """Handle a new user message."""
        # Block further submissions until the agent finishes responding.
        self.query_one(Chat).allow_input_submit = False
        response_status = self.query_one(ResponseStatus)
        response_status.set_awaiting_response()
        response_status.display = True

    @on(Chat.AgentResponseStarted)
    def start_awaiting_response(self) -> None:
        """Prevent sending messages because the agent is typing."""
        response_status = self.query_one(ResponseStatus)
        response_status.set_agent_responding()
        response_status.display = True

    @on(Chat.AgentResponseComplete)
    async def agent_response_complete(self, event: Chat.AgentResponseComplete) -> None:
        """Allow the user to send messages again."""
        self.query_one(ResponseStatus).display = False
        self.query_one(Chat).allow_input_submit = True
        log.debug(
            f"Agent response complete. Adding message "
            f"to chat_id {event.chat_id!r}: {event.message}"
        )
        if self.chat_data.id is None:
            raise RuntimeError("Chat has no ID. This is likely a bug in Elia.")

        # Persist the completed agent message.
        await self.chats_manager.add_message_to_chat(
            chat_id=self.chat_data.id, message=event.message
        )
67 |
--------------------------------------------------------------------------------
/elia_chat/models.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from dataclasses import dataclass
4 | from datetime import UTC, datetime
5 | from typing import TYPE_CHECKING
6 |
7 |
8 | from elia_chat.config import LaunchConfig, EliaChatModel
9 |
10 | from textual._context import active_app
11 |
12 | if TYPE_CHECKING:
13 | from litellm.types.completion import ChatCompletionMessageParam
14 |
15 |
class UnknownModel(EliaChatModel):
    """Placeholder returned by get_model when no configured model matches."""

    pass
18 |
19 |
def get_model(
    model_id_or_name: str, config: LaunchConfig | None = None
) -> EliaChatModel:
    """Given the id or name of a model as a string, return the EliaChatModel.

    Models are looked up by ID first, then by name. When neither matches,
    an UnknownModel placeholder is returned.
    """
    if config is None:
        config = active_app.get().launch_config
    available = config.all_models
    found = {model.id: model for model in available}.get(model_id_or_name)
    if found is None:
        found = {model.name: model for model in available}.get(model_id_or_name)
    if found is not None:
        return found
    return UnknownModel(id="unknown", name="unknown model")
37 |
38 |
@dataclass
class ChatMessage:
    """A single message plus the model/timestamp context it was sent with."""

    # litellm-style message dict (role/content).
    message: ChatCompletionMessageParam
    timestamp: datetime | None
    model: EliaChatModel
44 |
45 |
@dataclass
class ChatData:
    """A conversation: metadata plus its ordered message list."""

    id: int | None  # Can be None before the chat gets assigned ID from database.
    model: EliaChatModel
    title: str | None
    create_timestamp: datetime | None
    messages: list[ChatMessage]

    @property
    def short_preview(self) -> str:
        """The first user message, truncated to at most 80 characters."""
        first_message = self.first_user_message.message

        if "content" in first_message:
            first_message = first_message["content"]
            # We have content, but it's not guaranteed to be a string quite yet.
            # In the case of tool calls or image generation requests, we can
            # have non-string types here. We're not handling/considering this atm.
            if first_message and isinstance(first_message, str):
                if len(first_message) > 77:
                    return first_message[:77] + "..."
                else:
                    return first_message

        return ""

    @property
    def system_prompt(self) -> ChatMessage:
        # Assumes messages[0] is always the system prompt.
        return self.messages[0]

    @property
    def first_user_message(self) -> ChatMessage:
        # Assumes at least two messages exist (system prompt + first user
        # message); raises IndexError otherwise.
        return self.messages[1]

    @property
    def non_system_messages(
        self,
    ) -> list[ChatMessage]:
        """All messages except the system prompt."""
        return self.messages[1:]

    @property
    def update_time(self) -> datetime:
        """Timestamp of the most recent message.

        NOTE(review): this converts to local time and then *relabels* the
        result as UTC, and assumes the last message's timestamp is not
        None — confirm both are intended before changing.
        """
        message_timestamp = self.messages[-1].timestamp
        return message_timestamp.astimezone().replace(tzinfo=UTC)
89 |
--------------------------------------------------------------------------------
/elia_chat/widgets/prompt_input.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass
2 | from textual import events, on
3 | from textual.binding import Binding
4 | from textual.reactive import reactive
5 | from textual.widgets import TextArea
6 | from textual.message import Message
7 |
8 |
class PromptInput(TextArea):
    """Markdown-highlighted text area used to compose prompt messages."""

    @dataclass
    class PromptSubmitted(Message):
        # Posted when the user submits a non-empty prompt.
        text: str
        prompt_input: "PromptInput"

    @dataclass
    class CursorEscapingTop(Message):
        # Posted when "up" is pressed with the cursor at (0, 0).
        pass

    @dataclass
    class CursorEscapingBottom(Message):
        # Posted when "down" is pressed with the cursor at the end of text.
        pass

    BINDINGS = [
        Binding("ctrl+j,alt+enter", "submit_prompt", "Send message", key_display="^j")
    ]

    # False while a response is in flight; submission is blocked then.
    submit_ready = reactive(True)

    def __init__(
        self,
        name: str | None = None,
        id: str | None = None,
        classes: str | None = None,
        disabled: bool = False,
    ) -> None:
        super().__init__(
            name=name, id=id, classes=classes, disabled=disabled, language="markdown"
        )

    def on_key(self, event: events.Key) -> None:
        # Notify ancestors when the cursor tries to leave the text area
        # vertically, so they can move focus elsewhere.
        if self.cursor_location == (0, 0) and event.key == "up":
            event.prevent_default()
            self.post_message(self.CursorEscapingTop())
            event.stop()
        elif self.cursor_at_end_of_text and event.key == "down":
            event.prevent_default()
            self.post_message(self.CursorEscapingBottom())
            event.stop()

    def watch_submit_ready(self, submit_ready: bool) -> None:
        # Visual cue (via CSS class) that submission is currently blocked.
        self.set_class(not submit_ready, "-submit-blocked")

    def on_mount(self):
        self.border_title = "Enter your [u]m[/]essage..."

    @on(TextArea.Changed)
    async def prompt_changed(self, event: TextArea.Changed) -> None:
        """Update the border hint and multiline styling as the user types."""
        text_area = event.text_area
        if text_area.text.strip() != "":
            text_area.border_subtitle = "[[white]^j[/]] Send message"
        else:
            text_area.border_subtitle = None

        text_area.set_class(text_area.wrapped_document.height > 1, "multiline")

        # TODO - when the height of the textarea changes
        # things don't appear to refresh correctly.
        # I think this may be a Textual bug.
        # The refresh below should not be required.
        self.parent.refresh()

    def action_submit_prompt(self) -> None:
        """Post a PromptSubmitted message, refusing empty/blocked sends."""
        if self.text.strip() == "":
            self.notify("Cannot send empty message!")
            return

        if self.submit_ready:
            # Capture the text, then clear the input before notifying.
            message = self.PromptSubmitted(self.text, prompt_input=self)
            self.clear()
            self.post_message(message)
        else:
            self.app.bell()
            self.notify("Please wait for response to complete.")
84 |
--------------------------------------------------------------------------------
/elia_chat/widgets/chat_header.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | from dataclasses import dataclass
3 |
4 | from rich.console import ConsoleRenderable, RichCast
5 | from rich.markup import escape
6 |
7 | from textual.app import ComposeResult
8 | from textual.message import Message
9 | from textual.widget import Widget
10 | from textual.widgets import Static
11 |
12 | from elia_chat.config import EliaChatModel
13 | from elia_chat.models import ChatData
14 | from elia_chat.screens.rename_chat_screen import RenameChat
15 |
16 |
class TitleStatic(Static):
    """Clickable chat title; clicking it opens the rename modal."""

    @dataclass
    class ChatRenamed(Message):
        # ID of the chat that was renamed.
        chat_id: int
        # Title the user entered.
        new_title: str

    def __init__(
        self,
        chat_id: int,
        renderable: ConsoleRenderable | RichCast | str = "",
        *,
        expand: bool = False,
        shrink: bool = False,
        markup: bool = True,
        name: str | None = None,
        id: str | None = None,
        classes: str | None = None,
        disabled: bool = False,
    ) -> None:
        super().__init__(
            renderable,
            expand=expand,
            shrink=shrink,
            markup=markup,
            name=name,
            id=id,
            classes=classes,
            disabled=disabled,
        )
        # NOTE(review): ChatHeader passes ChatData.id, which is Optional —
        # confirm chat_id can never be None by the time this is built.
        self.chat_id = chat_id

    def begin_rename(self) -> None:
        """Open the rename modal; the result arrives via the callback."""
        self.app.push_screen(RenameChat(), callback=self.request_chat_rename)

    def action_rename_chat(self) -> None:
        # Target of the [@click=rename_chat] markup in the title content.
        self.begin_rename()

    async def request_chat_rename(self, new_title: str) -> None:
        self.post_message(self.ChatRenamed(self.chat_id, new_title))
56 |
57 |
class ChatHeader(Widget):
    """Header shown above a chat: clickable title plus the model name."""

    def __init__(
        self,
        chat: ChatData,
        model: EliaChatModel,
        name: str | None = None,
        id: str | None = None,
        classes: str | None = None,
        disabled: bool = False,
    ) -> None:
        super().__init__(name=name, id=id, classes=classes, disabled=disabled)
        self.chat = chat
        self.model = model

    def update_header(self, chat: ChatData, model: EliaChatModel):
        """Swap in a new chat/model pair and refresh both child statics."""
        self.chat = chat
        self.model = model

        model_static = self.query_one("#model-static", Static)
        title_static = self.query_one("#title-static", Static)

        model_static.update(self.model_static_content())
        title_static.update(self.title_static_content())

    def title_static_content(self) -> str:
        """Markup for the title; clicking triggers TitleStatic's rename."""
        chat = self.chat
        content = escape(chat.title or chat.short_preview) if chat else "Empty chat"
        return f"[@click=rename_chat]{content}[/]"

    def model_static_content(self) -> str:
        """Plain-text display name of the active model."""
        model = self.model
        return escape(model.display_name or model.name) if model else "Unknown model"

    def compose(self) -> ComposeResult:
        yield TitleStatic(self.chat.id, self.title_static_content(), id="title-static")
        yield Static(self.model_static_content(), id="model-static")
94 |
--------------------------------------------------------------------------------
/elia_chat/database/import_chatgpt.py:
--------------------------------------------------------------------------------
1 | import json
2 | from datetime import datetime
3 | from pathlib import Path
4 |
5 | from rich.console import Console
6 | from rich.live import Live
7 | from rich.text import Text
8 |
9 | from elia_chat.database.database import get_session
10 | from elia_chat.database.models import MessageDao, ChatDao
11 |
12 |
async def import_chatgpt_data(file: Path) -> None:
    """Import chats from a ChatGPT ``conversations.json`` export.

    Args:
        file: Path to the exported JSON file.
    """
    console = Console()

    # ChatGPT exports are UTF-8 JSON; don't rely on the platform default.
    with open(file, "r", encoding="utf-8") as f:
        data = json.load(f)

    console.print("[green]Loaded and parsed JSON.")

    def output_progress(
        imported_count: int, total_count: int, message_count: int
    ) -> Text:
        # Yellow while in progress, green once every chat is imported.
        style = "green" if imported_count == total_count else "yellow"
        return Text.from_markup(
            f"Imported [b]{imported_count}[/] of [b]{total_count}[/] chats.\n"
            f"[b]{message_count}[/] messages in total.",
            style=style,
        )

    message_count = 0
    with Live(output_progress(0, len(data), message_count)) as live:
        async with get_session() as session:
            for chat_number, chat_data in enumerate(data, start=1):
                chat = ChatDao(
                    title=chat_data.get("title"),
                    model="gpt-3.5-turbo",
                    started_at=datetime.fromtimestamp(
                        chat_data.get("create_time", 0) or 0
                    ),
                )
                session.add(chat)
                await (
                    session.commit()
                )  # Make sure to commit so that chat.id is assigned

                for _message_id, message_data in chat_data["mapping"].items():
                    message_info = message_data.get("message")
                    if message_info:
                        metadata = message_info.get("metadata", {})
                        model = "gpt-3.5-turbo"
                        if metadata:
                            # Fall back to the default when metadata exists
                            # but carries no model_slug (previously this
                            # clobbered the default with None).
                            model = metadata.get("model_slug") or "gpt-3.5-turbo"
                            chat.model = (
                                "gpt-4-turbo" if model == "gpt-4" else "gpt-3.5-turbo"
                            )
                            session.add(chat)
                            await session.commit()

                        role = message_info["author"]["role"]
                        chat_id = chat.id
                        # NOTE(review): assumes content.parts is non-empty
                        # when present — confirm against real exports.
                        message = MessageDao(
                            chat_id=chat_id,
                            role=role,
                            content=str(message_info["content"].get("parts", [""])[0]),
                            timestamp=datetime.fromtimestamp(
                                message_info.get("create_time", 0) or 0
                            ),
                            model=model,
                            meta=metadata,
                        )
                        session.add(message)
                        message_count += 1
                        live.update(
                            output_progress(chat_number, len(data), message_count)
                        )

            await session.commit()
79 |
80 |
if __name__ == "__main__":
    # Ad-hoc manual entry point: imports a local export file when this
    # module is run directly (normal usage goes through the CLI).
    path = Path("resources/conversations.json")
    import asyncio

    asyncio.run(import_chatgpt_data(path))
86 |
--------------------------------------------------------------------------------
/elia_chat/screens/chat_details.py:
--------------------------------------------------------------------------------
1 | from datetime import timezone
2 | from typing import TYPE_CHECKING, cast
3 | import humanize
4 | from textual.app import ComposeResult
5 | from textual.binding import Binding
6 | from textual.containers import Horizontal, Vertical, VerticalScroll
7 | from textual.screen import ModalScreen
8 | from textual.widgets import Label, Markdown, Rule
9 |
10 | from elia_chat.models import ChatData
11 |
12 | if TYPE_CHECKING:
13 | from elia_chat.app import Elia
14 |
15 |
class ChatDetails(ModalScreen[None]):
    """Read-only modal screen showing metadata about a single chat."""

    BINDINGS = [
        Binding(
            "escape",
            "app.pop_screen",
            "Close",
        )
    ]

    def __init__(
        self,
        chat: ChatData,
        name: str | None = None,
        id: str | None = None,
        classes: str | None = None,
    ) -> None:
        """Store the chat whose details will be displayed."""
        super().__init__(name, id, classes)
        self.chat = chat
        self.elia = cast("Elia", self.app)

    def compose(self) -> ComposeResult:
        chat = self.chat
        with Vertical(id="container") as vs:
            vs.border_title = "Chat details"
            vs.border_subtitle = "(read only)"
            with Horizontal():
                with VerticalScroll(id="left"):
                    content = chat.system_prompt.message.get("content", "")
                    if isinstance(content, str):
                        yield Label("System prompt", classes="heading")
                        yield Markdown(content)

                yield Rule(orientation="vertical")

                with VerticalScroll(id="right"):
                    yield Label("Identifier", classes="heading")
                    # Guard on the id itself: the previous
                    # `str(chat.id) or "Unknown"` never showed "Unknown"
                    # because str(None) == "None" is truthy.
                    identifier = str(chat.id) if chat.id is not None else "Unknown"
                    yield Label(identifier, classes="datum")

                    yield Rule()

                    model = chat.model
                    yield Label("Model information", classes="heading")
                    yield Label(model.name, classes="datum")

                    if display_name := model.display_name:
                        yield Label(display_name, classes="datum")
                    if provider := model.provider:
                        yield Label(provider, classes="datum")

                    yield Rule()

                    yield Label("First message", classes="heading")
                    if chat.create_timestamp:
                        # Stored timestamps are naive; treated here as
                        # UTC — TODO confirm against the database layer.
                        create_timestamp = chat.create_timestamp.replace(
                            tzinfo=timezone.utc
                        )
                        yield Label(
                            f"{humanize.naturaltime(create_timestamp)}",
                            classes="datum",
                        )
                    else:
                        yield Label("N/A")

                    yield Rule()

                    yield Label("Updated at", classes="heading")
                    if chat.update_time:
                        # Check before calling .replace(): the previous code
                        # dereferenced chat.update_time unconditionally and
                        # would crash when it was None. Also humanize the
                        # UTC-aware value, matching "First message" above.
                        update_time = chat.update_time.replace(
                            tzinfo=timezone.utc
                        )
                        yield Label(
                            f"{humanize.naturaltime(update_time)}",
                            classes="datum",
                        )
                    else:
                        yield Label("N/A")

                    yield Rule()

                    yield Label("Message count", classes="heading")
                    # One message is excluded from the count — presumably
                    # the system prompt; verify against ChatData.messages.
                    yield Label(str(len(chat.messages) - 1), classes="datum")
95 |
--------------------------------------------------------------------------------
/elia_chat/__main__.py:
--------------------------------------------------------------------------------
1 | """
2 | Elia CLI
3 | """
4 |
5 | import asyncio
6 | import pathlib
7 | from textwrap import dedent
8 | import tomllib
9 | from typing import Any
10 |
11 | import click
12 | from click_default_group import DefaultGroup
13 |
14 | from rich.console import Console
15 |
16 | from elia_chat.app import Elia
17 | from elia_chat.config import LaunchConfig
18 | from elia_chat.database.import_chatgpt import import_chatgpt_data
19 | from elia_chat.database.database import create_database, sqlite_file_name
20 | from elia_chat.locations import config_file
21 |
22 | console = Console()
23 |
def create_db_if_not_exists() -> None:
    """Create the SQLite database file if it is not already present."""
    if sqlite_file_name.exists():
        return
    click.echo(f"Creating database at {sqlite_file_name!r}")
    asyncio.run(create_database())
28 |
def load_or_create_config_file() -> dict[str, Any]:
    """Load the TOML configuration file, creating an empty one if missing.

    Returns:
        The parsed configuration, or an empty dict when the file does not
        exist (or could not be created).
    """
    config = config_file()

    try:
        file_config = tomllib.loads(config.read_text())
    except FileNotFoundError:
        file_config = {}
        try:
            # On a fresh install the config directory may not exist yet;
            # create it so touch() can succeed.
            config.parent.mkdir(parents=True, exist_ok=True)
            config.touch()
        except OSError:
            # Best-effort: failing to create the file is non-fatal.
            pass

    return file_config
42 |
# Root command group. With DefaultGroup, running `elia` with no subcommand
# (or with unrecognized leading arguments) dispatches to the `default` command.
@click.group(cls=DefaultGroup, default="default", default_if_no_args=True)
def cli() -> None:
    """Interact with large language models using your terminal."""
46 |
@cli.command()
@click.argument("prompt", nargs=-1, type=str, required=False)
@click.option(
    "-m",
    "--model",
    type=str,
    default="",
    help="The model to use for the chat",
)
@click.option(
    "-i",
    "--inline",
    is_flag=True,
    help="Run in inline mode, without launching full TUI.",
    default=False,
)
def default(prompt: tuple[str, ...], model: str, inline: bool) -> None:
    # Fall back to a single empty prompt when none was supplied.
    joined_prompt = " ".join(prompt or ("",))
    create_db_if_not_exists()
    file_config = load_or_create_config_file()
    # CLI flags override values from the config file.
    overrides: dict[str, Any] = {"default_model": model} if model else {}
    merged_config: dict[str, Any] = {**file_config, **overrides}
    app = Elia(LaunchConfig(**merged_config), startup_prompt=joined_prompt)
    app.run(inline=inline)
75 |
@cli.command()
def reset() -> None:
    """
    Reset the database

    This command will delete the database file and recreate it.
    Previously saved conversations and data will be lost.
    """
    # Imported lazily: only this command needs rich's Padding/Text helpers.
    from rich.padding import Padding
    from rich.text import Text

    # Print a prominent warning including the database path so the user can
    # make a backup before confirming.
    console.print(
        Padding(
            Text.from_markup(
                dedent(f"""\
                    [u b red]Warning![/]

                    [b red]This will delete all messages and chats.[/]

                    You may wish to create a backup of \
"[bold blue u]{str(sqlite_file_name.resolve().absolute())}[/]" before continuing.
                    """)
            ),
            pad=(1, 2),
        )
    )
    # abort=True exits the command immediately if the user answers "no".
    if click.confirm("Delete all chats?", abort=True):
        sqlite_file_name.unlink(missing_ok=True)
        asyncio.run(create_database())
        console.print(f"♻️ Database reset @ {sqlite_file_name}")
106 |
107 | @cli.command("import")
108 | @click.argument(
109 | "file",
110 | type=click.Path(
111 | exists=True, dir_okay=False, path_type=pathlib.Path, resolve_path=True
112 | ),
113 | )
114 | def import_file_to_db(file: pathlib.Path) -> None:
115 | """
116 | Import ChatGPT Conversations
117 |
118 | This command will import the ChatGPT conversations from a local
119 | JSON file into the database.
120 | """
121 | asyncio.run(import_chatgpt_data(file=file))
122 | console.print(f"[green]ChatGPT data imported from {str(file)!r}")
123 |
124 | if __name__ == "__main__":
125 | cli()
126 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/#use-with-ide
110 | .pdm.toml
111 |
112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113 | __pypackages__/
114 |
115 | # Celery stuff
116 | celerybeat-schedule
117 | celerybeat.pid
118 |
119 | # SageMath parsed files
120 | *.sage.py
121 |
122 | # Environments
123 | .env
124 | .venv
125 | env/
126 | venv/
127 | ENV/
128 | env.bak/
129 | venv.bak/
130 |
131 | # Spyder project settings
132 | .spyderproject
133 | .spyproject
134 |
135 | # Rope project settings
136 | .ropeproject
137 |
138 | # mkdocs documentation
139 | /site
140 |
141 | # mypy
142 | .mypy_cache/
143 | .dmypy.json
144 | dmypy.json
145 |
146 | # Pyre type checker
147 | .pyre/
148 |
149 | # pytype static type analyzer
150 | .pytype/
151 |
152 | # Cython debug symbols
153 | cython_debug/
154 |
155 | # PyCharm
156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158 | # and can be added to the global gitignore or merged into this file. For a more nuclear
159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160 | #.idea/
161 |
162 | elia.sqlite
163 | **/*.pyc
164 | resources/conversations.json
165 |
--------------------------------------------------------------------------------
/elia_chat/database/models.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 | from typing import Any, Optional
3 |
4 | from sqlalchemy import Column, DateTime, func, JSON, desc
5 | from sqlalchemy.ext.asyncio import AsyncAttrs
6 | from sqlalchemy.orm import selectinload
7 | from sqlmodel import Field, Relationship, SQLModel, select
8 |
9 | from elia_chat.database.database import get_session
10 |
11 |
class SystemPromptsDao(AsyncAttrs, SQLModel, table=True):
    """ORM model for a saved system prompt."""

    __tablename__ = "system_prompt"

    id: int | None = Field(default=None, primary_key=True)
    title: str
    prompt: str
    # Defaults to the database server's clock at row insertion time.
    created_at: datetime | None = Field(
        sa_column=Column(DateTime(), server_default=func.now())
    )
21 |
22 |
class MessageDao(AsyncAttrs, SQLModel, table=True):
    """ORM model for a single message within a chat."""

    __tablename__ = "message"

    id: int | None = Field(default=None, primary_key=True)
    chat_id: Optional[int] = Field(foreign_key="chat.id")
    chat: Optional["ChatDao"] = Relationship(back_populates="messages")
    role: str
    content: str
    # Defaults to the database server's clock at row insertion time.
    timestamp: datetime | None = Field(
        sa_column=Column(DateTime(), server_default=func.now())
    )
    # default_factory gives each instance its own dict; the previous
    # `default={}` shared one mutable dict across every MessageDao.
    meta: dict[Any, Any] = Field(sa_column=Column(JSON), default_factory=dict)
    parent_id: Optional[int] = Field(
        foreign_key="message.id", default=None, nullable=True
    )
    parent: Optional["MessageDao"] = Relationship(
        back_populates="replies",
        sa_relationship_kwargs={"remote_side": "MessageDao.id"},
    )
    """The message this message is responding to."""
    replies: list["MessageDao"] = Relationship(back_populates="parent")
    """The replies to this message
    (could be multiple replies e.g. from different models).
    """
    model: str | None
    """The model that wrote this response. (Could switch models mid-chat, possibly)"""
49 |
50 |
class ChatDao(AsyncAttrs, SQLModel, table=True):
    """ORM model for a chat, plus static query helpers."""

    __tablename__ = "chat"

    id: int = Field(default=None, primary_key=True)
    model: str
    title: str | None
    # Defaults to the database server's clock at row insertion time.
    started_at: datetime | None = Field(
        sa_column=Column(DateTime(), server_default=func.now())
    )
    messages: list[MessageDao] = Relationship(back_populates="chat")
    archived: bool = Field(default=False)

    @staticmethod
    async def all() -> list["ChatDao"]:
        """Return all non-archived chats, most recently active first."""
        async with get_session() as session:
            # Create a subquery that finds the maximum
            # (most recent) timestamp for each chat.
            max_timestamp: Any = func.max(MessageDao.timestamp).label("max_timestamp")
            subquery = (
                select(MessageDao.chat_id, max_timestamp)
                .group_by(MessageDao.chat_id)
                .alias("subquery")
            )

            statement = (
                select(ChatDao)
                .join(subquery, subquery.c.chat_id == ChatDao.id)
                .where(ChatDao.archived == False)  # noqa: E712
                .order_by(desc(subquery.c.max_timestamp))
                .options(selectinload(ChatDao.messages))
            )
            results = await session.exec(statement)
            return list(results)

    @staticmethod
    async def from_id(chat_id: int) -> "ChatDao":
        """Return the chat with the given ID, eagerly loading its messages.

        Raises if no chat with that ID exists.
        """
        async with get_session() as session:
            statement = (
                select(ChatDao)
                .where(ChatDao.id == int(chat_id))
                .options(selectinload(ChatDao.messages))
            )
            result = await session.exec(statement)
            return result.one()

    @staticmethod
    async def rename_chat(chat_id: int, new_title: str) -> None:
        """Persist a new title for the chat with the given ID."""
        async with get_session() as session:
            # Load the chat in *this* session. The previous code called
            # from_id(), which uses a separate (already closed) session and
            # handed back a detached instance that then had to be
            # re-attached here before committing.
            result = await session.exec(
                select(ChatDao).where(ChatDao.id == int(chat_id))
            )
            chat = result.one()
            chat.title = new_title
            session.add(chat)
            await session.commit()
103 |
--------------------------------------------------------------------------------
/elia_chat/chats_manager.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from dataclasses import dataclass
4 | import datetime
5 |
6 | from sqlmodel import select
7 | from textual import log
8 |
9 | from elia_chat.database.converters import (
10 | chat_dao_to_chat_data,
11 | chat_message_to_message_dao,
12 | message_dao_to_chat_message,
13 | )
14 | from elia_chat.database.database import get_session
15 | from elia_chat.database.models import ChatDao, MessageDao
16 | from elia_chat.models import ChatData, ChatMessage
17 |
18 |
@dataclass
class ChatsManager:
    """Static helpers bridging the UI layer and the chat database."""

    @staticmethod
    async def all_chats() -> list[ChatData]:
        """Return all chats (per ChatDao.all) as UI-layer ChatData objects."""
        chat_daos = await ChatDao.all()
        return [chat_dao_to_chat_data(chat) for chat in chat_daos]

    @staticmethod
    async def get_chat(chat_id: int) -> ChatData:
        """Return the chat with the given ID as a ChatData object."""
        chat_dao = await ChatDao.from_id(chat_id)
        return chat_dao_to_chat_data(chat_dao)

    @staticmethod
    async def rename_chat(chat_id: int, new_title: str) -> None:
        """Persist a new title for the given chat."""
        await ChatDao.rename_chat(chat_id, new_title)

    @staticmethod
    async def get_messages(
        chat_id: int,
    ) -> list[ChatMessage]:
        """Return all messages of a chat as UI-layer ChatMessage objects.

        Raises:
            RuntimeError: If the chat ID is malformed or no chat exists
                with that ID.
        """
        async with get_session() as session:
            try:
                chat: ChatDao | None = await session.get(ChatDao, chat_id)
            except ValueError:
                raise RuntimeError(
                    f"Malformed chat ID {chat_id!r}. "
                    f"I couldn't convert it to an integer."
                )

            if not chat:
                raise RuntimeError(f"Chat with ID {chat_id} not found.")
            # Load the relationship via awaitable_attrs, as the other
            # methods in this class do: plain `chat.messages` triggers a
            # synchronous lazy-load inside the async session.
            message_daos = await chat.awaitable_attrs.messages
            await session.commit()

        # Convert MessageDao objects to ChatMessages
        model = chat.model
        chat_messages: list[ChatMessage] = [
            message_dao_to_chat_message(message_dao, model)
            for message_dao in message_daos
        ]

        log.debug(f"Retrieved {len(chat_messages)} messages for chat {chat_id!r}")
        return chat_messages

    @staticmethod
    async def create_chat(chat_data: ChatData) -> int:
        """Persist a new chat and its initial messages; return the new ID."""
        log.debug(f"Creating chat in database: {chat_data!r}")

        model = chat_data.model
        lookup_key = model.lookup_key
        async with get_session() as session:
            chat = ChatDao(
                model=lookup_key,
                title="",
                started_at=datetime.datetime.now(datetime.timezone.utc),
            )
            session.add(chat)
            # Commit so the database assigns chat.id before messages
            # referencing it are created.
            await session.commit()

            chat_id = chat.id
            for message in chat_data.messages:
                litellm_message = message.message
                content = litellm_message["content"]
                new_message = MessageDao(
                    chat_id=chat_id,
                    role=litellm_message["role"],
                    content=content if isinstance(content, str) else "",
                    model=lookup_key,
                    timestamp=message.timestamp,
                )
                (await chat.awaitable_attrs.messages).append(new_message)

            await session.commit()

        return chat.id

    @staticmethod
    async def archive_chat(chat_id: int) -> None:
        """Mark a chat as archived so it no longer appears in listings."""
        async with get_session() as session:
            statement = select(ChatDao).where(ChatDao.id == chat_id)
            result = await session.exec(statement)
            chat_dao = result.one()
            chat_dao.archived = True
            await session.commit()

    @staticmethod
    async def add_message_to_chat(chat_id: int, message: ChatMessage) -> None:
        """Append a message to an existing chat.

        Raises:
            RuntimeError: If no chat exists with the given ID.
        """
        async with get_session() as session:
            chat: ChatDao | None = await session.get(ChatDao, chat_id)
            if not chat:
                # RuntimeError for consistency with get_messages
                # (was a bare Exception).
                raise RuntimeError(f"Chat with ID {chat_id} not found.")
            message_dao = chat_message_to_message_dao(message, chat_id)
            (await chat.awaitable_attrs.messages).append(message_dao)
            session.add(chat)
            await session.commit()
114 |
--------------------------------------------------------------------------------
/elia_chat/screens/home_screen.py:
--------------------------------------------------------------------------------
1 | from typing import TYPE_CHECKING, cast
2 | from textual import on
3 | from textual.app import ComposeResult
4 | from textual.binding import Binding
5 | from textual.events import ScreenResume
6 | from textual.screen import Screen
7 | from textual.signal import Signal
8 | from textual.widgets import Footer
9 |
10 | from elia_chat.runtime_config import RuntimeConfig
11 | from elia_chat.widgets.chat_list import ChatList
12 | from elia_chat.widgets.prompt_input import PromptInput
13 | from elia_chat.chats_manager import ChatsManager
14 | from elia_chat.widgets.app_header import AppHeader
15 | from elia_chat.screens.chat_screen import ChatScreen
16 | from elia_chat.widgets.chat_options import OptionsModal
17 | from elia_chat.widgets.welcome import Welcome
18 |
19 | if TYPE_CHECKING:
20 | from elia_chat.app import Elia
21 |
22 |
class HomePromptInput(PromptInput):
    """Prompt input on the home screen; here `escape` quits the app."""

    BINDINGS = [Binding("escape", "app.quit", "Quit", key_display="esc")]
25 |
26 |
class HomeScreen(Screen[None]):
    """Landing screen: prompt input, list of past chats, and options."""

    CSS = """\
ChatList {
    height: 1fr;
    width: 1fr;
    background: $background 15%;
}
"""

    BINDINGS = [
        Binding(
            "ctrl+j,alt+enter",
            "send_message",
            "Send message",
            priority=True,
            key_display="^j",
            tooltip="Send a message to the chosen LLM. On modern terminals, "
            "[b u]alt+enter[/] can be used as an alternative.",
        ),
        Binding(
            "o,ctrl+o",
            "options",
            "Options",
            key_display="^o",
            tooltip="Change the model, system prompt, and check where Elia"
            " is storing your data.",
        ),
    ]

    def __init__(
        self,
        config_signal: Signal[RuntimeConfig],
        name: str | None = None,
        id: str | None = None,
        classes: str | None = None,
    ) -> None:
        """Keep hold of the runtime-config signal for the app header."""
        super().__init__(name, id, classes)
        self.config_signal = config_signal
        self.elia = cast("Elia", self.app)

    def on_mount(self) -> None:
        self.chats_manager = ChatsManager()

    def compose(self) -> ComposeResult:
        yield AppHeader(self.config_signal)
        yield HomePromptInput(id="home-prompt")
        yield ChatList()
        yield Welcome()
        yield Footer()

    @on(ScreenResume)
    async def reload_screen(self) -> None:
        """Refresh the chat list (and welcome banner) on screen resume."""
        await self.query_one(ChatList).reload_and_refresh()
        self.show_welcome_if_required()

    @on(ChatList.ChatOpened)
    async def open_chat_screen(self, event: ChatList.ChatOpened):
        """Push a chat screen for the chat selected in the list."""
        opened_id = event.chat.id
        assert opened_id is not None
        chat_data = await self.chats_manager.get_chat(opened_id)
        await self.app.push_screen(ChatScreen(chat_data))

    @on(ChatList.CursorEscapingTop)
    def cursor_escaping_top(self):
        # Moving above the top of the list focuses the prompt input.
        self.query_one(HomePromptInput).focus()

    @on(PromptInput.PromptSubmitted)
    async def create_new_chat(self, event: PromptInput.PromptSubmitted) -> None:
        """Start a brand-new chat from the submitted prompt text."""
        await self.elia.launch_chat(  # type: ignore
            prompt=event.text,
            model=self.elia.runtime_config.selected_model,
        )

    @on(PromptInput.CursorEscapingBottom)
    async def move_focus_below(self) -> None:
        # Moving below the prompt input focuses the chat list.
        self.focus_next(ChatList)

    def action_send_message(self) -> None:
        self.query_one(PromptInput).action_submit_prompt()

    async def action_options(self) -> None:
        await self.app.push_screen(
            OptionsModal(),
            callback=self.update_config,
        )

    def update_config(self, runtime_config: RuntimeConfig) -> None:
        cast("Elia", self.app).runtime_config = runtime_config

    def show_welcome_if_required(self) -> None:
        """Show the welcome widget only when there are no chats to list."""
        has_chats = self.query_one(ChatList).option_count > 0
        self.query_one(Welcome).display = "none" if has_chats else "block"
128 |
--------------------------------------------------------------------------------
/elia_chat/app.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import datetime
4 | from pathlib import Path
5 | from typing import TYPE_CHECKING
6 |
7 | from textual.app import App
8 | from textual.binding import Binding
9 | from textual.reactive import Reactive, reactive
10 | from textual.signal import Signal
11 |
12 | from elia_chat.chats_manager import ChatsManager
13 | from elia_chat.models import ChatData, ChatMessage
14 | from elia_chat.config import EliaChatModel, LaunchConfig
15 | from elia_chat.runtime_config import RuntimeConfig
16 | from elia_chat.screens.chat_screen import ChatScreen
17 | from elia_chat.screens.help_screen import HelpScreen
18 | from elia_chat.screens.home_screen import HomeScreen
19 | from elia_chat.themes import BUILTIN_THEMES, Theme, load_user_themes
20 |
21 | if TYPE_CHECKING:
22 | from litellm.types.completion import (
23 | ChatCompletionUserMessageParam,
24 | ChatCompletionSystemMessageParam,
25 | )
26 |
27 |
class Elia(App[None]):
    """The Elia TUI application."""

    ENABLE_COMMAND_PALETTE = False
    CSS_PATH = Path(__file__).parent / "elia.scss"
    BINDINGS = [
        Binding("q", "app.quit", "Quit", show=False),
        Binding("f1,?", "help", "Help"),
    ]

    def __init__(self, config: LaunchConfig, startup_prompt: str = ""):
        self.launch_config = config

        # Built-in themes, with user-defined themes layered on top.
        self.themes: dict[str, Theme] = {**BUILTIN_THEMES, **load_user_themes()}

        self._runtime_config = RuntimeConfig(
            selected_model=config.default_model_object,
            system_prompt=config.system_prompt,
        )
        self.runtime_config_signal = Signal[RuntimeConfig](
            self, "runtime-config-updated"
        )
        """Widgets can subscribe to this signal to be notified of
        when the user has changed configuration at runtime (e.g. using the UI)."""

        self.startup_prompt = startup_prompt
        """Elia can be launched with a prompt on startup via a command line option.

        This is a convenience which will immediately load the chat interface and
        put users into the chat window, rather than going to the home screen.
        """

        super().__init__()

    theme: Reactive[str | None] = reactive(None, init=False)

    @property
    def runtime_config(self) -> RuntimeConfig:
        return self._runtime_config

    @runtime_config.setter
    def runtime_config(self, new_runtime_config: RuntimeConfig) -> None:
        # Store the new config, then notify subscribers.
        self._runtime_config = new_runtime_config
        self.runtime_config_signal.publish(self.runtime_config)

    async def on_mount(self) -> None:
        await self.push_screen(HomeScreen(self.runtime_config_signal))
        self.theme = self.launch_config.theme
        if not self.startup_prompt:
            return
        await self.launch_chat(
            prompt=self.startup_prompt,
            model=self.runtime_config.selected_model,
        )

    async def launch_chat(self, prompt: str, model: EliaChatModel) -> None:
        """Create a new chat seeded with the system prompt and open it."""
        now = datetime.datetime.now(datetime.timezone.utc)
        system_message: ChatCompletionSystemMessageParam = {
            "content": self.runtime_config.system_prompt,
            "role": "system",
        }
        user_message: ChatCompletionUserMessageParam = {
            "content": prompt,
            "role": "user",
        }
        initial_messages = [
            ChatMessage(message=system_message, timestamp=now, model=model),
            ChatMessage(message=user_message, timestamp=now, model=model),
        ]
        chat = ChatData(
            id=None,
            title=None,
            create_timestamp=None,
            model=model,
            messages=initial_messages,
        )
        chat.id = await ChatsManager.create_chat(chat_data=chat)
        await self.push_screen(ChatScreen(chat))

    async def action_help(self) -> None:
        # Toggle: pressing help while already on the help screen closes it.
        if not isinstance(self.screen, HelpScreen):
            await self.push_screen(HelpScreen())
        else:
            self.pop_screen()

    def get_css_variables(self) -> dict[str, str]:
        """Merge the active theme's colour system into the CSS variables."""
        active_theme = self.themes.get(self.theme) if self.theme else None
        color_system = (
            active_theme.to_color_system().generate() if active_theme else {}
        )
        return {**super().get_css_variables(), **color_system}

    def watch_theme(self, theme: str | None) -> None:
        self.refresh_css(animate=False)
        self.screen._update_styles()

    @property
    def theme_object(self) -> Theme | None:
        """The active Theme, or None when unset or unknown."""
        if self.theme is None:
            return None
        return self.themes.get(self.theme)
142 |
143 |
144 | if __name__ == "__main__":
145 | app = Elia(LaunchConfig())
146 | app.run()
147 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | A snappy, keyboard-centric terminal user interface for interacting with large language models.
6 | Chat with Claude 3, ChatGPT, and local models like Llama 3, Phi 3, Mistral and Gemma.
7 |
8 |
9 | 
10 |
11 | ## Introduction
12 |
13 | `elia` is an application for interacting with LLMs which runs entirely in your terminal, and is designed to be keyboard-focused, efficient, and fun to use!
14 | It stores your conversations in a local SQLite database, and allows you to interact with a variety of models.
15 | Speak with proprietary models such as ChatGPT and Claude, or with local models running through `ollama` or LocalAI.
16 |
17 | ## Installation
18 |
19 | Install Elia with [pipx](https://github.com/pypa/pipx):
20 |
21 | ```bash
22 | pipx install --python 3.11 elia-chat
23 | ```
24 |
25 | Depending on the model you wish to use, you may need to set one or more environment variables (e.g. `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, `GEMINI_API_KEY` etc).
26 |
27 | ## Quickstart
28 |
29 | Launch Elia from the command line:
30 |
31 | ```bash
32 | elia
33 | ```
34 |
35 | Launch a new chat inline (under your prompt) with `-i`/`--inline`:
36 |
37 | ```bash
38 | elia -i "What is the Zen of Python?"
39 | ```
40 |
41 | Launch a new chat in full-screen mode:
42 |
43 | ```bash
44 | elia "Tell me a cool fact about lizards!"
45 | ```
46 |
47 | Specify a model via the command line using `-m`/`--model`:
48 |
49 | ```bash
50 | elia -m gpt-4o
51 | ```
52 |
53 | Options can be combined - here's how you launch a chat with Gemini 1.5 Flash in inline mode (requires `GEMINI_API_KEY` environment variable).
54 |
55 | ```bash
56 | elia -i -m gemini/gemini-1.5-flash-latest "How do I call Rust code from Python?"
57 | ```
58 |
59 | ## Running local models
60 |
61 | 1. Install [`ollama`](https://github.com/ollama/ollama).
62 | 2. Pull the model you require, e.g. `ollama pull llama3`.
63 | 3. Run the local ollama server: `ollama serve`.
64 | 4. Add the model to the config file (see below).
65 |
66 | ## Configuration
67 |
68 | The location of the configuration file is noted at the bottom of
69 | the options window (`ctrl+o`).
70 |
71 | The example file below shows the available options, as well as examples of how to add new models.
72 |
73 | ```toml
74 | # the ID or name of the model that is selected by default on launch
75 | default_model = "gpt-4o"
76 | # the system prompt on launch
77 | system_prompt = "You are a helpful assistant who talks like a pirate."
78 |
79 | # choose from "nebula", "cobalt", "twilight", "hacker", "alpine", "galaxy", "nautilus", "monokai", "textual"
80 | theme = "galaxy"
81 |
82 | # change the syntax highlighting theme of code in messages
83 | # choose from https://pygments.org/styles/
84 | # defaults to "monokai"
85 | message_code_theme = "dracula"
86 |
87 | # example of adding local llama3 support
88 | # only the `name` field is required here.
89 | [[models]]
90 | name = "ollama/llama3"
91 |
92 | # example of a model running on a local server, e.g. LocalAI
93 | [[models]]
94 | name = "openai/some-model"
95 | api_base = "http://localhost:8080/v1"
96 | api_key = "api-key-if-required"
97 |
# example of adding a groq model, showing some other fields
99 | [[models]]
100 | name = "groq/llama2-70b-4096"
101 | display_name = "Llama 2 70B" # appears in UI
102 | provider = "Groq" # appears in UI
103 | temperature = 1.0 # high temp = high variation in output
104 | max_retries = 0 # number of retries on failed request
105 |
106 | # example of multiple instances of one model, e.g. you might
107 | # have a 'work' OpenAI org and a 'personal' org.
108 | [[models]]
109 | id = "work-gpt-3.5-turbo"
110 | name = "gpt-3.5-turbo"
111 | display_name = "GPT 3.5 Turbo (Work)"
112 |
113 | [[models]]
114 | id = "personal-gpt-3.5-turbo"
115 | name = "gpt-3.5-turbo"
116 | display_name = "GPT 3.5 Turbo (Personal)"
117 | ```
118 |
119 | ## Custom themes
120 |
121 | Add a custom theme YAML file to the themes directory.
122 | You can find the themes directory location by pressing `ctrl+o` on the home screen and looking for the `Themes directory` line.
123 |
124 | Here's an example of a theme YAML file:
125 |
126 | ```yaml
127 | name: example # use this name in your config file
128 | primary: '#4e78c4'
129 | secondary: '#f39c12'
130 | accent: '#e74c3c'
131 | background: '#0e1726'
132 | surface: '#17202a'
133 | error: '#e74c3c' # error messages
134 | success: '#2ecc71' # success messages
135 | warning: '#f1c40f' # warning messages
136 | ```
137 |
138 | ## Changing keybindings
139 |
140 | Right now, keybinds cannot be changed. Terminals are also rather limited in what keybinds they support.
141 | For example, pressing Cmd+Enter to send a message is not possible (although we may support a protocol to allow this in some terminals in the future).
142 |
143 | For now, I recommend you map whatever key combo you want at the terminal emulator level to send `\n`.
144 | Here's an example using iTerm:
145 |
146 |
147 |
148 | With this mapping in place, pressing Cmd+Enter will send a message to the LLM, and pressing Enter alone will create a new line.
149 |
150 | ## Import from ChatGPT
151 |
152 | Export your conversations to a JSON file using the ChatGPT UI, then import them using the `import` command.
153 |
154 | ```bash
155 | elia import 'path/to/conversations.json'
156 | ```
157 |
158 | ## Wiping the database
159 |
160 | ```bash
161 | elia reset
162 | ```
163 |
164 | ## Uninstalling
165 |
166 | ```bash
167 | pipx uninstall elia-chat
168 | ```
169 |
--------------------------------------------------------------------------------
/elia_chat/widgets/chat_options.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | from typing import TYPE_CHECKING, cast
3 |
4 |
5 | from rich.markup import escape
6 | from rich.text import Text
7 | from textual import on
8 | from textual.app import ComposeResult
9 | from textual.binding import Binding
10 | from textual.containers import Vertical, VerticalScroll
11 | from textual.screen import ModalScreen
12 | from textual.widgets import Footer, RadioSet, RadioButton, Static, TextArea
13 |
14 | from elia_chat.config import EliaChatModel
15 | from elia_chat.locations import config_file, theme_directory
16 | from elia_chat.runtime_config import RuntimeConfig
17 | from elia_chat.database.database import sqlite_file_name
18 |
19 | if TYPE_CHECKING:
20 | from elia_chat.app import Elia
21 |
22 |
class ModelRadioButton(RadioButton):
    """A radio button that carries the `EliaChatModel` it represents."""

    def __init__(
        self,
        model: EliaChatModel,
        label: str | Text = "",
        value: bool = False,
        button_first: bool = True,
        *,
        name: str | None = None,
        id: str | None = None,
        classes: str | None = None,
        disabled: bool = False,
    ) -> None:
        """Initialise the button.

        Args:
            model: The chat model this radio button represents.
            label: The label shown beside the button.
            value: The initial pressed state.
            button_first: Whether the button precedes the label.
            name: The name of the widget.
            id: The DOM ID of the widget.
            classes: Space-separated CSS classes.
            disabled: Whether the widget starts disabled.
        """
        super().__init__(
            label=label,
            value=value,
            button_first=button_first,
            name=name,
            id=id,
            classes=classes,
            disabled=disabled,
        )
        # Stash the model so the options screen can read it back from the
        # pressed button.
        self.model = model
46 |
47 |
class OptionsModal(ModalScreen[RuntimeConfig]):
    """Modal screen for editing session-scoped options (model + system prompt).

    Changes made here update ``Elia.runtime_config`` immediately but are not
    written back to the config file — they apply to the current session only.
    """

    BINDINGS = [
        Binding("q", "app.quit", "Quit", show=False),
        Binding("escape", "app.pop_screen", "Close options", key_display="esc"),
    ]

    def __init__(
        self,
        name: str | None = None,
        id: str | None = None,
        classes: str | None = None,
    ) -> None:
        super().__init__(name, id, classes)
        self.elia = cast("Elia", self.app)
        self.runtime_config = self.elia.runtime_config

    def compose(self) -> ComposeResult:
        """Build the options form: model radio set, system prompt, paths."""
        with VerticalScroll(id="form-scrollable") as vs:
            vs.border_title = "Session Options"
            vs.can_focus = False
            with RadioSet(id="available-models") as models_rs:
                selected_model = self.runtime_config.selected_model
                models_rs.border_title = "Available Models"
                for model in self.elia.launch_config.all_models:
                    label = f"{escape(model.display_name or model.name)}"
                    provider = model.provider
                    if provider:
                        label += f" [i]by[/] {provider}"

                    # Prefer matching on `id` when both models define one.
                    # Several configured models may share the same `name`
                    # (e.g. a "work" and a "personal" gpt-3.5-turbo), and
                    # falling back to a name comparison in that case would
                    # mark more than one radio button as selected.
                    ids_defined = selected_model.id and model.id
                    if ids_defined:
                        is_selected = selected_model.id == model.id
                    else:
                        is_selected = selected_model.name == model.name
                    yield ModelRadioButton(
                        model=model,
                        value=is_selected,
                        label=label,
                    )
            system_prompt_ta = TextArea(
                self.runtime_config.system_prompt, id="system-prompt-ta"
            )
            system_prompt_ta.border_title = "System Message"
            yield system_prompt_ta
            with Vertical(id="xdg-info") as xdg_info:
                xdg_info.border_title = "More Information"
                yield Static(f"{sqlite_file_name.absolute()}\n[dim]Database[/]\n")
                yield Static(f"{config_file()}\n[dim]Config[/]\n")
                yield Static(f"{theme_directory()}\n[dim]Themes directory[/]")
            # TODO - yield and dock a label to the bottom explaining
            # that the changes made here only apply to the current session
            # We can probably do better when it comes to system prompts.
            # Perhaps we could store saved prompts in the database.
        yield Footer()

    def on_mount(self) -> None:
        """Show "overrides config" subtitles as soon as the screen appears."""
        system_prompt_ta = self.query_one("#system-prompt-ta", TextArea)
        selected_model_rs = self.query_one("#available-models", RadioSet)
        self.apply_overridden_subtitles(system_prompt_ta, selected_model_rs)

    @on(RadioSet.Changed)
    @on(TextArea.Changed)
    def update_state(self, event: TextArea.Changed | RadioSet.Changed) -> None:
        """Push the form state into the app's runtime config on any change."""
        system_prompt_ta = self.query_one("#system-prompt-ta", TextArea)
        selected_model_rs = self.query_one("#available-models", RadioSet)
        if selected_model_rs.pressed_button is None:
            # Nothing is pressed — fall back to the first model.
            # NOTE: reaches into RadioSet private state (`_selected`); there
            # is no public API to set the pressed index programmatically here.
            selected_model_rs._selected = 0
            assert selected_model_rs.pressed_button is not None

        model_button = cast(ModelRadioButton, selected_model_rs.pressed_button)
        model = model_button.model
        # RuntimeConfig is immutable, so produce an updated copy.
        self.elia.runtime_config = self.elia.runtime_config.model_copy(
            update={
                "system_prompt": system_prompt_ta.text,
                "selected_model": model,
            }
        )

        self.apply_overridden_subtitles(system_prompt_ta, selected_model_rs)
        self.refresh()

    def apply_overridden_subtitles(
        self, system_prompt_ta: TextArea, selected_model_rs: RadioSet
    ) -> None:
        """Mark widgets whose value differs from the launch config."""
        # The configured default model may be referenced by either id or name,
        # so the selection only "overrides config" when it matches neither.
        if (
            self.elia.launch_config.default_model
            != self.elia.runtime_config.selected_model.id
            and self.elia.launch_config.default_model
            != self.elia.runtime_config.selected_model.name
        ):
            selected_model_rs.border_subtitle = "overrides config"
        else:
            selected_model_rs.border_subtitle = ""

        if system_prompt_ta.text != self.elia.launch_config.system_prompt:
            system_prompt_ta.border_subtitle = "overrides config"
        else:
            system_prompt_ta.border_subtitle = "editable"
145 |
--------------------------------------------------------------------------------
/elia_chat/screens/help_screen.py:
--------------------------------------------------------------------------------
1 | from textual.app import ComposeResult
2 | from textual.binding import Binding
3 | from textual.containers import Vertical, VerticalScroll
4 | from textual.screen import ModalScreen
5 | from textual.widgets import Footer, Markdown
6 |
7 |
class HelpScreen(ModalScreen[None]):
    """Modal screen rendering the built-in help text as Markdown."""

    BINDINGS = [
        Binding("q", "app.quit", "Quit", show=False),
        Binding("escape,f1,?", "app.pop_screen()", "Close help", key_display="esc"),
    ]

    # User-facing help content. Fixed typos: "how to behave", "select text
    # without", "`y` or `c`", and "focused" (was "focussed") for consistency.
    HELP_MARKDOWN = """\
### How do I quit Elia?

Press `Ctrl+C` on your keyboard.
`q` also works if an input isn't currently focused.
If focus is on the prompt input box on the home screen, `esc` will close Elia too.

### Environment

You may need to set some environment variables, depending on the model
you wish to set.
To use OpenAI models, the `OPENAI_API_KEY` env var must be set.
To use Anthropic models, the `ANTHROPIC_API_KEY` env var must be set.

To use a local model, see the instructions in the README:

* https://github.com/darrenburns/elia/blob/main/README.md

### Config file and database

The locations of the config file and the database can be found at the bottom
of the options screen (`ctrl+o`).

### General navigation

Elia has very strong mouse support. Most things can be clicked.

Use `tab` and `shift+tab` to move between different widgets on screen.

In some places you can make use of the arrow keys or Vim nav keys to move around.

In general, pressing `esc` will move you "closer to home".
Pay attention to the bar at the bottom to see where `esc` will take you.

If you can see a scrollbar, `pageup`, `pagedown`, `home`, and `end` can also
be used to navigate.

On the chat screen, pressing `up` and `down` will navigate through messages,
but if you just wish to scroll a little, you can use `shift+up` and `shift+down`.

### The chat list

- `up,down,k,j`: Navigate through chats.
- `a`: Archive the highlighted chat.
- `pageup,pagedown`: Up/down a page.
- `home,end`: Go to first/last chat.
- `g,G`: Go to first/last chat.
- `enter,l`: Open chat.

### The options window

Press `ctrl+o` to open the _options window_.

On this window you can change the `model` and `system prompt`.
The system prompt tells the model how to behave, and is added to the
start of every conversation.

Changes made on the options window are saved automatically.

> **`Note`**: Changes made in the options window only apply to the current session!

You can change the system prompt globally by updating the config file.
The location of the config file is shown at the bottom of the options window.

### Writing a prompt

The shortcuts below work when the _prompt editor_ is focused.
The prompt editor is the box where you type your message.
It's present on both the home screen and the chat page.

- `ctrl+j`: Submit the prompt
- `alt+enter`: Submit the prompt (only works in some terminals)
- `up`: Move the cursor up
- `down`: Move the cursor down
- `left`: Move the cursor left
- `ctrl+left`: Move the cursor to the start of the word
- `ctrl+shift+left`: Move the cursor to the start of the word and select
- `right`: Move the cursor right
- `ctrl+right`: Move the cursor to the end of the word
- `ctrl+shift+right`: Move the cursor to the end of the word and select
- `home,ctrl+a`: Move the cursor to the start of the line
- `end,ctrl+e`: Move the cursor to the end of the line
- `shift+home`: Move the cursor to the start of the line and select
- `shift+end`: Move the cursor to the end of the line and select
- `pageup`: Move the cursor one page up
- `pagedown`: Move the cursor one page down
- `shift+up`: Select while moving the cursor up
- `shift+down`: Select while moving the cursor down
- `shift+left`: Select while moving the cursor left
- `backspace`: Delete character to the left of cursor
- `ctrl+w`: Delete from cursor to start of the word
- `delete,ctrl+d`: Delete character to the right of cursor
- `ctrl+f`: Delete from cursor to end of the word
- `ctrl+x`: Delete the current line
- `ctrl+u`: Delete from cursor to the start of the line
- `ctrl+k`: Delete from cursor to the end of the line
- `f6`: Select the current line
- `f7`: Select all text in the document
- `ctrl+z`: Undo last edit
- `ctrl+y`: Redo last undo
- `cmd+v` (mac): Paste
- `ctrl+v` (windows/linux): Paste

You can also click to move the cursor, and click and drag to select text.

The arrow keys can also be used to move focus _out_ of the prompt box.
For example, pressing `up` while the prompt is focused on the chat screen
and the cursor is at (0, 0) will move focus to the latest message.

### The chat screen

You can use the arrow keys to move up and down through messages.

- `ctrl+r`: Rename the chat (or click the chat title).
- `f2`: View more information about the chat.

_With a message focused_:

- `y,c`: Copy the raw Markdown of the message to the clipboard.
    - This requires terminal support. The default MacOS terminal is not supported.
- `enter`: Enter _select mode_.
    - In this mode, you can move a cursor through the text, optionally holding
      `shift` to select text as you move.
    - Press `v` to toggle _visual mode_, allowing you to select text without
      needing to hold `shift`.
    - Press `u` to quickly select the next code block in the message.
    - With some text selected, press `y` or `c` to copy.
- `enter`: View more details about a message.
    - The amount of details available may vary depending on the model
      or provider being used.
- `g`: Focus the first message.
- `G`: Focus the latest message.
- `m`: Move focus to the prompt box.
- `up,down,k,j`: Navigate through messages.

"""

    def compose(self) -> ComposeResult:
        """Render the help Markdown in a scrollable container with a footer."""
        with Vertical(id="help-container") as vertical:
            vertical.border_title = "Elia Help"
            with VerticalScroll():
                yield Markdown(self.HELP_MARKDOWN, id="help-markdown")
            yield Markdown(
                "Use `pageup`, `pagedown`, `up`, and `down` to scroll.",
                id="help-scroll-keys-info",
            )
            yield Footer()
--------------------------------------------------------------------------------
/elia_chat/themes.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseModel, Field
2 | from textual.design import ColorSystem
3 | import yaml
4 |
5 | from elia_chat.locations import theme_directory
6 |
7 |
class Theme(BaseModel):
    """A colour theme for the Elia UI.

    The colour fields mirror the parameters of Textual's `ColorSystem`.
    """

    # `name` identifies the theme (in config and the themes dict); it is not
    # a colour, so it is excluded from serialisation via `Field(exclude=True)`.
    name: str = Field(exclude=True)
    primary: str
    secondary: str | None = None
    background: str | None = None
    surface: str | None = None
    panel: str | None = None
    warning: str | None = None
    error: str | None = None
    success: str | None = None
    accent: str | None = None
    dark: bool = True

    def to_color_system(self) -> ColorSystem:
        """Convert this theme to a ColorSystem."""
        # `model_dump` already drops `name` (excluded on the field), and every
        # remaining field is a valid `ColorSystem` argument. The previous
        # exclude-set listed fields ("text_area", "syntax", ...) that do not
        # exist on this model, so it was dead code.
        return ColorSystem(**self.model_dump())
34 |
35 |
def load_user_themes() -> dict[str, Theme]:
    """Load user themes from the theme directory.

    Only files with a `.yaml` or `.yml` suffix are considered.

    Returns:
        A dictionary mapping theme names to theme objects.

    Raises:
        ValueError: If a theme file does not contain a `name` key.
    """
    themes: dict[str, Theme] = {}
    for path in theme_directory().iterdir():
        if path.suffix in (".yaml", ".yml"):
            with path.open() as theme_file:
                # Theme files are plain data, so the restricted safe loader
                # suffices (and is safer than yaml.FullLoader).
                theme_content = yaml.safe_load(theme_file) or {}
            try:
                themes[theme_content["name"]] = Theme(**theme_content)
            except KeyError as exc:
                # Chain the original error for easier debugging.
                raise ValueError(
                    f"Invalid theme file {path}. A `name` is required."
                ) from exc
    return themes
55 |
56 |
# Themes that ship with Elia. Keys match each theme's `name` field and are
# the values accepted by the `theme` setting in the config file; user themes
# loaded by `load_user_themes` are merged alongside these.
BUILTIN_THEMES: dict[str, Theme] = {
    "textual": Theme(
        name="textual",
        primary="#004578",
        secondary="#0178D4",
        warning="#ffa62b",
        error="#ba3c5b",
        success="#4EBF71",
        accent="#ffa62b",
        dark=True,
    ),
    "monokai": Theme(
        name="monokai",
        primary="#F92672",  # Pink
        secondary="#66D9EF",  # Light Blue
        warning="#FD971F",  # Orange
        error="#F92672",  # Pink (same as primary for consistency)
        success="#A6E22E",  # Green
        accent="#AE81FF",  # Purple
        background="#272822",  # Dark gray-green
        surface="#3E3D32",  # Slightly lighter gray-green
        panel="#3E3D32",  # Same as surface for consistency
        dark=True,
    ),
    "nautilus": Theme(
        name="nautilus",
        primary="#0077BE",  # Ocean Blue
        secondary="#20B2AA",  # Light Sea Green
        warning="#FFD700",  # Gold (like sunlight on water)
        error="#FF6347",  # Tomato (like a warning buoy)
        success="#32CD32",  # Lime Green (like seaweed)
        accent="#FF8C00",  # Dark Orange (like a sunset over water)
        dark=True,
        background="#001F3F",  # Dark Blue (deep ocean)
        surface="#003366",  # Navy Blue (shallower water)
        panel="#005A8C",  # Steel Blue (water surface)
    ),
    "galaxy": Theme(
        name="galaxy",
        primary="#8A2BE2",  # Improved Deep Magenta (Blueviolet)
        secondary="#a684e8",
        warning="#FFD700",  # Gold, more visible than orange
        error="#FF4500",  # OrangeRed, vibrant but less harsh than pure red
        success="#00FA9A",  # Medium Spring Green, kept for vibrancy
        accent="#FF69B4",  # Hot Pink, for a pop of color
        dark=True,
        background="#0F0F1F",  # Very Dark Blue, almost black
        surface="#1E1E3F",  # Dark Blue-Purple
        panel="#2D2B55",  # Slightly Lighter Blue-Purple
    ),
    "nebula": Theme(
        name="nebula",
        primary="#4169E1",  # Royal Blue, more vibrant than Midnight Blue
        secondary="#9400D3",  # Dark Violet, more vibrant than Indigo Dye
        warning="#FFD700",  # Kept Gold for warnings
        error="#FF1493",  # Deep Pink, more nebula-like than Crimson
        success="#00FF7F",  # Spring Green, slightly more vibrant
        accent="#FF00FF",  # Magenta, for a true neon accent
        dark=True,
        background="#0A0A23",  # Dark Navy, closer to a night sky
        surface="#1C1C3C",  # Dark Blue-Purple
        panel="#2E2E5E",  # Slightly Lighter Blue-Purple
    ),
    "alpine": Theme(
        name="alpine",
        primary="#4A90E2",  # Clear Sky Blue
        secondary="#81A1C1",  # Misty Blue
        warning="#EBCB8B",  # Soft Sunlight
        error="#BF616A",  # Muted Red
        success="#A3BE8C",  # Alpine Meadow Green
        accent="#5E81AC",  # Mountain Lake Blue
        dark=True,
        background="#2E3440",  # Dark Slate Grey
        surface="#3B4252",  # Darker Blue-Grey
        panel="#434C5E",  # Lighter Blue-Grey
    ),
    "cobalt": Theme(
        name="cobalt",
        primary="#334D5C",  # Deep Cobalt Blue
        secondary="#4878A6",  # Slate Blue
        warning="#FFAA22",  # Amber, suitable for warnings related to primary
        error="#E63946",  # Red, universally recognized for errors
        success="#4CAF50",  # Green, commonly used for success indication
        accent="#D94E64",  # Candy Apple Red
        dark=True,
        surface="#27343B",  # Dark Lead
        panel="#2D3E46",  # Storm Gray
        background="#1F262A",  # Charcoal
    ),
    "twilight": Theme(
        name="twilight",
        primary="#367588",
        secondary="#5F9EA0",
        warning="#FFD700",
        error="#FF6347",
        success="#00FA9A",
        accent="#FF7F50",
        dark=True,
        background="#191970",
        surface="#3B3B6D",
        panel="#4C516D",
    ),
    "hacker": Theme(
        name="hacker",
        primary="#00FF00",  # Bright Green (Lime)
        secondary="#32CD32",  # Lime Green
        warning="#ADFF2F",  # Green Yellow
        error="#FF4500",  # Orange Red (for contrast)
        success="#00FA9A",  # Medium Spring Green
        accent="#39FF14",  # Neon Green
        dark=True,
        background="#0D0D0D",  # Almost Black
        surface="#1A1A1A",  # Very Dark Gray
        panel="#2A2A2A",  # Dark Gray
    ),
}
173 |
--------------------------------------------------------------------------------
/elia_chat/widgets/chat_list.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import datetime
4 | from dataclasses import dataclass
5 | from typing import Self, cast
6 |
7 | import humanize
8 | from rich.console import RenderResult, Console, ConsoleOptions
9 | from rich.markup import escape
10 | from rich.padding import Padding
11 | from rich.text import Text
12 | from textual import events, log, on
13 | from textual.binding import Binding
14 | from textual.geometry import Region
15 | from textual.message import Message
16 | from textual.widgets import OptionList
17 | from textual.widgets.option_list import Option
18 |
19 | from elia_chat.chats_manager import ChatsManager
20 | from elia_chat.config import LaunchConfig
21 | from elia_chat.models import ChatData
22 |
23 |
@dataclass
class ChatListItemRenderable:
    """Rich renderable for a single chat in the chat history list.

    Renders three lines: the chat title (or a one-line preview), the model
    that produced it, and a humanized "time ago" stamp.
    """

    chat: ChatData
    config: LaunchConfig

    def __rich_console__(
        self, console: Console, options: ConsoleOptions
    ) -> RenderResult:
        chat = self.chat
        model = chat.model

        # How long ago the chat was last updated, e.g. "3 hours ago".
        utc_now = datetime.datetime.now(datetime.timezone.utc)
        relative_time = Text(
            humanize.naturaltime(utc_now - chat.update_time), style="dim i"
        )

        # "<model> by <provider>" line, escaped for Rich markup safety.
        model_markup = f"[dim]{escape(model.display_name or model.name)}"
        if model.provider:
            model_markup += f" [i]by[/] {escape(model.provider)}"
        model_line = Text.from_markup(model_markup)

        # Fall back to a flattened preview of the chat when it has no title.
        title_line = chat.title or chat.short_preview.replace("\n", " ")

        yield Padding(
            Text.assemble(title_line, "\n", model_line, "\n", relative_time),
            pad=(0, 0, 0, 1),
        )
46 |
47 |
class ChatListItem(Option):
    """An `OptionList` option wrapping a single chat."""

    def __init__(self, chat: ChatData, config: LaunchConfig) -> None:
        """
        Args:
            chat: The chat associated with this option.
            config: The launch config, forwarded to the renderable.
        """
        super().__init__(ChatListItemRenderable(chat, config))
        self.chat = chat
        self.config = config
57 |
58 |
class ChatList(OptionList):
    """The chat history list shown on the home screen.

    Each option is a `ChatListItem` wrapping a `ChatData`. Contents are
    loaded from the database via `ChatsManager` and can be refreshed with
    `reload_and_refresh`.
    """

    BINDINGS = [
        Binding(
            "escape",
            "app.focus('home-prompt')",
            "Focus prompt",
            key_display="esc",
            tooltip="Return focus to the prompt input.",
        ),
        Binding(
            "a",
            "archive_chat",
            "Archive chat",
            key_display="a",
            tooltip="Archive the highlighted chat"
            " (without deleting it from Elia's database).",
        ),
        Binding("j,down", "cursor_down", "Down", show=False),
        Binding("k,up", "cursor_up", "Up", show=False),
        Binding("l,right,enter", "select", "Select", show=False),
        Binding("g,home", "first", "First", show=False),
        Binding("G,end", "last", "Last", show=False),
        Binding("pagedown", "page_down", "Page Down", show=False),
        Binding("pageup", "page_up", "Page Up", show=False),
    ]

    @dataclass
    class ChatOpened(Message):
        """Posted when the user selects a chat in the list."""

        chat: ChatData

    class CursorEscapingTop(Message):
        """Cursor attempting to move out-of-bounds at top of list."""

    class CursorEscapingBottom(Message):
        """Cursor attempting to move out-of-bounds at bottom of list."""

    async def on_mount(self) -> None:
        # Populate the list from the database as soon as the widget mounts.
        await self.reload_and_refresh()

    @on(OptionList.OptionSelected)
    async def post_chat_opened(self, event: OptionList.OptionSelected) -> None:
        """Reload the list and notify listeners that a chat was opened."""
        assert isinstance(event.option, ChatListItem)
        chat = event.option.chat
        # Refresh orderings/previews before handing off to the chat screen.
        await self.reload_and_refresh()
        self.post_message(ChatList.ChatOpened(chat=chat))

    @on(OptionList.OptionHighlighted)
    @on(events.Focus)
    def show_border_subtitle(self) -> None:
        # Show the "position / total" subtitle on focus or highlight change;
        # auto-highlight the first option if nothing is highlighted yet.
        if self.highlighted is not None:
            self.border_subtitle = self.get_border_subtitle()
        elif self.option_count > 0:
            self.highlighted = 0

    def on_blur(self) -> None:
        # Hide the position subtitle while the list is unfocused.
        self.border_subtitle = None

    async def reload_and_refresh(self, new_highlighted: int = -1) -> None:
        """Reload the chats and refresh the widget. Can be used to
        update the ordering/previews/titles etc contained in the list.

        Args:
            new_highlighted: The index to highlight after refresh.
        """
        # Cache the full list of items on the widget so other methods
        # (archiving, creation) can manipulate it.
        self.options = await self.load_chat_list_items()
        old_highlighted = self.highlighted
        self.clear_options()
        self.add_options(self.options)
        self.border_title = self.get_border_title()
        # Keep the previous highlight unless the caller requested one.
        if new_highlighted > -1:
            self.highlighted = new_highlighted
        else:
            self.highlighted = old_highlighted

        self.refresh()

    async def load_chat_list_items(self) -> list[ChatListItem]:
        """Build one `ChatListItem` per chat in the database."""
        chats = await self.load_chats()
        return [ChatListItem(chat, self.app.launch_config) for chat in chats]

    async def load_chats(self) -> list[ChatData]:
        """Fetch all (non-archived) chats via the chats manager."""
        all_chats = await ChatsManager.all_chats()
        return all_chats

    async def action_archive_chat(self) -> None:
        """Archive the highlighted chat and remove it from the list."""
        if self.highlighted is None:
            return

        item = cast(ChatListItem, self.get_option_at_index(self.highlighted))
        # Remove from both the cached list and the rendered options.
        self.options.pop(self.highlighted)
        self.remove_option_at_index(self.highlighted)

        chat_id = item.chat.id
        await ChatsManager.archive_chat(chat_id)

        self.border_title = self.get_border_title()
        self.border_subtitle = self.get_border_subtitle()
        # If the chat has a title it becomes the notification body; otherwise
        # fall back to a message mentioning the chat id.
        self.app.notify(
            item.chat.title or f"Chat [b]{chat_id!r}[/] archived.",
            title="Chat archived",
        )
        self.refresh()

    def get_border_title(self) -> str:
        """Title showing the number of chats, e.g. "History (12)"."""
        return f"History ({len(self.options)})"

    def get_border_subtitle(self) -> str:
        """Subtitle showing the 1-based cursor position, e.g. "3 / 12"."""
        if self.highlighted is None:
            return ""
        return f"{self.highlighted + 1} / {self.option_count}"

    def create_chat(self, chat_data: ChatData) -> None:
        """Prepend a newly created chat to the top of the list."""
        new_chat_list_item = ChatListItem(chat_data, self.app.launch_config)
        log.debug(f"Creating new chat {new_chat_list_item!r}")

        # NOTE(review): `ChatList` is itself an `OptionList`; `query_one`
        # searches from this node, so it looks like this could just operate
        # on `self` directly — confirm what `query_one(OptionList)` resolves
        # to here at runtime.
        option_list = self.query_one(OptionList)
        self.options = [
            new_chat_list_item,
            *self.options,
        ]
        option_list.clear_options()
        option_list.add_options(self.options)
        option_list.highlighted = 0
        self.refresh()

    def action_cursor_up(self) -> None:
        # At the top of the list, bubble a message so the screen can move
        # focus elsewhere instead of wrapping/stopping.
        if self.highlighted == 0:
            self.post_message(self.CursorEscapingTop())
        else:
            return super().action_cursor_up()
189 |
--------------------------------------------------------------------------------
/elia_chat/config.py:
--------------------------------------------------------------------------------
1 | import os
2 | from pydantic import AnyHttpUrl, BaseModel, ConfigDict, Field, SecretStr
3 |
4 |
class EliaChatModel(BaseModel):
    """A chat model Elia can talk to — either built in or user-configured."""

    name: str
    """The name of the model e.g. `gpt-3.5-turbo`.
    This must match the name of the model specified by the provider.
    """
    id: str | None = None
    """If you have multiple versions of the same model (e.g. a personal
    OpenAI gpt-3.5 and a work OpenAI gpt-3.5 with different API keys/org keys),
    you need to be able to refer to them. For example, when setting the `default_model`
    key in the config, if you write `gpt-3.5`, there's no way to know whether you
    mean your work or your personal `gpt-3.5`. That's where `id` comes in."""
    display_name: str | None = None
    """The display name of the model in the UI."""
    provider: str | None = None
    """The provider of the model, e.g. openai, anthropic, etc"""
    api_key: SecretStr | None = None
    """If set, this will be used in place of the environment variable that
    would otherwise be used for this model (instead of OPENAI_API_KEY,
    ANTHROPIC_API_KEY, etc.)."""
    api_base: AnyHttpUrl | None = None
    """If set, this will be used as the base URL for making API calls.
    This can be useful if you're hosting models on a LocalAI server, for
    example."""
    organization: str | None = None
    """Some providers, such as OpenAI, allow you to specify an organization.
    Presumably this is forwarded to the provider when making API calls —
    confirm against the code that builds chat requests (the original
    sentence here was truncated)."""
    description: str | None = Field(default=None)
    """A description of the model which may appear inside the Elia UI."""
    product: str | None = Field(default=None)
    """For example `ChatGPT`, `Claude`, `Gemini`, etc."""
    temperature: float = Field(default=1.0)
    """The temperature to use. Low temperature means the same prompt is likely
    to produce similar results. High temperature means a flatter distribution
    when predicting the next token, and so the next token will be more random.
    Setting a very high temperature will likely produce junk output."""
    max_retries: int = Field(default=0)
    """The number of times to retry a request after it fails before giving up."""

    @property
    def lookup_key(self) -> str:
        """The key used to refer to this model: its `id` if set, else `name`."""
        return self.id or self.name
46 |
47 |
def get_builtin_openai_models() -> list[EliaChatModel]:
    """Return the OpenAI models that ship with Elia."""
    # (id, name, display_name, description) — shared fields applied below.
    specs = [
        (
            "elia-gpt-3.5-turbo",
            "gpt-3.5-turbo",
            "GPT-3.5 Turbo",
            "Fast & inexpensive model for simple tasks.",
        ),
        (
            "elia-gpt-4o",
            "gpt-4o",
            "GPT-4o",
            "Fastest and most affordable flagship model.",
        ),
        (
            "elia-gpt-4-turbo",
            "gpt-4-turbo",
            "GPT-4 Turbo",
            "Previous high-intelligence model.",
        ),
    ]
    return [
        EliaChatModel(
            id=model_id,
            name=model_name,
            display_name=display_name,
            provider="OpenAI",
            product="ChatGPT",
            description=description,
            temperature=0.7,
        )
        for model_id, model_name, display_name, description in specs
    ]
78 |
79 |
def get_builtin_anthropic_models() -> list[EliaChatModel]:
    """Return the Anthropic models that ship with Elia."""
    # (name, display_name, product, description) — ids are "elia-" + name.
    specs = [
        (
            "claude-3-5-sonnet-20240620",
            "Claude 3.5 Sonnet",
            "Claude 3.5",
            "Anthropic's most intelligent model",
        ),
        (
            "claude-3-haiku-20240307",
            "Claude 3 Haiku",
            "Claude 3",
            "Fastest and most compact model for near-instant responsiveness",
        ),
        (
            "claude-3-sonnet-20240229",
            "Claude 3 Sonnet",
            "Claude 3",
            "Ideal balance of intelligence and speed for enterprise workloads",
        ),
        (
            "claude-3-opus-20240229",
            "Claude 3 Opus",
            "Claude 3",
            "Excels at writing and complex tasks",
        ),
    ]
    return [
        EliaChatModel(
            id=f"elia-{model_name}",
            name=model_name,
            display_name=display_name,
            provider="Anthropic",
            product=product,
            description=description,
        )
        for model_name, display_name, product, description in specs
    ]
119 |
120 |
def get_builtin_google_models() -> list[EliaChatModel]:
    """Return the Google Gemini models that ship with Elia."""
    # (name, display_name, description) — ids are "elia-" + name.
    specs = [
        (
            "gemini/gemini-1.5-pro-latest",
            "Gemini 1.5 Pro",
            "Excels at reasoning tasks including code and text generation, "
            "text editing, problem solving, data extraction and generation",
        ),
        (
            "gemini/gemini-1.5-flash-latest",
            "Gemini 1.5 Flash",
            "Fast and versatile performance across a variety of tasks",
        ),
    ]
    return [
        EliaChatModel(
            id=f"elia-{model_name}",
            name=model_name,
            display_name=display_name,
            provider="Google",
            product="Gemini",
            description=description,
        )
        for model_name, display_name, description in specs
    ]
141 |
142 |
def get_builtin_models() -> list[EliaChatModel]:
    """Return every builtin model, across all providers."""
    return [
        *get_builtin_openai_models(),
        *get_builtin_anthropic_models(),
        *get_builtin_google_models(),
    ]
149 |
150 |
class LaunchConfig(BaseModel):
    """The config of the application at launch.

    Values may be sourced via command line options, env vars, config files.
    """

    model_config = ConfigDict(frozen=True)

    default_model: str = Field(default="elia-gpt-4o")
    """The ID or name of the default model."""
    system_prompt: str = Field(
        # Read the env var lazily (at instantiation rather than import time)
        # so that an ELIA_SYSTEM_PROMPT set after this module is imported is
        # still honoured. The previous `default=os.getenv(...)` captured the
        # value once, when the class was defined.
        default_factory=lambda: os.getenv(
            "ELIA_SYSTEM_PROMPT", "You are a helpful assistant named Elia."
        )
    )
    """The system prompt added to the start of every conversation."""
    message_code_theme: str = Field(default="monokai")
    """The default Pygments syntax highlighting theme to be used in chatboxes."""
    models: list[EliaChatModel] = Field(default_factory=list)
    """Models declared by the user in their config file."""
    builtin_models: list[EliaChatModel] = Field(
        default_factory=get_builtin_models, init=False
    )
    """The models that ship with Elia (not settable by the user)."""
    theme: str = Field(default="nebula")
    """The name of the UI theme (builtin, or from the themes directory)."""

    @property
    def all_models(self) -> list[EliaChatModel]:
        """All available models: user-configured first, then the builtins."""
        return self.models + self.builtin_models

    @property
    def default_model_object(self) -> EliaChatModel:
        """Resolve `default_model` (an ID or name) to its `EliaChatModel`."""
        # Imported here to avoid a circular import with elia_chat.models.
        from elia_chat.models import get_model

        return get_model(self.default_model, self)

    @classmethod
    def get_current(cls) -> "LaunchConfig":
        """Return a config built purely from defaults and env vars."""
        return cls()
187 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/elia_chat/elia.scss:
--------------------------------------------------------------------------------
/* Theme-derived aliases: map Textual's $primary/$accent palette onto the
   $main-* variables used throughout this stylesheet. */
$main-lighten-2: $primary-lighten-2;
$main-lighten-1: $primary-lighten-1;
$main: $primary;
$main-darken-1: $primary-darken-1;
$main-darken-2: $primary-darken-2;
$main-border-text-color: $accent;
$main-border-color: $main-lighten-1 50%;
$main-border-color-focus: $main-lighten-1 100%;

$left-border-trim: vkey $main-lighten-2 15%;

/* Global defaults for every widget: scrollbar and link styling. */
* {
  scrollbar-color: $accent 30%;
  scrollbar-color-hover: $accent 50%;
  scrollbar-color-active: $accent 80%;
  scrollbar-background: $surface-darken-1;
  scrollbar-background-hover: $surface-darken-1;
  scrollbar-background-active: $surface-darken-1;
  scrollbar-size-vertical: 1;
  link-style: none;
  link-color-hover: $accent;
  link-background-hover: $main 0%;
  link-style-hover: u not dim bold;
}

Tabs .underline--bar {
  color: $main-lighten-2 50%;
  background: $main 75%;
}

/* Screen chrome; :inline variants apply when Elia runs inline in the
   terminal rather than full-screen. */
Screen {
  background: $background;
  padding: 0 2 1 2;
  &:inline {
    height: 80vh;
    padding: 0 2;
  }
}

ModalScreen {
  background: black 50%;
  padding: 0;
  &:inline {
    padding: 0;
    border: none;
  }
  & Footer {
    margin: 0 2 1 2;
  }
}

Rule {
  color: white 20%;

  &.-horizontal {
    margin: 0 0;
  }
}

/* Notifications and tooltips. */
Toast {
  background: $background-darken-1;
  border-right: none;
  &.-information {
    border-left: outer $main-lighten-1;
    & .toast--title {
      color: $main-lighten-2;
    }
  }

  &.-error {
    border-left: outer $main-lighten-1;
    & .toast--title {
      color: $main-lighten-2;
    }
  }
}

Tooltip {
  background: $background-darken-1;
  border: wide $main-border-color;
}
82 |
83 |
/* A single chat view: header, prompt input, and floating response status. */
Chat {
  ChatHeader {
    width: 100%;
    height: auto;
    padding: 1 2;
    background: $background;
    &:inline {
      padding: 0 2 1 2;
    }

    & #model-static {
      color: $text-muted;
    }
  }
  ChatPromptInput {
    height: auto;
    max-height: 50%;
    padding: 0 1;
    background: $background;
    &.-submit-blocked {
      border: round $error 50%;
    }
  }

  /* "Agent is typing" indicator docked at the top of the chat. */
  ResponseStatus {
    dock: top;
    align-horizontal: right;
    display: none;
    layer: overlay;
    height: 2;
    width: auto;
    margin-top: 1;
    margin-right: 2;
    & Label {
      width: auto;
    }
    & LoadingIndicator {
      width: auto;
      color: $primary;
      height: 1;
      margin-top: 1;
      dock: right;
    }
    &.-awaiting-response {
      LoadingIndicator {
        color: $primary;
      }
    }
    &.-agent-responding {
      LoadingIndicator {
        color: $secondary;
      }
    }
  }
}

/* Shared TextArea styling, including the "selection mode" used when
   selecting message text for copying. */
TextArea {
  padding: 1 2;
  border: round $main-border-color;
  border-title-color: $main-border-text-color;
  border-subtitle-color: $main-border-text-color;
  &:focus {
    border: round $main-border-color-focus;
    border-title-style: bold;
    & .text-area--selection {
      background: $main 60%;
    }
  }
  & .text-area--cursor-line {
    background: $background 0%;
  }

  & .text-area--cursor-gutter {
    background: $background 0%;
  }
  &:focus .text-area--cursor-gutter {
    color: yellowgreen;
    background: $background 0%;
  }
  & .text-area--selection {
    background: $main-lighten-2 25%;
  }

  &.selection-mode {
    border: none;
    height: auto;
    max-height: 75vh;
    width: 100%;
    padding: 0;
  }
}

PromptInput.multiline .text-area--cursor-line {
  background: white 3%;
}
179 |
/* Home-screen prompt grows with its content. */
HomeScreen PromptInput {
  height: auto;
  max-height: 65%;

  & TextArea {
    height: auto;
    max-height: 100%;
  }
}

AppHeader #cl-header-container {
  height: auto;
  background: $background;
}

/* Application header: Elia title plus current model label. */
AppHeader {
  dock: top;
  width: 1fr;
  padding: 1 2;
  height: auto;
  background: $background;

  &:inline {
    padding: 0 2 1 2;
  }

  & #elia-title {
    color: $accent;
    text-style: bold;
  }

  & Horizontal {
    height: auto;
  }

  & #model-label {
    border: none;
    background: $main-darken-2 0%;
    text-style: dim;
  }
}

/* Home screen: chat-history list and the welcome banner shown when the
   list is empty. */
HomeScreen {
  & ChatList {
    padding: 0;
    height: 1fr;
    width: 1fr;
    border: round $main-border-color;
    border-title-color: $main-border-text-color;
    border-subtitle-color: $main-border-text-color;

    &:focus {
      border: round $main-border-color-focus;
      border-title-style: bold;
      border-subtitle-color: $main-border-text-color;
    }

    &.-empty {
      hatch: right $background-lighten-1;
    }
  }

  & Welcome {
    display: none;
    border: round $main-border-color;
    border-title-color: $main-border-text-color;
    border-title-align: center;
    border-title-style: bold;
    padding: 0 1;
    text-align: center;
  }
}

/* A single message bubble; assistant and human messages are styled
   differently, with extra styling while a response streams in. */
Chatbox {
  height: auto;
  width: auto;
  min-width: 12;
  max-width: 1fr;
  margin: 0 1;
  padding: 0 2;

  &.assistant-message.response-in-progress {
    background: $accent 3%;
    min-width: 30%;
  }

  &.assistant-message {
    width: 1fr;
    border: round $accent 60%;
    &:focus-within {
      border: round $accent;
      border-left: thick $accent 50%;
    }

    & TextArea {
      & .text-area--selection {
        background: $accent 23%;
        color: white 93%;
      }
    }

    & SelectionTextArea.visual-mode {
      & .text-area--cursor {
        background: $accent;
      }
    }

  }

  &.human-message {
    border: round $main-border-color;
    &:focus-within {
      border: round $main-border-color-focus;
      border-left: thick $main-border-color-focus;
    }
    & SelectionTextArea.visual-mode {
      & .text-area--cursor {
        background: $main-lighten-2;
      }
    }
    & TextArea {
      width: 1fr;
    }
  }

}
306 |
/* Transparent footer so the screen background shows through. */
Footer {
  background: transparent;

  & FooterKey {
    background: transparent;
  }

  .footer-key--key {
    color: $accent;
    background: transparent;
  }
}

/* OptionList highlight/hover/disabled states. */
OptionList {
  background: $background 15%;
}

OptionList > .option-list--option-highlighted {
  background: $main 15%;
  color: $text;
  text-style: none;
}

OptionList:focus > .option-list--option-highlighted {
  background: $main-darken-2 80%;
  text-style: none;
}

OptionList > .option-list--option-disabled {
  color: $text-disabled;
}

OptionList > .option-list--option-hover {
  background: $boost;
}

OptionList > .option-list--option-hover-disabled {
  color: $text-disabled;
  background: $boost;
}

OptionList > .option-list--option-hover-highlighted {
  background: $main-lighten-1 40%;
  color: $text;
  text-style: bold;
}

OptionList:focus > .option-list--option-hover-highlighted {
  background: $main-lighten-1 50%;
  color: $text;
  text-style: bold;
}

OptionList > .option-list--option-hover-highlighted-disabled {
  color: $text-disabled;
  background: $main 60%;
}

/* Inline chat-rename input. */
RenameChat {
  & > Vertical {
    background: $background 0%;
    height: auto;
    & Input {
      padding: 0 4;
      border: none;
      border-bottom: hkey $main-border-color;
      border-top: hkey $main-border-color;
      border-subtitle-color: $main-border-text-color;
      border-subtitle-background: $background;
    }
  }

}

/* Modal showing metadata about the current chat. */
ChatDetails {
  align: center middle;
  & > #container {
    width: 90%;
    height: 85%;
    background: $background;
    padding: 1 2;
    border: wide $main-border-color-focus;
    border-title-color: $main-border-text-color;
    border-title-background: $background;
    border-title-style: b;
    border-subtitle-color: $text-muted;
    border-subtitle-background: $background;

    & Markdown {
      padding: 0;
      margin: 0;
    }

    & .heading {
      color: $text-muted;
    }

    & .datum {
      text-style: i;
    }

  }
}


/* Message-inspection modal: tabbed header, content switcher, and a footer
   carrying the timestamp and token count. */
MessageInfo #message-info-header {
  dock: top;
  width: 1fr;
  height: auto;
  background: $main-darken-2 70%;
  color: $text;
}

MessageInfo #message-info-header Tab {
  width: 1fr;
}

MessageInfo #message-info-footer {
  dock: bottom;
  height: 3;
  padding: 1 2;
  background: $main-darken-1;
  color: $text;
  width: 1fr;
}

MessageInfo #timestamp {
  dock: left;
  height: 3;
  width: auto;
}

MessageInfo #token-count {
  dock: right;
  height: 3;
  width: auto;
}

Tabs:focus .underline--bar {
  color: $text 35%;
}

MessageInfo #inner-container ContentSwitcher {
  height: auto;
  padding: 1 2;
}

MessageInfo #inner-container #markdown-content {
  height: 1fr;
}

MessageInfo #inner-container #metadata {
  height: auto;
}
461 |
/* Options modal: model picker and configuration form. */
OptionsModal {
  align: center middle;
}

OptionsModal #form-scrollable {
  background: $background;
  padding: 1 0 0 1;
  scrollbar-gutter: stable;
  scrollbar-size: 2 1;
  width: 75%;
  height: auto;
  max-height: 90%;
  border: wide $main-darken-2 80%;
  border-title-color: $main-border-text-color;
  border-title-background: $background;
  & #available-models {
    width: 100%;
    border: round $main-border-color;
    border-title-color: $main-border-text-color;
    background: red 0%;
    padding: 1 0;

    &:focus {
      border: round $main-border-color-focus;
      border-title-style: bold;
    }
  }

  & TextArea {
    height: auto;
    max-height: 12;
    border-subtitle-color: $main-border-color;
    &:focus {
      border-subtitle-color: $main-border-color-focus;
    }
  }


  #xdg-info {
    padding: 1 2;
    border: round $main-border-color;
    height: auto;
    border-title-color: $main-border-text-color;
  }

}

/* Inline code spans inside rendered markdown. */
.code_inline {
  text-style: none;
  color: $accent;
}

RadioSet:focus > RadioButton.-on {
  & > .toggle--label {
    text-style: bold not dim;
    color: $accent;
  }
}

RadioSet > RadioButton.-on {
  text-style: bold not dim;
  color: $accent;
}

RadioButton .toggle--button {
  color: $accent 80%;
  background: $background-lighten-1;
}

/* Help modal with scrollable markdown content. */
HelpScreen {
  width:1fr;
  height: auto;
  align: center middle;

  & #help-scroll-keys-info {
    dock: bottom;
    border-top: solid $main 50%;
    text-style: dim;
    padding: 0 2;
    background: $background 0%;
  }
  & > Vertical {
    border: wide $main-border-color 80%;
    border-title-color: $main-border-text-color;
    border-title-background: $background;
    border-title-style: bold;
    width: 80%;
    height: 90%;
    background: $background;
  }

  & VerticalScroll {
    scrollbar-gutter: stable;
  }

  & Markdown {
    margin: 0;
    padding: 0 2;

    & MarkdownH3 {
      background: $background 0%;
      margin: 0;
      margin-top: 1;
      padding: 0;
    }
    & MarkdownBlockQuote {
      background: $boost;
      border-left: outer $main;
      padding: 1;
    }
    & MarkdownBulletList {
      margin-left: 1;
    }
  }
}
577 |
--------------------------------------------------------------------------------
/elia_chat/widgets/chat.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import datetime
4 | from dataclasses import dataclass
5 | from typing import TYPE_CHECKING, cast
6 |
7 | from textual.widgets import Label
8 |
9 | from elia_chat import constants
10 | from textual import log, on, work, events
11 | from textual.app import ComposeResult
12 | from textual.binding import Binding
13 | from textual.containers import VerticalScroll
14 | from textual.css.query import NoMatches
15 | from textual.message import Message
16 | from textual.reactive import reactive
17 | from textual.widget import Widget
18 |
19 | from elia_chat.chats_manager import ChatsManager
20 | from elia_chat.models import ChatData, ChatMessage
21 | from elia_chat.screens.chat_details import ChatDetails
22 | from elia_chat.widgets.agent_is_typing import ResponseStatus
23 | from elia_chat.widgets.chat_header import ChatHeader, TitleStatic
24 | from elia_chat.widgets.prompt_input import PromptInput
25 | from elia_chat.widgets.chatbox import Chatbox
26 |
27 |
28 | if TYPE_CHECKING:
29 | from elia_chat.app import Elia
30 | from litellm.types.completion import (
31 | ChatCompletionUserMessageParam,
32 | ChatCompletionAssistantMessageParam,
33 | )
34 |
35 |
class ChatPromptInput(PromptInput):
    """Prompt input used inside a chat; pressing escape pops the screen."""

    BINDINGS = [Binding("escape", "app.pop_screen", "Close chat", key_display="esc")]
38 |
39 |
40 | class Chat(Widget):
    # Key bindings local to the chat view; the hidden ones (show=False) are
    # vim-style navigation helpers.
    BINDINGS = [
        Binding("ctrl+r", "rename", "Rename", key_display="^r"),
        Binding("shift+down", "scroll_container_down", show=False),
        Binding("shift+up", "scroll_container_up", show=False),
        Binding(
            key="g",
            action="focus_first_message",
            description="First message",
            key_display="g",
            show=False,
        ),
        Binding(
            key="G",
            action="focus_latest_message",
            description="Latest message",
            show=False,
        ),
        Binding(key="f2", action="details", description="Chat info"),
    ]
60 |
    # NOTE(review): nothing in this file ever sets this flag to False;
    # ChatPromptInput.submit_ready appears to do the actual locking — confirm
    # whether this reactive is still needed.
    allow_input_submit = reactive(True)
    """Used to lock the chat input while the agent is responding."""
63 |
    def __init__(self, chat_data: ChatData) -> None:
        """Create a chat widget bound to an existing ``ChatData`` record."""
        super().__init__()
        self.chat_data = chat_data
        # Narrow the running app to Elia for type-checking purposes.
        self.elia = cast("Elia", self.app)
        self.model = chat_data.model
69 |
    @dataclass
    class AgentResponseStarted(Message):
        """Posted when the streaming request is in flight."""

        pass

    @dataclass
    class AgentResponseComplete(Message):
        """Posted when the agent's streamed response finished successfully."""

        chat_id: int | None
        message: ChatMessage
        chatbox: Chatbox

    @dataclass
    class AgentResponseFailed(Message):
        """Sent when the agent fails to respond (e.g. can't connect).
        Can be used to reset UI state."""

        last_message: ChatMessage

    @dataclass
    class NewUserMessage(Message):
        """Posted when the user submits a new prompt."""

        content: str
90 |
    def compose(self) -> ComposeResult:
        """Build the chat layout: status, header, message scroll, prompt."""
        yield ResponseStatus()
        yield ChatHeader(chat=self.chat_data, model=self.model)

        # The scroll container itself must not take focus; the chatboxes
        # inside it are focusable instead.
        with VerticalScroll(id="chat-container") as vertical_scroll:
            vertical_scroll.can_focus = False

        yield ChatPromptInput(id="prompt")
99 |
    async def on_mount(self, _: events.Mount) -> None:
        """Load the bound chat's data into the view once mounted.

        ``load_chat`` is defined later in this class (beyond this excerpt).
        """
        await self.load_chat(self.chat_data)
105 |
    @property
    def chat_container(self) -> VerticalScroll:
        """The scrollable container holding the message chatboxes.

        Raises ``NoMatches`` if queried before ``compose`` has run.
        """
        return self.query_one("#chat-container", VerticalScroll)
109 |
    @property
    def is_empty(self) -> bool:
        """True if the conversation is empty, False otherwise."""
        # A brand-new chat already holds exactly one message: the system prompt.
        return len(self.chat_data.messages) == 1  # Contains system message at first.
114 |
    def scroll_to_latest_message(self) -> None:
        """Jump the message container to the bottom without animating."""
        container = self.chat_container
        # Refreshed before scrolling — presumably so layout reflects a newly
        # mounted chatbox; confirm whether this is still required.
        container.refresh()
        container.scroll_end(animate=False, force=True)
119 |
    @on(AgentResponseFailed)
    def restore_state_on_agent_failure(self, event: Chat.AgentResponseFailed) -> None:
        """Put the failed prompt's text back into the input so it isn't lost."""
        original_prompt = event.last_message.message.get("content", "")
        # Only plain-string content can be restored into the text input.
        # NOTE(review): submit_ready is not reset here; it is only set True in
        # agent_finished_responding — confirm the prompt isn't left blocked
        # after a failure.
        if isinstance(original_prompt, str):
            self.query_one(ChatPromptInput).text = original_prompt
125 |
    async def new_user_message(self, content: str) -> None:
        """Append the user's message to the chat, persist it, and start the
        agent's streaming response.

        Args:
            content: The raw prompt text the user submitted.
        """
        log.debug(f"User message submitted in chat {self.chat_data.id!r}: {content!r}")

        now_utc = datetime.datetime.now(datetime.timezone.utc)
        user_message: ChatCompletionUserMessageParam = {
            "content": content,
            "role": "user",
        }

        user_chat_message = ChatMessage(user_message, now_utc, self.chat_data.model)
        self.chat_data.messages.append(user_chat_message)
        user_message_chatbox = Chatbox(user_chat_message, self.chat_data.model)

        assert (
            self.chat_container is not None
        ), "Textual has mounted container at this point in the lifecycle."

        await self.chat_container.mount(user_message_chatbox)

        self.scroll_to_latest_message()
        # Notify listeners that a new user message was added.
        self.post_message(self.NewUserMessage(content))

        # Persist before streaming: if the agent then fails, the user's
        # message is already stored.
        await ChatsManager.add_message_to_chat(
            chat_id=self.chat_data.id, message=user_chat_message
        )

        # Block further submissions until the agent finishes responding.
        prompt = self.query_one(ChatPromptInput)
        prompt.submit_ready = False
        self.stream_agent_response()
155 |
156 | @work(thread=True, group="agent_response")
157 | async def stream_agent_response(self) -> None:
158 | model = self.chat_data.model
159 | log.debug(f"Creating streaming response with model {model.name!r}")
160 |
161 | import litellm
162 | from litellm import ModelResponse, acompletion
163 | from litellm.utils import trim_messages
164 |
165 | raw_messages = [message.message for message in self.chat_data.messages]
166 |
167 | messages: list[ChatCompletionUserMessageParam] = trim_messages(
168 | raw_messages, model.name
169 | ) # type: ignore
170 |
171 | litellm.organization = model.organization
172 | try:
173 | response = await acompletion(
174 | messages=messages,
175 | stream=True,
176 | model=model.name,
177 | temperature=model.temperature,
178 | max_retries=model.max_retries,
179 | api_key=model.api_key.get_secret_value() if model.api_key else None,
180 | api_base=model.api_base.unicode_string() if model.api_base else None,
181 | )
182 | except Exception as exception:
183 | self.app.notify(
184 | f"{exception}",
185 | title="Error",
186 | severity="error",
187 | timeout=constants.ERROR_NOTIFY_TIMEOUT_SECS,
188 | )
189 | self.post_message(self.AgentResponseFailed(self.chat_data.messages[-1]))
190 | return
191 |
192 | ai_message: ChatCompletionAssistantMessageParam = {
193 | "content": "",
194 | "role": "assistant",
195 | }
196 | now = datetime.datetime.now(datetime.timezone.utc)
197 |
198 | message = ChatMessage(message=ai_message, model=model, timestamp=now)
199 | response_chatbox = Chatbox(
200 | message=message,
201 | model=self.chat_data.model,
202 | classes="response-in-progress",
203 | )
204 | self.post_message(self.AgentResponseStarted())
205 | self.app.call_from_thread(self.chat_container.mount, response_chatbox)
206 |
207 | assert (
208 | self.chat_container is not None
209 | ), "Textual has mounted container at this point in the lifecycle."
210 |
211 | try:
212 | chunk_count = 0
213 | async for chunk in response:
214 | chunk = cast(ModelResponse, chunk)
215 | response_chatbox.border_title = "Agent is responding..."
216 |
217 | chunk_content = chunk.choices[0].delta.content
218 | if isinstance(chunk_content, str):
219 | self.app.call_from_thread(
220 | response_chatbox.append_chunk, chunk_content
221 | )
222 | else:
223 | break
224 |
225 | scroll_y = self.chat_container.scroll_y
226 | max_scroll_y = self.chat_container.max_scroll_y
227 | if scroll_y in range(max_scroll_y - 3, max_scroll_y + 1):
228 | self.app.call_from_thread(
229 | self.chat_container.scroll_end, animate=False
230 | )
231 |
232 | chunk_count += 1
233 | except Exception:
234 | self.notify(
235 | "There was a problem using this model. "
236 | "Please check your configuration file.",
237 | title="Error",
238 | severity="error",
239 | timeout=constants.ERROR_NOTIFY_TIMEOUT_SECS,
240 | )
241 | self.post_message(self.AgentResponseFailed(self.chat_data.messages[-1]))
242 | else:
243 | self.post_message(
244 | self.AgentResponseComplete(
245 | chat_id=self.chat_data.id,
246 | message=response_chatbox.message,
247 | chatbox=response_chatbox,
248 | )
249 | )
250 |
251 | @on(AgentResponseFailed)
252 | @on(AgentResponseStarted)
253 | async def agent_started_responding(
254 | self, event: AgentResponseFailed | AgentResponseStarted
255 | ) -> None:
256 | try:
257 | awaiting_reply = self.chat_container.query_one("#awaiting-reply", Label)
258 | except NoMatches:
259 | pass
260 | else:
261 | if awaiting_reply:
262 | await awaiting_reply.remove()
263 |
    @on(AgentResponseComplete)
    def agent_finished_responding(self, event: AgentResponseComplete) -> None:
        """Finalise the response chatbox and unlock the prompt."""
        # Ensure the thread is updated with the message from the agent
        self.chat_data.messages.append(event.message)
        event.chatbox.border_title = "Agent"
        event.chatbox.remove_class("response-in-progress")
        prompt = self.query_one(ChatPromptInput)
        # Re-enable submission, which was blocked when the prompt was sent.
        prompt.submit_ready = True
272 |
273 | @on(PromptInput.PromptSubmitted)
274 | async def user_chat_message_submitted(
275 | self, event: PromptInput.PromptSubmitted
276 | ) -> None:
277 | if self.allow_input_submit is True:
278 | user_message = event.text
279 | await self.new_user_message(user_message)
280 |
281 | @on(PromptInput.CursorEscapingTop)
282 | async def on_cursor_up_from_prompt(
283 | self, event: PromptInput.CursorEscapingTop
284 | ) -> None:
285 | self.focus_latest_message()
286 |
287 | @on(Chatbox.CursorEscapingBottom)
288 | def move_focus_to_prompt(self) -> None:
289 | self.query_one(ChatPromptInput).focus()
290 |
291 | @on(TitleStatic.ChatRenamed)
292 | async def handle_chat_rename(self, event: TitleStatic.ChatRenamed) -> None:
293 | if event.chat_id == self.chat_data.id and event.new_title:
294 | self.chat_data.title = event.new_title
295 | header = self.query_one(ChatHeader)
296 | header.update_header(self.chat_data, self.model)
297 | await ChatsManager.rename_chat(event.chat_id, event.new_title)
298 |
299 | def get_latest_chatbox(self) -> Chatbox:
300 | return self.query(Chatbox).last()
301 |
302 | def focus_latest_message(self) -> None:
303 | try:
304 | self.get_latest_chatbox().focus()
305 | except NoMatches:
306 | pass
307 |
308 | def action_rename(self) -> None:
309 | title_static = self.query_one(TitleStatic)
310 | title_static.begin_rename()
311 |
312 | def action_focus_latest_message(self) -> None:
313 | self.focus_latest_message()
314 |
315 | def action_focus_first_message(self) -> None:
316 | try:
317 | self.query(Chatbox).first().focus()
318 | except NoMatches:
319 | pass
320 |
321 | def action_scroll_container_up(self) -> None:
322 | if self.chat_container:
323 | self.chat_container.scroll_up()
324 |
325 | def action_scroll_container_down(self) -> None:
326 | if self.chat_container:
327 | self.chat_container.scroll_down()
328 |
329 | async def action_details(self) -> None:
330 | await self.app.push_screen(ChatDetails(self.chat_data))
331 |
332 | async def load_chat(self, chat_data: ChatData) -> None:
333 | chatboxes = [
334 | Chatbox(chat_message, chat_data.model)
335 | for chat_message in chat_data.non_system_messages
336 | ]
337 | await self.chat_container.mount_all(chatboxes)
338 | self.chat_container.scroll_end(animate=False, force=True)
339 | chat_header = self.query_one(ChatHeader)
340 | chat_header.update_header(
341 | chat=chat_data,
342 | model=chat_data.model,
343 | )
344 |
345 | # If the last message didn't receive a response, try again.
346 | messages = chat_data.messages
347 | if messages and messages[-1].message["role"] == "user":
348 | prompt = self.query_one(ChatPromptInput)
349 | prompt.submit_ready = False
350 | self.stream_agent_response()
351 |
352 | def action_close(self) -> None:
353 | self.app.clear_notifications()
354 | self.app.pop_screen()
355 |
--------------------------------------------------------------------------------
/elia_chat/widgets/chatbox.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | import bisect
3 | from dataclasses import dataclass
4 |
5 | from rich.cells import cell_len
6 | from rich.console import RenderableType
7 | from rich.markdown import Markdown
8 | from rich.syntax import Syntax
9 | from textual import on
10 | from textual.binding import Binding
11 | from textual.css.query import NoMatches
12 | from textual.geometry import Size
13 | from textual.message import Message
14 | from textual.reactive import reactive
15 | from textual.widget import Widget
16 | from textual.widgets import TextArea
17 | from textual.widgets.text_area import Selection
18 | from textual.document._syntax_aware_document import SyntaxAwareDocumentError
19 |
20 | from elia_chat.config import EliaChatModel
21 | from elia_chat.models import ChatMessage
22 |
23 |
class SelectionTextArea(TextArea):
    """Read-only text area for navigating and copying a chat message.

    Layers vim-style cursor motions, a toggleable visual select mode, and
    fenced-code-block navigation on top of the stock ``TextArea``.
    """

    class LeaveSelectionMode(Message):
        """Broadcast that the user wants to leave selection mode."""

    @dataclass
    class VisualModeToggled(Message):
        """Sent when we enter/leave visual select mode."""

        enabled: bool

    BINDINGS = [
        Binding(
            "escape",
            "leave_selection_mode",
            description="Exit selection mode",
            key_display="esc",
        ),
        Binding(
            "v",
            "toggle_visual_mode",
            description="Toggle visual select",
            key_display="v",
        ),
        Binding("up,k", "cursor_up", "Cursor Up", show=False),
        Binding("down,j", "cursor_down", "Cursor Down", show=False),
        Binding("right,l", "cursor_right", "Cursor Right", show=False),
        Binding("left,h", "cursor_left", "Cursor Left", show=False),
        Binding("shift+up,K", "cursor_up(True)", "cursor up select", show=False),
        Binding("shift+down,J", "cursor_down(True)", "cursor down select", show=False),
        Binding("shift+left,H", "cursor_left(True)", "cursor left select", show=False),
        Binding(
            "shift+right,L", "cursor_right(True)", "cursor right select", show=False
        ),
        Binding("ctrl+left,b", "cursor_word_left", "cursor word left", show=False),
        Binding("ctrl+right,w", "cursor_word_right", "cursor word right", show=False),
        Binding(
            "home,ctrl+a,0,^", "cursor_line_start", "cursor line start", show=False
        ),
        Binding("end,ctrl+e,$", "cursor_line_end", "cursor line end", show=False),
        Binding("pageup,ctrl+b", "cursor_page_up", "cursor page up", show=False),
        Binding("pagedown,ctrl+f", "cursor_page_down", "cursor page down", show=False),
        Binding("ctrl+d", "cursor_half_page_down", "cursor half page down", show=False),
        Binding("ctrl+u", "cursor_half_page_up", "cursor half page up", show=False),
        Binding(
            "ctrl+shift+left,B",
            "cursor_word_left(True)",
            "cursor left word select",
            show=False,
        ),
        Binding(
            "ctrl+shift+right,W",
            "cursor_word_right(True)",
            "cursor right word select",
            show=False,
        ),
        Binding("f6,V", "select_line", "select line", show=False),
        Binding(
            "y,c",
            "copy_to_clipboard",
            description="Copy selection",
            show=False,
        ),
        Binding("g", "cursor_top", "Go to top", show=False),
        Binding("G", "cursor_bottom", "Go to bottom", show=False),
        Binding("u", "next_code_block", description="Next code block", key_display="u"),
    ]

    # While True, plain cursor motions extend the selection (vim visual mode).
    visual_mode = reactive(False, init=False)

    def action_toggle_visual_mode(self) -> None:
        """Flip visual select mode on or off."""
        self.visual_mode = not self.visual_mode

    def watch_visual_mode(self, value: bool) -> None:
        """React to visual mode changes: notify listeners and update UI state."""
        self.post_message(self.VisualModeToggled(value))
        # A blinking cursor is distracting while a selection is highlighted.
        self.cursor_blink = not value

        if not value:
            # Collapse any selection down to the cursor on exit.
            self.selection = Selection.cursor(self.selection.end)

        self.set_class(value, "visual-mode")

    def action_cursor_up(self, select: bool = False) -> None:
        """Move up; extends the selection in visual mode or when *select*."""
        return super().action_cursor_up(self.visual_mode or select)

    def action_cursor_right(self, select: bool = False) -> None:
        """Move right; extends the selection in visual mode or when *select*."""
        return super().action_cursor_right(self.visual_mode or select)

    def action_cursor_down(self, select: bool = False) -> None:
        """Move down; extends the selection in visual mode or when *select*."""
        return super().action_cursor_down(self.visual_mode or select)

    def action_cursor_left(self, select: bool = False) -> None:
        """Move left; extends the selection in visual mode or when *select*."""
        return super().action_cursor_left(self.visual_mode or select)

    def action_cursor_line_end(self, select: bool = False) -> None:
        """Move to line end; selecting in visual mode or when *select*."""
        return super().action_cursor_line_end(self.visual_mode or select)

    def action_cursor_line_start(self, select: bool = False) -> None:
        """Move to line start; selecting in visual mode or when *select*."""
        return super().action_cursor_line_start(self.visual_mode or select)

    def action_cursor_word_left(self, select: bool = False) -> None:
        """Move one word left; selecting in visual mode or when *select*."""
        return super().action_cursor_word_left(self.visual_mode or select)

    def action_cursor_word_right(self, select: bool = False) -> None:
        """Move one word right; selecting in visual mode or when *select*."""
        return super().action_cursor_word_right(self.visual_mode or select)

    def action_cursor_top(self) -> None:
        """Jump the cursor to the start of the document."""
        self.selection = Selection.cursor((0, 0))

    def action_cursor_bottom(self) -> None:
        """Jump the cursor to the start of the last line."""
        self.selection = Selection.cursor((self.document.line_count - 1, 0))

    def action_copy_to_clipboard(self) -> None:
        """Copy the selection (or the whole message when nothing is
        selected) to the system clipboard, then leave visual mode."""
        text_to_copy = self.selected_text

        if text_to_copy:
            message = f"Copied {len(text_to_copy)} characters to clipboard."
            title = "Selection copied"
        else:
            text_to_copy = self.text
            message = f"Copied message ({len(text_to_copy)} characters)."
            title = "Message copied"

        # Import before the try block: previously the import lived inside
        # the try, so a missing pyperclip raised NameError while evaluating
        # `except pyperclip.PyperclipException` instead of a clear error.
        import pyperclip

        try:
            pyperclip.copy(text_to_copy)
        except pyperclip.PyperclipException as exc:
            self.notify(
                str(exc),
                title="Clipboard error",
                severity="error",
                timeout=10,
            )
        else:
            self.notify(message, title=title)

        self.visual_mode = False

    def action_next_code_block(self) -> None:
        """Select the next fenced code block after the cursor, wrapping
        around to the first one at the end of the document.

        Requires a tree-sitter-backed document; otherwise an error is shown.
        """
        try:
            query = self.document.prepare_query(
                "(fenced_code_block (code_fence_content) @code_block)"
            )
        except SyntaxAwareDocumentError:
            self.app.notify(
                "This feature requires tree-sitter, which isn't installed.",
                severity="error",
            )
        else:
            if query:
                code_block_nodes = self.document.query_syntax_tree(query)
                locations: list[tuple[tuple[int, int], tuple[int, int]]] = [
                    (node.start_point, node.end_point)
                    for (node, _name) in code_block_nodes
                ]
                if not locations:
                    return
                self.visual_mode = True
                # Find the first block whose end lies at or after the line
                # below the cursor; the modulo wraps the search around.
                end_locations = [end for _start, end in locations]
                cursor_row, _cursor_column = self.cursor_location
                search_start_location = cursor_row + 1, 0
                insertion_index = bisect.bisect_left(
                    end_locations, search_start_location
                )
                insertion_index %= len(end_locations)
                start, end = locations[insertion_index]
                self.selection = Selection(start, end)

    def action_leave_selection_mode(self) -> None:
        """Ask the enclosing widget to dismiss this selection text area."""
        self.post_message(self.LeaveSelectionMode())

    def action_cursor_half_page_down(self) -> None:
        """Move the cursor and scroll down half of a page."""
        half_height = self.content_size.height // 2
        _, cursor_location = self.selection
        target = self.navigator.get_location_at_y_offset(
            cursor_location,
            half_height,
        )
        self.scroll_relative(y=half_height, animate=False)
        self.move_cursor(target)

    def action_cursor_half_page_up(self) -> None:
        """Move the cursor and scroll up half of a page."""
        # (Docstring fixed: the original said "down" for both directions.)
        half_height = self.content_size.height // 2
        _, cursor_location = self.selection
        target = self.navigator.get_location_at_y_offset(
            cursor_location,
            -half_height,
        )
        self.scroll_relative(y=-half_height, animate=False)
        self.move_cursor(target)
216 |
217 |
class Chatbox(Widget, can_focus=True):
    """Renders a single chat message (user or assistant) and provides a
    selection mode for copying parts of it."""

    BINDINGS = [
        Binding(key="up,k", action="up", description="Up", show=False),
        Binding(key="down,j", action="down", description="Down", show=False),
        Binding(key="enter", action="select", description="Toggle select mode"),
        Binding(
            key="y,c",
            action="copy_to_clipboard",
            description="Copy full message",
            key_display="y",
        ),
        Binding(
            key="escape",
            action="screen.focus('prompt')",
            description="Focus prompt",
            key_display="esc",
        ),
    ]

    class CursorEscapingBottom(Message):
        """Sent when the cursor moves down from the bottom message."""

    # True while an embedded SelectionTextArea is mounted for text selection.
    selection_mode = reactive(False, init=False)

    def __init__(
        self,
        message: ChatMessage,
        model: EliaChatModel,
        name: str | None = None,
        id: str | None = None,
        classes: str | None = None,
        disabled: bool = False,
    ) -> None:
        """Create a chatbox for *message*, produced by/addressed to *model*.

        The remaining parameters are forwarded to ``Widget``.
        """
        super().__init__(
            name=name,
            id=id,
            classes=classes,
            disabled=disabled,
        )
        self.message = message
        self.model = model

    def on_mount(self) -> None:
        """Style the widget according to the message author's role."""
        litellm_message = self.message.message
        role = litellm_message["role"]
        if role == "assistant":
            self.add_class("assistant-message")
            self.border_title = "Agent"
        else:
            self.add_class("human-message")
            self.border_title = "You"

    def action_up(self) -> None:
        """Move focus to the previous (older) message."""
        self.screen.focus_previous(Chatbox)

    def action_down(self) -> None:
        """Move focus to the next message, or signal that the cursor left
        the bottom of the conversation so the prompt can take focus."""
        if self.parent and self is self.parent.children[-1]:
            self.post_message(self.CursorEscapingBottom())
        else:
            self.screen.focus_next(Chatbox)

    def action_select(self) -> None:
        """Toggle selection mode for this message."""
        self.selection_mode = not self.selection_mode
        self.set_class(self.selection_mode, "selecting")

    def action_copy_to_clipboard(self) -> None:
        """Copy the full message content to the system clipboard.

        Only active outside selection mode — inside it, the embedded text
        area owns the copy binding.
        """
        if self.selection_mode:
            return

        text_to_copy = self.message.message.get("content")
        if not isinstance(text_to_copy, str):
            # Non-string content (e.g. multi-part messages) can't be copied.
            self.notify(
                "Unable to copy message", title="Clipboard error", severity="error"
            )
            return

        # Import before the try block: previously the import lived inside
        # the try, so a missing pyperclip raised NameError while evaluating
        # `except pyperclip.PyperclipException` instead of a clear error.
        import pyperclip

        try:
            pyperclip.copy(text_to_copy)
        except pyperclip.PyperclipException as exc:
            self.notify(
                str(exc),
                title="Clipboard error",
                severity="error",
                timeout=10,
            )
        else:
            message = f"Copied message ({len(text_to_copy)} characters)."
            self.notify(message, title="Message copied")

    async def watch_selection_mode(self, value: bool) -> None:
        """Mount or remove the SelectionTextArea as selection mode toggles."""
        if value:
            async with self.batch():
                self.border_subtitle = "SELECT"
                content = self.message.message.get("content")
                text_area = SelectionTextArea(
                    content if isinstance(content, str) else "",
                    read_only=True,
                    language="markdown",
                    classes="selection-mode",
                )
                await self.mount(text_area)
                text_area._rewrap_and_refresh_virtual_size()
                text_area.focus(scroll_visible=False)
        else:
            self.border_subtitle = ""
            try:
                self.query_one(SelectionTextArea)
            except NoMatches:
                # Shouldn't happen, but let's be defensive.
                self.log.warning("In selection mode, but no text area found.")
            else:
                await self.remove_children()

    @on(SelectionTextArea.LeaveSelectionMode)
    def leave_selection_mode(self) -> None:
        """Exit selection mode at the text area's request."""
        self.selection_mode = False

    def watch_has_focus(self, value: bool) -> None:
        """Delegate focus to the embedded text area while in selection mode."""
        # NOTE(review): this override does not call super().watch_has_focus;
        # confirm Textual's focus bookkeeping doesn't depend on it.
        if value:
            try:
                child = self.query_one(SelectionTextArea)
            except NoMatches:
                return None
            else:
                child.focus()

    @on(SelectionTextArea.VisualModeToggled)
    def handle_visual_select(self, event: SelectionTextArea.VisualModeToggled) -> None:
        """Reflect the text area's visual-select state in the subtitle."""
        self.border_subtitle = (
            "[reverse] VISUAL SELECT [/]" if event.enabled else "SELECT"
        )

    @property
    def markdown(self) -> Markdown:
        """Return the content as a Rich Markdown object."""
        content = self.message.message.get("content")
        if not isinstance(content, str):
            content = ""

        return Markdown(content, code_theme=self.app.launch_config.message_code_theme)

    def render(self) -> RenderableType:
        """Render the message: markdown-highlighted source for user
        messages, rendered Markdown for assistant messages."""
        if self.selection_mode:
            # When in selection mode, this widget has a SelectionTextArea child,
            # so we do not need to render anything.
            return ""

        message = self.message.message
        theme = self.app.theme_object
        # Fall back to a dark default when no theme is active.
        background_color = theme.background if theme else "#121212"

        if message["role"] == "user":
            content = message["content"] or ""
            if isinstance(content, str):
                return Syntax(
                    content,
                    lexer="markdown",
                    word_wrap=True,
                    background_color=background_color,
                )
            else:
                return ""
        return self.markdown

    def append_chunk(self, chunk: str) -> None:
        """Append a chunk of streamed text to the end of the message."""
        content = self.message.message.get("content")
        if isinstance(content, str):
            self.message.message["content"] = content + chunk
        elif content is None:
            # Assistant messages may start with content=None; previously the
            # chunk was silently dropped in that case.
            self.message.message["content"] = chunk
        else:
            # Non-string structured content: leave it untouched.
            return
        self.refresh(layout=True)
391 |
--------------------------------------------------------------------------------