├── .github
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── 10_bug_report.md
│   │   └── 20_feature_request.md
│   └── workflows
│       ├── build-docs.yml
│       └── build-publish.yml
├── .gitignore
├── .pre-commit-config.yaml
├── CHANGES.txt
├── LICENSE
├── README.md
├── docs
│   ├── app_config.md
│   ├── commands.md
│   ├── development
│   │   └── index.md
│   ├── img
│   │   ├── chat.png
│   │   ├── customizations.png
│   │   ├── image_selection.png
│   │   ├── log_viewer.svg
│   │   ├── mcp_prompts.svg
│   │   ├── mcp_tools.svg
│   │   ├── splash.gif
│   │   └── theme.png
│   ├── index.md
│   ├── installation.md
│   ├── mcp
│   │   └── index.md
│   ├── oracle
│   │   ├── .python-version
│   │   ├── README.md
│   │   ├── pyproject.toml
│   │   └── src
│   │       └── oracle
│   │           ├── __init__.py
│   │           └── tool.py
│   ├── parameters.md
│   └── tools
│       └── index.md
├── mkdocs.yml
├── pyproject.toml
├── src
│   └── oterm
│       ├── __init__.py
│       ├── app
│       │   ├── __init__.py
│       │   ├── chat_edit.py
│       │   ├── chat_export.py
│       │   ├── chat_rename.py
│       │   ├── css.py
│       │   ├── image_browser.py
│       │   ├── log_viewer.py
│       │   ├── mcp_prompt.py
│       │   ├── oterm.py
│       │   ├── oterm.tcss
│       │   ├── prompt_history.py
│       │   ├── pull_model.py
│       │   ├── splash.py
│       │   └── widgets
│       │       ├── __init__.py
│       │       ├── chat.py
│       │       ├── image.py
│       │       ├── monkey.py
│       │       ├── prompt.py
│       │       └── tool_select.py
│       ├── cli
│       │   ├── __init__.py
│       │   └── oterm.py
│       ├── config.py
│       ├── log.py
│       ├── ollamaclient.py
│       ├── store
│       │   ├── __init__.py
│       │   ├── store.py
│       │   └── upgrades
│       │       ├── __init__.py
│       │       ├── v0_12_0.py
│       │       ├── v0_1_11.py
│       │       ├── v0_1_6.py
│       │       ├── v0_2_0.py
│       │       ├── v0_2_4.py
│       │       ├── v0_2_8.py
│       │       ├── v0_3_0.py
│       │       ├── v0_4_0.py
│       │       ├── v0_5_1.py
│       │       ├── v0_6_0.py
│       │       ├── v0_7_0.py
│       │       └── v0_9_0.py
│       ├── tools
│       │   ├── __init__.py
│       │   ├── date_time.py
│       │   ├── external.py
│       │   ├── mcp
│       │   │   ├── __init__.py
│       │   │   ├── client.py
│       │   │   ├── logging.py
│       │   │   ├── prompts.py
│       │   │   ├── sampling.py
│       │   │   ├── setup.py
│       │   │   └── tools.py
│       │   ├── shell.py
│       │   └── think.py
│       ├── types.py
│       └── utils.py
├── tests
│   ├── conftest.py
│   ├── data
│   │   └── lama.jpg
│   ├── test_llm_client.py
│   ├── test_ollama_api.py
│   ├── test_store.py
│   └── tools
│       ├── __init__.py
│       ├── mcp_servers.py
│       ├── test_custom_tool.py
│       ├── test_date_time_tool.py
│       ├── test_mcp_prompts.py
│       ├── test_mcp_sampling.py
│       ├── test_mcp_tools.py
│       ├── test_mcp_transports.py
│       ├── test_shell_tool.py
│       └── test_think_tool.py
└── uv.lock

--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | # These are supported funding model platforms
2 | 
3 | github: ggozad
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/10_bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create an issue to help us improve
4 | title: ''
5 | labels: ['bug']
6 | assignees: ''
7 | 
8 | ---
9 | 
10 | Have you read the [documentation](https://ggozad.github.io/oterm/)?
11 | 
12 | Have you checked [closed issues](https://github.com/ggozad/oterm/issues?q=is%3Aissue+is%3Aclosed)?
13 | 
14 | Are you running the most [recent version of oterm](https://pypi.org/search/?q=oterm)?
15 | 
16 | ## The bug
17 | 
18 | Include any useful information from the [logs](https://ggozad.github.io/oterm/development/) and/or the full traceback.
19 | 
20 | Include your OS, Python version and information about how you installed oterm.
21 | 
22 | Please give a brief but clear explanation of the issue and how we can reproduce it. Feel free to add screenshots.
23 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/20_feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Request a new feature 4 | title: '' 5 | labels: ['enhancement'] 6 | assignees: '' 7 | 8 | --- 9 | -------------------------------------------------------------------------------- /.github/workflows/build-docs.yml: -------------------------------------------------------------------------------- 1 | name: build-docs 2 | on: 3 | push: 4 | branches: 5 | - main 6 | permissions: 7 | contents: write 8 | jobs: 9 | deploy: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | - name: Configure Git Credentials 14 | run: | 15 | git config user.name github-actions[bot] 16 | git config user.email 41898282+github-actions[bot]@users.noreply.github.com 17 | - uses: actions/setup-python@v5 18 | with: 19 | python-version: 3.x 20 | - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV 21 | - uses: actions/cache@v4 22 | with: 23 | key: mkdocs-material-${{ env.cache_id }} 24 | path: .cache 25 | restore-keys: | 26 | mkdocs-material- 27 | - run: pip install mkdocs-material 28 | - run: mkdocs gh-deploy --force 29 | -------------------------------------------------------------------------------- /.github/workflows/build-publish.yml: -------------------------------------------------------------------------------- 1 | name: Build & publish to pypi 2 | on: 3 | release: 4 | types: [published] 5 | 6 | jobs: 7 | build: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v4 11 | - name: Set up uv 12 | run: curl -LsSf https://astral.sh/uv/0.3.0/install.sh | sh 13 | - name: Set up Python 3.10 14 | run: uv python install 3.10 15 | - name: Build package 16 | run: uvx --from build pyproject-build --installer uv 17 | - name: Publish package 18 | run: uvx twine upload -u __token__ -p ${{ secrets.PYPI_API_TOKEN }} dist/* --non-interactive 19 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | .cache/ 3 | .venv/ 4 | .venv3.12/ 5 | .env 6 | .env.local 7 | .DS_Store 8 | dist/ 9 | oterm.rb 10 | photos/ 11 | .vscode 12 | /site/ 13 | .pytest_cache/ 14 | .ruff_cache/ 15 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v5.0.0 4 | hooks: 5 | - id: trailing-whitespace 6 | - id: end-of-file-fixer 7 | - id: check-merge-conflict 8 | - id: check-toml 9 | - id: debug-statements 10 | - repo: https://github.com/astral-sh/ruff-pre-commit 11 | # Ruff version. 12 | rev: v0.11.4 13 | hooks: 14 | # Run the linter. 15 | - id: ruff 16 | # Run the formatter. 
17 |       - id: ruff-format
18 | 
19 |   - repo: https://github.com/RobertCraigie/pyright-python
20 |     rev: v1.1.399
21 |     hooks:
22 |       - id: pyright
23 | 
24 |   - repo: https://github.com/RodrigoGonzalez/check-mkdocs
25 |     rev: v1.2.0
26 |     hooks:
27 |       - id: check-mkdocs
28 |         name: check-mkdocs
29 |         args: ["--config", "mkdocs.yml"] # Optional, mkdocs.yml is the default
30 |         # If you have additional plugins or libraries that are not included in
31 |         # check-mkdocs, add them here
32 |         additional_dependencies: ["mkdocs-material"]
33 | 
--------------------------------------------------------------------------------
/CHANGES.txt:
--------------------------------------------------------------------------------
1 | Changelog
2 | =========
3 | 
4 | 0.13.0 - 2025-05-30
5 | -------------------
6 | 
7 | - Streaming support while using tools.
8 |   [ggozad]
9 | 
10 | 0.12.1 - 2025-05-21
11 | -------------------
12 | 
13 | - Dependencies update.
14 |   [ggozad]
15 | 
16 | 0.12.0 - 2025-05-11
17 | -------------------
18 | 
19 | - Move as much as possible to pydantic, simplify code.
20 |   [ggozad]
21 | 
22 | - Remove commands; the little feedback received suggests no one is using them.
23 |   [ggozad]
24 | 
25 | - When regenerating a message default to chat completion as tools do not work when streaming.
26 |   [ggozad]
27 | 
28 | - When editing a chat filter out tools that are no longer available.
29 |   [ggozad]
30 | 
31 | 0.11.2 - 2025-04-22
32 | -------------------
33 | 
34 | - Support for SSE & WS MCP transports.
35 |   [ggozad]
36 | 
37 | 0.11.1 - 2025-04-17
38 | -------------------
39 | 
40 | - Fix error with prompts not submitting if they have no fields.
41 |   [ggozad]
42 | 
43 | - Group tools by MCP server.
44 |   [ggozad]
45 | 
46 | 0.11.0 - 2025-04-13
47 | -------------------
48 | 
49 | - MCP Sampling support.
50 |   [ggozad]
51 | 
52 | 0.10.3 - 2025-04-13
53 | -------------------
54 | 
55 | - Built-in log viewer.
56 |   [ggozad]
57 | 
58 | 0.10.2 - 2025-04-12
59 | -------------------
60 | 
61 | - Improve and release textualeffects for faster splash screen.
62 |   [ggozad]
63 | 
64 | - Faster boot by performing version & ollama checks in parallel.
65 |   [ggozad]
66 | 
67 | - Documentation for development & debugging.
68 |   [ggozad]
69 | 
70 | 0.10.1 - 2025-04-09
71 | -------------------
72 | 
73 | - More stability handling MCP servers.
74 |   [ggozad]
75 | - Add pre-commit hooks and fix linting errors.
76 |   [ggozad]
77 | - Add "think" tool allowing models to think before responding.
78 |   [ggozad]
79 | - Remove tools that are not that useful.
80 |   [ggozad]
81 | 
82 | 0.10.0 - 2025-04-02
83 | -------------------
84 | 
85 | - Add sixel support for images.
86 |   [ggozad]
87 | 
88 | - Enable scrolling with keys within a chat.
89 |   [ggozad]
90 | 
91 | 0.9.5 - 2025-03-25
92 | ------------------
93 | 
94 | - When creating a command, pin the current version of oterm as a dependency.
95 |   [ggozad]
96 | 
97 | 0.9.4 - 2025-03-25
98 | ------------------
99 | 
100 | - Support for MCP prompts.
101 |   [ggozad]
102 | 
103 | 
104 | 0.9.3 - 2025-03-25
105 | ------------------
106 | 
107 | - Include the default environment when running an MCP server with a custom env.
108 |   [ggozad]
109 | 
110 | 0.9.2 - 2025-03-25
111 | ------------------
112 | 
113 | - Override default Ollama Options() to allow for a single string as a stop word.
114 |   [ggozad]
115 | 
116 | - Enable '-h' short-form help option.
117 |   [brucewillke]
118 | 
119 | - Fix checking the latest version on pypi.
120 |   [ggozad]
121 | 
122 | 0.9.1 - 2025-03-25
123 | ------------------
124 | - Check with pypi if oterm is up to date.
125 |   [ggozad]
126 | 
127 | - CLI command creation.
128 |   [ggozad]
129 | 
130 | - Remove pyperclip dependency.
131 |   [ggozad]
132 | 
133 | - Documentation site.
134 |   [ggozad]
135 | 
136 | 0.8.4 - 2025-02-23
137 | ------------------
138 | - Keep MCP sessions alive while oterm is running. Fix running multiple MCP tools.
139 |   [sheffler, ggozad]
140 | 
141 | 0.8.3 - 2025-02-06
142 | ------------------
143 | - Do not save the chat when a model is selected when creating a new chat.
144 |   [ggozad]
145 | 
146 | - Replace custom Notification() with textual's built-in notification.
147 |   [ggozad]
148 | 
149 | - Dependency updates.
150 |   [ggozad]
151 | 
152 | - Improve visibility of labels in the modals.
153 |   [ggozad]
154 | 
155 | 0.8.2 - 2025-02-03
156 | ------------------
157 | 
158 | - Fix merging chat and additional options properly.
159 |   [ggozad]
160 | 
161 | 0.8.1 - 2025-01-29
162 | ------------------
163 | 
164 | - Support for thinking models (e.g. DeepSeek R1).
165 |   [liorm]
166 | 
167 | 0.8.0 - 2025-01-19
168 | ------------------
169 | 
170 | - Support for Model Context Protocol (MCP) tools.
171 |   [ggozad]
172 | 
173 | - Simplify Config(), base it on pydantic's BaseModel.
174 |   [ggozad]
175 | 
176 | 0.7.3 - 2025-01-07
177 | ------------------
178 | 
179 | - Fix parameter parsing / Options bug.
180 |   [lorenmh]
181 | 
182 | 0.7.2 - 2025-01-03
183 | ------------------
184 | 
185 | - Ability to add custom tools!
186 |   [ggozad]
187 | 
188 | - Add a web tool, giving Ollama access to the web.
189 |   [ggozad]
190 | 
191 | 0.7.1 - 2025-01-02
192 | ------------------
193 | 
194 | - Support for Ollama's structured output.
195 |   Use the `format` parameter to specify the output format as a JSON schema.
196 | 
197 | - Ability to clear a chat, removing all messages.
198 |   [ggozad]
199 | 
200 | 0.7.0 - 2024-12-29
201 | ------------------
202 | 
203 | - Enforce foreign key constraints in the sqlite db, to allow proper cascading deletes.
204 |   [ggozad]
205 | 
206 | - Persist images in the chat history & sqlite db.
207 |   [ggozad]
208 | 
209 | - Update OllamaLLM client to match the use of Pydantic in ollama-python.
210 |   [ggozad]
211 | 
212 | - Gracefully handle exceptions in tools.
213 |   [ggozad]
214 | 
215 | - Fix documentation on keymap example in the readme.
216 |   [bartosz]
217 | 
218 | - Update the shortcuts for new chat and close chat.
219 |   [pekcheey]
220 | 
221 | 
222 | 0.6.9 - 2024-11-23
223 | ------------------
224 | 
225 | - Simplify aiosql usage.
226 |   [ggozad]
227 | 
228 | 0.6.8 - 2024-11-20
229 | ------------------
230 | 
231 | - Fixed styling bug that obscured the chat tabs.
232 |   [ggozad]
233 | 
234 | 0.6.7 - 2024-11-19
235 | ------------------
236 | 
237 | - Support all textual built-in themes.
238 |   [ggozad]
239 | 
240 | 0.6.6 - 2024-11-13
241 | ------------------
242 | 
243 | - Replace can_view with can_view_partial following changes to Widget in textual.
244 |   [ggozad]
245 | 
246 | 0.6.5 - 2024-10-12
247 | ------------------
248 | 
249 | - Allow customizing select key bindings.
250 |   [ggozad]
251 | 
252 | - Fixed erroneous OLLAMA_URL documentation.
253 |   [gerroon]
254 | 
255 | - Documentation improvements.
256 |   [tylerlocnguyen]
257 | 
258 | - When Ollama throws an exception while generating a response, capture
259 |   it and show a notification to the user.
260 |   [ggozad]
261 | 
262 | 0.6.4 - 2024-09-28
263 | ------------------
264 | 
265 | - Command to pull/update model.
266 |   [ggozad]
267 | 
268 | - ESC dismisses the splash screen.
269 |   [ggozad]
270 | 
271 | 
272 | 0.6.3 - 2024-09-25
273 | ------------------
274 | 
275 | - Fix typo preventing build on FreeBSD.
276 |   [nivit]
277 | 
278 | - Allow disabling the splash screen.
279 |   [ggozad]
280 | 
281 | 0.6.2 - 2024-09-25
282 | ------------------
283 | 
284 | - Fix creating a new chat when no chats are available.
285 |   [ggozad]
286 | 
287 | - Fancy splash screen using textualeffects.
288 |   [ggozad]
289 | 
290 | 0.6.1 - 2024-09-24
291 | ------------------
292 | 
293 | - Add support for tools/function calling.
294 |   [ggozad]
295 | 
296 | - Fix newline insertion in multi-line widget.
297 |   [ggozad]
298 | 
299 | 0.5.2 - 2024-09-06
300 | ------------------
301 | 
302 | - Fix crash when starting the app without an existing db.
303 |   [ggozad]
304 | 
305 | 0.5.1 - 2024-09-06
306 | ------------------
307 | 
308 | - Persist changed parameters when editing a chat.
309 |   [ggozad]
310 | 
311 | - Add (id) column to message table.
312 |   [ggozad]
313 | 
314 | - Command to regenerate last Ollama response.
315 |   [ggozad]
316 | 
317 | 0.5.0 - 2024-09-04
318 | ------------------
319 | 
320 | - Add support for the command palette. Move most chat-related actions there.
321 |   [ggozad]
322 | 
323 | 0.4.4 - 2024-08-30
324 | ------------------
325 | 
326 | - Restore shortcut for command palette that overrode our choice for adding images.
327 |   [ggozad]
328 | 
329 | 0.4.3 - 2024-08-28
330 | ------------------
331 | 
332 | - Force utf-8 when exporting messages to a file.
333 |   [ggozad]
334 | 
335 | - Migrate to using uv instead of poetry for packaging/dependency management.
336 |   [ggozad]
337 | 
338 | 0.4.2 - 2024-08-20
339 | ------------------
340 | 
341 | - Remove patch to TextArea & restore tab handling.
342 |   [ggozad]
343 | 
344 | 0.4.1 - 2024-08-20
345 | ------------------
346 | 
347 | - Use 127.0.0.1 as the default host for Ollama.
348 |   [ggozad]
349 | 
350 | 0.4.0 - 2024-08-19
351 | ------------------
352 | 
353 | - Use stored messages and chat API instead of context and generate API.
354 |   [yilmaz08, ggozad]
355 | 
356 | 0.3.1 - 2024-08-14
357 | ------------------
358 | 
359 | - Remove dependency on tree-sitter, tree-sitter-languages since they require pre-compiled wheels.
360 |   [ggozad]
361 | 
362 | 0.3.0 - 2024-08-14
363 | ------------------
364 | 
365 | - Support customizing model parameters.
366 |   [ggozad]
367 | 
368 | - Cycle saved chats with Ctrl+Tab and Ctrl+Shift+Tab.
369 |   [yilmaz08]
370 | 
371 | 0.2.10 - 2024-08-09
372 | -------------------
373 | 
374 | - Enter posts while Shift+Enter injects a newline in the multiline-widget.
375 |   [ggozad]
376 | 
377 | - Minor bug fixes & updates.
378 |   [ggozad]
379 | 
380 | 0.2.9 - 2024-05-03
381 | ------------------
382 | 
383 | - Dependency updates.
384 |   [suhr, ggozad]
385 | 
386 | 0.2.8 - 2024-05-03
387 | ------------------
388 | 
389 | - Do not scroll to the bottom of the chat when the user is reading past messages.
390 |   [lainedfles, ggozad]
391 | 
392 | - Allow customizing keep-alive parameter.
393 |   [ggozad]
394 | 
395 | 0.2.7 - 2024-04-22
396 | ------------------
397 | 
398 | - Take into account env variables when calling show/list etc. on Ollama.
399 |   [habaneraa]
400 | 
401 | 0.2.6 - 2024-04-20
402 | ------------------
403 | 
404 | - Fix handling of OLLAMA_HOST, OLLAMA_URL, OTERM_VERIFY_SSL env variables.
405 |   [ggozad]
406 | 
407 | - Fix Windows crash when switching chat panes on slow machines.
408 |   [ggozad]
409 | 
410 | 0.2.5 - 2024-04-02
411 | ------------------
412 | 
413 | - Copy code block when a Markdown block is clicked instead of the entire bot reply.
414 |   [ggozad]
415 | 
416 | 0.2.4 - 2024-03-19
417 | ------------------
418 | 
419 | - Minor bug fixes.
420 |   [ggozad]
421 | 
422 | - Remove our own implementation of the Ollama client and use the official one.
423 |   [ggozad]
424 | 
425 | - Allow user to customize the path of the data dir via the OTERM_DATA_DIR env variable.
426 |   [PeronGH]
427 | 
428 | 0.2.3 - 2024-02-28
429 | ------------------
430 | 
431 | - Minor fix for the chat history styling.
432 |   [ggozad]
433 | 
434 | 0.2.2 - 2024-02-28
435 | ------------------
436 | 
437 | - Allow user to navigate through the prompt history in a chat.
438 |   [ggozad]
439 | 
440 | 0.2.1 - 2024-02-16
441 | ------------------
442 | 
443 | - Export chat as markdown document.
444 |   [ggozad]
445 | 
446 | 0.2.0 - 2024-02-14
447 | ------------------
448 | 
449 | - Remove the template from the chat configuration.
450 |   [ggozad]
451 | 
452 | - Add support for "editing" a chat, allowing for changing system prompt and template.
453 |   [ggozad]
454 | 
455 | - Update textual and remove our own monkey patching
456 |   for Markdown. Increase Markdown size from 20 lines to 50.
457 |   [ggozad]
458 | 
459 | 0.1.22 - 2024-02-01
460 | -------------------
461 | 
462 | - Cancel inference when the user presses ESC.
463 |   [ggozad]
464 | 
465 | - Speed up initial loading of the app by mounting past messages lazily
466 |   only when a chat pane is viewed.
467 |   [ggozad]
468 | 
469 | 0.1.21 - 2024-01-24
470 | -------------------
471 | 
472 | - Allow changing the root of the filesystem tree when selecting an image.
473 |   [ggozad]
474 | 
475 | - Minor bug fixes.
476 |   [ggozad]
477 | 
478 | 0.1.20 - 2024-01-12
479 | -------------------
480 | 
481 | - Minor bug fixes.
482 |   [ggozad]
483 | 
484 | 0.1.19 - 2024-01-11
485 | -------------------
486 | 
487 | - Introduce AppConfig saved to a json file. Save theme setting for the time being.
488 |   [ggozad]
489 | 
490 | - Fix TextArea and Markdown widgets to work with light theme.
491 |   [ggozad]
492 | 
493 | 0.1.18 - 2024-01-05
494 | -------------------
495 | 
496 | - Bug fixes.
497 |   [ggozad]
498 | 
499 | 0.1.17 - 2023-12-19
500 | -------------------
501 | 
502 | - Support multimodal models, allow adding images to chat messages.
503 |   [ggozad]
504 | 
505 | - Change key bindings so that they can be invoked without losing prompt focus.
506 |   [ggozad]
507 | 
508 | - Add key binding to switch to multiline input using ctrl+n.
509 |   [ggozad]
510 | 
511 | 0.1.16 - 2023-12-07
512 | -------------------
513 | 
514 | - Support markdown in chat messages.
515 |   [ggozad]
516 | 
517 | - Show db location when running with `--db`.
518 |   [ggozad]
519 | 
520 | 0.1.15 - 2023-12-07
521 | -------------------
522 | 
523 | - Fix crash on renaming a chat.
524 |   [ggozad]
525 | 
526 | 0.1.14 - 2023-12-06
527 | -------------------
528 | 
529 | - Automate pypi releases through GitHub Actions.
530 |   [ggozad]
531 | 
532 | - Minor bug fixes.
533 |   [ggozad]
534 | 
535 | 0.1.13 - 2023-12-04
536 | -------------------
537 | 
538 | - Forgotten db upgrade.
539 |   [ggozad]
540 | 
541 | 0.1.11 - 2023-11-29
542 | -------------------
543 | 
544 | - Syntax highlighting for json responses.
545 |   [ggozad]
546 | 
547 | - Support for `format` parameter in Ollama (essentially json for the time being).
548 |   [ggozad]
549 | 
550 | 0.1.10 - 2023-11-14
551 | -------------------
552 | 
553 | - Prompt widget improvements.
554 |   [ggozad]
555 | 
556 | - When pasting multiline text to the singleline input, switch to the multiline textarea.
557 |   [ggozad]
558 | 
559 | - Disable SSL verification via OTERM_VERIFY_SSL.
560 | [huynle] 561 | 562 | 0.1.9 - 2023-11-04 563 | ------------------ 564 | 565 | - Introduce FlexibleInput, an input that can be multiline. 566 | [ggozad] 567 | 568 | 0.1.8 - 2023-11-03 569 | ------------------ 570 | 571 | - Remove distutils dependency, make oterm compatible with python 3.12. 572 | [denehoffman] 573 | 574 | 0.1.7 - 2023-11-01 575 | ------------------ 576 | 577 | - Allow customizing the system and template of models. 578 | [ggozad] 579 | 580 | - DB migrations. 581 | [ggozad] 582 | 583 | 584 | 0.1.5 - 2023-11-01 585 | ------------------ 586 | 587 | - Fix whitespace bug in model selection screen. 588 | [ggozad] 589 | 590 | 0.1.4 - 2023-10-18 591 | ------------------ 592 | 593 | - Show template, system, and params in the model selection screen. 594 | [ggozad] 595 | 596 | - Click to copy to clipboard. 597 | [ggozad] 598 | 599 | 0.1.3 - 2023-10-17 600 | ------------------ 601 | 602 | - Remove pydantic and as a result, the rust dependency & build in homebrew. 603 | [ggozad] 604 | 605 | - Show discreet info on running model. 606 | [ggozad] 607 | 608 | 0.1.2 - 2023-10-17 609 | ------------------ 610 | 611 | - Ability to rename chats. 612 | [ggozad] 613 | 614 | 0.1.0 - 2023-10-15 615 | ------------------ 616 | 617 | - Initial release. 618 | [ggozad] 619 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2023 Yiorgis Gozadinos 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 8 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # oterm 2 | 3 | the terminal client for [Ollama](https://github.com/ollama/ollama). 4 | 5 | ## Features 6 | 7 | * intuitive and simple terminal UI, no need to run servers, frontends, just type `oterm` in your terminal. 8 | * supports Linux, MacOS, and Windows and most terminal emulators. 9 | * multiple persistent chat sessions, stored together with system prompt & parameter customizations in sqlite. 10 | * support for Model Context Protocol (MCP) tools & prompts integration. 11 | * can use any of the models you have pulled in Ollama, or your own custom models. 12 | * allows for easy customization of the model's system prompt and parameters. 13 | * supports tools integration for providing external information to the model. 
14 | 
15 | ## Quick install
16 | 
17 | ```bash
18 | uvx oterm
19 | ```
20 | See [Installation](https://ggozad.github.io/oterm/installation) for more details.
21 | 
22 | ## Documentation
23 | 
24 | [oterm Documentation](https://ggozad.github.io/oterm/)
25 | 
26 | ## What's new
27 | * Support for streaming with tools!
28 | * Messages UI styling improvements.
29 | * MCP Sampling is here in addition to MCP tools & prompts! Also support for SSE & WebSocket transports for MCP servers.
30 | 
31 | ### Screenshots
32 | ![Splash](https://raw.githubusercontent.com/ggozad/oterm/refs/heads/main/docs/img/splash.gif)
33 | The splash screen animation that greets users when they start oterm.
34 | 
35 | ![Chat](https://raw.githubusercontent.com/ggozad/oterm/main/docs/img/chat.png)
36 | A view of the chat interface, showcasing the conversation between the user and the model.
37 | 
38 | ![Model selection](https://raw.githubusercontent.com/ggozad/oterm/main/docs/img/customizations.png)
39 | The model selection screen, allowing users to choose and customize available models.
40 | 
41 | ![Tool support](https://raw.githubusercontent.com/ggozad/oterm/main/docs/img/mcp_tools.svg)
42 | oterm using the `git` MCP server to access its own repo.
43 | 
44 | ![Image selection](https://raw.githubusercontent.com/ggozad/oterm/main/docs/img/image_selection.png)
45 | The image selection interface, demonstrating how users can include images in their conversations.
46 | 
47 | ![Theme](https://raw.githubusercontent.com/ggozad/oterm/main/docs/img/theme.png)
48 | oterm supports multiple themes, allowing users to customize the appearance of the interface.
49 | 
50 | ## License
51 | 
52 | This project is licensed under the [MIT License](LICENSE).
53 | 
--------------------------------------------------------------------------------
/docs/app_config.md:
--------------------------------------------------------------------------------
1 | ### App configuration
2 | 
3 | The app configuration is stored in a directory specific to your operating system, by default:
4 | 
5 | * Linux: `~/.local/share/oterm/config.json`
6 | * macOS: `~/Library/Application Support/oterm/config.json`
7 | * Windows: `C:/Users/<username>/AppData/Roaming/oterm/config.json`
8 | 
9 | If in doubt, you can find the directory containing `config.json` by running `oterm --data-dir` (or `uvx oterm --data-dir` if you installed oterm using uvx).
10 | 
11 | You can set the following options in the configuration file:
12 | ```json
13 | { "splash-screen": true }
14 | ```
15 | 
16 | `splash-screen` controls whether the splash screen is shown on startup.
17 | 
18 | ### Key bindings
19 | 
20 | We strive to have sane default key bindings, but there will always be cases where your terminal emulator or shell interferes. You can customize select key bindings by editing the app config `config.json` file. The following are the defaults:
21 | 
22 | ```json
23 | {
24 |     ...
25 |     "keymap": {
26 |         "next.chat": "ctrl+tab",
27 |         "prev.chat": "ctrl+shift+tab",
28 |         "quit": "ctrl+q",
29 |         "newline": "shift+enter"
30 |     }
31 | }
32 | ```
33 | 
34 | ### Chat storage
35 | 
36 | All your chat sessions are stored locally in a sqlite database. You can customize the directory where the database is stored by setting the `OTERM_DATA_DIR` environment variable.
37 | 
38 | You can find the location of the database by running `oterm --db`.
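
For quick scripting against your chat history, the sqlite database can also be opened directly. A minimal sketch using only the standard library (the file path is illustrative — use the one printed by `oterm --db` — and the `message` table with `role`/`text` columns is an assumption based on oterm's changelog, which may change between versions):

```python
import sqlite3

# Replace with the path printed by `oterm --db`; this value is illustrative.
DB_PATH = "/home/user/.local/share/oterm/store.db"

with sqlite3.connect(DB_PATH) as conn:
    # Print the first few stored messages; table and column names are assumptions.
    for role, text in conn.execute("SELECT role, text FROM message LIMIT 5"):
        print(f"{role}: {text[:80]}")
```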
--------------------------------------------------------------------------------
/docs/commands.md:
--------------------------------------------------------------------------------
1 | ### Commands
2 | By pressing ^ Ctrl+p you can access the command palette, from where you can perform most of the chat actions. The following commands are available:
3 | 
4 | * `New chat` - create a new chat session
5 | * `Edit chat parameters` - edit the current chat session (change system prompt, parameters or format)
6 | * `Rename chat` - rename the current chat session
7 | * `Export chat` - export the current chat session as markdown
8 | * `Delete chat` - delete the current chat session
9 | * `Clear chat` - clear the chat history, preserving model and system prompt customizations
10 | * `Regenerate last Ollama message` - regenerates the last message from Ollama (will override the `seed` for the specific message with a random one). Useful if you want to change the system prompt or parameters, or just want to try again.
11 | * `Pull model` - pull a model or update an existing one.
12 | * `Change theme` - choose among the available themes.
13 | * `Show logs` - shows the logs of the current oterm session.
14 | 
15 | ### Keyboard shortcuts
16 | 
17 | The following keyboard shortcuts are supported:
18 | 
19 | * ^ Ctrl+q - quit
20 | 
21 | * ^ Ctrl+m - switch to multiline input mode
22 | * ^ Ctrl+i - select an image to include with the next message
23 | * ↑/↓ (while messages are focused) - navigate through the messages
24 | * ↑/↓ (while prompt is focused) - navigate through the history of previous prompts
25 | * ^ Ctrl+l - show logs
26 | 
27 | * ^ Ctrl+n - open a new chat
28 | * ^ Ctrl+Backspace - close the current chat
29 | 
30 | * ^ Ctrl+Tab - open the next chat
31 | * ^ Ctrl+Shift+Tab - open the previous chat
32 | 
33 | In multiline mode, you can press Enter to send the message, or Shift+Enter to add a new line at the cursor.
34 | 
35 | While Ollama is inferring the next message, you can press Esc to cancel the inference.
36 | 
37 | !!! note
38 |     Some of the shortcuts may not work in a certain context, if they are overridden by the widget in focus. For example, pressing ↑/↓ while the prompt is in multi-line mode.
39 | 
40 | If the key bindings clash with your terminal, it is possible to change them by editing the configuration file. See [Configuration](app_config.md).
41 | 
42 | ### Copy / Paste
43 | 
44 | It is difficult to properly support copy/paste in terminal applications. You can copy blocks to your clipboard as such:
45 | 
46 | * clicking a message will copy it to the clipboard.
47 | * clicking a code block will only copy the code block to the clipboard.
48 | 
49 | For most terminals there exists a key modifier you can use to click and drag to manually select text. For example:
50 | * `iTerm` Option key.
51 | * `Gnome Terminal` Shift key.
52 | * `Windows Terminal` Shift key.
53 | 
54 | ![Image selection](./img/image_selection.png)
55 | The image selection interface.
56 | 
--------------------------------------------------------------------------------
/docs/development/index.md:
--------------------------------------------------------------------------------
1 | # Development & Debugging
2 | 
3 | ## Inspecting logs
4 | 
5 | You can inspect basic logs from oterm by invoking the log viewer with ^ Ctrl+l or by using the command palette. This is particularly useful if you want to debug tool calling.
6 | 
7 | ![Log viewer](../img/log_viewer.svg)
8 | oterm's internal log viewer showing the Brave Search MCP tool in action.
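
If you are debugging a custom tool, plain standard-library `logging` calls are a convenient way to surface information in the log stream. A minimal sketch (it assumes oterm picks up standard `logging` records emitted from tool code, which is an implementation detail of `src/oterm/log.py`):

```python
import logging

logger = logging.getLogger(__name__)


def my_tool(question: str) -> str:
    # Log the incoming arguments so each tool call shows up when inspecting logs.
    logger.debug("my_tool called with question=%r", question)
    return "42"
```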
9 | 
10 | ## Setup for development
11 | 
12 | - Create a virtual environment
13 | ```sh
14 | uv venv
15 | ```
16 | - Activate the virtual environment
17 | ```sh
18 | source .venv/bin/activate
19 | # or on Windows
20 | .venv\Scripts\activate
21 | ```
22 | 
23 | - Install oterm
24 | ```sh
25 | uv pip install oterm
26 | ```
27 | or check out the repository and install the oterm package from source
28 | ```sh
29 | git clone git@github.com:ggozad/oterm.git
30 | cd oterm
31 | uv sync
32 | ```
33 | 
34 | ### Debugging
35 | 
36 | - In order to inspect logs from oterm, open a new terminal and run:
37 | ```sh
38 | source .venv/bin/activate
39 | textual console -x SYSTEM -x EVENT -x WORKER -x DEBUG
40 | ```
41 | This will start the textual console and listen to all log messages from oterm, hiding some of the textual UI messages.
42 | 
43 | - You can now start oterm in debug mode:
44 | ```sh
45 | source .venv/bin/activate
46 | textual run -c --dev oterm
47 | ```
48 | 
49 | ## Documentation
50 | 
51 | oterm uses [mkdocs](https://www.mkdocs.org/) with [material](https://squidfunk.github.io/mkdocs-material/) to generate the documentation. To build the documentation, run:
52 | ```sh
53 | source .venv/bin/activate
54 | mkdocs serve -o
55 | ```
56 | This will start a local server and open the documentation pages in your default web browser.
57 | 
--------------------------------------------------------------------------------
/docs/img/chat.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggozad/oterm/5933088b9eec8ecdc147b7ba9582a45548b4aebe/docs/img/chat.png
--------------------------------------------------------------------------------
/docs/img/customizations.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggozad/oterm/5933088b9eec8ecdc147b7ba9582a45548b4aebe/docs/img/customizations.png
--------------------------------------------------------------------------------
/docs/img/image_selection.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggozad/oterm/5933088b9eec8ecdc147b7ba9582a45548b4aebe/docs/img/image_selection.png
--------------------------------------------------------------------------------
/docs/img/splash.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggozad/oterm/5933088b9eec8ecdc147b7ba9582a45548b4aebe/docs/img/splash.gif
--------------------------------------------------------------------------------
/docs/img/theme.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggozad/oterm/5933088b9eec8ecdc147b7ba9582a45548b4aebe/docs/img/theme.png
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | # oterm
2 | 
3 | the terminal client for [Ollama](https://github.com/ollama/ollama).
4 | 
5 | ## Features
6 | 
7 | * intuitive and simple terminal UI, no need to run servers, frontends, just type `oterm` in your terminal.
8 | * multiple persistent chat sessions, stored together with system prompt & parameter customizations in sqlite.
9 | * support for Model Context Protocol (MCP) tools & prompts integration.
10 | * can use any of the models you have pulled in Ollama, or your own custom models.
11 | * allows for easy customization of the model's system prompt and parameters. 12 | * supports tools integration for providing external information to the model. 13 | 14 | ## Installation 15 | 16 | See the [Installation](installation.md) section. 17 | 18 | ## Using oterm 19 | 20 | In order to use `oterm` you will need to have the Ollama server running. By default it expects to find the Ollama API running on `http://127.0.0.1:11434`. If you are running Ollama inside docker or on a different host/port, use the `OLLAMA_HOST` environment variable to customize the host/port. Alternatively you can use `OLLAMA_URL` to specify the full http(s) url. Setting `OTERM_VERIFY_SSL` to `False` will disable SSL verification. 21 | 22 | ```bash 23 | OLLAMA_URL=http://host:port 24 | ``` 25 | 26 | To start `oterm` simply run: 27 | 28 | ```bash 29 | oterm 30 | ``` 31 | 32 | If you installed oterm using `uvx`, you can also start it using: 33 | 34 | ```bash 35 | uvx oterm 36 | ``` 37 | 38 | ### Screenshots 39 | ![Splash](img/splash.gif) 40 | The splash screen animation that greets users when they start oterm. 41 | 42 | ![Chat](img/chat.png) 43 | A view of the chat interface, showcasing the conversation between the user and the model. 44 | 45 | ![Theme](./img/theme.png) 46 | oterm supports multiple themes, allowing users to customize the appearance of the interface. 47 | 48 | ## License 49 | 50 | This project is licensed under the [MIT License](https://raw.githubusercontent.com/ggozad/oterm/main/LICENSE). 51 | -------------------------------------------------------------------------------- /docs/installation.md: -------------------------------------------------------------------------------- 1 | ## Installation 2 | 3 | !!! note 4 | Ollama needs to be installed and running in order to use `oterm`. Please follow the [Ollama Installation Guide](https://github.com/ollama/ollama?tab=readme-ov-file#ollama). 
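
Once Ollama is running, you can verify that `oterm` will be able to reach it. A quick connectivity check using only the Python standard library (the default local endpoint is assumed; adjust the URL if you run Ollama elsewhere):

```python
# Ollama listens on http://127.0.0.1:11434 by default.
import urllib.request

with urllib.request.urlopen("http://127.0.0.1:11434") as resp:
    # The server answers with a short status string when it is up.
    print(resp.read().decode())
```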
5 | 6 | Using `uvx`: 7 | 8 | ```bash 9 | uvx oterm 10 | ``` 11 | 12 | Using `brew` for MacOS: 13 | 14 | ```bash 15 | brew tap ggozad/formulas 16 | brew install ggozad/formulas/oterm 17 | ``` 18 | 19 | Using `yay` (or any AUR helper) for Arch Linux, thanks goes to [Daniel Chesters](https://github.com/DanielChesters) for maintaining the package: 20 | 21 | ```bash 22 | yay -S oterm 23 | ``` 24 | 25 | Using `nix-env` on NixOs, thanks goes to [Gaël James](https://github.com/gaelj) for maintaining the package: 26 | 27 | ```bash 28 | nix-env -iA nixpkgs.oterm 29 | ``` 30 | 31 | Using `pip`: 32 | 33 | ```bash 34 | pip install oterm 35 | ``` 36 | 37 | Using `pkg` for FreeBSD, thanks goes to [Nicola Vitale](https://github.com/nivit) for maintaining the package: 38 | 39 | ```bash 40 | pkg install misc/py-oterm 41 | ``` 42 | 43 | Using [`x-cmd`](https://x-cmd.com/install/oterm): 44 | 45 | ```bash 46 | x install oterm 47 | ``` 48 | 49 | ## Updating oterm 50 | 51 | To update oterm to the latest version, you can use the same method you used for installation: 52 | 53 | Using `uvx`: 54 | 55 | ```bash 56 | uvx oterm@latest 57 | ``` 58 | 59 | Using `brew` for MacOS: 60 | 61 | ```bash 62 | brew upgrade ggozad/formulas/oterm 63 | ``` 64 | Using 'yay' (or any AUR helper) for Arch Linux: 65 | 66 | ```bash 67 | yay -Syu oterm 68 | ``` 69 | Using `pip`: 70 | 71 | ```bash 72 | pip install --upgrade oterm 73 | ``` 74 | 75 | Using `pkg` for FreeBSD: 76 | 77 | ```bash 78 | pkg upgrade misc/py-oterm 79 | ``` 80 | -------------------------------------------------------------------------------- /docs/mcp/index.md: -------------------------------------------------------------------------------- 1 | # Model Context Protocol 2 | 3 | `oterm` has support for Anthropic's open-source [Model Context Protocol](https://modelcontextprotocol.io). While Ollama does not yet directly support the protocol, `oterm` attempts to bridge [MCP servers](https://github.com/modelcontextprotocol/servers) with Ollama. 4 | 5 | To add an MCP server to `oterm`, simply add the server shim to oterm's [config.json](../app_config.md). The following MCP transports are supported 6 | 7 | #### `stdio` transport 8 | 9 | Used for running local MCP servers, the configuration supports the `command`, `args`, `env` & `cwd` parameters. For example for the [git](https://github.com/modelcontextprotocol/servers/tree/main/src/git) MCP server you would add something like the following to the `mcpServers` section of the `oterm` [configuration file](../app_config.md): 10 | 11 | ```json 12 | { 13 | ... 14 | "mcpServers": { 15 | "git": { 16 | "command": "docker", 17 | "args": [ 18 | "run", 19 | "--rm", 20 | "-i", 21 | "--mount", 22 | "type=bind,src=/Users/ggozad/dev/open-source/oterm,dst=/oterm", 23 | "mcp/git" 24 | ] 25 | } 26 | } 27 | } 28 | ``` 29 | 30 | #### `SSE` transport 31 | 32 | Typically used to connect to remote MCP servers through Server Side Events, the only accepted parameter is the `url` parameter (should start with `http://` or `https://`). For example, 33 | 34 | ```json 35 | { 36 | ... 37 | "mcpServers": { 38 | "my_mcp": { 39 | "url": "http://remote:5678/some_path/sse" 40 | } 41 | } 42 | } 43 | ``` 44 | 45 | #### `Websocket` transport 46 | 47 | Also used to connect to remote MCP servers, but through websockets. The only accepted parameter is the `url` parameter (should start with `ws://` or `wss://`). For example, 48 | 49 | ```json 50 | { 51 | ... 
52 |     "mcpServers": {
53 |         "my_mcp": {
54 |             "url": "wss://remote:5678/some_path/wss"
55 |         }
56 |     }
57 | }
58 | ```
59 | 
60 | ### Supported MCP Features
61 | #### Tools
62 | By transforming [MCP tools](https://modelcontextprotocol.io/docs/concepts/tools) into Ollama tools, `oterm` provides full support.
63 | 
64 | !!! note
65 |     Not all models are equipped to support tools. For those models that do not, the tool selection will be disabled.
66 | 
67 |     A lot of the smaller LLMs are not as capable with tools as the larger ones you might be used to. If you experience issues with tools, try reducing the number of tools you attach to a chat, increasing the context size, or using a larger LLM.
68 | 
69 | 
70 | ![Tool support](../img/mcp_tools.svg)
71 | oterm using the `git` MCP server to access its own repo.
72 | 
73 | #### Prompts
74 | `oterm` supports [MCP prompts](https://modelcontextprotocol.io/docs/concepts/prompts). Use the "Use MCP prompt" command to invoke a form with the prompt. Submitting will insert the prompt messages into the chat.
75 | 
76 | ![Prompt support](../img/mcp_prompts.svg)
77 | oterm displaying a test MCP prompt.
78 | 
79 | #### Sampling
80 | `oterm` supports [MCP sampling](https://modelcontextprotocol.io/docs/concepts/sampling), acting as a gateway between Ollama and the servers it connects to. This way, an MCP server can request `oterm` to run a *completion* and even declare its model preferences and parameters!
81 | 
--------------------------------------------------------------------------------
/docs/oracle/.python-version:
--------------------------------------------------------------------------------
1 | 3.10
2 | 
--------------------------------------------------------------------------------
/docs/oracle/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggozad/oterm/5933088b9eec8ecdc147b7ba9582a45548b4aebe/docs/oracle/README.md
--------------------------------------------------------------------------------
/docs/oracle/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "oracle"
3 | version = "0.1.0"
4 | description = "Add your description here"
5 | readme = "README.md"
6 | authors = [
7 |     { name = "Yiorgis Gozadinos", email = "ggozadinos@gmail.com" }
8 | ]
9 | requires-python = ">=3.10"
10 | dependencies = [
11 |     "ollama>=0.4.4,<0.5",
12 | ]
13 | 
14 | [build-system]
15 | requires = ["hatchling"]
16 | build-backend = "hatchling.build"
--------------------------------------------------------------------------------
/docs/oracle/src/oracle/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggozad/oterm/5933088b9eec8ecdc147b7ba9582a45548b4aebe/docs/oracle/src/oracle/__init__.py
--------------------------------------------------------------------------------
/docs/oracle/src/oracle/tool.py:
--------------------------------------------------------------------------------
1 | from ollama import Tool
2 | 
3 | OracleTool = Tool(
4 |     type="function",
5 |     function=Tool.Function(
6 |         name="oracle",
7 |         description="Function to return the Oracle's answer to any question.",
8 |         parameters=Tool.Function.Parameters(
9 |             type="object",
10 |             properties={
11 |                 "question": Tool.Function.Parameters.Property(
12 |                     type="str", description="The question to ask."
13 |                 ),
14 |             },
15 |             required=["question"],
16 |         ),
17 |     ),
18 | )
19 | 
20 | 
21 | def oracle(question: str):
22 |     return "oterm"
23 | 
--------------------------------------------------------------------------------
/docs/parameters.md:
--------------------------------------------------------------------------------
1 | When creating a new chat, you may not only select the model, but also customize the following:
2 | 
3 | - `system` instruction prompt
4 | - `tools` used. See [Tools](tools/index.md) for more information on how to make tools available.
5 | - chat `parameters` (such as context length, seed, temperature etc.) passed to the model. For a list of all supported parameters refer to the [Ollama documentation](https://github.com/ollama/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values).
6 | - Output `format`/structured output. In the `format` field you can use Ollama's [Structured Output](https://ollama.com/blog/structured-outputs), specifying the full format as a JSON schema. Leaving the field empty (default) will return the output as text.
7 | 
8 | You can also "edit" an existing chat to change the system prompt, parameters, tools or format. Note that the model cannot be changed once the chat has started.
9 | 
10 | ![Model selection](./img/customizations.png)
11 | The model selection screen, allowing users to choose and customize available models.
--------------------------------------------------------------------------------
/docs/tools/index.md:
--------------------------------------------------------------------------------
1 | # Tools
2 | 
3 | `oterm` supports integration with tools. Tools are special "functions" that can provide the LLM with external information that it does not otherwise have access to.
4 | 
5 | With tools, you can provide the model with access to the web, run shell commands, perform RAG and more.
6 | 
7 | [Use existing Model Context Protocol servers](../mcp/index.md)
8 | 
9 | or
10 | 
11 | [create your own custom tools](#custom-tools-with-oterm).
12 | 
13 | ### Custom tools with oterm
14 | 
15 | You can create your own custom tools and integrate them with `oterm`.
16 | 
17 | #### Create a Python package
18 | 
19 | You will need to create a Python package that exports a `Tool` definition as well as a *callable* function that will be called when the tool is invoked.
20 | 
21 | Here is an [example](https://github.com/ggozad/oterm/tree/main/docs/oracle){:target="_blank"} of a simple tool that implements an Oracle. The tool is defined in the `oracle` package, which exports the `OracleTool` tool definition and an `oracle` callable function.
22 | 
23 | ```python
24 | from ollama import Tool
25 | 
26 | OracleTool = Tool(
27 |     type="function",
28 |     function=Tool.Function(
29 |         name="oracle",
30 |         description="Function to return the Oracle's answer to any question.",
31 |         parameters=Tool.Function.Parameters(
32 |             type="object",
33 |             properties={
34 |                 "question": Tool.Function.Parameters.Property(
35 |                     type="str", description="The question to ask."
36 |                 ),
37 |             },
38 |             required=["question"],
39 |         ),
40 |     ),
41 | )
42 | 
43 | 
44 | def oracle(question: str):
45 |     return "oterm"
46 | ```
47 | 
48 | You need to install the package in the same environment where `oterm` is installed so that `oterm` can resolve it.
49 | 
50 | ```bash
51 | cd oracle
52 | uv pip install . # or pip install .
53 | ```
54 | 
55 | #### Register the tool with oterm
56 | 
57 | You can register the tool with `oterm` by adding the tool definition and callable to the `tools` section of the `oterm` configuration file. You can find the location of the configuration file's directory by running `oterm --data-dir`.
58 | 
59 | ```json
60 | {
61 |     ...
62 |     "tools": [{
63 |         "tool": "oracle.tool:OracleTool",
64 |         "callable": "oracle.tool:oracle"
65 |     }]
66 | }
67 | ```
68 | Note the notation `module:object` for the tool and callable.
69 | 
70 | That's it! You can now use the tool in `oterm` with models that support it.
71 | 
72 | ### Built-in example tools
73 | 
74 | The following example tools are currently built into `oterm`:
75 | 
76 | * `think` - provides the model with a way to think about a question before answering it. This is useful for complex questions that require reasoning. Use it for adding a "thinking" step to the model's response.
77 | * `date_time` - provides the current date and time in ISO format.
78 | * `shell` - allows you to run shell commands and use the output as input to the model. Obviously this can be dangerous, so use with caution.
79 | 
80 | These tools are defined in `src/oterm/tools`. You can make those tools available, and enable them for selection when creating or editing a chat, by adding them to the `tools` section of the `oterm` configuration file. You can find the location of the configuration file's directory by running `oterm --data-dir`. So for example, to enable the `think` tool, you would add the following to the configuration file:
81 | 
82 | ```json
83 | {
84 |     ...
85 |     "tools": [{
86 |         "tool": "oterm.tools.think:ThinkTool",
87 |         "callable": "oterm.tools.think:think"
88 |     }]
89 | }
90 | ```
91 | 
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: oterm
2 | site_description: the terminal client for Ollama.
3 | site_url: https://ggozad.github.io/oterm/ 4 | theme: 5 | name: material 6 | palette: 7 | - media: "(prefers-color-scheme)" 8 | toggle: 9 | icon: material/lightbulb-auto 10 | name: Switch to light mode 11 | - media: "(prefers-color-scheme: light)" 12 | scheme: default 13 | primary: deep purple 14 | accent: amber 15 | toggle: 16 | icon: material/lightbulb 17 | name: Switch to dark mode 18 | - media: "(prefers-color-scheme: dark)" 19 | scheme: slate 20 | primary: deep purple 21 | accent: amber 22 | toggle: 23 | icon: material/lightbulb-outline 24 | name: Switch to system preference 25 | features: 26 | - content.code.annotate 27 | - content.code.copy 28 | - content.code.select 29 | - content.footnote.tooltips 30 | - content.tabs.link 31 | - content.tooltips 32 | - navigation.footer 33 | - navigation.indexes 34 | - navigation.instant 35 | - navigation.instant.prefetch 36 | - navigation.instant.progress 37 | - navigation.path 38 | - navigation.tabs 39 | - navigation.tabs.sticky 40 | - navigation.top 41 | - navigation.tracking 42 | - search.highlight 43 | - search.share 44 | - search.suggest 45 | - toc.follow 46 | 47 | icon: 48 | repo: fontawesome/brands/github-alt 49 | # logo: img/icon-white.svg 50 | # favicon: img/favicon.png 51 | language: en 52 | repo_name: ggozad/oterm 53 | repo_url: https://github.com/ggozad/oterm 54 | plugins: 55 | # Material for MkDocs 56 | search: 57 | nav: 58 | - oterm: 59 | - index.md 60 | - Installation: installation.md 61 | - Commands & shortcuts: commands.md 62 | - Chat parameters: parameters.md 63 | - Model Context Protocol: 64 | - mcp/index.md 65 | - Tools: 66 | - tools/index.md 67 | - Configuration: app_config.md 68 | - Debugging / Development: development/index.md 69 | markdown_extensions: 70 | - admonition 71 | - attr_list 72 | - pymdownx.details 73 | - pymdownx.highlight: 74 | anchor_linenums: true 75 | line_spans: __span 76 | pygments_lang_class: true 77 | use_pygments: true 78 | - pymdownx.inlinehilite 79 | - pymdownx.snippets 80 | - pymdownx.superfences 81 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "oterm" 3 | version = "0.13.0" 4 | description = "A text-based terminal client for Ollama." 
5 | authors = [{ name = "Yiorgis Gozadinos", email = "ggozadinos@gmail.com" }] 6 | license = { text = "MIT" } 7 | readme = { file = "README.md", content-type = "text/markdown" } 8 | classifiers = [ 9 | "Development Status :: 4 - Beta", 10 | "Environment :: Console", 11 | "Intended Audience :: Developers", 12 | "Operating System :: Microsoft :: Windows :: Windows 10", 13 | "Operating System :: Microsoft :: Windows :: Windows 11", 14 | "Operating System :: MacOS", 15 | "Operating System :: POSIX :: Linux", 16 | "Programming Language :: Python :: 3.10", 17 | "Programming Language :: Python :: 3.11", 18 | "Programming Language :: Python :: 3.12", 19 | "Typing :: Typed", 20 | ] 21 | requires-python = ">=3.10" 22 | dependencies = [ 23 | "textual>=3.2.0,<3.3.0", 24 | "typer>=0.15.2,<0.16", 25 | "python-dotenv>=1.0.1", 26 | "aiosql>=13.4,<14", 27 | "aiosqlite>=0.21.0,<0.22", 28 | "packaging>=25.0,<26", 29 | "pillow>=11.2.1,<12", 30 | "ollama>=0.5.0,<0.6", 31 | "textualeffects>=0.1.4", 32 | "pydantic>=2.11.3,<2.12", 33 | "textual-image>=0.8.2,<0.9.0", 34 | "fastmcp>=2.5.2,<2.6", 35 | ] 36 | 37 | [project.urls] 38 | Homepage = "https://github.com/ggozad/oterm" 39 | Repository = "https://github.com/ggozad/oterm" 40 | Issues = "https://github.com/ggozad/oterm/issues" 41 | Documentation = "https://ggozad.github.io/oterm/" 42 | 43 | [project.scripts] 44 | oterm = "oterm.cli.oterm:cli" 45 | 46 | [tool.uv] 47 | dev-dependencies = [ 48 | "ruff>=0.11.6", 49 | "pdbpp", 50 | "pytest>=8.3.4", 51 | "pytest-asyncio>=0.25.3", 52 | "textual-dev>=1.7.0", 53 | "homebrew-pypi-poet>=0.10.0", 54 | "mkdocs>=1.6.1", 55 | "mkdocs-material>=9.6.5", 56 | "pre-commit>=4.2.0", 57 | "pyright>=1.1.400", 58 | ] 59 | 60 | [tool.uv.sources] 61 | 62 | [tool.ruff] 63 | line-length = 88 64 | 65 | [tool.ruff.lint] 66 | select = [ 67 | "E", 68 | "F", 69 | "UP", 70 | "I", 71 | ] # Enable Flake's "E" and "F" codes by default and "I" for sorting imports 72 | ignore = ["E501"] 73 | per-file-ignores = { "__init__.py" = ["F401", "F403"] } 74 | # Allow unused variables when underscore-prefixed. 75 | dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$" 76 | 77 | [tool.ruff.format] 78 | quote-style = "double" 79 | indent-style = "space" 80 | skip-magic-trailing-comma = false 81 | line-ending = "auto" 82 | 83 | [tool.pyright] 84 | venvPath = "." 
85 | venv = ".venv" 86 | 87 | [build-system] 88 | requires = ["hatchling"] 89 | build-backend = "hatchling.build" 90 | 91 | [tool.hatch.build.targets.sdist] 92 | exclude = ["/screenshots", "/examples"] 93 | 94 | [tool.hatch.build.targets.wheel] 95 | only-packages = true 96 | -------------------------------------------------------------------------------- /src/oterm/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggozad/oterm/5933088b9eec8ecdc147b7ba9582a45548b4aebe/src/oterm/__init__.py -------------------------------------------------------------------------------- /src/oterm/app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggozad/oterm/5933088b9eec8ecdc147b7ba9582a45548b4aebe/src/oterm/app/__init__.py -------------------------------------------------------------------------------- /src/oterm/app/chat_edit.py: -------------------------------------------------------------------------------- 1 | from ollama import Options, ShowResponse 2 | from pydantic import ValidationError 3 | from rich.text import Text 4 | from textual.app import ComposeResult 5 | from textual.containers import ( 6 | Container, 7 | Horizontal, 8 | Vertical, 9 | ) 10 | from textual.reactive import reactive 11 | from textual.screen import ModalScreen 12 | from textual.widgets import Button, Input, Label, OptionList, TextArea 13 | 14 | from oterm.app.widgets.tool_select import ToolSelector 15 | from oterm.ollamaclient import ( 16 | OllamaLLM, 17 | jsonify_options, 18 | parse_format, 19 | parse_ollama_parameters, 20 | ) 21 | from oterm.types import ChatModel, OtermOllamaOptions, Tool 22 | 23 | 24 | class ChatEdit(ModalScreen[str]): 25 | models = [] 26 | models_info: dict[str, ShowResponse] = {} 27 | 28 | model_name: reactive[str] = reactive("") 29 | tag: reactive[str] = reactive("") 30 | bytes: reactive[int] = reactive(0) 31 | model_info: ShowResponse 32 | system: reactive[str] = reactive("") 33 | parameters: reactive[Options] = reactive(Options()) 34 | format: reactive[str] = reactive("") 35 | keep_alive: reactive[int] = reactive(5) 36 | last_highlighted_index = None 37 | tools: reactive[list[Tool]] = reactive([]) 38 | edit_mode: reactive[bool] = reactive(False) 39 | 40 | BINDINGS = [ 41 | ("escape", "cancel", "Cancel"), 42 | ("enter", "save", "Save"), 43 | ] 44 | 45 | def __init__( 46 | self, 47 | chat_model: ChatModel | None = None, 48 | edit_mode: bool = False, 49 | ) -> None: 50 | super().__init__() 51 | 52 | if chat_model is None: 53 | chat_model = ChatModel() 54 | 55 | self.chat_model = chat_model 56 | self.model_name, self.tag = ( 57 | chat_model.model.split(":") if chat_model.model else ("", "") 58 | ) 59 | self.system = chat_model.system or "" 60 | self.parameters = chat_model.parameters 61 | self.format = chat_model.format 62 | self.keep_alive = chat_model.keep_alive 63 | self.tools = chat_model.tools 64 | self.edit_mode = edit_mode 65 | 66 | def _return_chat_meta(self) -> None: 67 | model = f"{self.model_name}:{self.tag}" 68 | system = self.query_one(".system", TextArea).text 69 | system = system if system != self.model_info.get("system", "") else None 70 | keep_alive = int(self.query_one(".keep-alive", Input).value) 71 | p_area = self.query_one(".parameters", TextArea) 72 | try: 73 | parameters = OtermOllamaOptions.model_validate_json( 74 | p_area.text, strict=True 75 | ) 76 | 77 | if isinstance(parameters.stop, str): 78 | parameters.stop = 
[parameters.stop] 79 | 80 | except ValidationError: 81 | self.app.notify("Error validating parameters", severity="error") 82 | p_area = self.query_one(".parameters", TextArea) 83 | p_area.styles.animate("opacity", 0.0, final_value=1.0, duration=0.5) 84 | return 85 | 86 | f_area = self.query_one(".format", TextArea) 87 | try: 88 | parse_format(f_area.text) 89 | format = f_area.text 90 | except Exception: 91 | self.app.notify("Error parsing format", severity="error") 92 | f_area.styles.animate("opacity", 0.0, final_value=1.0, duration=0.5) 93 | return 94 | 95 | self.tools = self.query_one(ToolSelector).selected 96 | 97 | # Create updated chat model 98 | updated_chat_model = ChatModel( 99 | id=self.chat_model.id, 100 | name=self.chat_model.name, 101 | model=model, 102 | system=system, 103 | format=format, 104 | parameters=parameters, 105 | keep_alive=keep_alive, 106 | tools=self.tools, 107 | type=self.chat_model.type, 108 | ) 109 | 110 | self.dismiss(updated_chat_model.model_dump_json(exclude_none=True)) 111 | 112 | def action_cancel(self) -> None: 113 | self.dismiss() 114 | 115 | def action_save(self) -> None: 116 | self._return_chat_meta() 117 | 118 | def select_model(self, model: str) -> None: 119 | select = self.query_one("#model-select", OptionList) 120 | for index, option in enumerate(select._options): 121 | if str(option.prompt) == model: 122 | select.highlighted = index 123 | break 124 | 125 | async def on_mount(self) -> None: 126 | self.models = OllamaLLM.list().models 127 | 128 | models = [model.model or "" for model in self.models] 129 | for model in models: 130 | info = OllamaLLM.show(model) 131 | self.models_info[model] = info 132 | option_list = self.query_one("#model-select", OptionList) 133 | option_list.clear_options() 134 | for model in models: 135 | option_list.add_option(option=self.model_option(model)) 136 | option_list.highlighted = self.last_highlighted_index 137 | if self.model_name and self.tag: 138 | self.select_model(f"{self.model_name}:{self.tag}") 139 | 140 | # Disable the model select widget if we are in edit mode. 141 | widget = self.query_one("#model-select", OptionList) 142 | widget.disabled = self.edit_mode 143 | 144 | def on_option_list_option_highlighted( 145 | self, option: OptionList.OptionHighlighted 146 | ) -> None: 147 | model = option.option.prompt 148 | model_meta = next((m for m in self.models if m.model == str(model)), None) 149 | if model_meta: 150 | name, tag = (model_meta.model or "").split(":") 151 | self.model_name = name 152 | widget = self.query_one(".name", Label) 153 | widget.update(f"{self.model_name}") 154 | 155 | self.tag = tag 156 | widget = self.query_one(".tag", Label) 157 | widget.update(f"{self.tag}") 158 | 159 | self.bytes = model_meta["size"] 160 | widget = self.query_one(".size", Label) 161 | widget.update(f"{(self.bytes / 1.0e9):.2f} GB") 162 | 163 | meta = self.models_info.get(model_meta.model or "") 164 | self.model_info = meta # type: ignore 165 | if not self.edit_mode: 166 | self.parameters = parse_ollama_parameters( 167 | self.model_info.parameters or "" 168 | ) 169 | widget = self.query_one(".parameters", TextArea) 170 | widget.load_text(jsonify_options(self.parameters)) 171 | widget = self.query_one(".system", TextArea) 172 | 173 | # XXX Does not work as expected, there is no longer system in model_info 174 | widget.load_text(self.system or self.model_info.get("system", "")) 175 | 176 | # Deduce from the model's template if the model is tool-capable. 
177 | tools_supported = ".Tools" in self.model_info["template"] 178 | tool_selector = self.query_one(ToolSelector) 179 | tool_selector.disabled = not tools_supported 180 | 181 | # Now that there is a model selected we can save the chat. 182 | save_button = self.query_one("#save-btn", Button) 183 | save_button.disabled = False 184 | ChatEdit.last_highlighted_index = option.option_index 185 | 186 | def on_button_pressed(self, event: Button.Pressed) -> None: 187 | if event.button.name == "save": 188 | self._return_chat_meta() 189 | else: 190 | self.dismiss() 191 | 192 | @staticmethod 193 | def model_option(model: str) -> Text: 194 | return Text(model) 195 | 196 | def compose(self) -> ComposeResult: 197 | with Container(classes="screen-container full-height"): 198 | with Horizontal(): 199 | with Vertical(): 200 | with Horizontal(id="model-info"): 201 | yield Label("Model:", classes="title") 202 | yield Label(f"{self.model_name}", classes="name") 203 | yield Label("Tag:", classes="title") 204 | yield Label(f"{self.tag}", classes="tag") 205 | yield Label("Size:", classes="title") 206 | yield Label(f"{self.size}", classes="size") 207 | 208 | yield OptionList(id="model-select") 209 | yield Label("Tools:", classes="title") 210 | yield ToolSelector( 211 | id="tool-selector-container", selected=self.tools 212 | ) 213 | 214 | with Vertical(): 215 | yield Label("System:", classes="title") 216 | yield TextArea(self.system, classes="system log") 217 | yield Label("Parameters:", classes="title") 218 | yield TextArea( 219 | jsonify_options(self.parameters), 220 | classes="parameters log", 221 | language="json", 222 | ) 223 | yield Label("Format:", classes="title") 224 | yield TextArea( 225 | self.format or "", 226 | classes="format log", 227 | language="json", 228 | ) 229 | 230 | with Horizontal(): 231 | with Horizontal(): 232 | yield Label( 233 | "Keep-alive (min)", classes="title keep-alive-label" 234 | ) 235 | yield Input( 236 | classes="keep-alive", value=str(self.keep_alive) 237 | ) 238 | 239 | with Horizontal(classes="button-container"): 240 | yield Button( 241 | "Save", 242 | id="save-btn", 243 | name="save", 244 | disabled=True, 245 | variant="primary", 246 | ) 247 | yield Button("Cancel", name="cancel") 248 | -------------------------------------------------------------------------------- /src/oterm/app/chat_export.py: -------------------------------------------------------------------------------- 1 | import re 2 | import unicodedata 3 | from collections.abc import Sequence 4 | 5 | from textual import on 6 | from textual.app import ComposeResult 7 | from textual.containers import Container 8 | from textual.screen import ModalScreen 9 | from textual.widgets import Input, Label 10 | 11 | from oterm.store.store import Store 12 | from oterm.types import MessageModel 13 | 14 | 15 | def slugify(value): 16 | """ 17 | Taken from https://github.com/django/django/blob/master/django/utils/text.py 18 | """ 19 | value = str(value) 20 | value = ( 21 | unicodedata.normalize("NFKD", value).encode("ascii", "ignore").decode("ascii") 22 | ) 23 | value = re.sub(r"[^\w\s-]", "", value.lower()) 24 | return re.sub(r"[-\s]+", "-", value).strip("-_") 25 | 26 | 27 | class ChatExport(ModalScreen[str]): 28 | chat_id: int 29 | file_name: str = "" 30 | BINDINGS = [ 31 | ("escape", "cancel", "Cancel"), 32 | ] 33 | 34 | def __init__(self, chat_id: int, file_name: str = "") -> None: 35 | super().__init__() 36 | self.chat_id = chat_id 37 | self.file_name = file_name 38 | 39 | def action_cancel(self) -> None: 40 | self.dismiss() 
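    # `on_submit` below serialises the chat as lightweight Markdown: each
    # message becomes an emphasised *role* line followed by its text, and
    # messages are separated with a `---` horizontal rule, so the export
    # renders cleanly in any Markdown viewer.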
41 | 42 | @on(Input.Submitted) 43 | async def on_submit(self, event: Input.Submitted) -> None: 44 | store = await Store.get_store() 45 | 46 | if not event.value: 47 | return 48 | 49 | messages: Sequence[MessageModel] = await store.get_messages(self.chat_id) 50 | with open(event.value, "w", encoding="utf-8") as file: 51 | for message in messages: 52 | file.write(f"*{message.role}*\n") 53 | file.write(f"{message.text}\n") 54 | file.write("\n---\n") 55 | self.app.notify(f"Chat exported to {file.name}") 56 | self.dismiss() 57 | 58 | def compose(self) -> ComposeResult: 59 | with Container(classes="screen-container short"): 60 | yield Label("Export chat", classes="title") 61 | yield Input(id="chat-name-input", value=self.file_name) 62 | -------------------------------------------------------------------------------- /src/oterm/app/chat_rename.py: -------------------------------------------------------------------------------- 1 | from textual import on 2 | from textual.app import ComposeResult 3 | from textual.containers import Container 4 | from textual.screen import ModalScreen 5 | from textual.widgets import Input, Label 6 | 7 | 8 | class ChatRename(ModalScreen[str]): 9 | old_name: str = "" 10 | 11 | BINDINGS = [ 12 | ("escape", "cancel", "Cancel"), 13 | ] 14 | 15 | def __init__(self, old_name: str) -> None: 16 | super().__init__() 17 | self.old_name = old_name 18 | 19 | def action_cancel(self) -> None: 20 | self.dismiss() 21 | 22 | @on(Input.Submitted) 23 | async def on_submit(self, event: Input.Submitted) -> None: 24 | if event.value: 25 | self.dismiss(event.value) 26 | 27 | def compose(self) -> ComposeResult: 28 | with Container(classes="screen-container short"): 29 | yield Label("Rename chat", classes="title") 30 | yield Input(id="chat-name-input", value=self.old_name) 31 | -------------------------------------------------------------------------------- /src/oterm/app/css.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | tcss = "" 4 | with open(Path(__file__).parent / "oterm.tcss") as f: 5 | tcss = f.read() 6 | -------------------------------------------------------------------------------- /src/oterm/app/image_browser.py: -------------------------------------------------------------------------------- 1 | from base64 import b64encode 2 | from io import BytesIO 3 | from pathlib import Path 4 | 5 | import PIL.Image as PILImage 6 | from PIL import UnidentifiedImageError 7 | from textual import on 8 | from textual.app import ComposeResult 9 | from textual.containers import Container, Horizontal, Vertical 10 | from textual.screen import ModalScreen 11 | from textual.widgets import DirectoryTree, Input, Label 12 | from textual_image.widget import Image 13 | 14 | from oterm.app.widgets.image import IMAGE_EXTENSIONS, ImageDirectoryTree 15 | 16 | 17 | class ImageSelect(ModalScreen[tuple[Path, str]]): 18 | BINDINGS = [ 19 | ("escape", "cancel", "Cancel"), 20 | ] 21 | 22 | def action_cancel(self) -> None: 23 | self.dismiss() 24 | 25 | async def on_mount(self) -> None: 26 | dt = self.query_one(ImageDirectoryTree) 27 | dt.show_guides = False 28 | dt.focus() 29 | 30 | @on(DirectoryTree.FileSelected) 31 | async def on_image_selected(self, ev: DirectoryTree.FileSelected) -> None: 32 | try: 33 | buffer = BytesIO() 34 | image = PILImage.open(ev.path) 35 | if image.mode != "RGB": 36 | image = image.convert("RGB") 37 | image.save(buffer, format="JPEG") 38 | b64 = b64encode(buffer.getvalue()).decode("utf-8") 39 | self.dismiss((ev.path, 
b64)) 40 | except UnidentifiedImageError: 41 | self.dismiss() 42 | 43 | @on(DirectoryTree.NodeHighlighted) 44 | async def on_image_highlighted(self, ev: DirectoryTree.NodeHighlighted) -> None: 45 | path = ev.node.data.path # type: ignore 46 | image_widget = self.query_one(Image) 47 | if path.suffix in IMAGE_EXTENSIONS: 48 | try: 49 | image_widget.image = PILImage.open(path.as_posix()) 50 | except UnidentifiedImageError: 51 | image_widget.image = None 52 | else: 53 | image_widget.image = None 54 | 55 | @on(Input.Changed) 56 | async def on_root_changed(self, ev: Input.Changed) -> None: 57 | dt = self.query_one(ImageDirectoryTree) 58 | path = Path(ev.value) 59 | if not path.exists() or not path.is_dir(): 60 | return 61 | dt.path = path 62 | 63 | def compose(self) -> ComposeResult: 64 | with Container( 65 | id="image-select-container", classes="screen-container full-height" 66 | ): 67 | with Horizontal(): 68 | with Vertical(id="image-directory-tree"): 69 | yield Label("Select an image:", classes="title") 70 | yield Label("Root:") 71 | yield Input(Path("./").resolve().as_posix()) 72 | yield ImageDirectoryTree("./") 73 | with Container(id="image-preview"): 74 | yield Image(id="image") 75 | -------------------------------------------------------------------------------- /src/oterm/app/log_viewer.py: -------------------------------------------------------------------------------- 1 | from textual.app import ComposeResult 2 | from textual.containers import Container 3 | from textual.reactive import reactive 4 | from textual.screen import ModalScreen 5 | from textual.widgets import Label, RichLog 6 | 7 | from oterm.log import log_lines 8 | from oterm.utils import debounce 9 | 10 | 11 | class LogViewer(ModalScreen[str]): 12 | line_count: reactive[int] = reactive(0) 13 | 14 | BINDINGS = [ 15 | ("escape", "cancel", "Cancel"), 16 | ] 17 | 18 | def action_cancel(self) -> None: 19 | self.dismiss() 20 | 21 | @debounce(0.5) 22 | async def log_update(self) -> None: 23 | widget = self.query_one(RichLog) 24 | new_lines = log_lines[self.line_count :] 25 | self.line_count += len(new_lines) 26 | for group, line in new_lines: 27 | widget.write(f"[b]{group.name}[/b] - {line}") 28 | await self.log_update() 29 | 30 | async def on_screen_resume(self) -> None: 31 | await self.log_update() 32 | 33 | def compose(self) -> ComposeResult: 34 | with Container(id="log-viewer", classes="screen-container full-height"): 35 | yield Label("oterm logs", classes="title") 36 | yield RichLog( 37 | highlight=True, 38 | markup=True, 39 | auto_scroll=True, 40 | wrap=True, 41 | ) 42 | -------------------------------------------------------------------------------- /src/oterm/app/mcp_prompt.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | import json 3 | from collections.abc import Awaitable, Callable 4 | 5 | from mcp.types import Prompt 6 | from ollama import Message 7 | from textual import on 8 | from textual.app import ComposeResult, RenderResult 9 | from textual.containers import ( 10 | Container, 11 | Horizontal, 12 | Vertical, 13 | VerticalScroll, 14 | ) 15 | from textual.screen import ModalScreen 16 | from textual.widget import Widget 17 | from textual.widgets import Button, Input, Label, OptionList, TextArea 18 | from textual.widgets.option_list import Option 19 | 20 | from oterm.tools.mcp.prompts import ( 21 | available_prompt_calls, 22 | available_prompt_defs, 23 | mcp_prompt_to_ollama_messages, 24 | ) 25 | from oterm.utils import debounce 26 | 27 | 28 | class 
PromptOptionWidget(Widget): 29 | def __init__(self, server: str, prompt: Prompt) -> None: 30 | super().__init__() 31 | self.prompt = prompt 32 | self.server = server 33 | 34 | def render(self) -> RenderResult: 35 | return f"[b]{self.server} - {self.prompt.name}[/b]\n[i]{self.prompt.description}[/i]" 36 | 37 | 38 | class PromptFormWidget(Widget): 39 | prompt: Prompt 40 | callable: Callable | Awaitable 41 | messages: list[Message] = [] 42 | 43 | @on(Input.Changed) 44 | @debounce(1.0) 45 | async def on_text_area_change(self) -> None: 46 | is_valid = True 47 | params = {} 48 | for arg in self.prompt.arguments or []: 49 | params[arg.name] = self.query_one(f"#arg-{arg.name}", Input).value 50 | if arg.required and not params[arg.name]: 51 | is_valid = False 52 | prompt_result_widget = self.query_one("#prompt-result", TextArea) 53 | if inspect.iscoroutinefunction(self.callable): 54 | messages = await self.callable(**params) 55 | else: 56 | messages = self.callable(**params) # type: ignore 57 | self.messages = messages = mcp_prompt_to_ollama_messages(messages) 58 | prompt_result_widget.text = "\n".join( 59 | [f"{m.role}: {m.content}" for m in messages] 60 | ) 61 | submit_button = self.screen.query_one("#submit", Button) 62 | submit_button.disabled = not is_valid 63 | 64 | def compose(self) -> ComposeResult: 65 | with VerticalScroll(id="prompt-form-container"): 66 | for arg in self.prompt.arguments or []: 67 | yield Label( 68 | f"{arg.name}{arg.required and ' (required)' or ''}", classes="title" 69 | ) 70 | yield Input(id=f"arg-{arg.name}", tooltip=arg.description) 71 | yield Label("Messages:", classes="subtitle") 72 | yield TextArea(id="prompt-result", read_only=True) 73 | 74 | 75 | class MCPPrompt(ModalScreen[str]): 76 | BINDINGS = [ 77 | ("escape", "cancel", "Cancel"), 78 | ("enter", "submit", "Submit"), 79 | ] 80 | 81 | def action_cancel(self) -> None: 82 | self.dismiss() 83 | 84 | async def on_mount(self) -> None: 85 | option_list = self.query_one("#mcp-prompt-select", OptionList) 86 | option_list.clear_options() 87 | for server in available_prompt_defs.keys(): 88 | for prompt_call in available_prompt_defs[server]: 89 | option_list.add_option( 90 | option=self.prompt_option(server, prompt_call["prompt"]) 91 | ) 92 | 93 | @staticmethod 94 | def prompt_option(server: str, prompt: Prompt) -> Option: 95 | return Option( 96 | prompt=PromptOptionWidget(server, prompt).render(), id=prompt.name 97 | ) 98 | 99 | async def on_option_list_option_highlighted( 100 | self, option: OptionList.OptionHighlighted 101 | ) -> None: 102 | prompt = None 103 | prompt_call = None 104 | for prompt_call in available_prompt_calls(): 105 | prompt = prompt_call["prompt"] 106 | if prompt.name == option.option.id: 107 | break 108 | if prompt is None or prompt_call is None: 109 | return 110 | 111 | form_container = self.query_one("#prompt-form-container", Vertical) 112 | form_container = self.query_one("#prompt-form-container", Vertical) 113 | form_container.remove_children() 114 | widget = PromptFormWidget(classes="prompt-form") 115 | widget.prompt = prompt 116 | widget.callable = prompt_call["callable"] 117 | form_container.mount(widget) 118 | 119 | has_required_args = any(arg.required for arg in prompt.arguments or []) 120 | submit_button = self.query_one("#submit", Button) 121 | submit_button.disabled = has_required_args 122 | if not has_required_args: 123 | await widget.on_text_area_change() 124 | 125 | def on_button_pressed(self, event: Button.Pressed) -> None: 126 | if event.button.name == "submit": 127 | form = 
self.query_one(".prompt-form", PromptFormWidget) 128 | jsn = json.dumps([m.model_dump() for m in form.messages]) 129 | self.dismiss(jsn) 130 | else: 131 | self.dismiss() 132 | 133 | def compose(self) -> ComposeResult: 134 | with Container(classes="screen-container full-height"): 135 | with Horizontal(): 136 | with Vertical(): 137 | yield Label("Available MCP prompts", classes="title") 138 | yield OptionList(id="mcp-prompt-select") 139 | 140 | with Vertical(): 141 | yield Label("Customize prompt:", classes="title") 142 | yield Vertical(id="prompt-form-container") 143 | with Horizontal(classes="button-container"): 144 | yield Button( 145 | "Submit", 146 | id="submit", 147 | name="submit", 148 | variant="primary", 149 | disabled=True, 150 | ) 151 | yield Button("Cancel", name="cancel") 152 | -------------------------------------------------------------------------------- /src/oterm/app/oterm.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Iterable 2 | 3 | from textual import on, work 4 | from textual.app import App, ComposeResult, SystemCommand 5 | from textual.binding import Binding 6 | from textual.screen import Screen 7 | from textual.widgets import Footer, Header, TabbedContent, TabPane 8 | 9 | from oterm.app.chat_edit import ChatEdit 10 | from oterm.app.chat_export import ChatExport, slugify 11 | from oterm.app.pull_model import PullModel 12 | from oterm.app.splash import splash 13 | from oterm.app.widgets.chat import ChatContainer 14 | from oterm.config import appConfig 15 | from oterm.store.store import Store 16 | from oterm.tools.external import load_external_tools 17 | from oterm.tools.mcp.setup import setup_mcp_servers, teardown_mcp_servers 18 | from oterm.types import ChatModel, ExternalToolDefinition 19 | from oterm.utils import check_ollama, is_up_to_date 20 | 21 | 22 | class OTerm(App): 23 | TITLE = "oterm" 24 | SUB_TITLE = "the TUI Ollama client." 
25 | CSS_PATH = "oterm.tcss" 26 | BINDINGS = [ 27 | Binding("ctrl+tab", "cycle_chat(+1)", "next chat", id="next.chat"), 28 | Binding("ctrl+shift+tab", "cycle_chat(-1)", "prev chat", id="prev.chat"), 29 | Binding("ctrl+backspace", "delete_chat", "delete chat", id="delete.chat"), 30 | Binding("ctrl+n", "new_chat", "new chat", id="new.chat"), 31 | Binding("ctrl+l", "show_logs", "show logs", id="show.logs"), 32 | Binding("ctrl+q", "quit", "quit", id="quit"), 33 | ] 34 | 35 | def get_system_commands(self, screen: Screen) -> Iterable[SystemCommand]: 36 | yield from super().get_system_commands(screen) 37 | yield SystemCommand("New chat", "Creates a new chat", self.action_new_chat) 38 | yield SystemCommand( 39 | "Edit chat parameters", 40 | "Allows to redefine model parameters and system prompt", 41 | self.action_edit_chat, 42 | ) 43 | yield SystemCommand( 44 | "Rename chat", "Renames the current chat", self.action_rename_chat 45 | ) 46 | yield SystemCommand( 47 | "Clear chat", "Clears the current chat", self.action_clear_chat 48 | ) 49 | yield SystemCommand( 50 | "Delete chat", "Deletes the current chat", self.action_delete_chat 51 | ) 52 | yield SystemCommand( 53 | "Export chat", 54 | "Exports the current chat as Markdown (in the current working directory)", 55 | self.action_export_chat, 56 | ) 57 | yield SystemCommand( 58 | "Regenerate last Ollama message", 59 | "Regenerates the last Ollama message (setting a random seed for the message)", 60 | self.action_regenerate_last_message, 61 | ) 62 | yield SystemCommand( 63 | "Use MCP prompt", 64 | "Create and copy to clipboard an MCP prompt.", 65 | self.action_mcp_prompt, 66 | ) 67 | yield SystemCommand( 68 | "Pull model", 69 | "Pulls (or updates) the model from the Ollama server", 70 | self.action_pull_model, 71 | ) 72 | yield SystemCommand( 73 | "Show logs", "Shows the logs of the app", self.action_show_logs 74 | ) 75 | 76 | async def action_quit(self) -> None: 77 | self.log("Quitting...") 78 | await teardown_mcp_servers() 79 | return self.exit() 80 | 81 | async def action_cycle_chat(self, change: int) -> None: 82 | tabs = self.query_one(TabbedContent) 83 | store = await Store.get_store() 84 | saved_chats = await store.get_chats() 85 | if tabs.active_pane is None: 86 | return 87 | active_id = int(str(tabs.active_pane.id).split("-")[1]) 88 | for chat_model in saved_chats: 89 | if chat_model.id == active_id: 90 | next_index = (saved_chats.index(chat_model) + change) % len(saved_chats) 91 | next_id = saved_chats[next_index].id 92 | if next_id is not None: # Ensure we have a valid ID 93 | tabs.active = f"chat-{next_id}" 94 | break 95 | 96 | @work 97 | async def action_new_chat(self) -> None: 98 | store = await Store.get_store() 99 | model_info: str | None = await self.push_screen_wait(ChatEdit()) 100 | if not model_info: 101 | return 102 | 103 | chat_model = ChatModel.model_validate_json(model_info) 104 | tabs = self.query_one(TabbedContent) 105 | tab_count = tabs.tab_count 106 | 107 | name = f"chat #{tab_count + 1} - {chat_model.model}" 108 | chat_model.name = name 109 | 110 | id = await store.save_chat(chat_model) 111 | chat_model.id = id 112 | 113 | pane = TabPane(name, id=f"chat-{id}") 114 | pane.compose_add_child( 115 | ChatContainer( 116 | chat_model=chat_model, 117 | messages=[], 118 | ) 119 | ) 120 | await tabs.add_pane(pane) 121 | tabs.active = f"chat-{id}" 122 | 123 | async def action_edit_chat(self) -> None: 124 | tabs = self.query_one(TabbedContent) 125 | if tabs.active_pane is None: 126 | return 127 | chat = 
tabs.active_pane.query_one(ChatContainer) 128 | chat.action_edit_chat() 129 | 130 | async def action_rename_chat(self) -> None: 131 | tabs = self.query_one(TabbedContent) 132 | if tabs.active_pane is None: 133 | return 134 | chat = tabs.active_pane.query_one(ChatContainer) 135 | chat.action_rename_chat() 136 | 137 | async def action_clear_chat(self) -> None: 138 | tabs = self.query_one(TabbedContent) 139 | if tabs.active_pane is None: 140 | return 141 | chat = tabs.active_pane.query_one(ChatContainer) 142 | await chat.action_clear_chat() 143 | 144 | async def action_delete_chat(self) -> None: 145 | tabs = self.query_one(TabbedContent) 146 | if tabs.active_pane is None: 147 | return 148 | chat = tabs.active_pane.query_one(ChatContainer) 149 | store = await Store.get_store() 150 | 151 | if chat.chat_model.id is not None: 152 | await store.delete_chat(chat.chat_model.id) 153 | await tabs.remove_pane(tabs.active) 154 | self.notify(f"Deleted {chat.chat_model.name}", severity="information") 155 | 156 | async def action_export_chat(self) -> None: 157 | tabs = self.query_one(TabbedContent) 158 | if tabs.active_pane is None: 159 | return 160 | chat = tabs.active_pane.query_one(ChatContainer) 161 | 162 | if chat.chat_model.id is not None: 163 | screen = ChatExport( 164 | chat_id=chat.chat_model.id, 165 | file_name=f"{slugify(chat.chat_model.name)}.md", 166 | ) 167 | self.push_screen(screen) 168 | 169 | async def action_regenerate_last_message(self) -> None: 170 | tabs = self.query_one(TabbedContent) 171 | if tabs.active_pane is None: 172 | return 173 | chat = tabs.active_pane.query_one(ChatContainer) 174 | await chat.action_regenerate_llm_message() 175 | 176 | async def action_mcp_prompt(self) -> None: 177 | tabs = self.query_one(TabbedContent) 178 | if tabs.active_pane is None: 179 | return 180 | chat = tabs.active_pane.query_one(ChatContainer) 181 | chat.action_mcp_prompt() 182 | 183 | async def action_pull_model(self) -> None: 184 | tabs = self.query_one(TabbedContent) 185 | if tabs.active_pane is None: 186 | screen = PullModel("") 187 | else: 188 | chat = tabs.active_pane.query_one(ChatContainer) 189 | screen = PullModel(chat.ollama.model) 190 | self.push_screen(screen) 191 | 192 | async def action_show_logs(self) -> None: 193 | from oterm.app.log_viewer import LogViewer 194 | 195 | screen = LogViewer() 196 | self.push_screen(screen) 197 | 198 | async def load_mcp(self): 199 | from oterm.tools import available_tool_defs 200 | from oterm.tools.mcp.prompts import available_prompt_defs 201 | 202 | external_tool_defs: list[ExternalToolDefinition] = appConfig.get("tools", []) # type: ignore 203 | external_tools = list(load_external_tools(external_tool_defs)) 204 | available_tool_defs["external"] = external_tools 205 | mcp_tool_calls, mcp_prompt_calls = await setup_mcp_servers() 206 | available_tool_defs.update(mcp_tool_calls) 207 | available_prompt_defs.update(mcp_prompt_calls) 208 | 209 | @work(exclusive=True) 210 | async def perform_checks(self) -> None: 211 | await check_ollama() 212 | up_to_date, _, latest = await is_up_to_date() 213 | if not up_to_date: 214 | self.notify( 215 | f"[b]oterm[/b] version [i]{latest}[/i] is available, please update.", 216 | severity="warning", 217 | ) 218 | 219 | async def on_mount(self) -> None: 220 | store = await Store.get_store() 221 | theme = appConfig.get("theme") 222 | if theme: 223 | if theme == "dark": 224 | self.theme = "textual-dark" 225 | elif theme == "light": 226 | self.theme = "textual-light" 227 | else: 228 | self.theme = theme 229 | self.dark = 
appConfig.get("theme") == "dark" 230 | self.watch(self.app, "theme", self.on_theme_change, init=False) 231 | 232 | saved_chats = await store.get_chats() 233 | # Apply any remap of key bindings. 234 | keymap = appConfig.get("keymap") 235 | if keymap: 236 | self.set_keymap(keymap) 237 | 238 | await self.load_mcp() 239 | 240 | async def on_splash_done(message) -> None: 241 | if not saved_chats: 242 | # Pyright suggests awaiting here which has bitten me twice 243 | # so I'm ignoring it 244 | self.action_new_chat() # type: ignore 245 | else: 246 | tabs = self.query_one(TabbedContent) 247 | for chat_model in saved_chats: 248 | # Only process chats with a valid ID 249 | if chat_model.id is not None: 250 | messages = await store.get_messages(chat_model.id) 251 | container = ChatContainer( 252 | chat_model=chat_model, 253 | messages=messages, 254 | ) 255 | pane = TabPane( 256 | chat_model.name, container, id=f"chat-{chat_model.id}" 257 | ) 258 | tabs.add_pane(pane) 259 | self.perform_checks() 260 | 261 | if appConfig.get("splash-screen"): 262 | self.push_screen(splash, callback=on_splash_done) 263 | else: 264 | await on_splash_done("") 265 | 266 | def on_theme_change(self, old_value: str, new_value: str) -> None: 267 | if appConfig.get("theme") != new_value: 268 | appConfig.set("theme", new_value) 269 | 270 | @work 271 | @on(TabbedContent.TabActivated) 272 | async def on_tab_activated(self, event: TabbedContent.TabActivated) -> None: 273 | container = event.pane.query_one(ChatContainer) 274 | await container.load_messages() 275 | 276 | def compose(self) -> ComposeResult: 277 | yield Header() 278 | yield TabbedContent(id="tabs") 279 | yield Footer() 280 | 281 | 282 | app = OTerm() 283 | -------------------------------------------------------------------------------- /src/oterm/app/oterm.tcss: -------------------------------------------------------------------------------- 1 | ChatContainer { 2 | height: auto; 3 | } 4 | 5 | #messageContainer { 6 | overflow-y: auto; 7 | padding-bottom: 1; 8 | padding-right: 2; 9 | height: 100%; 10 | } 11 | 12 | ChatItem { 13 | height: auto; 14 | } 15 | 16 | ChatItem .chatItem { 17 | height: auto; 18 | } 19 | 20 | ChatItem .user { 21 | align: right top; 22 | } 23 | 24 | ChatItem .assistant { 25 | align: left top; 26 | } 27 | 28 | ChatItem .text{ 29 | margin-bottom: 1; 30 | max-width: 90%; 31 | border: round $secondary-lighten-2; 32 | } 33 | 34 | ChatItem .user .text { 35 | background: $panel-lighten-1; 36 | align: right top; 37 | 38 | text-align: right; 39 | } 40 | 41 | ChatItem .assistant .text { 42 | background: $panel; 43 | } 44 | 45 | #prompt { 46 | background: $panel; 47 | dock: bottom; 48 | padding: 1; 49 | } 50 | 51 | #prompt.singleline { 52 | height: 5; 53 | } 54 | 55 | #prompt.multiline #promptInput { 56 | display: none; 57 | } 58 | 59 | 60 | #prompt.singleline #promptArea { 61 | display: none; 62 | } 63 | 64 | #prompt.multiline { 65 | height: 10; 66 | } 67 | 68 | #prompt #promptInput #promptArea { 69 | dock: left; 70 | } 71 | 72 | #prompt #button-container { 73 | padding-left: 1; 74 | width: 24; 75 | dock: right; 76 | } 77 | 78 | #prompt #button-container #toggle-multiline { 79 | margin-left:1; 80 | } 81 | 82 | #info { 83 | color: $primary-lighten-1; 84 | text-align: right; 85 | padding-right: 5; 86 | text-style: italic; 87 | } 88 | 89 | LoadingIndicator { 90 | height: 3; 91 | } 92 | 93 | .sceen-container { 94 | width: auto; 95 | height: auto; 96 | background: $panel; 97 | border: $panel-lighten-2; 98 | margin-top: 2; 99 | margin-left: 2; 100 | padding: 1; 
101 | } 102 | 103 | .full-height { 104 | margin: 1; 105 | padding: 1; 106 | } 107 | 108 | .short { 109 | height: 10; 110 | } 111 | 112 | .title{ 113 | color: $primary; 114 | margin-right: 1; 115 | margin-bottom: 1; 116 | } 117 | 118 | .subtitle { 119 | color: $secondary 120 | } 121 | 122 | .screen-container .button-container { 123 | height: 3; 124 | } 125 | 126 | .screen-container .button-container Button { 127 | margin-right: 1; 128 | } 129 | 130 | #edit-chat-container { 131 | margin: 1; 132 | padding: 1; 133 | } 134 | 135 | #edit-chat-container .system { 136 | margin: 1; 137 | } 138 | 139 | #model-select { 140 | margin-top: 1; 141 | max-height: 50%; 142 | } 143 | 144 | #model-info { 145 | height: 1; 146 | } 147 | 148 | #model-info .name, .tag, .size { 149 | margin-right: 1; 150 | } 151 | 152 | #tool-selector-container { 153 | height: auto; 154 | max-height: 50%; 155 | padding-bottom: 2; 156 | padding-right:2; 157 | } 158 | 159 | #tool-selector { 160 | margin-bottom: 2; 161 | width: 100%; 162 | } 163 | 164 | #tool-selector .tool-group { 165 | height: auto; 166 | width: 100%; 167 | padding-top: 1; 168 | padding-bottom: 1; 169 | background: $surface; 170 | } 171 | 172 | #tool-selector .tool-group-select-all{ 173 | width: 50%; 174 | } 175 | 176 | #tool-selector .tools { 177 | height: auto; 178 | width: 50%; 179 | 180 | } 181 | 182 | #edit-chat-container .button-container { 183 | margin: 1; 184 | } 185 | 186 | #edit-chat-container .json-format { 187 | background: $panel; 188 | color: $secondary; 189 | } 190 | 191 | Label.keep-alive-label { 192 | margin-top: 1; 193 | } 194 | 195 | #prompt-history { 196 | height: 90%; 197 | } 198 | 199 | #chat-name-input { 200 | margin: 2; 201 | } 202 | 203 | #image-select-container .title { 204 | width: 100%; 205 | padding-top: 1; 206 | text-align: center; 207 | } 208 | 209 | #image-directory-tree { 210 | width: 30%; 211 | } 212 | 213 | #image-preview{ 214 | width: 70%; 215 | } 216 | 217 | #image-preview #image { 218 | width: auto; 219 | height: auto; 220 | } 221 | 222 | #image-select-container Input { 223 | margin: 1; 224 | padding: 0; 225 | } 226 | 227 | #pull-model-container Horizontal { 228 | margin-top: 1; 229 | height: 3; 230 | } 231 | 232 | #pull-model-container Input { 233 | width: 60; 234 | } 235 | 236 | #prompt-form-container { 237 | height: 100%; 238 | } 239 | 240 | #prompt-form-container .title, .subtitle { 241 | margin-top: 1; 242 | margin-bottom: 1; 243 | } 244 | 245 | #prompt-result { 246 | height: auto; 247 | padding: 1; 248 | margin-top: 1; 249 | margin-bottom: 1; 250 | } 251 | 252 | TabbedContent { 253 | padding-top: 2; 254 | layer: below; 255 | } 256 | 257 | MarkdownFence { 258 | max-height: 50; 259 | } 260 | 261 | Button.icon { 262 | min-width: 5; 263 | } 264 | 265 | Input.keep-alive { 266 | width: 10; 267 | } 268 | 269 | #app-root { 270 | height: 50vh; 271 | max-height: 50vh; 272 | } 273 | -------------------------------------------------------------------------------- /src/oterm/app/prompt_history.py: -------------------------------------------------------------------------------- 1 | from rich.text import Text 2 | from textual.app import ComposeResult 3 | from textual.containers import Container 4 | from textual.screen import ModalScreen 5 | from textual.widgets import Label, OptionList 6 | 7 | 8 | class PromptHistory(ModalScreen[str]): 9 | history: list[str] = [] 10 | BINDINGS = [ 11 | ("escape", "cancel", "Cancel"), 12 | ] 13 | 14 | def __init__(self, history=[]) -> None: 15 | self.history = history 16 | super().__init__() 17 | 18 | 
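    # The screen lists prompts in the order given; ChatContainer.action_history
    # reverses the user's messages before pushing this screen so the most
    # recent prompt appears first.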
def action_cancel(self) -> None: 19 | self.dismiss() 20 | 21 | def on_mount(self) -> None: 22 | option_list = self.query_one("#prompt-history", OptionList) 23 | option_list.clear_options() 24 | for prompt in self.history: 25 | option_list.add_option(option=Text(prompt)) 26 | 27 | def on_option_list_option_selected(self, option: OptionList.OptionSelected) -> None: 28 | self.dismiss(str(option.option.prompt)) 29 | 30 | def compose(self) -> ComposeResult: 31 | with Container(classes="screen-container full-height"): 32 | yield Label("Prompt history", classes="title") 33 | yield OptionList(id="prompt-history") 34 | -------------------------------------------------------------------------------- /src/oterm/app/pull_model.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from ollama import ResponseError 4 | from textual import on, work 5 | from textual.app import ComposeResult 6 | from textual.containers import Container, Horizontal 7 | from textual.screen import ModalScreen 8 | from textual.widgets import Button, Input, Label, TextArea 9 | 10 | from oterm.ollamaclient import OllamaLLM 11 | 12 | 13 | class PullModel(ModalScreen[str]): 14 | model: str = "" 15 | BINDINGS = [ 16 | ("escape", "cancel", "Cancel"), 17 | ] 18 | 19 | def __init__(self, model: str) -> None: 20 | self.model = model 21 | super().__init__() 22 | 23 | def action_cancel(self) -> None: 24 | self.dismiss() 25 | 26 | @work 27 | async def pull_model(self) -> None: 28 | log = self.query_one(".log", TextArea) 29 | stream = OllamaLLM.pull(self.model) 30 | try: 31 | for response in stream: 32 | log.text += response.model_dump_json() + "\n" 33 | await asyncio.sleep(0.1) 34 | await asyncio.sleep(1.0) 35 | except ResponseError as e: 36 | log.text += f"Error: {e}\n" 37 | self.app.notify("Model pulled successfully") 38 | 39 | @on(Input.Changed) 40 | async def on_model_change(self, ev: Input.Changed) -> None: 41 | self.model = ev.value 42 | 43 | @on(Button.Pressed) 44 | @on(Input.Submitted) 45 | async def on_pull(self, ev: Button.Pressed) -> None: 46 | self.pull_model() 47 | 48 | def compose(self) -> ComposeResult: 49 | with Container( 50 | id="pull-model-container", classes="screen-container full-height" 51 | ): 52 | yield Label("Pull model", classes="title") 53 | with Horizontal(): 54 | yield Input(self.model) 55 | yield Button("Pull", variant="primary") 56 | yield TextArea(classes="parameters log", read_only=True) 57 | -------------------------------------------------------------------------------- /src/oterm/app/splash.py: -------------------------------------------------------------------------------- 1 | import random 2 | from typing import Any 3 | 4 | from textualeffects.effects import EffectType 5 | from textualeffects.widgets import SplashScreen 6 | 7 | logo = """ 8 | @@@@@@. :@@@@@@ 9 | @@@@@@@@@ @@@@@@@@@ 10 | @@@@= @@@@@ @@@@@ =@@@@ 11 | %@@@% @@@@. .@@@@ %@@@% 12 | @@@@ .@@@@ @@@@. @@@@ 13 | @@@@ @@@@ @@@@@@@@@@@@ @@@@ @@@@ 14 | @@@@ @@@@@@@@@@@@@@@@@@@@@@@@ @@@@ 15 | @@@@ @@@@@@@@ @@@@@@@@ @@@@ 16 | @@@@ .@@@@@ @@@@@. @@@@ 17 | @@@@@@@@@@@@. .@@@@@@@@@@@@ 18 | @@@@@@@@@@@@ @@@@@@@@@@@@ 19 | #@@@@@* *@@@@@# 20 | @@@@@ @@@@@ 21 | @@@@@ @@@@@ 22 | =@@@@ @@@@= 23 | @@@@ @@@@ 24 | @@@@ - +@@@@@@+ - @@@@ 25 | @@@@ .@@@@@ :@@@@@@@@@@@@@@: @@@@@. @@@@ 26 | @@@@ %@@@@@ @@@@ @@@@ @@@@@% @@@@ 27 | =@@@@ @@ .@@@ @@@. @@ @@@@= 28 | @@@@@ @@@ *@@@@: @@@ @@@@@ 29 | %@@@@ @@@ @@ @@@ @@@@% 30 | @@@@: @@@ @@ @@@ :@@@@ 31 | @@@@. @@@@ @@@@ .@@@@ 32 | :@@@@ @@@@@@@@@@@@@@@@ @@@@. 
33 | @@@@- =@@@@@@@@= -@@@@ 34 | @@@@ @@@@ 35 | @@@@: :@@@@ 36 | *@@@ @@@* 37 | @@@@ @@@@ 38 | #@@@@ @@@@# 39 | %@@@@ @@@@@ 40 | #@@@@ @@@@# 41 | @@@@ @@@@ 42 | @@@@= =@@@@ 43 | @@@@ @@@@ 44 | @@@@ @@@@ 45 | @@@@ @@@@ 46 | @. .@ 47 | """ 48 | 49 | effects: list[tuple[EffectType, dict[str, Any]]] = [ 50 | ( 51 | "Beams", 52 | { 53 | "beam_delay": 3, 54 | "beam_gradient_steps": 2, 55 | "beam_gradient_frames": 2, 56 | "final_gradient_steps": 2, 57 | "final_gradient_frames": 2, 58 | "final_wipe_speed": 5, 59 | }, 60 | ), 61 | ( 62 | "BouncyBalls", 63 | { 64 | "ball_delay": 1, 65 | }, 66 | ), 67 | ( 68 | "Expand", 69 | { 70 | "movement_speed": 0.1, 71 | }, 72 | ), 73 | ( 74 | "Pour", 75 | { 76 | "pour_speed": 3, 77 | }, 78 | ), 79 | ( 80 | "Rain", 81 | {}, 82 | ), 83 | ( 84 | "RandomSequence", 85 | {}, 86 | ), 87 | ( 88 | "Scattered", 89 | {}, 90 | ), 91 | ( 92 | "Slide", 93 | {}, 94 | ), 95 | ] 96 | 97 | effect, config = random.choice(effects) 98 | splash = SplashScreen(text=logo, effect=effect, config=config) 99 | -------------------------------------------------------------------------------- /src/oterm/app/widgets/__init__.py: -------------------------------------------------------------------------------- 1 | import oterm.app.widgets.monkey # noqa: F401 2 | -------------------------------------------------------------------------------- /src/oterm/app/widgets/chat.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import json 3 | import random 4 | from pathlib import Path 5 | 6 | from ollama import Message, ResponseError 7 | from textual import on, work 8 | from textual.app import ComposeResult 9 | from textual.binding import Binding 10 | from textual.containers import Horizontal, Vertical, VerticalScroll 11 | from textual.events import Click 12 | from textual.reactive import reactive 13 | from textual.widget import Widget 14 | from textual.widgets import ( 15 | LoadingIndicator, 16 | Markdown, 17 | Static, 18 | TabbedContent, 19 | ) 20 | 21 | from oterm.app.chat_edit import ChatEdit 22 | from oterm.app.chat_rename import ChatRename 23 | from oterm.app.mcp_prompt import MCPPrompt 24 | from oterm.app.prompt_history import PromptHistory 25 | from oterm.app.widgets.image import ImageAdded 26 | from oterm.app.widgets.prompt import FlexibleInput 27 | from oterm.ollamaclient import OllamaLLM, Options 28 | from oterm.store.store import Store 29 | from oterm.tools import available_tool_calls 30 | from oterm.types import ChatModel, MessageModel 31 | from oterm.utils import parse_response 32 | 33 | 34 | class ChatContainer(Widget): 35 | ollama = OllamaLLM() 36 | messages: reactive[list[MessageModel]] = reactive([]) 37 | images: list[tuple[Path, str]] = [] 38 | BINDINGS = [ 39 | Binding("up", "history", "history"), 40 | Binding( 41 | "escape", "cancel_inference", "cancel inference", show=False, priority=True 42 | ), 43 | ] 44 | 45 | def __init__( 46 | self, 47 | *children: Widget, 48 | messages: list[MessageModel] = [], 49 | chat_model: ChatModel, 50 | **kwargs, 51 | ) -> None: 52 | super().__init__(*children, **kwargs) 53 | 54 | self.messages = messages 55 | self.chat_model = chat_model 56 | history = [] 57 | # This is wrong, the images should be a list of Image objects 58 | # See https://github.com/ollama/ollama-python/issues/375 59 | # Temp fix is to do msg.images = images # type: ignore 60 | for msg_model in messages: 61 | message_text = msg_model.text 62 | msg = Message( 63 | role=msg_model.role, 64 | content=( 65 | message_text 66 | if 
msg_model.role == "user" 67 | else parse_response(message_text).response 68 | ), 69 | ) 70 | msg.images = msg_model.images # type: ignore 71 | history.append(msg) 72 | 73 | used_tool_defs = [ 74 | tool_def 75 | for tool_def in available_tool_calls() 76 | if tool_def["tool"] in chat_model.tools 77 | ] 78 | 79 | self.ollama = OllamaLLM( 80 | model=chat_model.model, 81 | system=chat_model.system, 82 | format=chat_model.format, 83 | options=chat_model.parameters, 84 | keep_alive=chat_model.keep_alive, 85 | history=history, 86 | tool_defs=used_tool_defs, 87 | ) 88 | self.loaded = False 89 | self.loading = False 90 | self.images = [] 91 | 92 | def on_mount(self) -> None: 93 | self.query_one("#prompt").focus() 94 | 95 | async def load_messages(self) -> None: 96 | if self.loaded or self.loading: 97 | return 98 | self.loading = True 99 | message_container = self.query_one("#messageContainer") 100 | for message in self.messages: 101 | chat_item = ChatItem() 102 | chat_item.text = ( 103 | message.text 104 | if message.role == "user" 105 | else parse_response(message.text).formatted_output 106 | ) 107 | chat_item.author = message.role 108 | await message_container.mount(chat_item) 109 | message_container.scroll_end() 110 | self.loading = False 111 | self.loaded = True 112 | 113 | async def response_task(self, message: str) -> None: 114 | message_container = self.query_one("#messageContainer") 115 | 116 | user_chat_item = ChatItem() 117 | user_chat_item.text = message 118 | user_chat_item.author = "user" 119 | message_container.mount(user_chat_item) 120 | 121 | response_chat_item = ChatItem() 122 | response_chat_item.author = "assistant" 123 | message_container.mount(response_chat_item) 124 | loading = LoadingIndicator() 125 | await message_container.mount(loading) 126 | message_container.scroll_end() 127 | 128 | try: 129 | response = "" 130 | 131 | async for text in self.ollama.stream( 132 | message, [img for _, img in self.images] 133 | ): 134 | response = text 135 | response_chat_item.text = text 136 | 137 | parsed = parse_response(response) 138 | 139 | # To not exhaust the tokens, remove the thought process from the history (it seems to be the common practice) 140 | self.ollama.history[-1].content = parsed.response # type: ignore 141 | response_chat_item.text = parsed.formatted_output 142 | 143 | if message_container.can_view_partial(response_chat_item): 144 | message_container.scroll_end() 145 | 146 | store = await Store.get_store() 147 | 148 | # Create and save user message model 149 | user_message = MessageModel( 150 | id=None, 151 | chat_id=self.chat_model.id, # type: ignore 152 | role="user", 153 | text=message, 154 | images=[img for _, img in self.images], 155 | ) 156 | id = await store.save_message(user_message) 157 | user_message.id = id 158 | self.messages.append(user_message) 159 | 160 | # Create and save assistant message model 161 | assistant_message = MessageModel( 162 | id=None, 163 | chat_id=self.chat_model.id, # type: ignore 164 | role="assistant", 165 | text=response, 166 | images=[], 167 | ) 168 | id = await store.save_message(assistant_message) 169 | assistant_message.id = id 170 | self.messages.append(assistant_message) 171 | self.images = [] 172 | 173 | except asyncio.CancelledError: 174 | user_chat_item.remove() 175 | response_chat_item.remove() 176 | input = self.query_one("#prompt", FlexibleInput) 177 | input.text = message 178 | except ResponseError as e: 179 | user_chat_item.remove() 180 | response_chat_item.remove() 181 | self.app.notify( 182 | f"There was an error 
running your request: {e}", severity="error" 183 | ) 184 | message_container.scroll_end() 185 | 186 | finally: 187 | loading.remove() 188 | 189 | @on(FlexibleInput.Submitted) 190 | async def on_submit(self, event: FlexibleInput.Submitted) -> None: 191 | message = event.value 192 | input = event.input 193 | 194 | input.clear() 195 | if not message.strip(): 196 | input.focus() 197 | return 198 | 199 | self.inference_task = asyncio.create_task(self.response_task(message)) 200 | 201 | def key_escape(self) -> None: 202 | if hasattr(self, "inference_task"): 203 | self.inference_task.cancel() 204 | 205 | @work 206 | async def action_edit_chat(self) -> None: 207 | screen = ChatEdit(chat_model=self.chat_model, edit_mode=True) 208 | 209 | model_info = await self.app.push_screen_wait(screen) 210 | if model_info is None: 211 | return 212 | 213 | self.chat_model = ChatModel.model_validate_json(model_info) 214 | 215 | # Save to database 216 | store = await Store.get_store() 217 | await store.edit_chat(self.chat_model) 218 | 219 | # load the history from messages 220 | history: list[Message] = [] 221 | # This is wrong, the images should be a list of Image objects 222 | # See https://github.com/ollama/ollama-python/issues/375 223 | # Temp fix is to do msg.images = images # type: ignore 224 | for message in self.messages: 225 | msg = Message( 226 | role=message.role, 227 | content=message.text, 228 | ) 229 | msg.images = message.images # type: ignore 230 | history.append(msg) 231 | 232 | # Get tool definitions based on the updated tools list 233 | used_tool_defs = [ 234 | tool_def 235 | for tool_def in available_tool_calls() 236 | if tool_def["tool"] in self.chat_model.tools 237 | ] 238 | 239 | # Recreate the Ollama client with updated parameters 240 | self.ollama = OllamaLLM( 241 | model=self.chat_model.model, 242 | system=self.chat_model.system, 243 | format=self.chat_model.format, 244 | options=self.chat_model.parameters, 245 | keep_alive=self.chat_model.keep_alive, 246 | history=history, # type: ignore 247 | tool_defs=used_tool_defs, 248 | ) 249 | 250 | @work 251 | async def action_rename_chat(self) -> None: 252 | store = await Store.get_store() 253 | screen = ChatRename(self.chat_model.name) 254 | new_name = await self.app.push_screen_wait(screen) 255 | if new_name is None: 256 | return 257 | tabs = self.app.query_one(TabbedContent) 258 | await store.rename_chat(self.chat_model.id, new_name) # type: ignore 259 | tabs.get_tab(f"chat-{self.chat_model.id}").update(new_name) 260 | self.app.notify("Chat renamed") 261 | 262 | async def action_clear_chat(self) -> None: 263 | self.messages = [] 264 | self.images = [] 265 | self.ollama = OllamaLLM( 266 | model=self.ollama.model, 267 | system=self.ollama.system, 268 | format=self.ollama.format, # type: ignore 269 | options=self.chat_model.parameters, 270 | keep_alive=self.ollama.keep_alive, 271 | history=[], # type: ignore 272 | tool_defs=self.ollama.tool_defs, 273 | ) 274 | msg_container = self.query_one("#messageContainer") 275 | for child in msg_container.children: 276 | child.remove() 277 | store = await Store.get_store() 278 | await store.clear_chat(self.chat_model.id) # type: ignore 279 | 280 | async def action_regenerate_llm_message(self) -> None: 281 | if not self.messages[-1:]: 282 | return 283 | # Remove last Ollama response from UI and regenerate it 284 | response_message_id = self.messages[-1].id 285 | self.messages.pop() 286 | message_container = self.query_one("#messageContainer") 287 | message_container.children[-1].remove() 288 | 
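        # Mount a fresh assistant ChatItem with a loading indicator, then
        # replay the last user prompt: the stale user/assistant exchange is
        # trimmed from the client-side history below, and a random seed is
        # passed to the completion so the model does not simply reproduce the
        # answer that was just removed.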
response_chat_item = ChatItem() 289 | response_chat_item.author = "assistant" 290 | message_container.mount(response_chat_item) 291 | loading = LoadingIndicator() 292 | await message_container.mount(loading) 293 | message_container.scroll_end() 294 | 295 | # Remove the last two messages from chat history, we will regenerate them 296 | self.ollama.history = self.ollama.history[:-2] 297 | message = self.messages[-1] 298 | 299 | async def response_task() -> None: 300 | response = await self.ollama.completion( 301 | message.text, 302 | images=message.images, # type: ignore 303 | additional_options=Options(seed=random.randint(0, 32768)), 304 | ) 305 | response_chat_item.text = response 306 | if message_container.can_view_partial(response_chat_item): 307 | message_container.scroll_end() 308 | 309 | # Save to db 310 | store = await Store.get_store() 311 | 312 | # Create a message model for regenerated response 313 | regenerated_message = MessageModel( 314 | id=response_message_id, 315 | chat_id=self.chat_model.id, # type: ignore 316 | role="assistant", 317 | text=response, 318 | images=[], 319 | ) 320 | await store.save_message(regenerated_message) 321 | regenerated_message.id = response_message_id 322 | self.messages.append(regenerated_message) 323 | self.images = [] 324 | loading.remove() 325 | 326 | asyncio.create_task(response_task()) 327 | 328 | async def action_history(self) -> None: 329 | def on_history_selected(text: str | None) -> None: 330 | if text is None: 331 | return 332 | prompt = self.query_one("#prompt", FlexibleInput) 333 | if "\n" in text and not prompt.is_multiline: 334 | prompt.toggle_multiline() 335 | prompt.text = text 336 | prompt.focus() 337 | 338 | prompts = [message.text for message in self.messages if message.role == "user"] 339 | prompts.reverse() 340 | screen = PromptHistory(prompts) 341 | self.app.push_screen(screen, on_history_selected) 342 | 343 | @work 344 | async def action_mcp_prompt(self) -> None: 345 | screen = MCPPrompt() 346 | messages = await self.app.push_screen_wait(screen) 347 | if messages is None: 348 | return 349 | messages = [Message(**msg) for msg in json.loads(messages)] 350 | message_container = self.query_one("#messageContainer") 351 | store = await Store.get_store() 352 | 353 | last_user_message = None 354 | if messages[-1].role == "user": 355 | last_user_message = messages.pop() 356 | 357 | for message in messages: 358 | text = message.content or "" 359 | # Create a message model for the MCP conversation 360 | message_model = MessageModel( 361 | id=None, 362 | chat_id=self.chat_model.id, # type: ignore 363 | role=message.role, # type: ignore 364 | text=text, 365 | images=[], 366 | ) 367 | id = await store.save_message(message_model) 368 | message_model.id = id 369 | self.messages.append(message_model) 370 | chat_item = ChatItem() 371 | chat_item.text = text 372 | chat_item.author = message.role 373 | await message_container.mount(chat_item) 374 | message_container.scroll_end() 375 | if last_user_message is not None and last_user_message.content: 376 | await self.response_task(last_user_message.content) 377 | 378 | @on(ImageAdded) 379 | def on_image_added(self, ev: ImageAdded) -> None: 380 | self.images.append((ev.path, ev.image)) 381 | self.app.notify(f"Image {ev.path} added.") 382 | 383 | def compose(self) -> ComposeResult: 384 | with Vertical(): 385 | yield Static(f"model: {self.ollama.model}", id="info") 386 | yield VerticalScroll( 387 | id="messageContainer", 388 | ) 389 | yield FlexibleInput("", id="prompt", classes="singleline") 390 | 
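# A minimal sketch of how a ChatContainer reaches the screen, mirroring
# OTerm.action_new_chat in app/oterm.py; `chat_model` is a persisted ChatModel
# whose database id doubles as the tab pane id:
#
#     pane = TabPane(chat_model.name, id=f"chat-{chat_model.id}")
#     pane.compose_add_child(ChatContainer(chat_model=chat_model, messages=[]))
#     await tabs.add_pane(pane)
#     tabs.active = f"chat-{chat_model.id}"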
391 | 392 | class ChatItem(Widget): 393 | text: reactive[str] = reactive("") 394 | author: reactive[str] = reactive("") 395 | 396 | @on(Click) 397 | async def on_click(self, event: Click) -> None: 398 | self.app.copy_to_clipboard(self.text) 399 | widgets = self.query(".text") 400 | for widget in widgets: 401 | widget.styles.animate("opacity", 0.5, duration=0.1) 402 | widget.styles.animate("opacity", 1.0, duration=0.1, delay=0.1) 403 | self.app.notify("Message copied to clipboard.") 404 | 405 | async def watch_text(self, text: str) -> None: 406 | if self.author == "user": 407 | return 408 | try: 409 | jsn = json.loads(text) 410 | if isinstance(jsn, dict): 411 | text = f"```json\n{self.text}\n```" 412 | except json.JSONDecodeError: 413 | pass 414 | 415 | txt_widget = self.query_one(".text", Markdown) 416 | await txt_widget.update(text) 417 | 418 | def compose(self) -> ComposeResult: 419 | """A chat item.""" 420 | mrk_down = Markdown(self.text, classes="text") 421 | mrk_down.code_dark_theme = "solarized-dark" 422 | mrk_down.code_light_theme = "solarized-light" 423 | with Horizontal(classes=f"{self.author} chatItem"): 424 | if self.author == "user": 425 | yield Static(self.text, classes="text") 426 | else: 427 | yield mrk_down 428 | -------------------------------------------------------------------------------- /src/oterm/app/widgets/image.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Iterable 2 | from pathlib import Path 3 | 4 | from PIL import Image as PILImage 5 | from textual.message import Message 6 | from textual.widgets import DirectoryTree 7 | 8 | IMG_MAX_SIZE = 80 9 | IMAGE_EXTENSIONS = PILImage.registered_extensions() 10 | 11 | 12 | class ImageAdded(Message): 13 | def __init__(self, path: Path, image: str) -> None: 14 | self.path = path 15 | self.image = image 16 | super().__init__() 17 | 18 | 19 | class ImageDirectoryTree(DirectoryTree): 20 | def filter_paths(self, paths: Iterable[Path]) -> Iterable[Path]: 21 | return [ 22 | path for path in paths if path.suffix in IMAGE_EXTENSIONS or path.is_dir() 23 | ] 24 | -------------------------------------------------------------------------------- /src/oterm/app/widgets/monkey.py: -------------------------------------------------------------------------------- 1 | import textual.widgets._markdown as markdown 2 | from textual import on 3 | from textual.events import Click 4 | 5 | 6 | class MarkdownFence(markdown.MarkdownFence): 7 | @on(Click) 8 | async def on_click(self, event: Click) -> None: 9 | event.stop() 10 | self.app.copy_to_clipboard(self.code) 11 | self.styles.animate("opacity", 0.5, duration=0.1) 12 | self.styles.animate("opacity", 1.0, duration=0.1, delay=0.1) 13 | 14 | 15 | markdown.MarkdownFence = MarkdownFence 16 | -------------------------------------------------------------------------------- /src/oterm/app/widgets/prompt.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import cast 3 | 4 | from textual import events, on 5 | from textual.app import ComposeResult 6 | from textual.binding import Binding 7 | from textual.containers import Horizontal 8 | from textual.css.query import NoMatches 9 | from textual.message import Message 10 | from textual.reactive import reactive 11 | from textual.widget import Widget 12 | from textual.widgets import Button, Input, TextArea 13 | 14 | from oterm.app.image_browser import ImageSelect 15 | from oterm.app.widgets.image import 
ImageAdded 16 | 17 | 18 | class PostableTextArea(TextArea): 19 | """ 20 | A text area that submits on Enter. 21 | """ 22 | 23 | BINDINGS = TextArea.BINDINGS + [ 24 | Binding( 25 | key="enter", 26 | action="submit", 27 | description="submit", 28 | show=True, 29 | key_display=None, 30 | priority=True, 31 | ), 32 | Binding( 33 | key="shift+enter", 34 | action="newline", 35 | description="newline", 36 | show=True, 37 | key_display=None, 38 | priority=True, 39 | id="newline", 40 | ), 41 | ] 42 | 43 | @dataclass 44 | class Submitted(Message): 45 | input: "PostableTextArea" 46 | value: str 47 | 48 | @property 49 | def control(self) -> "PostableTextArea": 50 | return self.input 51 | 52 | def action_submit(self) -> None: 53 | self.post_message(PostableTextArea.Submitted(self, self.text)) 54 | 55 | def action_newline(self) -> None: 56 | cur = self.cursor_location 57 | lines = self.text.split("\n") 58 | lines[cur[0]] = lines[cur[0]][: cur[1]] + "\n" + lines[cur[0]][cur[1] :] 59 | self.text = "\n".join(lines) 60 | self.cursor_location = (cur[0] + 1, 0) 61 | 62 | 63 | class PastableInput(Input): 64 | BINDINGS = Input.BINDINGS + [ 65 | Binding( 66 | key="ctrl+m", 67 | action="toggle_multiline", 68 | description="multiline", 69 | show=True, 70 | key_display=None, 71 | priority=True, 72 | id="toggle.multiline", 73 | ), 74 | ] 75 | 76 | def action_toggle_multiline(self) -> None: 77 | input = cast(FlexibleInput, self.parent.parent) # type: ignore 78 | input.text = self.value 79 | input.toggle_multiline() 80 | 81 | def _on_paste(self, event: events.Paste) -> None: 82 | if event.text: 83 | self.insert_text_at_cursor(event.text) 84 | if len(event.text.splitlines()) > 1: 85 | input = cast(FlexibleInput, self.parent.parent) # type: ignore 86 | input.text = self.value 87 | input.toggle_multiline() 88 | event.stop() 89 | event.prevent_default() 90 | 91 | 92 | class FlexibleInput(Widget): 93 | is_multiline = reactive(False) 94 | text = reactive("") 95 | 96 | BINDINGS = [ 97 | Binding("ctrl+i", "add_image", "add image", id="add.image"), 98 | ] 99 | 100 | @dataclass 101 | class Submitted(Message): 102 | input: "FlexibleInput" 103 | value: str 104 | 105 | @property 106 | def control(self) -> "FlexibleInput": 107 | return self.input 108 | 109 | def __init__(self, text, *args, **kwargs) -> None: 110 | super().__init__(*args, **kwargs) 111 | self.text = text 112 | 113 | def on_mount(self) -> None: 114 | input = self.query_one("#promptInput", PastableInput) 115 | textarea = self.query_one("#promptArea", TextArea) 116 | textarea.show_line_numbers = False 117 | input.focus() 118 | 119 | def clear(self) -> None: 120 | self.text = "" 121 | self.query_one("#promptInput", PastableInput).value = "" 122 | self.query_one("#promptArea", TextArea).text = "" 123 | 124 | def focus(self, scroll_visible=True) -> "FlexibleInput": 125 | if self.is_multiline: 126 | self.query_one("#promptArea", TextArea).focus() 127 | else: 128 | self.query_one("#promptInput", PastableInput).focus() 129 | return self 130 | 131 | def toggle_multiline(self): 132 | self.is_multiline = not self.is_multiline 133 | input = self.query_one("#promptInput", PastableInput) 134 | textarea = self.query_one("#promptArea", TextArea) 135 | if self.is_multiline: 136 | textarea.text = self.text 137 | self.add_class("multiline") 138 | self.remove_class("singleline") 139 | line_count = textarea.document.line_count 140 | textarea.cursor_location = ( 141 | line_count - 1, 142 | len(textarea.document.get_line(line_count - 1)), 143 | ) 144 | else: 145 | input.value = self.text 
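            # The class swap below is what hides one input widget and reveals
            # the other: oterm.tcss declares `display: none` for #promptArea
            # under #prompt.singleline and for #promptInput under
            # #prompt.multiline.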
146 | self.add_class("singleline") 147 | self.remove_class("multiline") 148 | self.focus() 149 | 150 | def watch_text(self): 151 | try: 152 | if len(self.text.splitlines()) > 1: 153 | self.query_one("#toggle-multiline", Button).disabled = True 154 | else: 155 | self.query_one("#toggle-multiline", Button).disabled = False 156 | 157 | input = self.query_one("#promptInput", PastableInput) 158 | textarea = self.query_one("#promptArea", TextArea) 159 | if self.is_multiline: 160 | if textarea.text != self.text: 161 | textarea.text = self.text 162 | else: 163 | if input.value != self.text: 164 | input.value = self.text 165 | except NoMatches: 166 | pass 167 | 168 | def action_add_image(self) -> None: 169 | async def on_image_selected(image) -> None: 170 | if image is None: 171 | return 172 | path, b64 = image 173 | self.post_message(ImageAdded(path, b64)) 174 | 175 | screen = ImageSelect() 176 | self.app.push_screen(screen, on_image_selected) 177 | 178 | @on(PastableInput.Submitted, "#promptInput") 179 | def on_input_submitted(self, event: PastableInput.Submitted): 180 | self.post_message(self.Submitted(self, event.input.value)) 181 | event.stop() 182 | event.prevent_default() 183 | 184 | @on(PostableTextArea.Submitted, "#promptArea") 185 | def on_textarea_submitted(self, event: PostableTextArea.Submitted): 186 | self.post_message(self.Submitted(self, event.input.text)) 187 | event.stop() 188 | event.prevent_default() 189 | 190 | @on(Button.Pressed, "#toggle-multiline") 191 | def on_toggle_multiline_pressed(self): 192 | self.toggle_multiline() 193 | 194 | @on(PastableInput.Changed, "#promptInput") 195 | def on_input_changed(self, event: PastableInput.Changed): 196 | self.text = event.input.value 197 | 198 | @on(TextArea.Changed, "#promptArea") 199 | def on_area_changed(self, event: TextArea.Changed): 200 | lines = [ 201 | event.text_area.document.get_line(line) 202 | for line in range(event.text_area.document.line_count) 203 | ] 204 | 205 | self.text = "\n".join(lines) 206 | 207 | @on(Button.Pressed, "#post") 208 | async def on_post(self): 209 | self.post_message(self.Submitted(self, self.text)) 210 | 211 | def compose(self) -> ComposeResult: 212 | with Horizontal(): 213 | yield PastableInput( 214 | id="promptInput", 215 | placeholder="Your message…", 216 | ) 217 | yield PostableTextArea(id="promptArea") 218 | with Horizontal(id="button-container"): 219 | yield Button("post", id="post", variant="primary") 220 | yield Button( 221 | "↕", id="toggle-multiline", classes="icon", variant="success" 222 | ) 223 | -------------------------------------------------------------------------------- /src/oterm/app/widgets/tool_select.py: -------------------------------------------------------------------------------- 1 | from ollama import Tool 2 | from textual import on 3 | from textual.app import ComposeResult 4 | from textual.containers import Horizontal, ScrollableContainer, Vertical 5 | from textual.reactive import reactive 6 | from textual.widget import Widget 7 | from textual.widgets import Checkbox 8 | 9 | from oterm.tools import available_tool_calls, available_tool_defs 10 | 11 | 12 | class ToolSelector(Widget): 13 | selected: reactive[list[Tool]] = reactive([]) 14 | 15 | def __init__( 16 | self, 17 | name: str | None = None, 18 | id: str | None = None, 19 | classes: str | None = None, 20 | disabled: bool = False, 21 | selected: list[Tool] = [], 22 | ) -> None: 23 | super().__init__(name=name, id=id, classes=classes, disabled=disabled) 24 | self.selected = selected if selected is not None else [] 25 | 
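        # A saved chat can reference tools that no longer resolve in this
        # session (e.g. an MCP server that failed to start, or an external
        # tool removed from the config), hence the pruning below.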
# Check if selected tools are still available from the loaded tools. 26 | available = [tool_def["tool"] for tool_def in available_tool_calls()] 27 | self.selected = [tool for tool in selected if tool in available] 28 | 29 | def on_mount(self) -> None: 30 | pass 31 | 32 | @on(Checkbox.Changed) 33 | def on_checkbox_toggled(self, ev: Checkbox.Changed): 34 | name = ev.control.name 35 | checked = ev.value 36 | 37 | for server in available_tool_defs.keys(): 38 | if server == name: 39 | # We don't need to change selected here; 40 | # it is updated as the individual checkboxes are checked/unchecked. 41 | for tool_def in available_tool_defs[server]: 42 | tool_checkbox = self.query_one( 43 | f"#{server}-{tool_def['tool']['function']['name']}", Checkbox 44 | ) 45 | if tool_checkbox.value != checked: 46 | tool_checkbox.value = checked 47 | return 48 | 49 | for tool_def in available_tool_calls(): 50 | if tool_def["tool"].function.name == str(name): # type: ignore 51 | tool = tool_def["tool"] 52 | if checked: 53 | self.selected.append(tool) 54 | else: 55 | try: 56 | self.selected.remove(tool) 57 | except ValueError: 58 | pass 59 | break 60 | 61 | def all_server_tools_selected(self, server: str) -> bool: 62 | """Check if all tools from a server are selected.""" 63 | for tool_def in available_tool_defs[server]: 64 | if tool_def["tool"] not in self.selected: 65 | return False 66 | return True 67 | 68 | def compose(self) -> ComposeResult: 69 | with ScrollableContainer( 70 | id="tool-selector", 71 | ): 72 | for server in available_tool_defs: 73 | with Horizontal(classes="tool-group"): 74 | yield Checkbox( 75 | name=server, 76 | label=server, 77 | tooltip=f"Select all tools from {server}", 78 | classes="tool-group-select-all", 79 | value=self.all_server_tools_selected(server), 80 | ) 81 | with Vertical(classes="tools"): 82 | for tool_def in available_tool_defs[server]: 83 | name = tool_def["tool"]["function"]["name"] 84 | yield Checkbox( 85 | id=f"{server}-{name}", 86 | name=name, 87 | label=name, 88 | tooltip=f"{tool_def['tool']['function']['description']}", 89 | value=tool_def["tool"] in self.selected, 90 | classes="tool", 91 | ) 92 | -------------------------------------------------------------------------------- /src/oterm/cli/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggozad/oterm/5933088b9eec8ecdc147b7ba9582a45548b4aebe/src/oterm/cli/__init__.py -------------------------------------------------------------------------------- /src/oterm/cli/oterm.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from importlib import metadata 3 | 4 | import typer 5 | from rich.pretty import pprint 6 | 7 | from oterm.app.oterm import app 8 | from oterm.config import envConfig 9 | from oterm.store.store import Store 10 | 11 | cli = typer.Typer(context_settings={"help_option_names": ["-h", "--help"]}) 12 | 13 | 14 | async def upgrade_db(): 15 | await Store.get_store() 16 | 17 | 18 | @cli.command() 19 | def oterm( 20 | version: bool = typer.Option(None, "--version", "-v"), 21 | upgrade: bool = typer.Option(None, "--upgrade"), 22 | config: bool = typer.Option(None, "--config"), 23 | sqlite: bool = typer.Option(None, "--db"), 24 | data_dir: bool = typer.Option(None, "--data-dir"), 25 | ): 26 | if version: 27 | typer.echo(f"oterm v{metadata.version('oterm')}") 28 | exit(0) 29 | if upgrade: 30 | asyncio.run(upgrade_db()) 31 | exit(0) 32 | if sqlite: 33 | typer.echo(envConfig.OTERM_DATA_DIR / 
"store.db") 34 | exit(0) 35 | if data_dir: 36 | typer.echo(envConfig.OTERM_DATA_DIR) 37 | exit(0) 38 | if config: 39 | typer.echo(pprint(envConfig)) 40 | exit(0) 41 | app.run() 42 | 43 | 44 | if __name__ == "__main__": 45 | cli() 46 | -------------------------------------------------------------------------------- /src/oterm/config.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | from pathlib import Path 4 | 5 | from dotenv import load_dotenv 6 | from pydantic import BaseModel 7 | 8 | from oterm.utils import get_default_data_dir 9 | 10 | load_dotenv() 11 | 12 | 13 | class EnvConfig(BaseModel): 14 | ENV: str = "development" 15 | OLLAMA_HOST: str = "127.0.0.1:11434" 16 | OLLAMA_URL: str = "" 17 | OTERM_VERIFY_SSL: bool = True 18 | OTERM_DATA_DIR: Path = get_default_data_dir() 19 | OPEN_WEATHER_MAP_API_KEY: str = "" 20 | 21 | 22 | envConfig = EnvConfig.model_validate(os.environ) 23 | if envConfig.OLLAMA_URL == "": 24 | envConfig.OLLAMA_URL = f"http://{envConfig.OLLAMA_HOST}" 25 | 26 | 27 | class AppConfig: 28 | def __init__(self, path: Path | None = None): 29 | if path is None: 30 | path = envConfig.OTERM_DATA_DIR / "config.json" 31 | self._path = path 32 | self._data = { 33 | "theme": "textual-dark", 34 | "splash-screen": True, 35 | } 36 | try: 37 | with open(self._path) as f: 38 | saved = json.load(f) 39 | self._data = self._data | saved 40 | except FileNotFoundError: 41 | Path.mkdir(self._path.parent, parents=True, exist_ok=True) 42 | self.save() 43 | 44 | def set(self, key, value): 45 | self._data[key] = value 46 | self.save() 47 | 48 | def get(self, key, default=None): 49 | return self._data.get(key, default) 50 | 51 | def save(self): 52 | with open(self._path, "w") as f: 53 | json.dump(self._data, f) 54 | 55 | 56 | # Expose AppConfig object for app to import 57 | appConfig = AppConfig() 58 | -------------------------------------------------------------------------------- /src/oterm/log.py: -------------------------------------------------------------------------------- 1 | import rich.repr 2 | from textual import Logger, LogGroup # type: ignore 3 | 4 | log_lines: list[tuple[LogGroup, str]] = [] 5 | 6 | 7 | @rich.repr.auto 8 | class OtermLogger(Logger): 9 | @property 10 | def debug(self) -> "OtermLogger": 11 | """Logs debug messages.""" 12 | return OtermLogger(self._log, LogGroup.DEBUG) 13 | 14 | @property 15 | def info(self) -> "OtermLogger": 16 | """Logs information.""" 17 | res = OtermLogger(self._log, LogGroup.INFO) 18 | return res 19 | 20 | @property 21 | def warning(self) -> "OtermLogger": 22 | """Logs warnings.""" 23 | return OtermLogger(self._log, LogGroup.WARNING) 24 | 25 | @property 26 | def error(self) -> "OtermLogger": 27 | """Logs errors.""" 28 | return OtermLogger(self._log, LogGroup.ERROR) 29 | 30 | def __call__(self, *args: object, **kwargs) -> None: 31 | output = " ".join(str(arg) for arg in args) 32 | if kwargs: 33 | key_values = " ".join(f"{key}={value!r}" for key, value in kwargs.items()) 34 | output = f"{output} {key_values}" if output else key_values 35 | log_lines.append((self._group, output)) 36 | super().__call__(*args, **kwargs) 37 | 38 | 39 | log = OtermLogger(None) 40 | -------------------------------------------------------------------------------- /src/oterm/ollamaclient.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | import json 3 | from ast import literal_eval 4 | from collections.abc import AsyncGenerator, AsyncIterator, 
Iterator, Mapping, Sequence 5 | from pathlib import Path 6 | from typing import ( 7 | Any, 8 | Literal, 9 | ) 10 | 11 | from ollama import ( 12 | AsyncClient, 13 | ChatResponse, 14 | Client, 15 | ListResponse, 16 | Message, 17 | Options, 18 | ProgressResponse, 19 | ShowResponse, 20 | ) 21 | from pydantic.json_schema import JsonSchemaValue 22 | 23 | from oterm.config import envConfig 24 | from oterm.log import log 25 | from oterm.types import ToolCall 26 | 27 | 28 | def parse_format(format_text: str) -> JsonSchemaValue | Literal["", "json"]: 29 | try: 30 | jsn = json.loads(format_text) 31 | if isinstance(jsn, dict): 32 | return jsn 33 | except json.JSONDecodeError: 34 | if format_text in ("", "json"): 35 | return format_text 36 | raise Exception(f"Invalid Ollama format: '{format_text}'") 37 | 38 | 39 | class OllamaLLM: 40 | def __init__( 41 | self, 42 | model="llama3.2", 43 | system: str | None = None, 44 | history: Sequence[Mapping[str, Any] | Message] = [], 45 | format: str = "", 46 | options: Options = Options(), 47 | keep_alive: int = 5, 48 | tool_defs: Sequence[ToolCall] = [], 49 | ): 50 | self.model = model 51 | self.system = system 52 | self.history = list(history) 53 | self.format = format 54 | self.keep_alive = keep_alive 55 | self.options = options 56 | self.tool_defs = tool_defs 57 | self.tools = [tool["tool"] for tool in tool_defs] 58 | 59 | if system: 60 | system_prompt: Message = Message(role="system", content=system) 61 | self.history = [system_prompt] + self.history 62 | 63 | async def completion( 64 | self, 65 | prompt: str = "", 66 | images: list[Path | bytes | str] = [], 67 | additional_options: Options = Options(), 68 | tool_call_messages=[], 69 | ) -> str: 70 | client = AsyncClient( 71 | host=envConfig.OLLAMA_URL, verify=envConfig.OTERM_VERIFY_SSL 72 | ) 73 | if prompt: 74 | user_prompt: Message = Message(role="user", content=prompt) 75 | if images: 76 | # This is a bug in Ollama; the images should be a list of Image objects 77 | # user_prompt.images = [Image(value=image) for image in images] 78 | user_prompt.images = images # type: ignore 79 | self.history.append(user_prompt) 80 | options = { 81 | k: v for k, v in self.options.model_dump().items() if v is not None 82 | } | {k: v for k, v in additional_options.model_dump().items() if v is not None} 83 | 84 | response: ChatResponse = await client.chat( 85 | model=self.model, 86 | messages=self.history + tool_call_messages, 87 | keep_alive=f"{self.keep_alive}m", 88 | options=options, 89 | format=parse_format(self.format), 90 | tools=self.tools, 91 | ) 92 | message = response.message 93 | tool_calls = message.tool_calls 94 | if tool_calls: 95 | tool_messages = [message] 96 | for tool_call in tool_calls: 97 | tool_name = tool_call["function"]["name"] 98 | for tool_def in self.tool_defs: 99 | if tool_def["tool"]["function"]["name"] == tool_name: 100 | tool_callable = tool_def["callable"] 101 | tool_arguments = tool_call["function"]["arguments"] 102 | try: 103 | log.debug( 104 | f"Calling tool: {tool_name} with {tool_arguments}" 105 | ) 106 | if inspect.iscoroutinefunction(tool_callable): 107 | tool_response = await tool_callable(**tool_arguments) # type: ignore 108 | else: 109 | tool_response = tool_callable(**tool_arguments) # type: ignore 110 | log.debug("Tool response:", tool_response) 111 | except Exception as e: 112 | log.error(f"Error calling tool {tool_name}", e) 113 | tool_response = str(e) 114 | tool_messages.append( 115 | { # type: ignore 116 | "role": "tool", 117 | "content": tool_response, 118 | "name": tool_name, 
119 | } 120 | ) 121 | return await self.completion( 122 | tool_call_messages=tool_messages, 123 | ) 124 | 125 | self.history.append(message) 126 | text_response = message.content 127 | return text_response or "" 128 | 129 | async def stream( 130 | self, 131 | prompt: str = "", 132 | images: list[Path | bytes | str] = [], 133 | additional_options: Options = Options(), 134 | tool_call_messages: list = [], 135 | ) -> AsyncGenerator[str, Any]: 136 | """Stream a chat response with support for tool calls. 137 | 138 | When tool calls are encountered during streaming, they are executed after the stream 139 | completes and the result is incorporated into the response. 140 | """ 141 | client = AsyncClient( 142 | host=envConfig.OLLAMA_URL, verify=envConfig.OTERM_VERIFY_SSL 143 | ) 144 | 145 | # Add user prompt to history if provided 146 | if prompt: 147 | user_prompt: Message = Message(role="user", content=prompt) 148 | if images: 149 | user_prompt.images = images # type: ignore 150 | self.history.append(user_prompt) 151 | 152 | # Process options 153 | options = { 154 | k: v for k, v in self.options.model_dump().items() if v is not None 155 | } | {k: v for k, v in additional_options.model_dump().items() if v is not None} 156 | 157 | # Start the streaming chat 158 | stream: AsyncIterator[ChatResponse] = await client.chat( 159 | model=self.model, 160 | messages=self.history + tool_call_messages, 161 | options=options, 162 | keep_alive=f"{self.keep_alive}m", 163 | format=parse_format(self.format), 164 | tools=self.tools, 165 | stream=True, 166 | ) 167 | 168 | text = "" 169 | current_message = None 170 | pending_tool_call = False 171 | 172 | async for response in stream: 173 | message = response.message 174 | content = message.content if message.content else "" 175 | tool_calls = message.tool_calls 176 | 177 | # If we have tool calls, process them at the end of the stream 178 | if tool_calls and not pending_tool_call: 179 | pending_tool_call = True 180 | current_message = message 181 | 182 | # Add content to the accumulated text only if not processing tool calls 183 | if not pending_tool_call and content: 184 | text += content 185 | yield text 186 | 187 | # After streaming is complete, handle any tool calls 188 | if pending_tool_call and current_message and current_message.tool_calls: 189 | # Process each tool call 190 | tool_messages = [current_message] # type: ignore 191 | for tool_call in current_message.tool_calls: 192 | tool_name = tool_call["function"]["name"] 193 | 194 | tool_args = tool_call["function"]["arguments"] 195 | # Execute the tool 196 | for tool_def in self.tool_defs: 197 | if tool_def["tool"]["function"]["name"] == tool_name: 198 | tool_callable = tool_def["callable"] 199 | 200 | try: 201 | log.debug(f"Calling tool: {tool_name} with {tool_args}") 202 | # Execute the tool 203 | if inspect.iscoroutinefunction(tool_callable): 204 | tool_response = await tool_callable(**tool_args) # type: ignore 205 | else: 206 | tool_response = tool_callable(**tool_args) # type: ignore 207 | 208 | log.debug(f"Tool response: {tool_response}") 209 | 210 | # Add tool response to messages 211 | tool_messages.append( 212 | { # type: ignore 213 | "role": "tool", 214 | "content": str(tool_response), 215 | "name": tool_name, 216 | } 217 | ) 218 | except Exception as e: 219 | log.error(f"Error calling tool {tool_name}: {e}") 220 | return 221 | 222 | # Use a new variable for the follow-up response to avoid duplication 223 | async for text_chunk in self.stream( 224 | tool_call_messages=tool_messages, 225 | 
additional_options=additional_options, 226 | ): 227 | yield text_chunk 228 | text = text_chunk 229 | 230 | elif text: # Only regular content was present 231 | self.history.append(Message(role="assistant", content=text)) 232 | 233 | @staticmethod 234 | def list() -> ListResponse: 235 | client = Client(host=envConfig.OLLAMA_URL, verify=envConfig.OTERM_VERIFY_SSL) 236 | return client.list() 237 | 238 | @staticmethod 239 | def show(model: str) -> ShowResponse: 240 | client = Client(host=envConfig.OLLAMA_URL, verify=envConfig.OTERM_VERIFY_SSL) 241 | return client.show(model) 242 | 243 | @staticmethod 244 | def pull(model: str) -> Iterator[ProgressResponse]: 245 | client = Client(host=envConfig.OLLAMA_URL, verify=envConfig.OTERM_VERIFY_SSL) 246 | stream: Iterator[ProgressResponse] = client.pull(model, stream=True) 247 | yield from stream 248 | 249 | 250 | def parse_ollama_parameters(parameter_text: str) -> Options: 251 | lines = parameter_text.split("\n") 252 | params = Options() 253 | valid_params = set(Options.model_fields.keys()) 254 | for line in lines: 255 | if line: 256 | key, value = line.split(maxsplit=1) 257 | try: 258 | value = literal_eval(value) 259 | except (SyntaxError, ValueError): 260 | pass 261 | if key not in valid_params: 262 | continue 263 | if params.get(key): 264 | if not isinstance(params[key], list): 265 | params[key] = [params[key], value] 266 | else: 267 | params[key].append(value) 268 | else: 269 | params[key] = value 270 | return params 271 | 272 | 273 | def jsonify_options(options: Options) -> str: 274 | return json.dumps( 275 | { 276 | key: value 277 | for key, value in options.model_dump().items() 278 | if value is not None 279 | }, 280 | indent=2, 281 | ) 282 | -------------------------------------------------------------------------------- /src/oterm/store/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggozad/oterm/5933088b9eec8ecdc147b7ba9582a45548b4aebe/src/oterm/store/__init__.py -------------------------------------------------------------------------------- /src/oterm/store/store.py: -------------------------------------------------------------------------------- 1 | import json 2 | from importlib import metadata 3 | from pathlib import Path 4 | 5 | import aiosqlite 6 | from packaging.version import parse 7 | 8 | from oterm.config import envConfig 9 | from oterm.store.upgrades import upgrades 10 | from oterm.types import ChatModel, MessageModel, Tool 11 | from oterm.utils import int_to_semantic_version, semantic_version_to_int 12 | 13 | 14 | class Store: 15 | db_path: Path 16 | 17 | _store: "Store | None" = None 18 | 19 | @classmethod 20 | async def get_store(cls) -> "Store": 21 | if cls._store is not None: 22 | return cls._store 23 | self = Store() 24 | data_path = envConfig.OTERM_DATA_DIR 25 | data_path.mkdir(parents=True, exist_ok=True) 26 | self.db_path = data_path / "store.db" 27 | 28 | if not self.db_path.exists(): 29 | # Create tables and set user_version 30 | async with aiosqlite.connect(self.db_path) as connection: 31 | await connection.executescript( 32 | """ 33 | CREATE TABLE IF NOT EXISTS "chat" ( 34 | "id" INTEGER, 35 | "name" TEXT, 36 | "model" TEXT NOT NULL, 37 | "system" TEXT, 38 | "format" TEXT, 39 | "parameters" TEXT DEFAULT "{}", 40 | "keep_alive" INTEGER DEFAULT 5, 41 | "tools" TEXT DEFAULT "[]", 42 | "type" TEXT DEFAULT "chat", 43 | PRIMARY KEY("id" AUTOINCREMENT) 44 | ); 45 | 46 | CREATE TABLE IF NOT EXISTS "message" ( 47 | "id" INTEGER, 48 | "chat_id" 
INTEGER NOT NULL, 49 | "author" TEXT NOT NULL, 50 | "text" TEXT NOT NULL, 51 | "images" TEXT DEFAULT "[]", 52 | PRIMARY KEY("id" AUTOINCREMENT), 53 | FOREIGN KEY("chat_id") REFERENCES "chat"("id") ON DELETE CASCADE 54 | ); 55 | """ 56 | ) 57 | await self.set_user_version(metadata.version("oterm")) 58 | else: 59 | # Upgrade database 60 | current_version: str = metadata.version("oterm") 61 | db_version = await self.get_user_version() 62 | for version, steps in upgrades: 63 | if parse(current_version) >= parse(version) and parse(version) > parse( 64 | db_version 65 | ): 66 | for step in steps: 67 | await step(self.db_path) 68 | await self.set_user_version(current_version) 69 | cls._store = self 70 | return self 71 | 72 | async def get_user_version(self) -> str: 73 | async with aiosqlite.connect(self.db_path) as connection: 74 | res = await connection.execute("PRAGMA user_version;") 75 | res = await res.fetchone() 76 | return int_to_semantic_version(res[0] if res else 0) 77 | 78 | async def set_user_version(self, version: str) -> None: 79 | async with aiosqlite.connect(self.db_path) as connection: 80 | await connection.execute( 81 | f"PRAGMA user_version = {semantic_version_to_int(version)};" 82 | ) 83 | 84 | async def save_chat(self, chat_model: ChatModel) -> int: 85 | async with aiosqlite.connect(self.db_path) as connection: 86 | res = await connection.execute_insert( 87 | """ 88 | INSERT OR REPLACE 89 | INTO chat(id, name, model, system, format, parameters, keep_alive, tools, type) 90 | VALUES(:id, :name, :model, :system, :format, :parameters, :keep_alive, :tools, :type) RETURNING id;""", 91 | { 92 | "id": chat_model.id, 93 | "name": chat_model.name, 94 | "model": chat_model.model, 95 | "system": chat_model.system, 96 | "format": chat_model.format, 97 | "parameters": json.dumps( 98 | chat_model.parameters.model_dump(exclude_unset=True) 99 | ), 100 | "keep_alive": chat_model.keep_alive, 101 | "tools": json.dumps( 102 | [tool.model_dump() for tool in chat_model.tools] 103 | ), 104 | "type": chat_model.type, 105 | }, 106 | ) 107 | await connection.commit() 108 | 109 | return res[0] if res else 0 110 | 111 | async def rename_chat(self, id: int, name: str) -> None: 112 | async with aiosqlite.connect(self.db_path) as connection: 113 | await connection.execute( 114 | "UPDATE chat SET name = :name WHERE id = :id;", {"id": id, "name": name} 115 | ) 116 | await connection.commit() 117 | 118 | async def edit_chat(self, chat_model: ChatModel) -> None: 119 | async with aiosqlite.connect(self.db_path) as connection: 120 | await connection.execute( 121 | """ 122 | UPDATE chat 123 | SET name = :name, 124 | system = :system, 125 | format = :format, 126 | parameters = :parameters, 127 | keep_alive = :keep_alive, 128 | tools = :tools 129 | WHERE id = :id; 130 | """, 131 | { 132 | "id": chat_model.id, 133 | "name": chat_model.name, 134 | "system": chat_model.system, 135 | "format": chat_model.format, 136 | "parameters": json.dumps( 137 | chat_model.parameters.model_dump(exclude_unset=True) 138 | ), 139 | "keep_alive": chat_model.keep_alive, 140 | "tools": json.dumps( 141 | [tool.model_dump() for tool in chat_model.tools] 142 | ), 143 | }, 144 | ) 145 | await connection.commit() 146 | 147 | async def get_chats(self, type="chat") -> list[ChatModel]: 148 | async with aiosqlite.connect(self.db_path) as connection: 149 | chats = await connection.execute_fetchall( 150 | """ 151 | SELECT id, name, model, system, format, parameters, keep_alive, tools 152 | FROM chat WHERE type = :type; 153 | """, 154 | {"type": type}, 
155 | ) 156 | 157 | return [ 158 | ChatModel( 159 | id=id, 160 | name=name, 161 | model=model, 162 | system=system, 163 | format=format, 164 | parameters=json.loads(parameters), 165 | keep_alive=keep_alive, 166 | tools=[Tool(**t) for t in json.loads(tools)], 167 | type=type, 168 | ) 169 | for id, name, model, system, format, parameters, keep_alive, tools in chats 170 | ] 171 | 172 | async def get_chat(self, id: int) -> ChatModel | None: 173 | async with aiosqlite.connect(self.db_path) as connection: 174 | chat = await connection.execute_fetchall( 175 | """ 176 | SELECT id, name, model, system, format, parameters, keep_alive, tools, type 177 | FROM chat 178 | WHERE id = :id; 179 | """, 180 | {"id": id}, 181 | ) 182 | chat = next(iter(chat), None) 183 | if chat: 184 | id, name, model, system, format, parameters, keep_alive, tools, type = ( 185 | chat 186 | ) 187 | return ChatModel( 188 | id=id, 189 | name=name, 190 | model=model, 191 | system=system, 192 | format=format, 193 | parameters=json.loads(parameters), 194 | keep_alive=keep_alive, 195 | tools=[Tool(**t) for t in json.loads(tools)], 196 | type=type, 197 | ) 198 | return None 199 | 200 | async def delete_chat(self, id: int) -> None: 201 | async with aiosqlite.connect(self.db_path) as connection: 202 | await connection.execute("PRAGMA foreign_keys = on;") 203 | await connection.execute("DELETE FROM chat WHERE id = :id;", {"id": id}) 204 | await connection.commit() 205 | 206 | async def save_message(self, message_model: MessageModel) -> int: 207 | async with aiosqlite.connect(self.db_path) as connection: 208 | res = await connection.execute_insert( 209 | """ 210 | INSERT OR REPLACE 211 | INTO message(id, chat_id, author, text, images) 212 | VALUES(:id, :chat_id, :author, :text, :images) RETURNING id; 213 | """, 214 | { 215 | "id": message_model.id, 216 | "chat_id": message_model.chat_id, 217 | "author": message_model.role, 218 | "text": message_model.text, 219 | "images": json.dumps(message_model.images), 220 | }, 221 | ) 222 | await connection.commit() 223 | return res[0] if res else 0 224 | 225 | async def get_messages(self, chat_id: int) -> list[MessageModel]: 226 | async with aiosqlite.connect(self.db_path) as connection: 227 | messages = await connection.execute_fetchall( 228 | """ 229 | SELECT id, author, text, images 230 | FROM message 231 | WHERE chat_id = :chat_id; 232 | """, 233 | {"chat_id": chat_id}, 234 | ) 235 | return [ 236 | MessageModel( 237 | id=id, 238 | chat_id=chat_id, 239 | role=author, 240 | text=text, 241 | images=json.loads(images), 242 | ) 243 | for id, author, text, images in messages 244 | ] 245 | 246 | async def clear_chat(self, chat_id: int) -> None: 247 | async with aiosqlite.connect(self.db_path) as connection: 248 | await connection.execute( 249 | "DELETE FROM message WHERE chat_id = :chat_id;", {"chat_id": chat_id} 250 | ) 251 | await connection.commit() 252 | -------------------------------------------------------------------------------- /src/oterm/store/upgrades/__init__.py: -------------------------------------------------------------------------------- 1 | from oterm.store.upgrades.v0_1_6 import upgrades as v0_1_6_upgrades 2 | from oterm.store.upgrades.v0_1_11 import upgrades as v_0_1_11_upgrades 3 | from oterm.store.upgrades.v0_2_0 import upgrades as v0_2_0_upgrades 4 | from oterm.store.upgrades.v0_2_4 import upgrades as v0_2_4_upgrades 5 | from oterm.store.upgrades.v0_2_8 import upgrades as v0_2_8_upgrades 6 | from oterm.store.upgrades.v0_3_0 import upgrades as v0_3_0_upgrades 7 | from 
oterm.store.upgrades.v0_4_0 import upgrades as v0_4_0_upgrades 8 | from oterm.store.upgrades.v0_5_1 import upgrades as v0_5_1_upgrades 9 | from oterm.store.upgrades.v0_6_0 import upgrades as v0_6_0_upgrades 10 | from oterm.store.upgrades.v0_7_0 import upgrades as v0_7_0_upgrades 11 | from oterm.store.upgrades.v0_9_0 import upgrades as v0_9_0_upgrades 12 | from oterm.store.upgrades.v0_12_0 import upgrades as v0_12_0_upgrades 13 | 14 | upgrades = ( 15 | v0_1_6_upgrades 16 | + v_0_1_11_upgrades 17 | + v0_2_0_upgrades 18 | + v0_2_4_upgrades 19 | + v0_2_8_upgrades 20 | + v0_3_0_upgrades 21 | + v0_4_0_upgrades 22 | + v0_5_1_upgrades 23 | + v0_6_0_upgrades 24 | + v0_7_0_upgrades 25 | + v0_9_0_upgrades 26 | + v0_12_0_upgrades 27 | ) 28 | -------------------------------------------------------------------------------- /src/oterm/store/upgrades/v0_12_0.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Awaitable, Callable 2 | from pathlib import Path 3 | 4 | import aiosqlite 5 | 6 | 7 | async def update_roles(db_path: Path) -> None: 8 | async with aiosqlite.connect(db_path) as connection: 9 | await connection.executescript( 10 | """ 11 | UPDATE message SET author = 'assistant' WHERE author = 'ollama'; 12 | UPDATE message SET author = 'user' WHERE author = 'me'; 13 | """ 14 | ) 15 | 16 | 17 | upgrades: list[tuple[str, list[Callable[[Path], Awaitable[None]]]]] = [ 18 | ("0.12.0", [update_roles]), 19 | ] 20 | -------------------------------------------------------------------------------- /src/oterm/store/upgrades/v0_1_11.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Awaitable, Callable 2 | from pathlib import Path 3 | 4 | import aiosqlite 5 | 6 | 7 | async def add_format_to_chat(db_path: Path) -> None: 8 | async with aiosqlite.connect(db_path) as connection: 9 | try: 10 | await connection.executescript( 11 | """ 12 | ALTER TABLE chat ADD COLUMN format TEXT; 13 | """ 14 | ) 15 | except aiosqlite.OperationalError: 16 | pass 17 | 18 | 19 | upgrades: list[tuple[str, list[Callable[[Path], Awaitable[None]]]]] = [ 20 | ("0.1.11", [add_format_to_chat]), 21 | ("0.1.13", [add_format_to_chat]), 22 | ] 23 | -------------------------------------------------------------------------------- /src/oterm/store/upgrades/v0_1_6.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Awaitable, Callable 2 | from pathlib import Path 3 | 4 | import aiosqlite 5 | 6 | 7 | async def add_template_system_to_chat(db_path: Path) -> None: 8 | async with aiosqlite.connect(db_path) as connection: 9 | await connection.executescript( 10 | """ 11 | ALTER TABLE chat ADD COLUMN template TEXT; 12 | ALTER TABLE chat ADD COLUMN system TEXT; 13 | """ 14 | ) 15 | 16 | 17 | upgrades: list[tuple[str, list[Callable[[Path], Awaitable[None]]]]] = [ 18 | ("0.1.6", [add_template_system_to_chat]) 19 | ] 20 | -------------------------------------------------------------------------------- /src/oterm/store/upgrades/v0_2_0.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Awaitable, Callable 2 | from pathlib import Path 3 | 4 | import aiosqlite 5 | 6 | 7 | async def drop_template(db_path: Path) -> None: 8 | async with aiosqlite.connect(db_path) as connection: 9 | try: 10 | await connection.executescript( 11 | """ 12 | ALTER TABLE chat DROP COLUMN template; 13 | """ 14 | ) 15 | except 
aiosqlite.OperationalError: 16 | pass 17 | 18 | 19 | upgrades: list[tuple[str, list[Callable[[Path], Awaitable[None]]]]] = [ 20 | ("0.2.0", [drop_template]) 21 | ] 22 | -------------------------------------------------------------------------------- /src/oterm/store/upgrades/v0_2_4.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Awaitable, Callable 2 | from pathlib import Path 3 | 4 | import aiosqlite 5 | 6 | 7 | async def update_format(db_path: Path) -> None: 8 | async with aiosqlite.connect(db_path) as connection: 9 | try: 10 | await connection.executescript( 11 | """ 12 | UPDATE chat SET format = '' WHERE format is NULL; 13 | """ 14 | ) 15 | except aiosqlite.OperationalError: 16 | pass 17 | 18 | 19 | upgrades: list[tuple[str, list[Callable[[Path], Awaitable[None]]]]] = [ 20 | ("0.2.4", [update_format]) 21 | ] 22 | -------------------------------------------------------------------------------- /src/oterm/store/upgrades/v0_2_8.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Awaitable, Callable 2 | from pathlib import Path 3 | 4 | import aiosqlite 5 | 6 | 7 | async def keep_alive(db_path: Path) -> None: 8 | async with aiosqlite.connect(db_path) as connection: 9 | try: 10 | await connection.executescript( 11 | """ 12 | ALTER TABLE chat ADD COLUMN keep_alive INTEGER DEFAULT 5; 13 | """ 14 | ) 15 | except aiosqlite.OperationalError: 16 | pass 17 | 18 | 19 | upgrades: list[tuple[str, list[Callable[[Path], Awaitable[None]]]]] = [ 20 | ("0.2.8", [keep_alive]) 21 | ] 22 | -------------------------------------------------------------------------------- /src/oterm/store/upgrades/v0_3_0.py: -------------------------------------------------------------------------------- 1 | import json 2 | from collections.abc import Awaitable, Callable 3 | from pathlib import Path 4 | 5 | import aiosqlite 6 | 7 | from oterm.ollamaclient import OllamaLLM, parse_ollama_parameters 8 | 9 | 10 | async def parameters(db_path: Path) -> None: 11 | async with aiosqlite.connect(db_path) as connection: 12 | try: 13 | await connection.executescript( 14 | """ 15 | ALTER TABLE chat ADD COLUMN parameters TEXT DEFAULT "{}"; 16 | """ 17 | ) 18 | except aiosqlite.OperationalError: 19 | pass 20 | 21 | # Update with default parameters 22 | chat_models = await connection.execute_fetchall("SELECT id, model FROM chat") 23 | for chat_id, model in chat_models: 24 | info = OllamaLLM.show(model) 25 | parameters = parse_ollama_parameters(info["parameters"]) 26 | await connection.execute( 27 | "UPDATE chat SET parameters = ? 
WHERE id = ?", 28 | (json.dumps(parameters), chat_id), 29 | ) 30 | await connection.commit() 31 | 32 | 33 | upgrades: list[tuple[str, list[Callable[[Path], Awaitable[None]]]]] = [ 34 | ("0.3.0", [parameters]) 35 | ] 36 | -------------------------------------------------------------------------------- /src/oterm/store/upgrades/v0_4_0.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Awaitable, Callable 2 | from pathlib import Path 3 | 4 | import aiosqlite 5 | 6 | 7 | async def context(db_path: Path) -> None: 8 | async with aiosqlite.connect(db_path) as connection: 9 | try: 10 | await connection.executescript( 11 | """ 12 | ALTER TABLE chat DROP COLUMN context; 13 | """ 14 | ) 15 | except aiosqlite.OperationalError: 16 | pass 17 | 18 | 19 | upgrades: list[tuple[str, list[Callable[[Path], Awaitable[None]]]]] = [ 20 | ("0.4.0", [context]) 21 | ] 22 | -------------------------------------------------------------------------------- /src/oterm/store/upgrades/v0_5_1.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Awaitable, Callable 2 | from pathlib import Path 3 | 4 | import aiosqlite 5 | 6 | 7 | async def add_id_to_messages(db_path: Path) -> None: 8 | async with aiosqlite.connect(db_path) as connection: 9 | try: 10 | await connection.executescript( 11 | """ 12 | CREATE TABLE message_temp ( 13 | id INTEGER PRIMARY KEY AUTOINCREMENT, 14 | chat_id INTEGER NOT NULL, 15 | author TEXT NOT NULL, 16 | text TEXT NOT NULL 17 | ); 18 | INSERT INTO message_temp (chat_id, author, text) SELECT chat_id, author, text FROM message; 19 | DROP TABLE message; 20 | ALTER TABLE message_temp RENAME TO message; 21 | """ 22 | ) 23 | except aiosqlite.OperationalError: 24 | pass 25 | 26 | 27 | upgrades: list[tuple[str, list[Callable[[Path], Awaitable[None]]]]] = [ 28 | ("0.5.1", [add_id_to_messages]) 29 | ] 30 | -------------------------------------------------------------------------------- /src/oterm/store/upgrades/v0_6_0.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Awaitable, Callable 2 | from pathlib import Path 3 | 4 | import aiosqlite 5 | 6 | 7 | async def tools(db_path: Path) -> None: 8 | async with aiosqlite.connect(db_path) as connection: 9 | try: 10 | await connection.executescript( 11 | """ 12 | ALTER TABLE chat ADD COLUMN tools TEXT DEFAULT "[]"; 13 | """ 14 | ) 15 | except aiosqlite.OperationalError: 16 | pass 17 | 18 | await connection.commit() 19 | 20 | 21 | upgrades: list[tuple[str, list[Callable[[Path], Awaitable[None]]]]] = [ 22 | ("0.6.0", [tools]) 23 | ] 24 | -------------------------------------------------------------------------------- /src/oterm/store/upgrades/v0_7_0.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Awaitable, Callable 2 | from pathlib import Path 3 | 4 | import aiosqlite 5 | 6 | 7 | async def images(db_path: Path) -> None: 8 | async with aiosqlite.connect(db_path) as connection: 9 | try: 10 | await connection.executescript( 11 | """ 12 | ALTER TABLE message ADD COLUMN images TEXT DEFAULT "[]"; 13 | """ 14 | ) 15 | except aiosqlite.OperationalError: 16 | pass 17 | 18 | await connection.commit() 19 | 20 | 21 | async def orphan_messages(db_path: Path) -> None: 22 | async with aiosqlite.connect(db_path) as connection: 23 | try: 24 | await connection.executescript( 25 | """ 26 | DELETE FROM message WHERE chat_id NOT 
IN (SELECT id FROM chat); 27 | """ 28 | ) 29 | except aiosqlite.OperationalError: 30 | pass 31 | 32 | await connection.commit() 33 | 34 | 35 | upgrades: list[tuple[str, list[Callable[[Path], Awaitable[None]]]]] = [ 36 | ("0.7.0", [images, orphan_messages]), 37 | ] 38 | -------------------------------------------------------------------------------- /src/oterm/store/upgrades/v0_9_0.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Awaitable, Callable 2 | from pathlib import Path 3 | 4 | import aiosqlite 5 | 6 | 7 | async def chat_type(db_path: Path) -> None: 8 | async with aiosqlite.connect(db_path) as connection: 9 | try: 10 | await connection.executescript( 11 | """ 12 | ALTER TABLE chat ADD COLUMN type TEXT DEFAULT "chat"; 13 | """ 14 | ) 15 | except aiosqlite.OperationalError: 16 | pass 17 | 18 | await connection.commit() 19 | 20 | 21 | upgrades: list[tuple[str, list[Callable[[Path], Awaitable[None]]]]] = [ 22 | ("0.9.0", [chat_type]), 23 | ] 24 | -------------------------------------------------------------------------------- /src/oterm/tools/__init__.py: -------------------------------------------------------------------------------- 1 | import itertools 2 | 3 | from oterm.types import ToolCall 4 | 5 | available_tool_defs: dict[str, list[ToolCall]] = {} 6 | 7 | 8 | def available_tool_calls() -> list[ToolCall]: 9 | return list(itertools.chain.from_iterable(available_tool_defs.values())) 10 | -------------------------------------------------------------------------------- /src/oterm/tools/date_time.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | 3 | from oterm.types import Tool 4 | 5 | DateTimeTool = Tool( 6 | type="function", 7 | function=Tool.Function( 8 | name="date_time", 9 | description="Function to get the current date and time", 10 | parameters=Tool.Function.Parameters( 11 | type="object", 12 | properties={}, 13 | required=[], 14 | ), 15 | ), 16 | ) 17 | 18 | 19 | def date_time() -> str: 20 | return datetime.now().isoformat() 21 | -------------------------------------------------------------------------------- /src/oterm/tools/external.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Awaitable, Callable, Sequence 2 | from importlib import import_module 3 | 4 | from ollama import Tool 5 | 6 | from oterm.log import log 7 | from oterm.types import ExternalToolDefinition, ToolCall 8 | 9 | 10 | def load_external_tools( 11 | external_tools: Sequence[ExternalToolDefinition], 12 | ) -> Sequence[ToolCall]: 13 | tools = [] 14 | for tool_def in external_tools: 15 | tool_path = tool_def["tool"] 16 | 17 | try: 18 | module, tool = tool_path.split(":") 19 | module = import_module(module) 20 | tool = getattr(module, tool) 21 | if not isinstance(tool, Tool): 22 | raise Exception(f"Expected Tool, got {type(tool)}") 23 | except ModuleNotFoundError as e: 24 | log.error(f"Error loading tool {tool_path}: {e}") 25 | continue 26 | 27 | callable_path = tool_def["callable"] 28 | try: 29 | module, function = callable_path.split(":") 30 | module = import_module(module) 31 | callable = getattr(module, function) 32 | if not isinstance(callable, Callable | Awaitable): 33 | raise Exception(f"Expected Callable, got {type(callable)}") 34 | except ModuleNotFoundError as e: 35 | log.error(f"Error loading callable {callable_path}: {e}") 36 | continue 37 | log.info(f"Loaded tool {tool.function.name} from 
{tool_path}") # type: ignore 38 | tools.append({"tool": tool, "callable": callable}) 39 | 40 | return tools 41 | -------------------------------------------------------------------------------- /src/oterm/tools/mcp/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggozad/oterm/5933088b9eec8ecdc147b7ba9582a45548b4aebe/src/oterm/tools/mcp/__init__.py -------------------------------------------------------------------------------- /src/oterm/tools/mcp/client.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from typing import Annotated, Any 3 | 4 | from fastmcp.client import Client 5 | from fastmcp.client.transports import SSETransport, StdioTransport, WSTransport 6 | from mcp import McpError, StdioServerParameters 7 | from mcp import Tool as MCPTool 8 | from mcp.types import ( 9 | EmbeddedResource, 10 | ImageContent, 11 | Prompt, 12 | PromptMessage, 13 | TextContent, 14 | ) 15 | from pydantic import BaseModel, ValidationError 16 | 17 | from oterm.log import log 18 | from oterm.tools.mcp.logging import Logger 19 | from oterm.tools.mcp.sampling import sampling_handler 20 | 21 | IsSSEURL = Annotated[str, lambda v: v.startswith("http://") or v.startswith("https://")] 22 | 23 | 24 | class SSEServerParameters(BaseModel): 25 | """Parameters for the SSE server.""" 26 | 27 | url: IsSSEURL 28 | 29 | 30 | IsWSURL = Annotated[str, lambda v: v.startswith("ws://") or v.startswith("wss://")] 31 | 32 | 33 | class WSServerParameters(BaseModel): 34 | """Parameters for the WS server.""" 35 | 36 | url: IsWSURL 37 | 38 | 39 | class MCPClient: 40 | def __init__( 41 | self, 42 | name: str, 43 | config: StdioServerParameters | SSEServerParameters | WSServerParameters, 44 | ): 45 | self.name = name 46 | 47 | self.client: Client | None = None 48 | try: 49 | cfg = StdioServerParameters.model_validate(config) 50 | self.transport = StdioTransport( 51 | command=cfg.command, 52 | args=cfg.args, 53 | env=cfg.env, 54 | cwd=str(cfg.cwd) if cfg.cwd else None, 55 | ) 56 | return 57 | except ValidationError: 58 | pass 59 | try: 60 | cfg = SSEServerParameters.model_validate(config) 61 | self.transport = SSETransport( 62 | url=cfg.url, 63 | ) 64 | return 65 | except (ValidationError, ValueError): 66 | pass 67 | try: 68 | cfg = WSServerParameters.model_validate(config) 69 | self.transport = WSTransport( 70 | url=cfg.url, 71 | ) 72 | except ValidationError: 73 | raise ValueError("Invalid transport type") 74 | 75 | async def initialize(self) -> Client | None: 76 | """Initialize the server connection. 77 | 78 | Returns: 79 | The initialized client or None if initialization fails. 80 | """ 81 | 82 | # We set up "done" as a future to signal when the client should shutdown. 
83 | self.closed = asyncio.Event() 84 | self.done = asyncio.Event() 85 | # We wait for the client to be initialized before returning from initialize() 86 | client_initialized = asyncio.Event() 87 | 88 | async def task(): 89 | assert self.transport is not None, "Transport is not initialized" 90 | try: 91 | async with Client( 92 | self.transport, 93 | log_handler=Logger(), 94 | sampling_handler=sampling_handler, 95 | ) as client: 96 | self.client = client 97 | client_initialized.set() 98 | await self.done.wait() 99 | self.closed.set() 100 | 101 | except Exception as e: 102 | log.error(f"Error initializing MCP server: {e}") 103 | client_initialized.set() 104 | 105 | self.task = asyncio.create_task(task()) 106 | try: 107 | await asyncio.wait_for(client_initialized.wait(), timeout=5) 108 | except asyncio.TimeoutError: 109 | self.client = None 110 | log.error("Timeout while initializing MCP server", self.name) 111 | return self.client 112 | 113 | async def get_available_tools(self) -> list[MCPTool]: 114 | """List available tools from the server. 115 | 116 | Returns: 117 | A list of available tools. 118 | 119 | Raises: 120 | RuntimeError: If the server is not initialized. 121 | """ 122 | if self.client is None: 123 | raise RuntimeError("Client is not initialized") 124 | try: 125 | tools_response = await self.client.list_tools() 126 | except McpError: 127 | return [] 128 | 129 | return tools_response 130 | 131 | async def get_available_prompts(self) -> list[Prompt]: 132 | """List available prompts from the server. 133 | 134 | Returns: 135 | A list of available prompts. 136 | 137 | Raises: 138 | RuntimeError: If the server is not initialized. 139 | """ 140 | if self.client is None: 141 | raise RuntimeError(f"Server {self.name} not initialized") 142 | 143 | try: 144 | prompts = await self.client.list_prompts() 145 | except McpError: 146 | return [] 147 | 148 | for prompt in prompts: 149 | log.info(f"Loaded prompt {prompt.name} from {self.name}") 150 | 151 | return prompts 152 | 153 | async def call_tool( 154 | self, 155 | tool_name: str, 156 | arguments: dict[str, Any], 157 | ) -> list[TextContent | ImageContent | EmbeddedResource]: 158 | """Execute a tool. 159 | 160 | Args: 161 | tool_name: Name of the tool to execute. 162 | arguments: Tool arguments. 163 | 164 | Returns: 165 | Tool execution result, or an empty list if the call fails. 166 | 167 | Raises: 168 | RuntimeError: If server is not initialized. 169 | 170 | """ 171 | if not self.client: 172 | raise RuntimeError(f"Server {self.name} not initialized") 173 | 174 | try: 175 | result = await self.client.call_tool(tool_name, arguments) 176 | return result 177 | except Exception as e: 178 | log.error(f"Error executing tool: {e}.") 179 | return [] 180 | 181 | async def call_prompt( 182 | self, 183 | prompt_name: str, 184 | arguments: dict[str, str], 185 | ) -> list[PromptMessage]: 186 | """Execute a prompt. 187 | 188 | Args: 189 | prompt_name: Name of the prompt 190 | arguments: Prompt arguments. 
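Returns: The prompt messages, or an empty list if the call fails.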
191 | """ 192 | 193 | if self.client is None: 194 | raise RuntimeError(f"Server {self.name} not initialized") 195 | try: 196 | result = await self.client.get_prompt(prompt_name, arguments) 197 | return result.messages 198 | except Exception as e: 199 | log.error(f"Error getting prompt: {e}.") 200 | return [] 201 | 202 | async def teardown(self) -> None: 203 | if self.client is None: 204 | raise RuntimeError("Client is already closed") 205 | self.done.set() 206 | await self.closed.wait() 207 | self.client = None 208 | self.transport = None 209 | -------------------------------------------------------------------------------- /src/oterm/tools/mcp/logging.py: -------------------------------------------------------------------------------- 1 | from mcp.client.session import LoggingFnT 2 | from mcp.types import LoggingMessageNotificationParams 3 | 4 | from oterm.log import log 5 | 6 | 7 | # This is here to log the messages from the MCP server, when 8 | # there is an upstream fix for https://github.com/modelcontextprotocol/python-sdk/issues/341 9 | class Logger(LoggingFnT): 10 | async def __call__(self, params: LoggingMessageNotificationParams) -> None: 11 | if params.level == "error" or params.level == "critical": 12 | log.error(params.data) 13 | elif params.level == "warning": 14 | log.warning(params.data) 15 | elif params.level == "info": 16 | log.info(params.data) 17 | elif params.level == "debug": 18 | log.debug(params.data) 19 | -------------------------------------------------------------------------------- /src/oterm/tools/mcp/prompts.py: -------------------------------------------------------------------------------- 1 | import itertools 2 | 3 | from mcp.types import ImageContent, PromptMessage, TextContent 4 | from ollama import Message 5 | 6 | from oterm.log import log 7 | from oterm.tools.mcp.client import MCPClient 8 | from oterm.types import PromptCall 9 | 10 | available_prompt_defs: dict[str, list[PromptCall]] = {} 11 | 12 | 13 | def available_prompt_calls() -> list[PromptCall]: 14 | """Return a list of all available prompt calls.""" 15 | return list(itertools.chain.from_iterable(available_prompt_defs.values())) 16 | 17 | 18 | class MCPPromptCallable: 19 | def __init__(self, name: str, server_name: str, client: MCPClient): 20 | self.name = name 21 | self.server_name = server_name 22 | self.client = client 23 | 24 | async def call(self, **kwargs) -> list[PromptMessage]: 25 | log.info(f"Calling Prompt {self.name} in {self.server_name} with {kwargs}") 26 | res = await self.client.call_prompt(self.name, kwargs) 27 | log.info(f"Prompt {self.name} returned {res}") 28 | return res 29 | 30 | 31 | def mcp_prompt_to_ollama_messages(mcp_prompt: list[PromptMessage]) -> list[Message]: 32 | """Convert an MCP prompt to Ollama messages""" 33 | 34 | messages: list[Message] = [] 35 | for m in mcp_prompt: 36 | if isinstance(m.content, TextContent): 37 | messages.append(Message(role=m.role, content=m.content.text)) 38 | elif isinstance(m.content, ImageContent): 39 | messages.append(Message(role=m.role, images=[m.content.data])) # type: ignore 40 | 41 | return messages 42 | -------------------------------------------------------------------------------- /src/oterm/tools/mcp/sampling.py: -------------------------------------------------------------------------------- 1 | from difflib import get_close_matches 2 | 3 | from mcp.shared.context import RequestContext 4 | from mcp.types import ( 5 | CreateMessageRequestParams, 6 | CreateMessageResult, 7 | ModelHint, 8 | SamplingMessage, 9 | TextContent, 10 | 
) 11 | from ollama import ListResponse, Message, Options 12 | 13 | from oterm.log import log 14 | from oterm.ollamaclient import OllamaLLM 15 | 16 | _DEFAULT_MODEL = "llama3.2" 17 | 18 | 19 | async def sampling_handler( 20 | messages: list[SamplingMessage], 21 | params: CreateMessageRequestParams, 22 | context: RequestContext, 23 | ) -> CreateMessageResult: 24 | """Handle sampling messages. 25 | 26 | Args: 27 | context: The request context. 28 | params: The parameters for the message. 29 | 30 | Returns: 31 | The result of the sampling. 32 | """ 33 | log.info("Request for sampling", params.model_dump_json()) 34 | msgs = [ 35 | Message(role=msg.role, content=msg.content.text) 36 | for msg in messages 37 | if type(msg.content) is TextContent 38 | ] 39 | system = params.systemPrompt 40 | options = Options(temperature=params.temperature, stop=params.stopSequences) 41 | model = _DEFAULT_MODEL 42 | if params.modelPreferences and params.modelPreferences.hints: 43 | model_hints = params.modelPreferences.hints 44 | model_from_hints = await search_model(model_hints) 45 | if model_from_hints: 46 | model = model_from_hints.model or _DEFAULT_MODEL 47 | client = OllamaLLM( 48 | model=model, 49 | system=system, 50 | history=msgs, 51 | options=options, 52 | ) 53 | response = await client.completion() 54 | 55 | return CreateMessageResult( 56 | content=TextContent(text=response, type="text"), 57 | role="user", 58 | model=model, 59 | ) 60 | 61 | 62 | async def search_model(hints: list[ModelHint]) -> ListResponse.Model | None: 63 | """ 64 | Fuzzy search for a model. 65 | """ 66 | log.info("Searching for model based on hints", [h.name for h in hints]) 67 | available_models = OllamaLLM.list().models 68 | available_model_names = [model.model for model in available_models if model.model] 69 | 70 | hint = " ".join([h.name for h in hints if h.name]) 71 | matches = get_close_matches(hint, available_model_names, n=1, cutoff=0.1) 72 | if matches: 73 | # Return the first matching model 74 | for model in available_models: 75 | if model.model == matches[0]: 76 | log.info("Found matching model", model.model) 77 | return model 78 | 79 | # If no matches are found, return None 80 | log.warning("No matching model found for the provided hints.") 81 | return None 82 | -------------------------------------------------------------------------------- /src/oterm/tools/mcp/setup.py: -------------------------------------------------------------------------------- 1 | from mcp import Tool as MCPTool 2 | 3 | from oterm.config import appConfig 4 | from oterm.log import log 5 | from oterm.tools.mcp.client import MCPClient 6 | from oterm.tools.mcp.prompts import MCPPromptCallable 7 | from oterm.tools.mcp.tools import MCPToolCallable, mcp_tool_to_ollama_tool 8 | from oterm.types import PromptCall, ToolCall 9 | 10 | mcp_clients: list[MCPClient] = [] 11 | 12 | 13 | async def setup_mcp_servers() -> tuple[ 14 | dict[str, list[ToolCall]], dict[str, list[PromptCall]] 15 | ]: 16 | mcp_servers = appConfig.get("mcpServers") 17 | tool_calls: dict[str, list[ToolCall]] = {} 18 | prompt_calls: dict[str, list[PromptCall]] = {} 19 | 20 | if mcp_servers: 21 | for server, config in mcp_servers.items(): 22 | client = MCPClient(server, config) 23 | await client.initialize() 24 | if not client.client: 25 | continue 26 | mcp_clients.append(client) 27 | 28 | log.info(f"Initialized MCP server {server}") 29 | 30 | mcp_tools: list[MCPTool] = await client.get_available_tools() 31 | mcp_prompts = await client.get_available_prompts() 32 | 33 | if mcp_tools: 34 | 
tool_calls[server] = [] 35 | for mcp_tool in mcp_tools: 36 | tool = mcp_tool_to_ollama_tool(mcp_tool) 37 | mcpToolCallable = MCPToolCallable(mcp_tool.name, server, client) 38 | tool_calls[server].append( 39 | {"tool": tool, "callable": mcpToolCallable.call} 40 | ) 41 | log.info(f"Loaded MCP tool {mcp_tool.name} from {server}") 42 | 43 | if mcp_prompts: 44 | prompt_calls[server] = [] 45 | 46 | for prompt in mcp_prompts: 47 | mcpPromptCallable = MCPPromptCallable(prompt.name, server, client) 48 | prompt_calls[server].append( 49 | {"prompt": prompt, "callable": mcpPromptCallable.call} 50 | ) 51 | log.info(f"Loaded MCP prompt {prompt.name} from {server}") 52 | 53 | return tool_calls, prompt_calls 54 | 55 | 56 | async def teardown_mcp_servers(): 57 | log.info("Tearing down MCP servers") 58 | # Important to tear down in reverse order 59 | mcp_clients.reverse() 60 | for client in mcp_clients: 61 | await client.teardown() 62 | -------------------------------------------------------------------------------- /src/oterm/tools/mcp/tools.py: -------------------------------------------------------------------------------- 1 | from mcp import Tool as MCPTool 2 | from mcp.types import TextContent 3 | 4 | from oterm.tools.mcp.client import MCPClient 5 | from oterm.types import Tool 6 | 7 | 8 | class MCPToolCallable: 9 | def __init__(self, name: str, server_name: str, client: MCPClient): 10 | self.name = name 11 | self.server_name = server_name 12 | self.client = client 13 | 14 | async def call(self, **kwargs) -> str: 15 | res = await self.client.call_tool(self.name, kwargs) 16 | text_content = [m.text for m in res if type(m) is TextContent] 17 | return "\n".join(text_content) 18 | 19 | 20 | def mcp_tool_to_ollama_tool(mcp_tool: MCPTool) -> Tool: 21 | """Convert an MCP tool to an Ollama tool""" 22 | 23 | return Tool( 24 | function=Tool.Function( 25 | name=mcp_tool.name, 26 | description=mcp_tool.description, 27 | parameters=Tool.Function.Parameters.model_validate(mcp_tool.inputSchema), 28 | ), 29 | ) 30 | -------------------------------------------------------------------------------- /src/oterm/tools/shell.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | 3 | from oterm.types import Tool 4 | 5 | ShellTool = Tool( 6 | type="function", 7 | function=Tool.Function( 8 | name="shell", 9 | description="Function to execute commands in the user's shell and return the output.", 10 | parameters=Tool.Function.Parameters( 11 | type="object", 12 | properties={ 13 | "command": Tool.Function.Parameters.Property( 14 | type="string", description="The shell command to execute." 15 | ) 16 | }, 17 | required=["command"], 18 | ), 19 | ), 20 | ) 21 | 22 | 23 | def shell_command(command="") -> str: 24 | return subprocess.run(command, shell=True, capture_output=True).stdout.decode( 25 | "utf-8" 26 | ) 27 | -------------------------------------------------------------------------------- /src/oterm/tools/think.py: -------------------------------------------------------------------------------- 1 | from oterm.types import Tool 2 | 3 | ThinkTool = Tool( 4 | type="function", 5 | function=Tool.Function( 6 | name="think", 7 | description="Use the tool to think about something. It will not obtain new information or change the database, but just append the thought to the log. 
Use it when complex reasoning or some cache memory is needed.", 8 | parameters=Tool.Function.Parameters( 9 | type="object", 10 | properties={ 11 | "thought": Tool.Function.Parameters.Property( 12 | type="string", description="A thought to think about." 13 | ), 14 | }, 15 | required=["thought"], 16 | ), 17 | ), 18 | ) 19 | 20 | 21 | async def think(thought: str) -> str: 22 | return thought 23 | -------------------------------------------------------------------------------- /src/oterm/types.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Awaitable, Callable 2 | from typing import Any, Literal, TypedDict 3 | 4 | from mcp.types import Prompt 5 | from ollama import Options, Tool 6 | from pydantic import BaseModel, Field 7 | 8 | 9 | class ParsedResponse(BaseModel): 10 | thought: str 11 | response: str 12 | formatted_output: str 13 | 14 | 15 | class ToolCall(TypedDict): 16 | tool: Tool 17 | callable: Callable | Awaitable 18 | 19 | 20 | class PromptCall(TypedDict): 21 | prompt: Prompt 22 | callable: Callable | Awaitable 23 | 24 | 25 | class ExternalToolDefinition(TypedDict): 26 | tool: str 27 | callable: str 28 | 29 | 30 | class OtermOllamaOptions(Options): 31 | # Patch stop to allow for a single string. 32 | # This is an issue with the gemma model which has a single stop parameter. 33 | # Remove when fixed upstream and close #187 34 | # Using 'any' to avoid type conflict with parent class 35 | stop: Any = None # type: ignore 36 | 37 | class Config: 38 | extra = "forbid" 39 | 40 | 41 | class ChatModel(BaseModel): 42 | """Chat model for storing chat metadata""" 43 | 44 | id: int | None = None 45 | name: str = "" 46 | model: str = "" 47 | system: str | None = None 48 | format: str = "" 49 | parameters: OtermOllamaOptions = Field(default_factory=OtermOllamaOptions) 50 | keep_alive: int = 5 51 | tools: list[Tool] = Field(default_factory=list) 52 | type: str = "chat" 53 | 54 | 55 | class MessageModel(BaseModel): 56 | """Message model for storing chat messages""" 57 | 58 | id: int | None = None 59 | chat_id: int 60 | role: Literal["user", "assistant", "system", "tool"] 61 | text: str 62 | images: list[str] = Field(default_factory=list) 63 | -------------------------------------------------------------------------------- /src/oterm/utils.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import sys 3 | from collections.abc import Callable 4 | from functools import wraps 5 | from importlib import metadata 6 | from pathlib import Path 7 | 8 | import httpx 9 | from packaging.version import Version, parse 10 | 11 | from oterm.types import ParsedResponse 12 | 13 | 14 | def debounce(wait: float) -> Callable: 15 | """ 16 | A decorator to debounce a function, ensuring it is called only after a specified delay 17 | and always executes after the last call. 18 | 19 | Args: 20 | wait (float): The debounce delay in seconds. 21 | 22 | Returns: 23 | Callable: The decorated function. 
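Example (hypothetical handler, for illustration): decorating "async def on_text_changed(value)" with @debounce(0.25) collapses rapid successive calls; only the last one executes, roughly 0.25s after the final call.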
24 | """ 25 | 26 | def decorator(func: Callable) -> Callable: 27 | last_call = None 28 | task = None 29 | 30 | @wraps(func) 31 | async def debounced(*args, **kwargs): 32 | nonlocal last_call, task 33 | last_call = asyncio.get_event_loop().time() 34 | 35 | if task: 36 | task.cancel() 37 | 38 | async def call_func(): 39 | await asyncio.sleep(wait) 40 | if asyncio.get_event_loop().time() - last_call >= wait: # type: ignore 41 | await func(*args, **kwargs) 42 | 43 | task = asyncio.create_task(call_func()) 44 | 45 | return debounced 46 | 47 | return decorator 48 | 49 | 50 | def parse_response(input_text: str) -> ParsedResponse: 51 | """ 52 | Parse a response from the chatbot. 53 | """ 54 | 55 | thought = "" 56 | response = input_text 57 | formatted_output = input_text 58 | 59 | # If the response contains a think tag, split the response into the thought process and the actual response 60 | thought_end = input_text.find("") 61 | if input_text.startswith("") and thought_end != -1: 62 | thought = input_text[7:thought_end].lstrip("\n").rstrip("\n").strip() 63 | response = input_text[thought_end + 8 :].lstrip("\n").rstrip("\n") 64 | # transform the think tag into a markdown blockquote (for clarity) 65 | if thought.strip(): 66 | thought = "\n".join([f"> {line}" for line in thought.split("\n")]) 67 | formatted_output = ( 68 | "> ### \n" + thought + "\n> ### \n" + response 69 | ) 70 | 71 | return ParsedResponse( 72 | thought=thought, response=response, formatted_output=formatted_output 73 | ) 74 | 75 | 76 | def get_default_data_dir() -> Path: 77 | """ 78 | Get the user data directory for the current system platform. 79 | 80 | Linux: ~/.local/share/oterm 81 | macOS: ~/Library/Application Support/oterm 82 | Windows: C:/Users//AppData/Roaming/oterm 83 | 84 | :return: User Data Path 85 | :rtype: Path 86 | """ 87 | home = Path.home() 88 | 89 | system_paths = { 90 | "win32": home / "AppData/Roaming/oterm", 91 | "linux": home / ".local/share/oterm", 92 | "darwin": home / "Library/Application Support/oterm", 93 | } 94 | 95 | data_path = system_paths[sys.platform] 96 | return data_path 97 | 98 | 99 | def semantic_version_to_int(version: str) -> int: 100 | """ 101 | Convert a semantic version string to an integer. 102 | 103 | :param version: Semantic version string 104 | :type version: str 105 | :return: Integer representation of semantic version 106 | :rtype: int 107 | """ 108 | major, minor, patch = version.split(".") 109 | major = int(major) << 16 110 | minor = int(minor) << 8 111 | patch = int(patch) 112 | return major + minor + patch 113 | 114 | 115 | def int_to_semantic_version(version: int) -> str: 116 | """ 117 | Convert an integer to a semantic version string. 118 | 119 | :param version: Integer representation of semantic version 120 | :type version: int 121 | :return: Semantic version string 122 | :rtype: str 123 | """ 124 | major = version >> 16 125 | minor = (version >> 8) & 255 126 | patch = version & 255 127 | return f"{major}.{minor}.{patch}" 128 | 129 | 130 | async def is_up_to_date() -> tuple[bool, Version, Version]: 131 | """ 132 | Checks whether oterm is current. 
133 | 134 | :return: A tuple containing a boolean indicating whether oterm is current, the running version and the latest version 135 | :rtype: tuple[bool, Version, Version] 136 | """ 137 | 138 | async with httpx.AsyncClient() as client: 139 | running_version = parse(metadata.version("oterm")) 140 | try: 141 | response = await client.get("https://pypi.org/pypi/oterm/json") 142 | data = response.json() 143 | pypi_version = parse(data["info"]["version"]) 144 | except Exception: 145 | # If no network connection, do not raise alarms. 146 | pypi_version = running_version 147 | return running_version >= pypi_version, running_version, pypi_version 148 | 149 | 150 | async def check_ollama() -> bool: 151 | """ 152 | Check if the Ollama server is up and running 153 | """ 154 | from oterm.config import envConfig 155 | 156 | up = False 157 | try: 158 | async with httpx.AsyncClient() as client: 159 | response = await client.get(envConfig.OLLAMA_URL) 160 | up = response.status_code == 200 161 | except httpx.HTTPError: 162 | up = False 163 | finally: 164 | if not up: 165 | from oterm.app.oterm import app 166 | 167 | app.notify( 168 | f"The Ollama server is not reachable at {envConfig.OLLAMA_URL}, please check your connection or set the OLLAMA_URL environment variable. oterm will now quit.", 169 | severity="error", 170 | timeout=10, 171 | ) 172 | 173 | async def quit(): 174 | await asyncio.sleep(10.0) 175 | try: 176 | from oterm.tools.mcp.setup import teardown_mcp_servers 177 | 178 | await teardown_mcp_servers() 179 | exit() 180 | 181 | except Exception: 182 | pass 183 | 184 | asyncio.create_task(quit()) 185 | return up 186 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | from collections.abc import AsyncGenerator 2 | from io import BytesIO 3 | from pathlib import Path 4 | 5 | import ollama 6 | import pytest 7 | import pytest_asyncio 8 | from mcp import StdioServerParameters 9 | from PIL import Image 10 | 11 | from oterm.tools.mcp.client import MCPClient 12 | 13 | DEFAULT_MODEL = "llama3.2" 14 | 15 | 16 | @pytest_asyncio.fixture(autouse=True) 17 | async def load_test_models(): 18 | try: 19 | ollama.show(DEFAULT_MODEL) 20 | except ollama.ResponseError: 21 | ollama.pull(DEFAULT_MODEL) 22 | yield 23 | 24 | 25 | @pytest.fixture(scope="session") 26 | def default_model() -> str: 27 | return DEFAULT_MODEL 28 | 29 | 30 | @pytest.fixture(scope="session") 31 | def llama_image() -> bytes: 32 | buffered = BytesIO() 33 | image = Image.open("tests/data/lama.jpg") 34 | image.save(buffered, format="JPEG") 35 | return buffered.getvalue() 36 | 37 | 38 | @pytest.fixture(scope="session") 39 | def mcp_server_config() -> dict: 40 | mcp_server_executable = Path(__file__).parent / "tools" / "mcp_servers.py" 41 | return { 42 | "stdio": { 43 | "command": "mcp", 44 | "args": ["run", mcp_server_executable.absolute().as_posix()], 45 | }, 46 | "sse": { 47 | "url": "http://localhost:8000/sse", 48 | }, 49 | "ws": { 50 | "url": "ws://localhost:8000/ws", 51 | }, 52 | } 53 | 54 | 55 | @pytest_asyncio.fixture(scope="function") 56 | async def mcp_client(mcp_server_config) -> AsyncGenerator[MCPClient, None]: 57 | client = MCPClient( 58 | "test_server", 59 | StdioServerParameters.model_validate(mcp_server_config["stdio"]), 60 | ) 61 | await client.initialize() 62 | 63 | yield client 64 | await client.teardown() 65 | -------------------------------------------------------------------------------- 
/tests/data/lama.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggozad/oterm/5933088b9eec8ecdc147b7ba9582a45548b4aebe/tests/data/lama.jpg -------------------------------------------------------------------------------- /tests/test_llm_client.py: --------------------------------------------------------------------------------
1 | import pytest
2 | from ollama import ResponseError
3 |
4 | from oterm.ollamaclient import OllamaLLM
5 | from oterm.tools.date_time import DateTimeTool
6 |
7 |
8 | @pytest.mark.asyncio
9 | async def test_generate(default_model):
10 | llm = OllamaLLM(model=default_model)
11 | res = await llm.completion(prompt="Please add 42 and 42")
12 | assert "84" in res
13 |
14 |
15 | @pytest.mark.asyncio
16 | async def test_llm_context(default_model):
17 | llm = OllamaLLM(model=default_model)
18 | await llm.completion("I am testing oterm, a python client for Ollama.")
19 | # There should now be a context saved for the conversation.
20 | res = await llm.completion("Do you remember what I am testing?")
21 | assert "oterm" in res.lower()
22 |
23 |
24 | @pytest.mark.asyncio
25 | async def test_multi_modal_llm(llama_image):
26 | llm = OllamaLLM(model="llava")
27 | res = await llm.completion("Describe this image", images=[llama_image])
28 | assert "llama" in res or "animal" in res
29 |
30 |
31 | @pytest.mark.asyncio
32 | async def test_errors():
33 | llm = OllamaLLM(model="non-existent-model")
34 | # pytest.raises fails the test if no error is raised, unlike a bare try/except.
35 | with pytest.raises(ResponseError) as excinfo:
36 | await llm.completion("This should fail.")
37 | assert 'model "non-existent-model" not found' in str(excinfo.value)
38 |
39 |
40 | @pytest.mark.asyncio
41 | async def test_iterator(default_model):
42 | llm = OllamaLLM(model=default_model)
43 | response = ""
44 | async for text in llm.stream("Please add 2 and 2"):
45 | response = text
46 | assert "4" in response
47 |
48 |
49 | @pytest.mark.asyncio
50 | async def test_tool_streaming(default_model):
51 | llm = OllamaLLM(
52 | model=default_model,
53 | tool_defs=[
54 | {"tool": DateTimeTool, "callable": lambda: "2025-01-01"},
55 | ],
56 | )
57 | response = ""
58 | async for text in llm.stream(
59 | "What is the current date in YYYY-MM-DD format? Use the date_time tool to answer."
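60 | # Note: llm.stream() appears to yield the accumulated response so far rather than deltas, so the last yielded value is the complete answer (compare test_iterator above).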
61 | ):
62 | response = text
63 | assert "2025-01-01" in response
64 | -------------------------------------------------------------------------------- /tests/test_ollama_api.py: --------------------------------------------------------------------------------
1 | import pytest
2 | from ollama import ResponseError
3 |
4 | from oterm.ollamaclient import OllamaLLM, jsonify_options, parse_ollama_parameters
5 |
6 |
7 | def test_list():
8 | llm = OllamaLLM()
9 | response = llm.list()
10 | models = response.get("models", [])
11 | assert [model for model in models if model.model == "llama3.2:latest"]
12 |
13 |
14 | def test_show():
15 | llm = OllamaLLM()
16 | response = llm.show("llama3.2")
17 | assert response
18 | assert response.modelfile
19 | assert response.parameters
20 | assert response.template
21 | assert response.details
22 | assert response.modelinfo
23 |
24 | params = parse_ollama_parameters(response.parameters)
25 | assert params.stop == ["<|start_header_id|>", "<|end_header_id|>", "<|eot_id|>"]
26 | assert params.temperature is None
27 | json = jsonify_options(params)
28 | assert json == (
29 | "{\n"
30 | ' "stop": [\n'
31 | ' "<|start_header_id|>",\n'
32 | ' "<|end_header_id|>",\n'
33 | ' "<|eot_id|>"\n'
34 | " ]\n"
35 | "}"
36 | )
37 |
38 |
39 | def test_pull():
40 | llm = OllamaLLM()
41 | stream = llm.pull("llama3.2:latest")
42 | entries = [entry.status for entry in stream]
43 | assert "pulling manifest" in entries
44 | assert "success" in entries
45 |
46 | with pytest.raises(ResponseError) as excinfo:
47 | stream = llm.pull("non-existing:latest")
48 | # Iterating raises; any statuses seen before the error must not be "success".
49 | for entry in stream:
50 | assert entry.status != "success"
51 | assert "pull model manifest: file does not exist" in str(excinfo.value)
52 | -------------------------------------------------------------------------------- /tests/test_store.py: --------------------------------------------------------------------------------
1 | from oterm.utils import int_to_semantic_version, semantic_version_to_int
2 |
3 |
4 | def test_sqlite_user_version():
5 | version = "0.1.5"
6 | assert semantic_version_to_int(version) == 261
7 | assert int_to_semantic_version(261) == version
8 |
9 | version = "0.0.0"
10 | assert semantic_version_to_int(version) == 0
11 | assert int_to_semantic_version(0) == version
12 |
13 | version = "255.255.255"
14 | assert semantic_version_to_int(version) == 16777215
15 | assert int_to_semantic_version(16777215) == version
16 | -------------------------------------------------------------------------------- /tests/tools/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggozad/oterm/5933088b9eec8ecdc147b7ba9582a45548b4aebe/tests/tools/__init__.py -------------------------------------------------------------------------------- /tests/tools/mcp_servers.py: --------------------------------------------------------------------------------
1 | from mcp import SamplingMessage
2 | from mcp.server.fastmcp import Context, FastMCP
3 | from mcp.server.fastmcp.prompts.base import AssistantMessage, Message, UserMessage
4 | from mcp.types import ModelHint, ModelPreferences, TextContent
5 |
6 | mcp = FastMCP("TestServer", port=8080)
7 |
8 |
9 | @mcp.resource("config://app")
10 | def get_config() -> str:
11 | return "Oracle MCP server"
12 |
13 |
14 | @mcp.tool(name="oracle", description="Ask the oracle a question.")
15 | async def oracle(query: str, ctx: Context) -> str:
16 | return "Oracle says: oterm"
17 |
18 |
19 | @mcp.tool(name="puzzle_solver", 
description="Solves a puzzle by asking an advanced AI.") 20 | async def puzzle_solver(puzzle_description: str, ctx: Context) -> str: 21 | """ 22 | This tool is included to make a sampling request to the server. 23 | It takes a puzzle description and returns the answer. 24 | """ 25 | session = ctx.session 26 | sampling_response = await session.create_message( 27 | messages=[ 28 | SamplingMessage( 29 | role="user", 30 | content=TextContent( 31 | text=f"Please solve this puzzle: {puzzle_description}", type="text" 32 | ), 33 | ) 34 | ], 35 | model_preferences=ModelPreferences( 36 | hints=[ModelHint(name="mistral")], 37 | ), 38 | max_tokens=100, 39 | ) 40 | return sampling_response.content.text # type: ignore 41 | 42 | 43 | @mcp.prompt(name="oracle_prompt", description="Prompt to ask the oracle a question.") 44 | async def oracle_prompt(question: str) -> str: 45 | return f"Oracle: {question}" 46 | 47 | 48 | @mcp.prompt(name="debug_error", description="Prompt to debug an error.") 49 | async def debug_error(error: str, language: str = "python") -> list[Message]: 50 | return [ 51 | UserMessage(f"I'm seeing this {language} error: {error}"), 52 | AssistantMessage("I'll help debug that. What have you tried so far?"), 53 | ] 54 | 55 | 56 | if __name__ == "__main__": 57 | mcp.run(transport="stdio") 58 | -------------------------------------------------------------------------------- /tests/tools/test_custom_tool.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from oterm.tools.date_time import DateTimeTool, date_time 4 | from oterm.tools.external import load_external_tools 5 | 6 | 7 | @pytest.mark.asyncio 8 | async def test_loading_custom_tool(): 9 | # Test loading a callable from a well-defined module 10 | tools = load_external_tools( 11 | [ 12 | { 13 | "tool": "oterm.tools.date_time:DateTimeTool", 14 | "callable": "oterm.tools.date_time:date_time", 15 | } 16 | ] 17 | ) 18 | 19 | assert len(tools) == 1 20 | assert tools[0]["tool"] == DateTimeTool 21 | assert tools[0]["callable"] == date_time 22 | -------------------------------------------------------------------------------- /tests/tools/test_date_time_tool.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | 3 | import pytest 4 | 5 | from oterm.ollamaclient import OllamaLLM 6 | from oterm.tools.date_time import DateTimeTool, date_time 7 | 8 | 9 | @pytest.mark.asyncio 10 | async def test_date_time(default_model): 11 | llm = OllamaLLM( 12 | model=default_model, tool_defs=[{"tool": DateTimeTool, "callable": date_time}] 13 | ) 14 | res = await llm.completion( 15 | "What is the time in 24h format? Use the date_time tool to answer this question." 
16 | ) 17 | time = datetime.time(datetime.now()) 18 | assert f"{time.hour:02}:{time.minute:02}" in res 19 | -------------------------------------------------------------------------------- /tests/tools/test_mcp_prompts.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from mcp.types import Prompt, PromptMessage, TextContent 3 | from ollama import Message 4 | 5 | from oterm.tools.mcp.prompts import MCPPromptCallable, mcp_prompt_to_ollama_messages 6 | 7 | 8 | @pytest.mark.asyncio 9 | async def test_mcp_simple_string_prompt(mcp_client): 10 | await mcp_client.initialize() 11 | prompts = await mcp_client.get_available_prompts() 12 | for prompt in prompts: 13 | assert Prompt.model_validate(prompt) 14 | 15 | oracle_prompt = [p for p in prompts if p.name == "oracle_prompt"][0] 16 | assert oracle_prompt.name == "oracle_prompt" 17 | assert oracle_prompt.description == "Prompt to ask the oracle a question." 18 | args = oracle_prompt.arguments or [] 19 | assert len(args) == 1 20 | arg = args[0] 21 | assert arg.name == "question" 22 | assert arg.required 23 | 24 | mcpPromptCallable = MCPPromptCallable(oracle_prompt.name, "test_server", mcp_client) 25 | res = await mcpPromptCallable.call(question="What is the best client for Ollama?") 26 | 27 | assert res == [ 28 | PromptMessage( 29 | role="user", 30 | content=TextContent( 31 | type="text", 32 | text="Oracle: What is the best client for Ollama?", 33 | annotations=None, 34 | ), 35 | ), 36 | ] 37 | 38 | assert mcp_prompt_to_ollama_messages(res) == [ 39 | Message(role="user", content="Oracle: What is the best client for Ollama?") 40 | ] 41 | 42 | 43 | @pytest.mark.asyncio 44 | async def test_mcp_multiple_messages_prompt(mcp_client): 45 | prompts = await mcp_client.get_available_prompts() 46 | for prompt in prompts: 47 | assert Prompt.model_validate(prompt) 48 | 49 | debug_prompt = [p for p in prompts if p.name == "debug_error"][0] 50 | assert debug_prompt.name == "debug_error" 51 | assert debug_prompt.description == "Prompt to debug an error." 52 | args = debug_prompt.arguments or [] 53 | assert len(args) == 2 54 | arg = args[0] 55 | assert arg.name == "error" 56 | assert arg.required 57 | 58 | arg = args[1] 59 | assert arg.name == "language" 60 | assert arg.required == False # noqa 61 | 62 | mcpPromptCallable = MCPPromptCallable(debug_prompt.name, "test_server", mcp_client) 63 | res = await mcpPromptCallable.call(error="Assertion error") 64 | 65 | assert res == [ 66 | PromptMessage( 67 | role="user", 68 | content=TextContent( 69 | type="text", 70 | text="I'm seeing this python error: Assertion error", 71 | annotations=None, 72 | ), 73 | ), 74 | PromptMessage( 75 | role="assistant", 76 | content=TextContent( 77 | type="text", 78 | text="I'll help debug that. What have you tried so far?", 79 | annotations=None, 80 | ), 81 | ), 82 | ] 83 | 84 | assert mcp_prompt_to_ollama_messages(res) == [ 85 | Message(role="user", content="I'm seeing this python error: Assertion error"), 86 | Message( 87 | role="assistant", 88 | content="I'll help debug that. 
What have you tried so far?",
89 | ),
90 | ]
91 | -------------------------------------------------------------------------------- /tests/tools/test_mcp_sampling.py: --------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from oterm.ollamaclient import OllamaLLM
4 | from oterm.tools.mcp.client import MCPClient
5 | from oterm.tools.mcp.tools import MCPToolCallable
6 | from oterm.types import Tool
7 |
8 |
9 | @pytest.mark.asyncio
10 | async def test_mcp_sampling(mcp_client: MCPClient, default_model):
11 | """
12 | Test the sampling capabilities of oterm.
13 | Here we go full circle: the MCP client calls the server,
14 | which calls back into the client with a sampling request.
15 | """
16 |
17 | await mcp_client.initialize()
18 |
19 | tools = await mcp_client.get_available_tools()
20 | puzzle_solver = tools[1]
21 | oterm_tool = Tool(
22 | function=Tool.Function(
23 | name=puzzle_solver.name,
24 | description=puzzle_solver.description,
25 | parameters=Tool.Function.Parameters.model_validate(
26 | puzzle_solver.inputSchema
27 | ),
28 | ),
29 | )
30 | mcpToolCallable = MCPToolCallable(puzzle_solver.name, "test_server", mcp_client)
31 | llm = OllamaLLM(
32 | model=default_model,
33 | tool_defs=[{"tool": oterm_tool, "callable": mcpToolCallable.call}],
34 | )
35 |
36 | res = await llm.completion(
37 | """
38 | Solve the following puzzle by calling the puzzle solver tool.
39 | Jack is looking at Anne. Anne is looking at George.
40 | Jack is married, George is not, and we don't know if Anne is married.
41 | Is a married person looking at an unmarried person?
42 | Just answer yes or no."""
43 | )
44 | assert "no" in res.lower() or "yes" in res.lower()
45 | -------------------------------------------------------------------------------- /tests/tools/test_mcp_tools.py: --------------------------------------------------------------------------------
1 | import pytest
2 | from mcp.types import Tool as MCPTool
3 |
4 | from oterm.ollamaclient import OllamaLLM
5 | from oterm.tools.mcp.client import MCPClient
6 | from oterm.tools.mcp.tools import MCPToolCallable
7 | from oterm.types import Tool
8 |
9 |
10 | @pytest.mark.asyncio
11 | async def test_mcp_tools(mcp_client: MCPClient, default_model):
12 | tools = await mcp_client.get_available_tools()
13 | for oracle in tools:
14 | assert MCPTool.model_validate(oracle)
15 |
16 | oracle = tools[0]
17 | oterm_tool = Tool(
18 | function=Tool.Function(
19 | name=oracle.name,
20 | description=oracle.description,
21 | parameters=Tool.Function.Parameters.model_validate(oracle.inputSchema),
22 | ),
23 | )
24 |
25 | mcpToolCallable = MCPToolCallable(oracle.name, "test_server", mcp_client)
26 | llm = OllamaLLM(
27 | model=default_model,
28 | tool_defs=[{"tool": oterm_tool, "callable": mcpToolCallable.call}],
29 | )
30 |
31 | res = await llm.completion("Ask the oracle what is the best client for Ollama.")
32 | assert (
33 | "oterm" in res or "orterm" in res
34 | ) # wtf is with orterm being the best client?
35 | -------------------------------------------------------------------------------- /tests/tools/test_mcp_transports.py: --------------------------------------------------------------------------------
1 | from fastmcp.client.transports import SSETransport, StdioTransport, WSTransport
2 |
3 | from oterm.tools.mcp.client import MCPClient
4 |
5 |
6 | def test_stdio_transport(mcp_server_config):
7 | """
8 | Test the MCP client with a StdioServerParameters. 
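The transport appears to be inferred from the shape of the config: a "command" key selects StdioTransport, an http(s) URL selects SSETransport, and a ws:// URL selects WSTransport (see the fixtures in conftest.py).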
9 | """ 10 | 11 | client = MCPClient("test_stdio", mcp_server_config["stdio"]) 12 | assert isinstance(client.transport, StdioTransport) 13 | 14 | 15 | def test_sse_transport(mcp_server_config): 16 | client = MCPClient("test_sse", mcp_server_config["sse"]) 17 | assert isinstance(client.transport, SSETransport) 18 | 19 | 20 | def test_ws_transport(mcp_server_config): 21 | client = MCPClient("test_ws", mcp_server_config["ws"]) 22 | assert isinstance(client.transport, WSTransport) 23 | -------------------------------------------------------------------------------- /tests/tools/test_shell_tool.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from oterm.ollamaclient import OllamaLLM 4 | from oterm.tools.shell import ShellTool, shell_command 5 | 6 | 7 | @pytest.mark.asyncio 8 | async def test_shell(default_model): 9 | llm = OllamaLLM( 10 | model=default_model, 11 | tool_defs=[ 12 | {"tool": ShellTool, "callable": shell_command}, 13 | ], 14 | ) 15 | res = await llm.completion( 16 | "What is the current directory? Use the shell tool available and execute the command." 17 | ) 18 | assert "oterm" in res 19 | -------------------------------------------------------------------------------- /tests/tools/test_think_tool.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from oterm.ollamaclient import OllamaLLM 4 | from oterm.tools.think import ThinkTool, think 5 | 6 | 7 | @pytest.mark.asyncio 8 | async def test_think(default_model): 9 | llm = OllamaLLM( 10 | model=default_model, tool_defs=[{"tool": ThinkTool, "callable": think}] 11 | ) 12 | res = await llm.completion( 13 | """ 14 | Cannibals ambush a safari in the jungle and capture three men. The cannibals give the men a single chance to escape uneaten. 15 | The captives are lined up in order of height, and are tied to stakes. The man in the rear can see the backs of his two friends, the man in the middle can see the back of the man in front, and the man in front cannot see anyone. The cannibals show the men five hats. Three of the hats are black and two of the hats are white. 16 | Blindfolds are then placed over each man's eyes and a hat is placed on each man's head. The two hats left over are hidden. The blindfolds are then removed and it is said to the men that if one of them can guess what color hat he is wearing they can all leave unharmed. 17 | The man in the rear who can see both of his friends' hats but not his own says, "I don't know". The middle man who can see the hat of the man in front, but not his own says, "I don't know". The front man who cannot see ANYBODY'S hat says "I know!" 18 | What was the color of his hat? Reply just with the color of the hat. 19 | """ 20 | ) 21 | assert res.lower() == "black" or res.lower() == "white" 22 | --------------------------------------------------------------------------------