├── .github └── workflows │ └── ci.yml ├── .gitignore ├── .golangci.yaml ├── CHANGELOG.md ├── Dockerfile ├── LICENSE ├── README.md ├── cmd └── server │ ├── config.go │ └── main.go ├── codecov.yml ├── config.example.yaml ├── embed.go ├── go.mod ├── go.sum ├── internal ├── handlers │ ├── chat.go │ ├── home.go │ ├── main.go │ └── main_test.go ├── models │ └── chat.go └── services │ ├── anthropic.go │ ├── bolt.go │ ├── helper.go │ ├── ollama.go │ ├── openai.go │ ├── openrouter.go │ └── parameter.go ├── static ├── css │ └── styles.css └── js │ └── home.js └── templates ├── layout └── base.html ├── pages └── home.html └── partials ├── ai_message.html ├── chat_title.html ├── chatbox.html ├── list_prompts.html ├── list_resources.html ├── list_servers.html ├── list_tools.html ├── user_message.html └── welcome.html /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | 9 | jobs: 10 | test: 11 | name: Test 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v3 15 | 16 | - name: Set up Go 17 | uses: actions/setup-go@v4 18 | with: 19 | go-version: '1.23' 20 | cache: true 21 | 22 | - name: Install dependencies 23 | run: go mod download 24 | 25 | - name: Run tests with coverage 26 | run: go test -coverprofile=coverage.txt -covermode=atomic ./... -p 1 -race -v 27 | 28 | - name: Upload coverage to Codecov 29 | uses: codecov/codecov-action@v5 30 | with: 31 | token: ${{ secrets.CODECOV_TOKEN }} 32 | slug: MegaGrindStone/mcp-web-ui 33 | 34 | - name: Run golangci-lint 35 | uses: golangci/golangci-lint-action@v3 36 | with: 37 | version: latest 38 | 39 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | config.yaml 2 | -------------------------------------------------------------------------------- /.golangci.yaml: -------------------------------------------------------------------------------- 1 | run: 2 | # Timeout for analysis, e.g. 30s, 5m. 3 | # Default: 1m 4 | timeout: 3m 5 | 6 | 7 | # This file contains only configs which differ from defaults. 8 | # All possible options can be found here https://github.com/golangci/golangci-lint/blob/master/.golangci.reference.yml 9 | linters-settings: 10 | cyclop: 11 | # The maximal code complexity to report. 12 | # Default: 10 13 | max-complexity: 30 14 | # The maximal average package complexity. 15 | # If it's higher than 0.0 (float) the check is enabled 16 | # Default: 0.0 17 | package-average: 15.0 18 | 19 | errcheck: 20 | # Report about not checking of errors in type assertions: `a := b.(MyStruct)`. 21 | # Such cases aren't reported by default. 22 | # Default: false 23 | check-type-assertions: true 24 | 25 | exhaustive: 26 | # Program elements to check for exhaustiveness. 27 | # Default: [ switch ] 28 | check: 29 | - switch 30 | - map 31 | 32 | gocognit: 33 | # Minimal code complexity to report. 34 | # Default: 30 (but we recommend 10-20) 35 | min-complexity: 50 36 | 37 | funlen: 38 | # Checks the number of lines in a function. 39 | # If lower than 0, disable the check. 40 | # Default: 60 41 | lines: 175 42 | # Checks the number of statements in a function. 43 | # If lower than 0, disable the check. 44 | # Default: 40 45 | statements: 100 46 | 47 | nestif: 48 | # Minimal complexity of if statements to report. 
49 | # Default: 5 50 | min-complexity: 8 51 | 52 | dupl: 53 | # Tokens count to trigger issue. 54 | # Default: 150 55 | threshold: 200 56 | 57 | gocritic: 58 | # Settings passed to gocritic. 59 | # The settings key is the name of a supported gocritic checker. 60 | # The list of supported checkers can be found at https://go-critic.github.io/overview. 61 | settings: 62 | captLocal: 63 | # Whether to restrict checker to params only. 64 | # Default: true 65 | paramsOnly: false 66 | underef: 67 | # Whether to skip (*x).method() calls where x is a pointer receiver. 68 | # Default: true 69 | skipRecvDeref: false 70 | 71 | gomodguard: 72 | blocked: 73 | # List of blocked modules. 74 | # Default: [] 75 | modules: 76 | - github.com/golang/protobuf: 77 | recommendations: 78 | - google.golang.org/protobuf 79 | reason: "see https://developers.google.com/protocol-buffers/docs/reference/go/faq#modules" 80 | - github.com/satori/go.uuid: 81 | recommendations: 82 | - github.com/google/uuid 83 | reason: "satori's package is not maintained" 84 | - github.com/gofrs/uuid: 85 | recommendations: 86 | - github.com/google/uuid 87 | reason: "gofrs' package is not a Go module" 88 | 89 | govet: 90 | # Enable all analyzers. 91 | # Default: false 92 | enable-all: true 93 | # Disable analyzers by name. 94 | # Run `go tool vet help` to see all analyzers. 95 | # Default: [] 96 | disable: 97 | - fieldalignment # too strict 98 | # Settings per analyzer. 99 | settings: 100 | shadow: 101 | # Whether to be strict about shadowing; can be noisy. 102 | # Default: false 103 | strict: true 104 | 105 | gosec: 106 | excludes: 107 | - G204 108 | 109 | nakedret: 110 | # Make an issue if func has more lines of code than this setting, and it has naked returns. 111 | # Default: 30 112 | max-func-lines: 0 113 | 114 | nolintlint: 115 | # Exclude following linters from requiring an explanation. 116 | # Default: [] 117 | allow-no-explanation: [ funlen, gocognit, lll ] 118 | # Enable to require an explanation of nonzero length after each nolint directive. 119 | # Default: false 120 | require-explanation: true 121 | # Enable to require nolint directives to mention the specific linter being suppressed. 122 | # Default: false 123 | require-specific: true 124 | 125 | rowserrcheck: 126 | # database/sql is always checked 127 | # Default: [] 128 | packages: 129 | - github.com/jmoiron/sqlx 130 | 131 | tenv: 132 | # The option `all` will run against whole test files (`_test.go`) regardless of method/function signatures. 133 | # Otherwise, only methods that take `*testing.T`, `*testing.B`, and `testing.TB` as arguments are checked.
134 | # Default: false 135 | all: true 136 | 137 | 138 | linters: 139 | disable-all: true 140 | enable: 141 | ## enabled by default 142 | - errcheck # checks for unchecked errors; these can be critical bugs in some cases 143 | - gosimple # specializes in simplifying code 144 | - govet # reports suspicious constructs, such as Printf calls whose arguments do not align with the format string 145 | - ineffassign # detects when assignments to existing variables are not used 146 | - staticcheck # is a go vet on steroids, applying a ton of static analysis checks 147 | - typecheck # like the front-end of a Go compiler, parses and type-checks Go code 148 | - unused # checks for unused constants, variables, functions and types 149 | ## disabled by default 150 | - asasalint # checks for pass []any as any in variadic func(...any) 151 | - asciicheck # checks that your code does not contain non-ASCII identifiers 152 | - bidichk # checks for dangerous unicode character sequences 153 | - bodyclose # checks whether HTTP response body is closed successfully 154 | - cyclop # checks function and package cyclomatic complexity 155 | - dupl # tool for code clone detection 156 | - durationcheck # checks for two durations multiplied together 157 | - errname # checks that sentinel errors are prefixed with Err and error types are suffixed with Error 158 | - errorlint # finds code that will cause problems with the error wrapping scheme introduced in Go 1.13 159 | - exhaustive # checks exhaustiveness of enum switch statements 160 | - funlen # tool for detection of long functions 161 | - gochecknoinits # checks that no init functions are present in Go code 162 | - gocognit # computes and checks the cognitive complexity of functions 163 | - goconst # finds repeated strings that could be replaced by a constant 164 | - gocritic # provides diagnostics that check for bugs, performance and style issues 165 | - gocyclo # computes and checks the cyclomatic complexity of functions 166 | - godot # checks if comments end in a period 167 | - goimports # in addition to fixing imports, goimports also formats your code in the same style as gofmt 168 | - gomoddirectives # manages the use of 'replace', 'retract', and 'exclude' directives in go.mod 169 | - gomodguard # allow and block lists linter for direct Go module dependencies.
This is different from depguard, where there are different block types, for example version constraints and module recommendations 170 | - goprintffuncname # checks that printf-like functions are named with f at the end 171 | - gosec # inspects source code for security problems 172 | - lll # reports long lines 173 | - loggercheck # checks key value pairs for common logger libraries (kitlog,klog,logr,zap) 174 | - makezero # finds slice declarations with non-zero initial length 175 | - nakedret # finds naked returns in functions greater than a specified function length 176 | - nestif # reports deeply nested if statements 177 | - nilerr # finds the code that returns nil even if it checks that the error is not nil 178 | - nilnil # checks that there is no simultaneous return of nil error and an invalid value 179 | - noctx # finds sending http request without context.Context 180 | - nolintlint # reports ill-formed or insufficient nolint directives 181 | - nonamedreturns # reports all named returns 182 | - nosprintfhostport # checks for misuse of Sprintf to construct a host with port in a URL 183 | - predeclared # finds code that shadows one of Go's predeclared identifiers 184 | - promlinter # checks Prometheus metrics naming via promlint 185 | - reassign # checks that package variables are not reassigned 186 | - revive # fast, configurable, extensible, flexible, and beautiful linter for Go, drop-in replacement of golint 187 | - stylecheck # is a replacement for golint 188 | - tenv # detects using os.Setenv instead of t.Setenv since Go1.17 189 | - testableexamples # checks if examples are testable (have an expected output) 190 | - tparallel # detects inappropriate usage of t.Parallel() method in your Go test code 191 | - unconvert # removes unnecessary type conversions 192 | - unparam # reports unused function parameters 193 | - usestdlibvars # detects the possibility to use variables/constants from the Go standard library 194 | - whitespace # detects leading and trailing whitespace 195 | 196 | issues: 197 | # Maximum count of issues with the same text. 198 | # Set to 0 to disable. 199 | # Default: 3 200 | max-same-issues: 50 201 | include: 202 | - EXC0012 # EXC0012 revive: Annoying issue about not having a comment. The rare codebase has such comments 203 | - EXC0014 # EXC0014 revive: Annoying issue about not having a comment. The rare codebase has such comments 204 | 205 | exclude-rules: 206 | - source: "^//\\s*go:generate\\s" 207 | linters: [ lll ] 208 | - source: "(noinspection|TODO)" 209 | linters: [ godot ] 210 | - source: "//noinspection" 211 | linters: [ gocritic ] 212 | - source: "^\\s+if _, ok := err\\.\\([^.]+\\.InternalError\\); ok {" 213 | linters: [ errorlint ] 214 | - text: 'shadow: declaration of "(err|ctx)" shadows declaration at' 215 | linters: [ govet ] 216 | - path: "_test\\.go" 217 | linters: 218 | - bodyclose 219 | - dupl 220 | - funlen 221 | - goconst 222 | - gosec 223 | - noctx 224 | - wrapcheck 225 | 226 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | ## [Unreleased] 9 | 10 | ### Changed 11 | 12 | - The user message now aligns to the left.
13 | 14 | ## [0.2.0] - 2025-04-17 15 | 16 | This update introduces comprehensive Model Context Protocol (MCP) functionality for enhanced LLM interaction management, including prompts and resource handling systems, while also adding conversation title refresh capabilities and fixing Anthropic API compatibility issues. 17 | 18 | ### Added 19 | 20 | - Add chat title refresh functionality allowing automated regeneration of conversation titles using the LLM 21 | - Implement Model Context Protocol (MCP) Prompts system for standardized LLM interactions and enhanced prompt management 22 | - Add testing interface for Model Context Protocol client components to improve development reliability 23 | - Implement resource handling system for Model Context Protocol to manage and integrate external content 24 | 25 | ### Fixed 26 | 27 | - Resolve Anthropic API compatibility issue by automatically trimming whitespace from stop sequence parameters 28 | 29 | ## [0.1.1] - 2025-04-05 30 | 31 | This update introduces configuration flexibility for OpenAI API endpoints, improves system logging capabilities, and fixes a navigation issue with non-existent chat conversations, collectively enhancing both the system's stability and customization options. 32 | 33 | ### Added 34 | 35 | - Add custom endpoint configuration option for OpenAI provider, allowing connection to alternative API servers 36 | 37 | ### Changed 38 | 39 | - Enhance Model Context Protocol (MCP) client with dedicated logger for improved diagnostics and troubleshooting 40 | 41 | ### Fixed 42 | 43 | - Resolve error handling when navigating to non-existent chat conversations in the UI 44 | 45 | ## [0.1.0] - 2025-03-03 46 | 47 | This release introduces a complete web-based chat interface for LLMs with support for multiple providers (Ollama, Anthropic, OpenAI, OpenRouter), persistent conversation storage, and extensive customization options. The addition of containerized deployment and structured logging improves the system's operability, while the ability to use external tools with Anthropic models extends the functional capabilities. 48 | 49 | ### Added 50 | 51 | - Add web-based user interface for chatting with Large Language Models (LLMs) 52 | - Integrate multiple LLM providers: Ollama, Anthropic, OpenAI and OpenRouter 53 | - Implement Bolt database for persistent storage of chat history and messages 54 | - Add configuration file system for managing LLM provider settings 55 | - Add customizable LLM parameters for fine-tuning model behavior 56 | - Implement dedicated LLM instance for generating conversation titles 57 | - Add ability to customize system prompts and conversation title generation 58 | - Enable tools interaction capability for Anthropic models 59 | - Display system objects (servers, tools, resources, prompts) in the user interface 60 | - Add structured logging for improved monitoring and troubleshooting 61 | - Include Dockerfile for containerized deployment 62 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.23-alpine AS build_base 2 | 3 | RUN apk add --no-cache git 4 | 5 | # Set the Current Working Directory inside the container 6 | WORKDIR /tmp/app 7 | 8 | # We want to populate the module cache based on the go.{mod,sum} files. 9 | COPY go.mod . 10 | COPY go.sum . 11 | 12 | RUN go mod download 13 | 14 | COPY . .
15 | 16 | # Build the Go app 17 | RUN go build ./cmd/server/ 18 | 19 | # Start fresh from a smaller image 20 | FROM alpine:latest 21 | COPY --from=build_base /tmp/app/server /server 22 | 23 | # Prepare the config directory 24 | ENV HOME=/root 25 | RUN mkdir $HOME/.config 26 | RUN mkdir $HOME/.config/mcpwebui 27 | 28 | # Run the server binary built in the previous stage 29 | CMD ["/server"] 30 | 31 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Gerard Adam 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MCP Web UI 2 | 3 | ![CI](https://github.com/MegaGrindStone/mcp-web-ui/actions/workflows/ci.yml/badge.svg) 4 | [![Go Report Card](https://goreportcard.com/badge/github.com/MegaGrindStone/mcp-web-ui)](https://goreportcard.com/report/github.com/MegaGrindStone/mcp-web-ui) 5 | [![codecov](https://codecov.io/gh/MegaGrindStone/mcp-web-ui/branch/main/graph/badge.svg)](https://codecov.io/gh/MegaGrindStone/mcp-web-ui) 6 | 7 | MCP Web UI is a web-based user interface that serves as a Host within the Model Context Protocol (MCP) architecture. It provides a powerful and user-friendly interface for interacting with Large Language Models (LLMs) while managing context aggregation and coordination between clients and servers.
8 | 9 | ## 🌟 Overview 10 | 11 | MCP Web UI is designed to simplify and enhance interactions with AI language models by providing: 12 | - A unified interface for multiple LLM providers 13 | - Real-time, streaming chat experiences 14 | - Flexible configuration and model management 15 | - Robust context handling using the MCP protocol 16 | 17 | ### Demo Video 18 | 19 | [![YouTube](http://i.ytimg.com/vi/DnC-z0CpRpM/hqdefault.jpg)](https://www.youtube.com/watch?v=DnC-z0CpRpM) 20 | 21 | ## 🚀 Features 22 | 23 | - 🤖 **Multi-Provider LLM Integration**: 24 | - Anthropic (Claude models) 25 | - OpenAI (GPT models) 26 | - Ollama (local models) 27 | - OpenRouter (multiple providers) 28 | - 💬 **Intuitive Chat Interface** 29 | - 🔄 **Real-time Response Streaming** via Server-Sent Events (SSE) 30 | - 🔧 **Dynamic Configuration Management** 31 | - 📊 **Advanced Context Aggregation** 32 | - 💾 **Persistent Chat History** using BoltDB 33 | - 🎯 **Flexible Model Selection** 34 | 35 | ## 📋 Prerequisites 36 | 37 | - Go 1.23+ 38 | - Docker (optional) 39 | - API keys for desired LLM providers 40 | 41 | ## 🛠 Installation 42 | 43 | ### Quick Start 44 | 45 | 1. Clone the repository: 46 | ```bash 47 | git clone https://github.com/MegaGrindStone/mcp-web-ui.git 48 | cd mcp-web-ui 49 | ``` 50 | 51 | 2. Configure your environment: 52 | ```bash 53 | mkdir -p $HOME/.config/mcpwebui 54 | cp config.example.yaml $HOME/.config/mcpwebui/config.yaml 55 | ``` 56 | 57 | 3. Set up API keys: 58 | ```bash 59 | export ANTHROPIC_API_KEY=your_anthropic_key 60 | export OPENAI_API_KEY=your_openai_key 61 | export OPENROUTER_API_KEY=your_openrouter_key 62 | ``` 63 | 64 | ### Running the Application 65 | 66 | #### Local Development 67 | ```bash 68 | go mod download 69 | go run ./cmd/server 70 | ``` 71 | 72 | #### Docker Deployment 73 | ```bash 74 | docker build -t mcp-web-ui . 75 | docker run -p 8080:8080 \ 76 | -v $HOME/.config/mcpwebui/config.yaml:/root/.config/mcpwebui/config.yaml \ 77 | -e ANTHROPIC_API_KEY \ 78 | -e OPENAI_API_KEY \ 79 | -e OPENROUTER_API_KEY \ 80 | mcp-web-ui 81 | ``` 82 | 83 | ## 🔧 Configuration 84 | 85 | The configuration file (`config.yaml`) provides comprehensive settings for customizing the MCP Web UI.
Here's a detailed breakdown: 86 | 87 | ### Server Configuration 88 | - `port`: The port on which the server will run (default: 8080) 89 | - `logLevel`: Logging verbosity (options: debug, info, warn, error; default: info) 90 | - `logMode`: Log output format (options: json, text; default: text) 91 | 92 | ### Prompt Configuration 93 | - `systemPrompt`: Default system prompt for the AI assistant 94 | - `titleGeneratorPrompt`: Prompt used to generate chat titles 95 | 96 | ### LLM (Language Model) Configuration 97 | The `llm` section supports multiple providers with provider-specific configurations: 98 | 99 | #### Common LLM Parameters 100 | - `provider`: Choose from: ollama, anthropic, openai, openrouter 101 | - `model`: Specific model name (e.g., 'claude-3-5-sonnet-20241022') 102 | - `parameters`: Fine-tune model behavior: 103 | - `temperature`: Randomness of responses (0.0-1.0) 104 | - `topP`: Nucleus sampling threshold 105 | - `topK`: Number of highest probability tokens to keep 106 | - `frequencyPenalty`: Reduce repetition of token sequences 107 | - `presencePenalty`: Encourage discussing new topics 108 | - `maxTokens`: Maximum response length 109 | - `stop`: Sequences to stop generation 110 | - And more provider-specific parameters 111 | 112 | #### Provider-Specific Configurations 113 | - **Ollama**: 114 | - `host`: Ollama server URL (default: http://localhost:11434) 115 | 116 | - **Anthropic**: 117 | - `apiKey`: Anthropic API key (can use ANTHROPIC_API_KEY env variable) 118 | - `maxTokens`: Maximum token limit 119 | - Note: Stop sequences containing only whitespace are ignored, and whitespace is trimmed from valid sequences as Anthropic doesn't support whitespace in stop sequences 120 | 121 | - **OpenAI**: 122 | - `apiKey`: OpenAI API key (can use OPENAI_API_KEY env variable) 123 | - `endpoint`: OpenAI API endpoint (default: https://api.openai.com/v1) 124 | - For using alternative OpenAI-compatible APIs, see [this discussion thread](https://github.com/MegaGrindStone/mcp-web-ui/discussions/7) 125 | 126 | - **OpenRouter**: 127 | - `apiKey`: OpenRouter API key (can use OPENROUTER_API_KEY env variable) 128 | 129 | ### Title Generator Configuration 130 | The `genTitleLLM` section allows separate configuration for title generation, defaulting to the main LLM if not specified. 131 | 132 | ### MCP Server Configurations 133 | - `mcpSSEServers`: Configure Server-Sent Events (SSE) servers 134 | - `url`: SSE server URL 135 | - `maxPayloadSize`: Maximum payload size 136 | 137 | - `mcpStdIOServers`: Configure Standard Input/Output servers 138 | - `command`: Command to run server 139 | - `args`: Arguments for the server command 140 | 141 | #### Example MCP Server Configurations 142 | 143 | **SSE Server Example:** 144 | ```yaml 145 | mcpSSEServers: 146 | filesystem: 147 | url: https://yoursseserver.com 148 | maxPayloadSize: 1048576 # 1MB 149 | ``` 150 | 151 | **StdIO Server Examples:** 152 | 153 | 1. Using the [official filesystem MCP server](https://github.com/modelcontextprotocol/servers/tree/main/src/filesystem): 154 | ```yaml 155 | mcpStdIOServers: 156 | filesystem: 157 | command: npx 158 | args: 159 | - -y 160 | - "@modelcontextprotocol/server-filesystem" 161 | - "/path/to/your/files" 162 | ``` 163 | This example can be used directly as the official filesystem mcp server is an executable package that can be run with npx. Just update the path to point to your desired directory. 164 | 165 | 2. 
Using [go-mcp filesystem MCP server](https://github.com/MegaGrindStone/go-mcp/tree/main/servers/filesystem): 166 | ```yaml 167 | mcpStdIOServers: 168 | filesystem: 169 | command: go 170 | args: 171 | - run 172 | - github.com/your_username/your_app # Replace with your app 173 | - -path 174 | - "/data/mcp/filesystem" # Path to expose to MCP clients 175 | ``` 176 | For this example, you'll need to create a new Go application that imports the `github.com/MegaGrindStone/go-mcp/servers/filesystem` package. The flag naming (like `-path` in this example) is completely customizable based on how you structure your own application - it doesn't have to be called "path". [This example](https://github.com/MegaGrindStone/go-mcp/blob/main/example/filesystem/main.go) is merely a starting point showing one possible implementation where a flag is used to specify which directory to expose. You're free to design your own application structure and command-line interface according to your specific needs. 177 | 178 | ### Example Configuration Snippet 179 | ```yaml 180 | port: 8080 181 | logLevel: info 182 | systemPrompt: You are a helpful assistant. 183 | 184 | llm: 185 | provider: anthropic 186 | model: claude-3-5-sonnet-20241022 187 | maxTokens: 1000 188 | parameters: 189 | temperature: 0.7 190 | 191 | genTitleLLM: 192 | provider: openai 193 | model: gpt-3.5-turbo 194 | ``` 195 | 196 | ## 🏗 Project Structure 197 | 198 | - `cmd/`: Application entry point 199 | - `internal/handlers/`: Web request handlers 200 | - `internal/models/`: Data models 201 | - `internal/services/`: LLM provider integrations 202 | - `static/`: Static assets (CSS, JavaScript) 203 | - `templates/`: HTML templates 204 | 205 | ## 🤝 Contributing 206 | 207 | 1. Fork the repository 208 | 2. Create a feature branch 209 | 3. Commit your changes 210 | 4. Push and create a Pull Request 211 | 212 | ## 📄 License 213 | 214 | MIT License 215 | -------------------------------------------------------------------------------- /cmd/server/config.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log/slog" 6 | "os" 7 | 8 | "github.com/MegaGrindStone/mcp-web-ui/internal/handlers" 9 | "github.com/MegaGrindStone/mcp-web-ui/internal/services" 10 | "gopkg.in/yaml.v3" 11 | ) 12 | 13 | type llmConfig interface { 14 | llm(string, *slog.Logger) (handlers.LLM, error) 15 | titleGen(string, *slog.Logger) (handlers.TitleGenerator, error) 16 | } 17 | 18 | // BaseLLMConfig contains the common fields for all LLM configurations.
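// For illustration only (placeholder values, not a tested configuration), a
// YAML fragment such as
//
//	provider: ollama
//	model: some-model
//	parameters:
//	  temperature: 0.7
//
// decodes into these fields through the inline yaml tags; provider-specific
// keys like host or apiKey are picked up by the wrapper structs that embed
// this type.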
19 | type BaseLLMConfig struct { 20 | Provider string `yaml:"provider"` 21 | Model string `yaml:"model"` 22 | Parameters services.LLMParameters `yaml:"parameters"` 23 | } 24 | 25 | type config struct { 26 | Port string `yaml:"port"` 27 | LogLevel string `yaml:"logLevel"` 28 | LogMode string `yaml:"logMode"` 29 | SystemPrompt string `yaml:"systemPrompt"` 30 | TitleGeneratorPrompt string `yaml:"titleGeneratorPrompt"` 31 | LLM llmConfig `yaml:"llm"` 32 | GenTitleLLM llmConfig `yaml:"genTitleLLM"` 33 | MCPSSEServers map[string]mcpSSEServerConfig `yaml:"mcpSSEServers"` 34 | MCPStdIOServers map[string]mcpStdIOServerConfig `yaml:"mcpStdIOServers"` 35 | } 36 | 37 | type ollamaConfig struct { 38 | BaseLLMConfig `yaml:",inline"` 39 | Host string `yaml:"host"` 40 | } 41 | 42 | type anthropicConfig struct { 43 | BaseLLMConfig `yaml:",inline"` 44 | APIKey string `yaml:"apiKey"` 45 | MaxTokens int `yaml:"maxTokens"` 46 | } 47 | 48 | type openaiConfig struct { 49 | BaseLLMConfig `yaml:",inline"` 50 | APIKey string `yaml:"apiKey"` 51 | Endpoint string `yaml:"endpoint"` 52 | } 53 | 54 | type openrouterConfig struct { 55 | BaseLLMConfig `yaml:",inline"` 56 | APIKey string `yaml:"apiKey"` 57 | } 58 | 59 | type mcpSSEServerConfig struct { 60 | URL string `yaml:"url"` 61 | MaxPayloadSize int `yaml:"maxPayloadSize"` 62 | } 63 | 64 | type mcpStdIOServerConfig struct { 65 | Command string `yaml:"command"` 66 | Args []string `yaml:"args"` 67 | } 68 | 69 | func (c *config) UnmarshalYAML(value *yaml.Node) error { 70 | var rawConfig struct { 71 | Port string `yaml:"port"` 72 | LogLevel string `yaml:"logLevel"` 73 | LogMode string `yaml:"logMode"` 74 | SystemPrompt string `yaml:"systemPrompt"` 75 | TitleGeneratorPrompt string `yaml:"titleGeneratorPrompt"` 76 | LLM map[string]any `yaml:"llm"` 77 | GenTitleLLM map[string]any `yaml:"genTitleLLM"` 78 | MCPSSEServers map[string]mcpSSEServerConfig `yaml:"mcpSSEServers"` 79 | MCPStdIOServers map[string]mcpStdIOServerConfig `yaml:"mcpStdIOServers"` 80 | } 81 | 82 | if err := value.Decode(&rawConfig); err != nil { 83 | return err 84 | } 85 | 86 | c.Port = rawConfig.Port 87 | c.LogLevel = rawConfig.LogLevel 88 | c.LogMode = rawConfig.LogMode 89 | c.SystemPrompt = rawConfig.SystemPrompt 90 | c.TitleGeneratorPrompt = rawConfig.TitleGeneratorPrompt 91 | 92 | llmProvider, ok := rawConfig.LLM["provider"].(string) 93 | if !ok { 94 | return fmt.Errorf("llm provider is required") 95 | } 96 | 97 | llmRawYAML, err := yaml.Marshal(rawConfig.LLM) 98 | if err != nil { 99 | return err 100 | } 101 | genTitleLLMRawYAML, err := yaml.Marshal(rawConfig.GenTitleLLM) 102 | if err != nil { 103 | return err 104 | } 105 | 106 | var llm llmConfig 107 | switch llmProvider { 108 | case "ollama": 109 | llm = &ollamaConfig{} 110 | case "anthropic": 111 | llm = &anthropicConfig{} 112 | case "openai": 113 | llm = &openaiConfig{} 114 | case "openrouter": 115 | llm = &openrouterConfig{} 116 | default: 117 | return fmt.Errorf("unknown llm provider: %s", llmProvider) 118 | } 119 | 120 | if err := yaml.Unmarshal(llmRawYAML, llm); err != nil { 121 | return err 122 | } 123 | 124 | var genTitleLLM llmConfig 125 | useSameLLM := true 126 | genTitleLLM = llm 127 | genTitleLLMProvider, ok := rawConfig.GenTitleLLM["provider"].(string) 128 | if ok { 129 | useSameLLM = false 130 | switch genTitleLLMProvider { 131 | case "ollama": 132 | genTitleLLM = &ollamaConfig{} 133 | case "anthropic": 134 | genTitleLLM = &anthropicConfig{} 135 | case "openai": 136 | genTitleLLM = &openaiConfig{} 137 | case "openrouter": 138 | 
genTitleLLM = &openrouterConfig{} 139 | } 140 | } 141 | 142 | if !useSameLLM { 143 | if err := yaml.Unmarshal(genTitleLLMRawYAML, genTitleLLM); err != nil { 144 | return err 145 | } 146 | } 147 | 148 | c.LLM = llm 149 | c.GenTitleLLM = genTitleLLM 150 | c.MCPSSEServers = rawConfig.MCPSSEServers 151 | c.MCPStdIOServers = rawConfig.MCPStdIOServers 152 | 153 | return nil 154 | } 155 | 156 | func (o ollamaConfig) newOllama(systemPrompt string, logger *slog.Logger) (services.Ollama, error) { 157 | if o.Model == "" { 158 | return services.Ollama{}, fmt.Errorf("model is required") 159 | } 160 | 161 | host := o.Host 162 | if host == "" { 163 | host = os.Getenv("OLLAMA_HOST") 164 | } 165 | return services.NewOllama(host, o.Model, systemPrompt, o.Parameters, logger), nil 166 | } 167 | 168 | func (o ollamaConfig) llm(systemPrompt string, logger *slog.Logger) (handlers.LLM, error) { 169 | return o.newOllama(systemPrompt, logger) 170 | } 171 | 172 | func (o ollamaConfig) titleGen(systemPrompt string, logger *slog.Logger) (handlers.TitleGenerator, error) { 173 | return o.newOllama(systemPrompt, logger) 174 | } 175 | 176 | func (a anthropicConfig) newAnthropic(systemPrompt string, _ *slog.Logger) (services.Anthropic, error) { 177 | if a.Model == "" { 178 | return services.Anthropic{}, fmt.Errorf("model is required") 179 | } 180 | if a.MaxTokens == 0 { 181 | return services.Anthropic{}, fmt.Errorf("maxTokens is required") 182 | } 183 | 184 | apiKey := a.APIKey 185 | if apiKey == "" { 186 | apiKey = os.Getenv("ANTHROPIC_API_KEY") 187 | } 188 | 189 | return services.NewAnthropic(apiKey, a.Model, systemPrompt, a.MaxTokens, a.Parameters), nil 190 | } 191 | 192 | func (a anthropicConfig) llm(systemPrompt string, logger *slog.Logger) (handlers.LLM, error) { 193 | return a.newAnthropic(systemPrompt, logger) 194 | } 195 | 196 | func (a anthropicConfig) titleGen(systemPrompt string, logger *slog.Logger) (handlers.TitleGenerator, error) { 197 | return a.newAnthropic(systemPrompt, logger) 198 | } 199 | 200 | func (o openaiConfig) newOpenAI(systemPrompt string, logger *slog.Logger) (services.OpenAI, error) { 201 | if o.Model == "" { 202 | return services.OpenAI{}, fmt.Errorf("model is required") 203 | } 204 | 205 | apiKey := o.APIKey 206 | if apiKey == "" { 207 | apiKey = os.Getenv("OPENAI_API_KEY") 208 | } 209 | return services.NewOpenAI(apiKey, o.Model, systemPrompt, o.Endpoint, o.Parameters, logger), nil 210 | } 211 | 212 | func (o openaiConfig) llm(systemPrompt string, logger *slog.Logger) (handlers.LLM, error) { 213 | return o.newOpenAI(systemPrompt, logger) 214 | } 215 | 216 | func (o openaiConfig) titleGen(systemPrompt string, logger *slog.Logger) (handlers.TitleGenerator, error) { 217 | return o.newOpenAI(systemPrompt, logger) 218 | } 219 | 220 | func (o openrouterConfig) newOpenRouter(systemPrompt string, logger *slog.Logger) (services.OpenRouter, error) { 221 | if o.Model == "" { 222 | return services.OpenRouter{}, fmt.Errorf("model is required") 223 | } 224 | 225 | apiKey := o.APIKey 226 | if apiKey == "" { 227 | apiKey = os.Getenv("OPENROUTER_API_KEY") 228 | } 229 | return services.NewOpenRouter(apiKey, o.Model, systemPrompt, o.Parameters, logger), nil 230 | } 231 | 232 | func (o openrouterConfig) llm(systemPrompt string, logger *slog.Logger) (handlers.LLM, error) { 233 | return o.newOpenRouter(systemPrompt, logger) 234 | } 235 | 236 | func (o openrouterConfig) titleGen(systemPrompt string, logger *slog.Logger) (handlers.TitleGenerator, error) { 237 | return o.newOpenRouter(systemPrompt, logger) 238 | }
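// Illustrative sketch (placeholder values): UnmarshalYAML above selects the
// concrete llmConfig implementation from the "provider" key, so a config like
//
//	llm:
//	  provider: anthropic
//	  model: claude-3-5-sonnet-20241022
//	  maxTokens: 1000
//	genTitleLLM:
//	  provider: ollama
//	  model: some-local-model
//
// yields an anthropicConfig for chat and an ollamaConfig for title generation,
// while omitting genTitleLLM (or its provider key) reuses the main LLM for both.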
239 | -------------------------------------------------------------------------------- /cmd/server/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "context" 6 | "fmt" 7 | "io/fs" 8 | "log" 9 | "log/slog" 10 | "net/http" 11 | "os" 12 | "os/exec" 13 | "os/signal" 14 | "path/filepath" 15 | "syscall" 16 | "time" 17 | 18 | "github.com/MegaGrindStone/go-mcp" 19 | mcpwebui "github.com/MegaGrindStone/mcp-web-ui" 20 | "github.com/MegaGrindStone/mcp-web-ui/internal/handlers" 21 | "github.com/MegaGrindStone/mcp-web-ui/internal/services" 22 | "gopkg.in/yaml.v3" 23 | ) 24 | 25 | func main() { 26 | cfg, cfgDir := loadConfig() 27 | 28 | logger, logFile := initLogger(cfg, cfgDir) 29 | defer logFile.Close() 30 | 31 | sysPrompt := cfg.SystemPrompt 32 | if sysPrompt == "" { 33 | sysPrompt = "You are a helpful assistant." 34 | } 35 | llm, err := cfg.LLM.llm(sysPrompt, logger) 36 | if err != nil { 37 | panic(err) 38 | } 39 | titleGenPrompt := cfg.TitleGeneratorPrompt 40 | if titleGenPrompt == "" { 41 | titleGenPrompt = "Generate a title for this chat with only one sentence with maximum 5 words." 42 | } 43 | titleGen, err := cfg.GenTitleLLM.titleGen(titleGenPrompt, logger) 44 | if err != nil { 45 | panic(err) 46 | } 47 | 48 | dbPath := filepath.Join(cfgDir, "/mcpwebui/store.db") 49 | boltDB, err := services.NewBoltDB(dbPath) 50 | if err != nil { 51 | panic(err) 52 | } 53 | 54 | mcpClientInfo := mcp.Info{ 55 | Name: "mcp-web-ui", 56 | Version: "0.1.0", 57 | } 58 | 59 | mcpClients, stdIOCmds := populateMCPClients(cfg, mcpClientInfo, logger) 60 | // Collect only the clients that connect successfully, so no nil entries reach the handlers. 61 | mcpClis := make([]handlers.MCPClient, 0, len(mcpClients)) 62 | for i, cli := range mcpClients { 63 | logger.Info("Connecting to MCP server", slog.Int("index", i)) 64 | 65 | connectCtx, connectCancel := context.WithTimeout(context.Background(), 30*time.Second) 66 | 67 | if err := cli.Connect(connectCtx); err != nil { 68 | connectCancel() 69 | logger.Error("Error connecting to MCP server", slog.Int("index", i), slog.String("err", err.Error())) 70 | continue 71 | } 72 | connectCancel() 73 | 74 | mcpClis = append(mcpClis, cli) 75 | 76 | logger.Info("Connected to MCP server", slog.String("name", mcpClients[i].ServerInfo().Name)) 77 | } 78 | 79 | m, err := handlers.NewMain(llm, titleGen, boltDB, mcpClis, logger) 80 | if err != nil { 81 | panic(err) 82 | } 83 | 84 | // Serve static files 85 | staticFS, err := fs.Sub(mcpwebui.StaticFS, "static") 86 | if err != nil { 87 | panic(err) 88 | } 89 | fileServer := http.FileServer(http.FS(staticFS)) 90 | 91 | // Create custom mux 92 | mux := http.NewServeMux() 93 | mux.Handle("/static/", http.StripPrefix("/static/", fileServer)) 94 | mux.HandleFunc("/", m.HandleHome) 95 | mux.HandleFunc("/chats", m.HandleChats) 96 | mux.HandleFunc("/refresh-title", m.HandleRefreshTitle) 97 | mux.HandleFunc("/sse/messages", m.HandleSSE) 98 | mux.HandleFunc("/sse/chats", m.HandleSSE) 99 | 100 | // Create custom server 101 | srv := &http.Server{ 102 | Addr: ":" + cfg.Port, 103 | Handler: mux, 104 | ReadHeaderTimeout: 5 * time.Second, 105 | } 106 | 107 | srv.RegisterOnShutdown(func() { 108 | for _, cli := range mcpClients { 109 | disconnectCtx, disconnectCancel := context.WithTimeout(context.Background(), 30*time.Second) 110 | if err := cli.Disconnect(disconnectCtx); err != nil { 111 | logger.Error("Failed to disconnect from MCP server", slog.String("err", err.Error())) 112 | } 113 | disconnectCancel() 114 | } 115 | 116 | for _, cmd := range stdIOCmds { 117 | if err :=
cmd.Process.Kill(); err != nil { 118 | logger.Error("Failed to kill stdIO command", slog.String("err", err.Error())) 119 | } 120 | _ = cmd.Wait() 121 | } 122 | 123 | if err := m.Shutdown(context.Background()); err != nil { 124 | logger.Error("Failed to shut down SSE server", slog.String("err", err.Error())) 125 | } 126 | }) 127 | 128 | // Channel to listen for errors coming from the listener 129 | serverErrors := make(chan error, 1) 130 | 131 | // Start server in goroutine 132 | go func() { 133 | logger.Info("Server starting on", slog.String("port", cfg.Port)) 134 | serverErrors <- srv.ListenAndServe() 135 | }() 136 | 137 | // Channel to listen for interrupt/terminate signals 138 | shutdown := make(chan os.Signal, 1) 139 | signal.Notify(shutdown, os.Interrupt, syscall.SIGTERM) 140 | 141 | // Blocking select waiting for either interrupt or server error 142 | select { 143 | case err := <-serverErrors: 144 | logger.Error("Server error", slog.String("err", err.Error())) 145 | 146 | case sig := <-shutdown: 147 | logger.Info("Start shutdown", slog.String("signal", sig.String())) 148 | 149 | // Create context with timeout for shutdown 150 | ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) 151 | defer cancel() 152 | 153 | // Gracefully shutdown the server 154 | if err := srv.Shutdown(ctx); err != nil { 155 | logger.Error("Graceful shutdown failed", slog.String("err", err.Error())) 156 | logger.Info("Forcing server close") 157 | if err := srv.Close(); err != nil { 158 | logger.Error("Failed to force server close", slog.String("err", err.Error())) 159 | } 160 | } 161 | } 162 | } 163 | 164 | func loadConfig() (config, string) { 165 | cfgDir, err := os.UserConfigDir() 166 | if err != nil { 167 | log.Fatal(fmt.Errorf("error getting user config dir: %w", err)) 168 | } 169 | cfgPath := filepath.Join(cfgDir, "/mcpwebui") 170 | if err := os.MkdirAll(cfgPath, 0755); err != nil { 171 | log.Fatal(fmt.Errorf("error creating config directory: %w", err)) 172 | } 173 | 174 | cfgFilePath := filepath.Join(cfgDir, "/mcpwebui/config.yaml") 175 | cfgFile, err := os.Open(cfgFilePath) 176 | if err != nil { 177 | log.Fatal(fmt.Errorf("error opening config file: %w", err)) 178 | } 179 | defer cfgFile.Close() 180 | 181 | cfg := config{} 182 | if err := yaml.NewDecoder(cfgFile).Decode(&cfg); err != nil { 183 | panic(fmt.Errorf("error decoding config file: %w", err)) 184 | } 185 | return cfg, cfgDir 186 | } 187 | 188 | func initLogger(cfg config, cfgDir string) (*slog.Logger, *os.File) { 189 | logLevel := new(slog.LevelVar) 190 | switch cfg.LogLevel { 191 | case "debug": 192 | logLevel.Set(slog.LevelDebug) 193 | case "info": 194 | logLevel.Set(slog.LevelInfo) 195 | case "warn": 196 | logLevel.Set(slog.LevelWarn) 197 | case "error": 198 | logLevel.Set(slog.LevelError) 199 | default: 200 | log.Printf("Invalid log level '%s', defaulting to 'info'", cfg.LogLevel) 201 | logLevel.Set(slog.LevelInfo) 202 | } 203 | 204 | logFile, err := os.Create(filepath.Join(cfgDir, "mcpwebui/mcpwebui.log")) 205 | if err != nil { 206 | log.Fatalf("Error creating log file: %v", err) 207 | } 208 | 209 | var lg *slog.Logger 210 | switch cfg.LogMode { 211 | case "json": 212 | lg = slog.New(slog.NewJSONHandler(logFile, &slog.HandlerOptions{Level: logLevel})) 213 | default: 214 | lg = slog.New(slog.NewTextHandler(logFile, &slog.HandlerOptions{Level: logLevel})) 215 | } 216 | 217 | // llmJSON, err := json.Marshal(cfg.LLM) 218 | // if err != nil { 219 | // log.Fatalf("Error marshaling LLM config: %v", err) 220 | // } 221 | // 222
| // titleGenJSON, err := json.Marshal(cfg.GenTitleLLM) 223 | // if err != nil { 224 | // log.Fatalf("Error marshaling title generator config: %v", err) 225 | // } 226 | 227 | logger := lg.With( 228 | slog.Group("config", 229 | slog.String("port", cfg.Port), 230 | slog.String("logLevel", cfg.LogLevel), 231 | slog.String("logMode", cfg.LogMode), 232 | 233 | // These two configurations can be very long, and could potentially fill up the log file. 234 | // slog.String("systemPrompt", cfg.SystemPrompt), 235 | // slog.String("titleGeneratorPrompt", cfg.TitleGeneratorPrompt), 236 | 237 | // These two configurations would leak the LLM credentials in the log file. 238 | // slog.Any("llm", llmJSON), 239 | // slog.Any("genTitleLLM", titleGenJSON), 240 | 241 | slog.Any("mcpSSEServers", cfg.MCPSSEServers), 242 | slog.Any("mcpStdIOServers", cfg.MCPStdIOServers), 243 | ), 244 | ) 245 | 246 | return logger, logFile 247 | } 248 | 249 | func populateMCPClients(cfg config, mcpClientInfo mcp.Info, logger *slog.Logger) ([]*mcp.Client, []*exec.Cmd) { 250 | var mcpClients []*mcp.Client 251 | 252 | for _, mcpSSEServerConfig := range cfg.MCPSSEServers { 253 | sseClient := mcp.NewSSEClient(mcpSSEServerConfig.URL, nil, 254 | mcp.WithSSEClientMaxPayloadSize(mcpSSEServerConfig.MaxPayloadSize), mcp.WithSSEClientLogger(logger)) 255 | cli := mcp.NewClient(mcpClientInfo, sseClient, mcp.WithClientLogger(logger)) 256 | mcpClients = append(mcpClients, cli) 257 | } 258 | 259 | cmds := make([]*exec.Cmd, 0, len(cfg.MCPStdIOServers)) 260 | for _, mcpStdIOServerConfig := range cfg.MCPStdIOServers { 261 | cmd := exec.Command(mcpStdIOServerConfig.Command, mcpStdIOServerConfig.Args...) 262 | cmds = append(cmds, cmd) 263 | 264 | in, err := cmd.StdinPipe() 265 | if err != nil { 266 | panic(err) 267 | } 268 | out, err := cmd.StdoutPipe() 269 | if err != nil { 270 | panic(err) 271 | } 272 | stderr, err := cmd.StderrPipe() 273 | if err != nil { 274 | panic(err) 275 | } 276 | 277 | if err := cmd.Start(); err != nil { 278 | panic(err) 279 | } 280 | 281 | // Listen for stderr output and log it 282 | go func() { 283 | errScanner := bufio.NewScanner(stderr) 284 | for errScanner.Scan() { 285 | logger.Error("StdIO error", slog.String("err", errScanner.Text())) 286 | } 287 | }() 288 | 289 | cliStdIO := mcp.NewStdIO(out, in, mcp.WithStdIOLogger(logger)) 290 | 291 | cli := mcp.NewClient(mcpClientInfo, cliStdIO, mcp.WithClientLogger(logger)) 292 | mcpClients = append(mcpClients, cli) 293 | } 294 | 295 | return mcpClients, cmds 296 | } 297 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | ignore: 2 | - "cmd" 3 | - "internal/services" 4 | - "static" 5 | - "templates" 6 | -------------------------------------------------------------------------------- /config.example.yaml: -------------------------------------------------------------------------------- 1 | port: 8080 2 | logLevel: info # Choose one of the following: debug, info, warn, error, default to info 3 | logMode: text # Choose one of the following: json, text, default to text 4 | systemPrompt: You are a helpful assistant. 5 | titleGeneratorPrompt: Generate a title for this chat with only one sentence with maximum 5 words. 6 | # Choose one of the following LLM providers: ollama, anthropic, openai, openrouter 7 | llm: 8 | provider: ollama 9 | model: claude-3-5-sonnet-20241022 10 | parameters: # This is optional, and only used by some LLM providers.
11 | temperature: 0.5 12 | topP: 0.9 13 | topK: 40 14 | frequencyPenalty: 0.0 15 | presencePenalty: 0.0 16 | repetitionPenalty: 1.0 17 | minP: 0.0 18 | topA: 0.0 19 | seed: 0 20 | maxTokens: 1000 21 | logitBias: 22 | dummyTokenID: 0.0 23 | logprobs: true 24 | topLogprobs: 10 25 | stop: 26 | - "\n" 27 | - "\n\n" 28 | includeReasoning: true 29 | # ollama 30 | host: http://localhost:11434 # Default to environment variable OLLAMA_HOST 31 | # anthropic 32 | apiKey: YOUR_API_KEY # Default to environment variable ANTHROPIC_API_KEY 33 | maxTokens: 1000 34 | # openai 35 | apiKey: YOUR_API_KEY # Default to environment variable OPENAI_API_KEY 36 | endpoint: "" # Default to "https://api.openai.com/v1" 37 | # openrouter 38 | apiKey: YOUR_API_KEY # Default to environment variable OPENROUTER_API_KEY 39 | genTitleLLM: # Default to the same LLM as the main LLM 40 | provider: anthropic 41 | model: claude-3-5-sonnet-20241022 42 | apiKey: YOUR_API_KEY # Default to environment variable ANTHROPIC_API_KEY 43 | maxTokens: 1000 44 | parameters: # This is optional, and only used by some LLM providers. 45 | temperature: 0.5 46 | topP: 0.9 47 | topK: 40 48 | frequencyPenalty: 0.0 49 | presencePenalty: 0.0 50 | repetitionPenalty: 1.0 51 | minP: 0.0 52 | topA: 0.0 53 | seed: 0 54 | maxTokens: 1000 55 | logprobs: true 56 | topLogprobs: 10 57 | stop: 58 | - "\n" 59 | - "\n\n" 60 | # Note: For Anthropic provider, stop sequences containing only whitespace are ignored, 61 | # and whitespace is trimmed from valid sequences as Anthropic doesn't support whitespace 62 | # in stop sequences 63 | includeReasoning: true 64 | mcpSSEServers: 65 | filesystem: 66 | url: https://yoursseserver.com 67 | maxPayloadSize: 1048576 # 1MB 68 | mcpStdIOServers: 69 | filesystem: 70 | command: npx 71 | args: 72 | - -y 73 | - "@modelcontextprotocol/server-filesystem" 74 | - "/home/gs/repository/go-mcp" 75 | -------------------------------------------------------------------------------- /embed.go: -------------------------------------------------------------------------------- 1 | package mcpwebui 2 | 3 | import "embed" 4 | 5 | // TemplateFS contains the embedded HTML templates used for rendering the web interface. These templates 6 | // are organized in a directory structure that separates layouts, pages, and partial views. 7 | // 8 | //go:embed templates/* 9 | var TemplateFS embed.FS 10 | 11 | // StaticFS contains the embedded static assets such as JavaScript, CSS, and image files required for 12 | // the web interface's functionality and styling. 
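// A typical consumer strips the embedded prefix first, as cmd/server/main.go
// does via fs.Sub(mcpwebui.StaticFS, "static") before serving the result with
// http.FileServer.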
13 | // 14 | //go:embed static/* 15 | var StaticFS embed.FS 16 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/MegaGrindStone/mcp-web-ui 2 | 3 | go 1.23.4 4 | 5 | require ( 6 | github.com/google/uuid v1.6.0 7 | github.com/ollama/ollama v0.5.7 8 | github.com/tmaxmax/go-sse v0.10.0 9 | ) 10 | 11 | require ( 12 | github.com/MegaGrindStone/go-mcp v0.6.2 13 | github.com/sashabaranov/go-openai v1.36.1 14 | github.com/yuin/goldmark v1.7.8 15 | github.com/yuin/goldmark-highlighting v0.0.0-20220208100518-594be1970594 16 | go.etcd.io/bbolt v1.3.11 17 | gopkg.in/yaml.v3 v3.0.1 18 | ) 19 | 20 | require ( 21 | github.com/alecthomas/chroma v0.10.0 // indirect 22 | github.com/dlclark/regexp2 v1.7.0 // indirect 23 | golang.org/x/sys v0.28.0 // indirect 24 | ) 25 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/MegaGrindStone/go-mcp v0.6.2-0.20250415040504-b9c7bb4e9f64 h1:7edutpgG1hxTVK1PJLLClUtC86wjeNhmtoXN2fbnbBQ= 2 | github.com/MegaGrindStone/go-mcp v0.6.2-0.20250415040504-b9c7bb4e9f64/go.mod h1:Lc+AiPnsHAF/U9acWMilgzKg4hdkzPpymscNrOysMHM= 3 | github.com/MegaGrindStone/go-mcp v0.6.2-0.20250415132835-0299b9b30e9b h1:S7AqQUMkSIkbCwRlEmg9yUUh1Skzt901y74iYFSNm+4= 4 | github.com/MegaGrindStone/go-mcp v0.6.2-0.20250415132835-0299b9b30e9b/go.mod h1:Lc+AiPnsHAF/U9acWMilgzKg4hdkzPpymscNrOysMHM= 5 | github.com/MegaGrindStone/go-mcp v0.6.2 h1:LbeJ859c1xGZlLLtv1LrVKuyRIOGTOf79klUzal0J5c= 6 | github.com/MegaGrindStone/go-mcp v0.6.2/go.mod h1:Lc+AiPnsHAF/U9acWMilgzKg4hdkzPpymscNrOysMHM= 7 | github.com/alecthomas/chroma v0.10.0 h1:7XDcGkCQopCNKjZHfYrNLraA+M7e0fMiJ/Mfikbfjek= 8 | github.com/alecthomas/chroma v0.10.0/go.mod h1:jtJATyUxlIORhUOFNA9NZDWGAQ8wpxQQqNSB4rjA/1s= 9 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 10 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 11 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 12 | github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= 13 | github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo= 14 | github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= 15 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 16 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 17 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 18 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 19 | github.com/ollama/ollama v0.5.7 h1:YFxF3UYc3TbOH/j/OhJoxl4LOvPQRcuKUdI5txs/pkc= 20 | github.com/ollama/ollama v0.5.7/go.mod h1:bBFyCnwY8C8zCas/t9ParGkmKSSM6H31fV/37K9kifo= 21 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 22 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 23 | github.com/sashabaranov/go-openai v1.36.1 h1:EVfRXwIlW2rUzpx6vR+aeIKCK/xylSrVYAx1TMTSX3g= 24 | github.com/sashabaranov/go-openai v1.36.1/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg= 25 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 26 | github.com/stretchr/testify v1.7.0/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 27 | github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 28 | github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 29 | github.com/tmaxmax/go-sse v0.10.0 h1:j9F93WB4Hxt8wUf6oGffMm4dutALvUPoDDxfuDQOSqA= 30 | github.com/tmaxmax/go-sse v0.10.0/go.mod h1:u/2kZQR1tyngo1lKaNCj1mJmhXGZWS1Zs5yiSOD+Eg8= 31 | github.com/yuin/goldmark v1.4.5/go.mod h1:rmuwmfZ0+bvzB24eSC//bk1R1Zp3hM0OXYv/G2LIilg= 32 | github.com/yuin/goldmark v1.7.8 h1:iERMLn0/QJeHFhxSt3p6PeN9mGnvIKSpG9YYorDMnic= 33 | github.com/yuin/goldmark v1.7.8/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E= 34 | github.com/yuin/goldmark-highlighting v0.0.0-20220208100518-594be1970594 h1:yHfZyN55+5dp1wG7wDKv8HQ044moxkyGq12KFFMFDxg= 35 | github.com/yuin/goldmark-highlighting v0.0.0-20220208100518-594be1970594/go.mod h1:U9ihbh+1ZN7fR5Se3daSPoz1CGF9IYtSvWwVQtnzGHU= 36 | go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= 37 | go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= 38 | golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= 39 | golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= 40 | golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= 41 | golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 42 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 43 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 44 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 45 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 46 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 47 | -------------------------------------------------------------------------------- /internal/handlers/chat.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "log/slog" 8 | "net/http" 9 | "strings" 10 | "time" 11 | 12 | "github.com/MegaGrindStone/go-mcp" 13 | "github.com/MegaGrindStone/mcp-web-ui/internal/models" 14 | "github.com/google/uuid" 15 | "github.com/tmaxmax/go-sse" 16 | ) 17 | 18 | type chat struct { 19 | ID string 20 | Title string 21 | 22 | Active bool 23 | } 24 | 25 | type message struct { 26 | ID string 27 | Role string 28 | Content string 29 | Timestamp time.Time 30 | 31 | StreamingState string 32 | } 33 | 34 | // SSE event types for real-time updates. 35 | var ( 36 | chatsSSEType = sse.Type("chats") 37 | messagesSSEType = sse.Type("messages") 38 | ) 39 | 40 | func callToolError(err error) json.RawMessage { 41 | contents := []mcp.Content{ 42 | { 43 | Type: mcp.ContentTypeText, 44 | Text: err.Error(), 45 | }, 46 | } 47 | 48 | res, _ := json.Marshal(contents) 49 | return res 50 | } 51 | 52 | // HandleChats processes chat interactions through HTTP POST requests, 53 | // managing both new chat creation and message handling. It supports three input methods: 54 | // 1. Regular messages via the "message" form field 55 | // 2. Predefined prompts via "prompt_name" and "prompt_args" form fields 56 | // 3. 
Attached resources via the "attached_resources" JSON array of resource URIs 57 | // 58 | // When resources are attached, they're processed and appended to the latest user message. 59 | // Resources are retrieved from registered MCP clients based on their URIs. 60 | // 61 | // The handler expects an optional "chat_id" field. If no chat_id is provided, 62 | // it creates a new chat session. For new chats, it asynchronously generates a title 63 | // based on the first message or prompt. 64 | // 65 | // The function handles different rendering strategies based on whether it's a new chat 66 | // (complete chatbox template) or an existing chat (individual message templates). For 67 | // all chats, it adds messages to the database and initiates asynchronous AI response 68 | // generation that will be streamed via Server-Sent Events (SSE). 69 | // 70 | // The function returns appropriate HTTP error responses for invalid methods, missing required fields, 71 | // resource processing failures, or internal processing errors. For successful requests, it renders 72 | // the appropriate templates with messages marked with correct streaming states. 73 | func (m Main) HandleChats(w http.ResponseWriter, r *http.Request) { 74 | if r.Method != http.MethodPost { 75 | m.logger.Error("Method not allowed", slog.String("method", r.Method)) 76 | http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) 77 | return 78 | } 79 | 80 | var err error 81 | chatID := r.FormValue("chat_id") 82 | isNewChat := false 83 | 84 | if chatID == "" { 85 | chatID, err = m.newChat() 86 | if err != nil { 87 | m.logger.Error("Failed to create new chat", slog.String(errLoggerKey, err.Error())) 88 | http.Error(w, err.Error(), http.StatusInternalServerError) 89 | return 90 | } 91 | isNewChat = true 92 | } else { 93 | if err := m.continueChat(r.Context(), chatID); err != nil { 94 | m.logger.Error("Failed to continue chat", slog.String(errLoggerKey, err.Error())) 95 | http.Error(w, err.Error(), http.StatusInternalServerError) 96 | return 97 | } 98 | } 99 | 100 | var userMessages []models.Message 101 | var addedMessageIDs []string 102 | var firstMessageForTitle string 103 | 104 | // Process input based on type (prompt or regular message) 105 | promptName := r.FormValue("prompt_name") 106 | if promptName != "" { 107 | // Handle prompt-based input 108 | promptArgs := r.FormValue("prompt_args") 109 | userMessages, firstMessageForTitle, err = m.processPromptInput(r.Context(), promptName, promptArgs) 110 | if err != nil { 111 | m.logger.Error("Failed to process prompt", 112 | slog.String("promptName", promptName), 113 | slog.String(errLoggerKey, err.Error())) 114 | http.Error(w, err.Error(), http.StatusInternalServerError) 115 | return 116 | } 117 | } else { 118 | // Handle regular message input 119 | msg := r.FormValue("message") 120 | if msg == "" { 121 | m.logger.Error("Message is required") 122 | http.Error(w, "Message is required", http.StatusBadRequest) 123 | return 124 | } 125 | 126 | firstMessageForTitle = msg 127 | userMessages = []models.Message{m.processUserMessage(msg)} 128 | } 129 | 130 | // Handle attached resources 131 | attachedResourcesJSON := r.FormValue("attached_resources") 132 | if attachedResourcesJSON != "" && attachedResourcesJSON != "[]" { 133 | var resourceURIs []string 134 | if err := json.Unmarshal([]byte(attachedResourcesJSON), &resourceURIs); err != nil { 135 | m.logger.Error("Failed to unmarshal attached resources", 136 | slog.String("attachedResources", attachedResourcesJSON), 137 | slog.String(errLoggerKey, 
err.Error())) 138 | http.Error(w, "Invalid attached resources format", http.StatusBadRequest) 139 | return 140 | } 141 | 142 | // Process resources and add resource contents to user message 143 | if len(resourceURIs) > 0 { 144 | resourceContents, err := m.processAttachedResources(r.Context(), resourceURIs) 145 | if err != nil { 146 | m.logger.Error("Failed to process attached resources", 147 | slog.String("resourceURIs", fmt.Sprintf("%v", resourceURIs)), 148 | slog.String(errLoggerKey, err.Error())) 149 | http.Error(w, err.Error(), http.StatusInternalServerError) 150 | return 151 | } 152 | 153 | // Add resource contents to the last user message 154 | if len(userMessages) > 0 { 155 | lastMsgIdx := len(userMessages) - 1 156 | userMessages[lastMsgIdx].Contents = append(userMessages[lastMsgIdx].Contents, resourceContents...) 157 | } 158 | } 159 | } 160 | 161 | // Add all user messages to the chat 162 | for _, msg := range userMessages { 163 | msgID, err := m.store.AddMessage(r.Context(), chatID, msg) 164 | if err != nil { 165 | m.logger.Error("Failed to add message", 166 | slog.String("message", fmt.Sprintf("%+v", msg)), 167 | slog.String(errLoggerKey, err.Error())) 168 | http.Error(w, err.Error(), http.StatusInternalServerError) 169 | return 170 | } 171 | addedMessageIDs = append(addedMessageIDs, msgID) 172 | } 173 | 174 | // Initialize empty AI message to be streamed later 175 | am := models.Message{ 176 | ID: uuid.New().String(), 177 | Role: models.RoleAssistant, 178 | Timestamp: time.Now(), 179 | } 180 | aiMsgID, err := m.store.AddMessage(r.Context(), chatID, am) 181 | if err != nil { 182 | m.logger.Error("Failed to add AI message", 183 | slog.String("message", fmt.Sprintf("%+v", am)), 184 | slog.String(errLoggerKey, err.Error())) 185 | http.Error(w, err.Error(), http.StatusInternalServerError) 186 | return 187 | } 188 | 189 | messages, err := m.store.Messages(r.Context(), chatID) 190 | if err != nil { 191 | m.logger.Error("Failed to get messages", 192 | slog.String("chatID", chatID), 193 | slog.String(errLoggerKey, err.Error())) 194 | http.Error(w, err.Error(), http.StatusInternalServerError) 195 | return 196 | } 197 | 198 | // Start async processes for chat response and title generation 199 | go m.chat(chatID, messages) 200 | 201 | if isNewChat { 202 | go m.generateChatTitle(chatID, firstMessageForTitle) 203 | m.renderNewChatResponse(w, chatID, messages, aiMsgID) 204 | return 205 | } 206 | 207 | // For existing chats, render each message separately 208 | m.renderExistingChatResponse(w, messages, addedMessageIDs, am, aiMsgID) 209 | } 210 | 211 | // HandleRefreshTitle handles requests to regenerate a chat title. It accepts POST requests with a chat_id 212 | // parameter, retrieves the first user message from the chat history, and uses the title generator to create 213 | // a new title. The handler updates the chat title in the database and returns the new title to be displayed. 214 | // 215 | // The function expects a "chat_id" form field identifying which chat's title should be refreshed. 216 | // After updating the database, it asynchronously notifies all connected clients through Server-Sent Events (SSE) 217 | // to maintain UI consistency across sessions while immediately returning the new title text to the requesting client. 218 | // 219 | // The function returns appropriate HTTP error responses for invalid methods, missing required fields, 220 | // or when no messages are found for title generation. 
On success, it returns just the title text to be 221 | // inserted into the targeted span element via HTMX. 222 | func (m Main) HandleRefreshTitle(w http.ResponseWriter, r *http.Request) { 223 | if r.Method != http.MethodPost { 224 | m.logger.Error("Method not allowed", slog.String("method", r.Method)) 225 | http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) 226 | return 227 | } 228 | 229 | chatID := r.FormValue("chat_id") 230 | if chatID == "" { 231 | m.logger.Error("Chat ID is required") 232 | http.Error(w, "Chat ID is required", http.StatusBadRequest) 233 | return 234 | } 235 | 236 | // Get messages to find first user message 237 | messages, err := m.store.Messages(r.Context(), chatID) 238 | if err != nil { 239 | m.logger.Error("Failed to get messages", 240 | slog.String("chatID", chatID), 241 | slog.String(errLoggerKey, err.Error())) 242 | http.Error(w, err.Error(), http.StatusInternalServerError) 243 | return 244 | } 245 | 246 | if len(messages) == 0 { 247 | m.logger.Error("No messages found for chat", slog.String("chatID", chatID)) 248 | http.Error(w, "No messages found for chat", http.StatusNotFound) 249 | return 250 | } 251 | 252 | // Find first user message for title generation 253 | var firstUserMessage string 254 | for _, msg := range messages { 255 | if msg.Role == models.RoleUser && len(msg.Contents) > 0 && msg.Contents[0].Type == models.ContentTypeText { 256 | firstUserMessage = msg.Contents[0].Text 257 | break 258 | } 259 | } 260 | 261 | if firstUserMessage == "" { 262 | m.logger.Error("No user message found for title generation", slog.String("chatID", chatID)) 263 | http.Error(w, "No user message found for title generation", http.StatusInternalServerError) 264 | return 265 | } 266 | 267 | // Generate and update title 268 | title, err := m.titleGenerator.GenerateTitle(r.Context(), firstUserMessage) 269 | if err != nil { 270 | m.logger.Error("Error generating chat title", 271 | slog.String("message", firstUserMessage), 272 | slog.String(errLoggerKey, err.Error())) 273 | http.Error(w, "Failed to generate title", http.StatusInternalServerError) 274 | return 275 | } 276 | 277 | updatedChat := models.Chat{ 278 | ID: chatID, 279 | Title: title, 280 | } 281 | if err := m.store.UpdateChat(r.Context(), updatedChat); err != nil { 282 | m.logger.Error("Failed to update chat title", 283 | slog.String(errLoggerKey, err.Error())) 284 | http.Error(w, "Failed to update chat title", http.StatusInternalServerError) 285 | return 286 | } 287 | 288 | // Update all clients via SSE asynchronously 289 | go func() { 290 | divs, err := m.chatDivs(chatID) 291 | if err != nil { 292 | m.logger.Error("Failed to generate chat divs", 293 | slog.String(errLoggerKey, err.Error())) 294 | return 295 | } 296 | 297 | msg := sse.Message{ 298 | Type: chatsSSEType, 299 | } 300 | msg.AppendData(divs) 301 | if err := m.sseSrv.Publish(&msg, chatsSSETopic); err != nil { 302 | m.logger.Error("Failed to publish chats", 303 | slog.String(errLoggerKey, err.Error())) 304 | } 305 | }() 306 | 307 | // Return just the title text for HTMX to insert into the span 308 | fmt.Fprintf(w, "%s", title) 309 | } 310 | 311 | // processPromptInput handles prompt-based inputs, extracting arguments and retrieving 312 | // prompt messages from the MCP client. 
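For reference, this is the wire shape the handler above expects for prompt-based input: prompt_name names an MCP prompt and prompt_args carries a JSON object of string arguments. A minimal sketch of exercising that path with httptest, mirroring the patterns in main_test.go further down (the /chat route and prompt name are illustrative, and the snippet assumes the same imports as main_test.go):

```go
form := strings.NewReader(`prompt_name=test_prompt&prompt_args={"topic":"go"}`)
req := httptest.NewRequest(http.MethodPost, "/chat", form)
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
w := httptest.NewRecorder()
m.HandleChats(w, req) // m is a handlers.Main built via NewMain
```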
313 | func (m Main) processPromptInput(ctx context.Context, promptName, promptArgs string) ([]models.Message, string, error) { 314 | var args map[string]string 315 | if err := json.Unmarshal([]byte(promptArgs), &args); err != nil { 316 | return nil, "", fmt.Errorf("invalid prompt arguments: %w", err) 317 | } 318 | 319 | // Get the prompt data directly from the server 320 | clientIdx, ok := m.promptsMap[promptName] 321 | if !ok { 322 | return nil, "", fmt.Errorf("prompt not found: %s", promptName) 323 | } 324 | 325 | promptResult, err := m.mcpClients[clientIdx].GetPrompt(ctx, mcp.GetPromptParams{ 326 | Name: promptName, 327 | Arguments: args, 328 | }) 329 | if err != nil { 330 | return nil, "", fmt.Errorf("failed to get prompt: %w", err) 331 | } 332 | 333 | // Convert prompt messages to our internal model format 334 | messages := make([]models.Message, 0, len(promptResult.Messages)) 335 | firstMessageText := "" 336 | 337 | for _, promptMsg := range promptResult.Messages { 338 | // For now, ignore non-text content 339 | if promptMsg.Content.Type != mcp.ContentTypeText { 340 | continue 341 | } 342 | content := promptMsg.Content.Text 343 | 344 | // Save the first text message for title generation, even when earlier non-text messages were skipped 345 | if firstMessageText == "" { 346 | firstMessageText = content 347 | } 348 | 349 | messages = append(messages, models.Message{ 350 | ID: uuid.New().String(), 351 | Role: models.Role(promptMsg.Role), 352 | Contents: []models.Content{ 353 | { 354 | Type: models.ContentTypeText, 355 | Text: content, 356 | }, 357 | }, 358 | Timestamp: time.Now(), 359 | }) 360 | } 361 | 362 | return messages, firstMessageText, nil 363 | } 364 | 365 | // processUserMessage handles standard user message inputs. 366 | func (m Main) processUserMessage(message string) models.Message { 367 | return models.Message{ 368 | ID: uuid.New().String(), 369 | Role: models.RoleUser, 370 | Contents: []models.Content{ 371 | { 372 | Type: models.ContentTypeText, 373 | Text: message, 374 | }, 375 | }, 376 | Timestamp: time.Now(), 377 | } 378 | } 379 | 380 | // processAttachedResources processes attached resource URIs from the form data 381 | // and returns content objects for each resource. 382 | func (m Main) processAttachedResources(ctx context.Context, resourceURIs []string) ([]models.Content, error) { 383 | var contents []models.Content 384 | 385 | for _, uri := range resourceURIs { 386 | clientIdx, ok := m.resourcesMap[uri] 387 | if !ok { 388 | return nil, fmt.Errorf("resource not found: %s", uri) 389 | } 390 | 391 | result, err := m.mcpClients[clientIdx].ReadResource(ctx, mcp.ReadResourceParams{ 392 | URI: uri, 393 | }) 394 | if err != nil { 395 | return nil, fmt.Errorf("failed to read resource %s: %w", uri, err) 396 | } 397 | 398 | contents = append(contents, models.Content{ 399 | Type: models.ContentTypeResource, 400 | ResourceContents: result.Contents, 401 | }) 402 | } 403 | 404 | return contents, nil 405 | } 406 | 407 | // renderNewChatResponse renders the complete chatbox for new chats.
408 | func (m Main) renderNewChatResponse(w http.ResponseWriter, chatID string, messages []models.Message, aiMsgID string) { 409 | msgs := make([]message, len(messages)) 410 | for i := range messages { 411 | // Mark only the AI message as "loading", others as "ended" 412 | streamingState := "ended" 413 | if messages[i].ID == aiMsgID { 414 | streamingState = "loading" 415 | } 416 | content, err := models.RenderContents(messages[i].Contents) 417 | if err != nil { 418 | m.logger.Error("Failed to render contents", 419 | slog.String("message", fmt.Sprintf("%+v", messages[i])), 420 | slog.String(errLoggerKey, err.Error())) 421 | http.Error(w, err.Error(), http.StatusInternalServerError) 422 | return 423 | } 424 | msgs[i] = message{ 425 | ID: messages[i].ID, 426 | Role: string(messages[i].Role), 427 | Content: content, 428 | Timestamp: messages[i].Timestamp, 429 | StreamingState: streamingState, 430 | } 431 | } 432 | 433 | data := homePageData{ 434 | CurrentChatID: chatID, 435 | Messages: msgs, 436 | } 437 | if err := m.templates.ExecuteTemplate(w, "chatbox", data); err != nil { 438 | http.Error(w, err.Error(), http.StatusInternalServerError) 439 | } 440 | } 441 | 442 | // renderExistingChatResponse renders each message individually for existing chats. 443 | func (m Main) renderExistingChatResponse(w http.ResponseWriter, messages []models.Message, addedMessageIDs []string, 444 | aiMessage models.Message, aiMsgID string, 445 | ) { 446 | for _, msgID := range addedMessageIDs { 447 | for i := range messages { 448 | if messages[i].ID == msgID { 449 | content, err := models.RenderContents(messages[i].Contents) 450 | if err != nil { 451 | m.logger.Error("Failed to render contents", 452 | slog.String("message", fmt.Sprintf("%+v", messages[i])), 453 | slog.String(errLoggerKey, err.Error())) 454 | http.Error(w, err.Error(), http.StatusInternalServerError) 455 | return 456 | } 457 | 458 | templateName := "user_message" 459 | if messages[i].Role == models.RoleAssistant { 460 | templateName = "ai_message" 461 | } 462 | 463 | if err := m.templates.ExecuteTemplate(w, templateName, message{ 464 | ID: msgID, 465 | Role: string(messages[i].Role), 466 | Content: content, 467 | Timestamp: messages[i].Timestamp, 468 | StreamingState: "ended", 469 | }); err != nil { 470 | http.Error(w, err.Error(), http.StatusInternalServerError) 471 | return 472 | } 473 | break 474 | } 475 | } 476 | } 477 | 478 | // Render AI response message (always the last one added) 479 | aiContent, err := models.RenderContents(aiMessage.Contents) 480 | if err != nil { 481 | m.logger.Error("Failed to render contents", 482 | slog.String("message", fmt.Sprintf("%+v", aiMessage)), 483 | slog.String(errLoggerKey, err.Error())) 484 | http.Error(w, err.Error(), http.StatusInternalServerError) 485 | return 486 | } 487 | 488 | if err := m.templates.ExecuteTemplate(w, "ai_message", message{ 489 | ID: aiMsgID, 490 | Role: string(aiMessage.Role), 491 | Content: aiContent, 492 | Timestamp: aiMessage.Timestamp, 493 | StreamingState: "loading", 494 | }); err != nil { 495 | http.Error(w, err.Error(), http.StatusInternalServerError) 496 | } 497 | } 498 | 499 | func (m Main) newChat() (string, error) { 500 | newChat := models.Chat{ 501 | ID: uuid.New().String(), 502 | } 503 | newChatID, err := m.store.AddChat(context.Background(), newChat) 504 | if err != nil { 505 | return "", fmt.Errorf("failed to add chat: %w", err) 506 | } 507 | newChat.ID = newChatID 508 | 509 | divs, err := m.chatDivs(newChat.ID) 510 | if err != nil { 511 | return "", fmt.Errorf("failed to 
create chat divs: %w", err) 512 | } 513 | 514 | msg := sse.Message{ 515 | Type: chatsSSEType, 516 | } 517 | msg.AppendData(divs) 518 | 519 | if err := m.sseSrv.Publish(&msg, chatsSSETopic); err != nil { 520 | return "", fmt.Errorf("failed to publish chats: %w", err) 521 | } 522 | 523 | return newChat.ID, nil 524 | } 525 | 526 | // continueChat continues chat with given chatID. 527 | // 528 | // If the last content of the last message is not a CallTool type, it will do nothing. 529 | // But if it is, as it may happen due to the corrupted data, this function will call the tool, 530 | // then append the result to the chat. 531 | func (m Main) continueChat(ctx context.Context, chatID string) error { 532 | messages, err := m.store.Messages(ctx, chatID) 533 | if err != nil { 534 | return fmt.Errorf("failed to get messages: %w", err) 535 | } 536 | 537 | if len(messages) == 0 { 538 | return nil 539 | } 540 | 541 | lastMessage := messages[len(messages)-1] 542 | 543 | if lastMessage.Role != models.RoleAssistant { 544 | return nil 545 | } 546 | 547 | if len(lastMessage.Contents) == 0 { 548 | return nil 549 | } 550 | 551 | if lastMessage.Contents[len(lastMessage.Contents)-1].Type != models.ContentTypeCallTool { 552 | return nil 553 | } 554 | 555 | toolRes, success := m.callTool(mcp.CallToolParams{ 556 | Name: lastMessage.Contents[len(lastMessage.Contents)-1].ToolName, 557 | Arguments: lastMessage.Contents[len(lastMessage.Contents)-1].ToolInput, 558 | }) 559 | 560 | lastMessage.Contents = append(lastMessage.Contents, models.Content{ 561 | Type: models.ContentTypeToolResult, 562 | CallToolID: lastMessage.Contents[len(lastMessage.Contents)-1].CallToolID, 563 | }) 564 | 565 | lastMessage.Contents[len(lastMessage.Contents)-1].ToolResult = toolRes 566 | lastMessage.Contents[len(lastMessage.Contents)-1].CallToolFailed = !success 567 | 568 | err = m.store.UpdateMessage(ctx, chatID, lastMessage) 569 | if err != nil { 570 | return fmt.Errorf("failed to update message: %w", err) 571 | } 572 | 573 | return nil 574 | } 575 | 576 | func (m Main) callTool(params mcp.CallToolParams) (json.RawMessage, bool) { 577 | clientIdx, ok := m.toolsMap[params.Name] 578 | if !ok { 579 | m.logger.Error("Tool not found", slog.String("toolName", params.Name)) 580 | return callToolError(fmt.Errorf("tool %s is not found", params.Name)), false 581 | } 582 | 583 | toolRes, err := m.mcpClients[clientIdx].CallTool(context.Background(), params) 584 | if err != nil { 585 | m.logger.Error("Tool call failed", 586 | slog.String("toolName", params.Name), 587 | slog.String(errLoggerKey, err.Error())) 588 | return callToolError(fmt.Errorf("tool call failed: %w", err)), false 589 | } 590 | 591 | resContent, err := json.Marshal(toolRes.Content) 592 | if err != nil { 593 | m.logger.Error("Failed to marshal tool result content", 594 | slog.String("toolName", params.Name), 595 | slog.String(errLoggerKey, err.Error())) 596 | return callToolError(fmt.Errorf("failed to marshal content: %w", err)), false 597 | } 598 | 599 | m.logger.Debug("Tool result content", 600 | slog.String("toolName", params.Name), 601 | slog.String("toolResult", string(resContent))) 602 | 603 | return resContent, !toolRes.IsError 604 | } 605 | 606 | func (m Main) chat(chatID string, messages []models.Message) { 607 | // Ensure SSE connection cleanup on function exit 608 | defer func() { 609 | e := &sse.Message{Type: sse.Type("closeMessage")} 610 | e.AppendData("bye") 611 | _ = m.sseSrv.Publish(e) 612 | }() 613 | 614 | aiMsg := messages[len(messages)-1] 615 | contentIdx := -1 616 | 
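// The loop below drives the streaming strategy: each iteration requests a
// fresh completion from the LLM and streams the rendered contents to this
// message's SSE topic. When the model emits a tool call, the inner range
// breaks out, the tool is executed, and its result is appended to the
// conversation so the next iteration can run the LLM with the tool output in
// context. The outer loop exits only when a completion finishes without
// requesting a tool call, or when an error aborts the stream.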
617 | for { 618 | it := m.llm.Chat(context.Background(), messages, m.tools) 619 | aiMsg.Contents = append(aiMsg.Contents, models.Content{ 620 | Type: models.ContentTypeText, 621 | Text: "", 622 | }) 623 | contentIdx++ 624 | callTool := false 625 | badToolInputFlag := false 626 | badToolInput := json.RawMessage("{}") 627 | 628 | for content, err := range it { 629 | msg := sse.Message{ 630 | Type: messagesSSEType, 631 | } 632 | if err != nil { 633 | m.logger.Error("Error from llm provider", slog.String(errLoggerKey, err.Error())) 634 | msg.AppendData(err.Error()) 635 | _ = m.sseSrv.Publish(&msg, messageIDTopic(aiMsg.ID)) 636 | return 637 | } 638 | 639 | m.logger.Debug("LLM response", slog.String("content", fmt.Sprintf("%+v", content))) 640 | 641 | switch content.Type { 642 | case models.ContentTypeText: 643 | aiMsg.Contents[contentIdx].Text += content.Text 644 | case models.ContentTypeCallTool: 645 | // Non-Anthropic models sometimes produce tool input that can't be JSON-marshalled, which would make the store 646 | // fail when saving the message. So we check whether the tool input is valid JSON; if it isn't, we set a flag 647 | // so the model can be told that its tool input was invalid, and we replace the input with an empty JSON 648 | // object to avoid the save failure. 649 | _, err := json.Marshal(content.ToolInput) 650 | if err != nil { 651 | badToolInputFlag = true 652 | badToolInput = content.ToolInput 653 | content.ToolInput = []byte("{}") 654 | } 655 | callTool = true 656 | aiMsg.Contents = append(aiMsg.Contents, content) 657 | contentIdx++ 658 | case models.ContentTypeResource: 659 | m.logger.Error("Content type resource is not allowed") 660 | return 661 | case models.ContentTypeToolResult: 662 | m.logger.Error("Content type tool result is not allowed") 663 | return 664 | } 665 | 666 | if err := m.store.UpdateMessage(context.Background(), chatID, aiMsg); err != nil { 667 | m.logger.Error("Failed to update message", 668 | slog.String("message", fmt.Sprintf("%+v", aiMsg)), 669 | slog.String(errLoggerKey, err.Error())) 670 | return 671 | } 672 | 673 | rc, err := models.RenderContents(aiMsg.Contents) 674 | if err != nil { 675 | m.logger.Error("Failed to render contents", 676 | slog.String("message", fmt.Sprintf("%+v", aiMsg)), 677 | slog.String(errLoggerKey, err.Error())) 678 | return 679 | } 680 | m.logger.Debug("Render contents", 681 | slog.String("origMsg", fmt.Sprintf("%+v", aiMsg.Contents)), 682 | slog.String("renderedMsg", rc)) 683 | msg.AppendData(rc) 684 | if err := m.sseSrv.Publish(&msg, messageIDTopic(aiMsg.ID)); err != nil { 685 | m.logger.Error("Failed to publish message", 686 | slog.String("message", fmt.Sprintf("%+v", aiMsg)), 687 | slog.String(errLoggerKey, err.Error())) 688 | return 689 | } 690 | 691 | if callTool { 692 | break 693 | } 694 | } 695 | 696 | if !callTool { 697 | break 698 | } 699 | 700 | callToolContent := aiMsg.Contents[len(aiMsg.Contents)-1] 701 | 702 | toolResContent := models.Content{ 703 | Type: models.ContentTypeToolResult, 704 | CallToolID: callToolContent.CallToolID, 705 | } 706 | 707 | if badToolInputFlag { 708 | toolResContent.ToolResult = callToolError(fmt.Errorf("tool input %s is not valid json", string(badToolInput))) 709 | toolResContent.CallToolFailed = true 710 | aiMsg.Contents = append(aiMsg.Contents, toolResContent) 711 | contentIdx++ 712 | messages[len(messages)-1] = aiMsg 713 | continue 714 | } 715 | 716 | toolResult, success := m.callTool(mcp.CallToolParams{ 717 | Name: callToolContent.ToolName, 718 | Arguments:
callToolContent.ToolInput, 719 | }) 720 | 721 | toolResContent.ToolResult = toolResult 722 | toolResContent.CallToolFailed = !success 723 | aiMsg.Contents = append(aiMsg.Contents, toolResContent) 724 | contentIdx++ 725 | messages[len(messages)-1] = aiMsg 726 | } 727 | } 728 | 729 | func (m Main) generateChatTitle(chatID string, message string) { 730 | title, err := m.titleGenerator.GenerateTitle(context.Background(), message) 731 | if err != nil { 732 | m.logger.Error("Error generating chat title", 733 | slog.String("message", message), 734 | slog.String(errLoggerKey, err.Error())) 735 | return 736 | } 737 | 738 | updatedChat := models.Chat{ 739 | ID: chatID, 740 | Title: title, 741 | } 742 | if err := m.store.UpdateChat(context.Background(), updatedChat); err != nil { 743 | m.logger.Error("Failed to update chat title", 744 | slog.String(errLoggerKey, err.Error())) 745 | return 746 | } 747 | 748 | divs, err := m.chatDivs(chatID) 749 | if err != nil { 750 | m.logger.Error("Failed to generate chat divs", 751 | slog.String(errLoggerKey, err.Error())) 752 | return 753 | } 754 | 755 | msg := sse.Message{ 756 | Type: chatsSSEType, 757 | } 758 | msg.AppendData(divs) 759 | if err := m.sseSrv.Publish(&msg, chatsSSETopic); err != nil { 760 | m.logger.Error("Failed to publish chats", 761 | slog.String(errLoggerKey, err.Error())) 762 | } 763 | } 764 | 765 | func (m Main) chatDivs(activeID string) (string, error) { 766 | chats, err := m.store.Chats(context.Background()) 767 | if err != nil { 768 | return "", fmt.Errorf("failed to get chats: %w", err) 769 | } 770 | 771 | var sb strings.Builder 772 | for _, ch := range chats { 773 | err := m.templates.ExecuteTemplate(&sb, "chat_title", chat{ 774 | ID: ch.ID, 775 | Title: ch.Title, 776 | Active: ch.ID == activeID, 777 | }) 778 | if err != nil { 779 | return "", fmt.Errorf("failed to execute chat_title template: %w", err) 780 | } 781 | } 782 | return sb.String(), nil 783 | } 784 | -------------------------------------------------------------------------------- /internal/handlers/home.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "fmt" 5 | "log/slog" 6 | "net/http" 7 | "slices" 8 | 9 | "github.com/MegaGrindStone/go-mcp" 10 | "github.com/MegaGrindStone/mcp-web-ui/internal/models" 11 | ) 12 | 13 | type homePageData struct { 14 | Chats []chat 15 | Messages []message 16 | CurrentChatID string 17 | 18 | Servers []mcp.Info 19 | Tools []mcp.Tool 20 | Resources []mcp.Resource 21 | Prompts []mcp.Prompt 22 | } 23 | 24 | // HandleHome renders the home page template with chat and message data. It displays a list of available 25 | // chats and, if a chat_id query parameter is provided, shows the messages for the selected chat. 26 | // The handler retrieves chat and message data from the store and prepares it for template rendering. 
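A quick sketch of exercising this handler, patterned after TestHandleHome further down (the root route and chat ID are illustrative, and the snippet assumes the same imports as main_test.go):

```go
req := httptest.NewRequest(http.MethodGet, "/?chat_id=1", nil)
w := httptest.NewRecorder()
m.HandleHome(w, req) // m is a handlers.Main
// w.Body now holds the rendered home page with chat "1" marked active
// and its messages in the "ended" streaming state.
```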
27 | func (m Main) HandleHome(w http.ResponseWriter, r *http.Request) { 28 | cs, err := m.store.Chats(r.Context()) 29 | if err != nil { 30 | m.logger.Error("Failed to get chats", slog.String(errLoggerKey, err.Error())) 31 | http.Error(w, err.Error(), http.StatusInternalServerError) 32 | return 33 | } 34 | 35 | // We transform the store's chat data into our view-specific chat structs 36 | // to avoid exposing internal implementation details to the template 37 | chats := make([]chat, len(cs)) 38 | for i := range cs { 39 | chats[i] = chat{ 40 | ID: cs[i].ID, 41 | Title: cs[i].Title, 42 | Active: false, 43 | } 44 | } 45 | 46 | currentChatID := "" 47 | var messages []message 48 | if chatID := r.URL.Query().Get("chat_id"); chatID != "" { 49 | // We find and mark the currently selected chat as active for UI highlighting 50 | idx := slices.IndexFunc(chats, func(c chat) bool { 51 | return c.ID == chatID 52 | }) 53 | 54 | // Only proceed if the chat was found 55 | if idx >= 0 { 56 | currentChatID = chatID 57 | chats[idx].Active = true 58 | 59 | // We fetch and transform messages for the selected chat, 60 | // setting initial streaming state to "ended" for all messages 61 | ms, err := m.store.Messages(r.Context(), currentChatID) 62 | if err != nil { 63 | m.logger.Error("Failed to get messages", slog.String(errLoggerKey, err.Error())) 64 | http.Error(w, err.Error(), http.StatusInternalServerError) 65 | return 66 | } 67 | messages = make([]message, len(ms)) 68 | for i := range ms { 69 | rc, err := models.RenderContents(ms[i].Contents) 70 | if err != nil { 71 | m.logger.Error("Failed to render contents", 72 | slog.String("message", fmt.Sprintf("%+v", ms[i])), 73 | slog.String(errLoggerKey, err.Error())) 74 | http.Error(w, err.Error(), http.StatusInternalServerError) 75 | return 76 | } 77 | m.logger.Debug("Render contents", 78 | slog.String("origMsg", fmt.Sprintf("%+v", ms[i].Contents)), 79 | slog.String("renderedMsg", rc)) 80 | messages[i] = message{ 81 | ID: ms[i].ID, 82 | Role: string(ms[i].Role), 83 | Content: rc, 84 | Timestamp: ms[i].Timestamp, 85 | StreamingState: "ended", 86 | } 87 | } 88 | } 89 | } 90 | data := homePageData{ 91 | Chats: chats, 92 | Messages: messages, 93 | CurrentChatID: currentChatID, 94 | Servers: m.servers, 95 | Tools: m.tools, 96 | Resources: m.resources, 97 | Prompts: m.prompts, 98 | } 99 | 100 | if err := m.templates.ExecuteTemplate(w, "home.html", data); err != nil { 101 | m.logger.Error("Failed to execute home template", slog.String(errLoggerKey, err.Error())) 102 | http.Error(w, err.Error(), http.StatusInternalServerError) 103 | return 104 | } 105 | } 106 | 107 | // HandleSSE serves Server-Sent Events (SSE) requests by delegating to the underlying SSE server. 108 | // This endpoint enables real-time updates for the client. 109 | func (m Main) HandleSSE(w http.ResponseWriter, r *http.Request) { 110 | m.sseSrv.ServeHTTP(w, r) 111 | } 112 | -------------------------------------------------------------------------------- /internal/handlers/main.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "iter" 7 | "log/slog" 8 | "text/template" 9 | "time" 10 | 11 | "github.com/MegaGrindStone/go-mcp" 12 | mcpwebui "github.com/MegaGrindStone/mcp-web-ui" 13 | "github.com/MegaGrindStone/mcp-web-ui/internal/models" 14 | "github.com/tmaxmax/go-sse" 15 | ) 16 | 17 | // LLM represents a large language model interface that provides chat functionality. 
It accepts a context 18 | // and a sequence of messages, returning an iterator that yields response chunks and potential errors. 19 | type LLM interface { 20 | Chat(ctx context.Context, messages []models.Message, tools []mcp.Tool) iter.Seq2[models.Content, error] 21 | } 22 | 23 | // TitleGenerator represents a title generator interface that generates a title for a given message. 24 | type TitleGenerator interface { 25 | GenerateTitle(ctx context.Context, message string) (string, error) 26 | } 27 | 28 | // Store defines the interface for managing chat and message persistence. It provides methods for 29 | // creating, reading, and updating chats and their associated messages. The interface supports both 30 | // atomic operations and bulk retrieval of chats and messages. 31 | type Store interface { 32 | Chats(ctx context.Context) ([]models.Chat, error) 33 | AddChat(ctx context.Context, chat models.Chat) (string, error) 34 | UpdateChat(ctx context.Context, chat models.Chat) error 35 | 36 | Messages(ctx context.Context, chatID string) ([]models.Message, error) 37 | AddMessage(ctx context.Context, chatID string, message models.Message) (string, error) 38 | UpdateMessage(ctx context.Context, chatID string, message models.Message) error 39 | } 40 | 41 | // MCPClient defines the interface for interacting with an MCP server. 42 | // This allows for mocking in tests. 43 | type MCPClient interface { 44 | ServerInfo() mcp.Info 45 | ToolServerSupported() bool 46 | ResourceServerSupported() bool 47 | PromptServerSupported() bool 48 | ListTools(ctx context.Context, params mcp.ListToolsParams) (mcp.ListToolsResult, error) 49 | ListResources(ctx context.Context, params mcp.ListResourcesParams) (mcp.ListResourcesResult, error) 50 | ReadResource(ctx context.Context, params mcp.ReadResourceParams) (mcp.ReadResourceResult, error) 51 | ListPrompts(ctx context.Context, params mcp.ListPromptsParams) (mcp.ListPromptResult, error) 52 | GetPrompt(ctx context.Context, params mcp.GetPromptParams) (mcp.GetPromptResult, error) 53 | CallTool(ctx context.Context, params mcp.CallToolParams) (mcp.CallToolResult, error) 54 | } 55 | 56 | // Main handles the core functionality of the chat application, managing server-sent events, 57 | // HTML templates, and interactions between the LLM and Store components. 58 | type Main struct { 59 | sseSrv *sse.Server 60 | templates *template.Template 61 | 62 | llm LLM 63 | titleGenerator TitleGenerator 64 | store Store 65 | 66 | mcpClients []MCPClient 67 | 68 | servers []mcp.Info 69 | tools []mcp.Tool 70 | resources []mcp.Resource 71 | prompts []mcp.Prompt 72 | 73 | promptsMap map[string]int // Map of prompt names to mcpClients index. 74 | resourcesMap map[string]int // Map of resource uri to mcpClients index. 75 | toolsMap map[string]int // Map of tool names to mcpClients index. 76 | logger *slog.Logger 77 | } 78 | 79 | const ( 80 | chatsSSETopic = "chats" 81 | errLoggerKey = "err" 82 | ) 83 | 84 | // NewMain creates a new Main instance with the provided LLM and Store implementations. It initializes 85 | // the SSE server with default configurations and parses the required HTML templates from the embedded 86 | // filesystem. The SSE server is configured to handle both default events and chat-specific topics. 
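Wiring the constructed Main into a server then looks roughly like this, given implementations of the interfaces above (a hypothetical sketch: the actual setup lives in cmd/server, and the routes and port here are assumptions):

```go
m, err := handlers.NewMain(llm, titleGen, store, mcpClients, slog.Default())
if err != nil {
	log.Fatal(err)
}

mux := http.NewServeMux()
mux.HandleFunc("/", m.HandleHome)
mux.HandleFunc("/chats", m.HandleChats) // route name is an assumption
mux.HandleFunc("/sse", m.HandleSSE)     // route name is an assumption

err = http.ListenAndServe(":8080", mux) // port is an assumption
_ = m.Shutdown(context.Background())    // close SSE sessions on exit
log.Fatal(err)
```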
87 | func NewMain( 88 | llm LLM, 89 | titleGen TitleGenerator, 90 | store Store, 91 | mcpClients []MCPClient, 92 | logger *slog.Logger, 93 | ) (Main, error) { 94 | // We parse templates from three distinct directories to separate layout, pages, and partial views 95 | tmpl, err := template.ParseFS( 96 | mcpwebui.TemplateFS, 97 | "templates/layout/*.html", 98 | "templates/pages/*.html", 99 | "templates/partials/*.html", 100 | ) 101 | if err != nil { 102 | return Main{}, err 103 | } 104 | 105 | servers := make([]mcp.Info, len(mcpClients)) 106 | tools := make([]mcp.Tool, 0, len(mcpClients)) 107 | resources := make([]mcp.Resource, 0, len(mcpClients)) 108 | prompts := make([]mcp.Prompt, 0, len(mcpClients)) 109 | pm := make(map[string]int) 110 | rm := make(map[string]int) 111 | tm := make(map[string]int) 112 | for i := range mcpClients { 113 | servers[i] = mcpClients[i].ServerInfo() 114 | serverName := servers[i].Name 115 | 116 | var ts []mcp.Tool 117 | if mcpClients[i].ToolServerSupported() { 118 | listTools, err := mcpClients[i].ListTools(context.Background(), mcp.ListToolsParams{}) 119 | if err != nil { 120 | return Main{}, fmt.Errorf("failed to list tools from server %s: %w", serverName, err) 121 | } 122 | ts = listTools.Tools 123 | for _, tool := range ts { 124 | tm[tool.Name] = i 125 | } 126 | } 127 | 128 | var rs []mcp.Resource 129 | if mcpClients[i].ResourceServerSupported() { 130 | listResources, err := mcpClients[i].ListResources(context.Background(), mcp.ListResourcesParams{}) 131 | if err != nil { 132 | return Main{}, fmt.Errorf("failed to list resources from server %s: %w", serverName, err) 133 | } 134 | rs = listResources.Resources 135 | for _, resource := range rs { 136 | rm[resource.URI] = i 137 | } 138 | } 139 | 140 | var ps []mcp.Prompt 141 | if mcpClients[i].PromptServerSupported() { 142 | listPrompts, err := mcpClients[i].ListPrompts(context.Background(), mcp.ListPromptsParams{}) 143 | if err != nil { 144 | return Main{}, fmt.Errorf("failed to list prompts from server %s: %w", serverName, err) 145 | } 146 | ps = listPrompts.Prompts 147 | for _, prompt := range ps { 148 | pm[prompt.Name] = i 149 | } 150 | } 151 | 152 | tools = append(tools, ts...) 153 | resources = append(resources, rs...) 154 | prompts = append(prompts, ps...) 155 | } 156 | 157 | return Main{ 158 | sseSrv: &sse.Server{ 159 | OnSession: func(s *sse.Session) (sse.Subscription, bool) { 160 | // We start with default topics that all clients should subscribe to 161 | topics := []string{sse.DefaultTopic, chatsSSETopic} 162 | 163 | // We create a message-specific topic if the client requests updates for a particular message 164 | messageID := s.Req.URL.Query().Get("message_id") 165 | if messageID != "" { 166 | topics = append(topics, messageIDTopic(messageID)) 167 | } 168 | 169 | return sse.Subscription{ 170 | Client: s, 171 | LastEventID: s.LastEventID, 172 | Topics: topics, 173 | }, true 174 | }, 175 | }, 176 | templates: tmpl, 177 | llm: llm, 178 | titleGenerator: titleGen, 179 | store: store, 180 | mcpClients: mcpClients, 181 | promptsMap: pm, 182 | resourcesMap: rm, 183 | toolsMap: tm, 184 | logger: logger.With(slog.String("module", "main")), 185 | servers: servers, 186 | tools: tools, 187 | resources: resources, 188 | prompts: prompts, 189 | }, nil 190 | } 191 | 192 | func messageIDTopic(messageID string) string { 193 | return fmt.Sprintf("message-%s", messageID) 194 | } 195 | 196 | // Shutdown gracefully terminates the Main instance's SSE server. 
It broadcasts a close message to all 197 | // connected clients and waits up to 5 seconds for connections to terminate. After the timeout, any 198 | // remaining connections are forcefully closed. 199 | func (m Main) Shutdown(ctx context.Context) error { 200 | e := &sse.Message{Type: sse.Type("closeChat")} 201 | // We create a close event that complies with SSE spec requiring data 202 | e.AppendData("bye") 203 | 204 | // We ignore the error here since we're shutting down anyway 205 | _ = m.sseSrv.Publish(e) 206 | 207 | ctx, cancel := context.WithTimeout(ctx, time.Second*5) 208 | defer cancel() 209 | 210 | return m.sseSrv.Shutdown(ctx) 211 | } 212 | -------------------------------------------------------------------------------- /internal/handlers/main_test.go: -------------------------------------------------------------------------------- 1 | package handlers_test 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "iter" 8 | "log/slog" 9 | "net/http" 10 | "net/http/httptest" 11 | "slices" 12 | "strings" 13 | "sync" 14 | "testing" 15 | 16 | "github.com/MegaGrindStone/go-mcp" 17 | "github.com/MegaGrindStone/mcp-web-ui/internal/handlers" 18 | "github.com/MegaGrindStone/mcp-web-ui/internal/models" 19 | ) 20 | 21 | type mockLLM struct { 22 | responses []string 23 | err error 24 | } 25 | 26 | type mockStore struct { 27 | sync.Mutex 28 | chats []models.Chat 29 | messages map[string][]models.Message 30 | err error 31 | } 32 | 33 | type mockMCPClient struct { 34 | serverInfo mcp.Info 35 | toolServerSupported bool 36 | resourceServerSupported bool 37 | promptServerSupported bool 38 | 39 | tools []mcp.Tool 40 | resources []mcp.Resource 41 | prompts []mcp.Prompt 42 | 43 | getPromptResult mcp.GetPromptResult 44 | callToolResult mcp.CallToolResult 45 | readResourceFunc func(uri string) (mcp.ReadResourceResult, error) 46 | 47 | err error 48 | } 49 | 50 | func TestNewMain(t *testing.T) { 51 | llm := &mockLLM{} 52 | store := &mockStore{} 53 | mcpClient := &mockMCPClient{ 54 | serverInfo: mcp.Info{ 55 | Name: "Test Server", 56 | }, 57 | } 58 | 59 | main, err := handlers.NewMain(llm, llm, store, []handlers.MCPClient{mcpClient}, slog.Default()) 60 | if err != nil { 61 | t.Fatalf("NewMain() error = %v", err) 62 | } 63 | 64 | if main.Shutdown(context.Background()) != nil { 65 | t.Error("Shutdown() should not return error") 66 | } 67 | } 68 | 69 | func TestHandleHome(t *testing.T) { 70 | llm := &mockLLM{} 71 | store := &mockStore{ 72 | chats: []models.Chat{ 73 | {ID: "1", Title: "Test Chat"}, 74 | }, 75 | messages: map[string][]models.Message{ 76 | "1": {{ID: "1", Role: "user", Contents: []models.Content{ 77 | { 78 | Type: models.ContentTypeText, 79 | Text: "Hello", 80 | }, 81 | }}}, 82 | }, 83 | } 84 | mcpClient := &mockMCPClient{ 85 | serverInfo: mcp.Info{ 86 | Name: "Test Server", 87 | }, 88 | } 89 | 90 | main, err := handlers.NewMain(llm, llm, store, []handlers.MCPClient{mcpClient}, slog.Default()) 91 | if err != nil { 92 | t.Fatal(err) 93 | } 94 | 95 | tests := []struct { 96 | name string 97 | url string 98 | wantStatus int 99 | wantBody string 100 | }{ 101 | { 102 | name: "Home page without chat", 103 | url: "/", 104 | wantStatus: http.StatusOK, 105 | wantBody: "Test Chat", // Should contain chat title 106 | }, 107 | { 108 | name: "Home page with chat", 109 | url: "/?chat_id=1", 110 | wantStatus: http.StatusOK, 111 | wantBody: "Hello", // Should contain message content 112 | }, 113 | } 114 | 115 | for _, tt := range tests { 116 | t.Run(tt.name, func(t *testing.T) { 117 | req := 
httptest.NewRequest(http.MethodGet, tt.url, nil) 118 | w := httptest.NewRecorder() 119 | 120 | main.HandleHome(w, req) 121 | 122 | if w.Code != tt.wantStatus { 123 | t.Errorf("HandleHome() status = %v, want %v", w.Code, tt.wantStatus) 124 | } 125 | 126 | if !strings.Contains(w.Body.String(), tt.wantBody) { 127 | t.Errorf("HandleHome() body = %v, want to contain %v", w.Body.String(), tt.wantBody) 128 | } 129 | }) 130 | } 131 | } 132 | 133 | func TestHandleChats(t *testing.T) { 134 | llm := &mockLLM{responses: []string{"AI response"}} 135 | store := &mockStore{ 136 | messages: map[string][]models.Message{}, 137 | } 138 | 139 | // Setup MCP client with prompt and resource support 140 | mcpClient := &mockMCPClient{ 141 | serverInfo: mcp.Info{ 142 | Name: "Test Server", 143 | }, 144 | promptServerSupported: true, 145 | prompts: []mcp.Prompt{ 146 | {Name: "test_prompt"}, 147 | }, 148 | getPromptResult: mcp.GetPromptResult{ 149 | Messages: []mcp.PromptMessage{ 150 | { 151 | Role: "user", 152 | Content: mcp.Content{ 153 | Type: mcp.ContentTypeText, 154 | Text: "Prompt generated text", 155 | }, 156 | }, 157 | }, 158 | }, 159 | toolServerSupported: true, 160 | tools: []mcp.Tool{ 161 | {Name: "test_tool"}, 162 | }, 163 | callToolResult: mcp.CallToolResult{ 164 | Content: []mcp.Content{ 165 | { 166 | Type: mcp.ContentTypeText, 167 | Text: "Tool execution result", 168 | }, 169 | }, 170 | IsError: false, 171 | }, 172 | resourceServerSupported: true, 173 | resources: []mcp.Resource{ 174 | {URI: "file:///test.txt"}, 175 | {URI: "workspace:///sample.go"}, 176 | }, 177 | readResourceFunc: func(uri string) (mcp.ReadResourceResult, error) { 178 | switch uri { 179 | case "file:///test.txt": 180 | return mcp.ReadResourceResult{ 181 | Contents: []mcp.ResourceContents{ 182 | { 183 | URI: uri, 184 | MimeType: "text/plain", 185 | Text: "This is a test file", 186 | }, 187 | }, 188 | }, nil 189 | case "workspace:///sample.go": 190 | return mcp.ReadResourceResult{ 191 | Contents: []mcp.ResourceContents{ 192 | { 193 | URI: uri, 194 | MimeType: "text/x-go", 195 | Text: "package main\n\nfunc main() {\n\tfmt.Println(\"Hello\")\n}", 196 | }, 197 | }, 198 | }, nil 199 | case "error:///resource": 200 | return mcp.ReadResourceResult{}, fmt.Errorf("failed to read resource") 201 | default: 202 | return mcp.ReadResourceResult{}, fmt.Errorf("resource not found") 203 | } 204 | }, 205 | } 206 | 207 | tests := []struct { 208 | name string 209 | method string 210 | formData string 211 | store *mockStore 212 | llm *mockLLM 213 | wantStatus int 214 | }{ 215 | { 216 | name: "Invalid method", 217 | method: http.MethodGet, 218 | formData: "", 219 | wantStatus: http.StatusMethodNotAllowed, 220 | }, 221 | { 222 | name: "Empty message and no prompt", 223 | method: http.MethodPost, 224 | formData: "chat_id=", 225 | wantStatus: http.StatusBadRequest, 226 | }, 227 | { 228 | name: "New chat with message", 229 | method: http.MethodPost, 230 | formData: "message=Hello", 231 | wantStatus: http.StatusOK, 232 | }, 233 | { 234 | name: "Existing chat with message", 235 | method: http.MethodPost, 236 | formData: "message=Hello&chat_id=1", 237 | wantStatus: http.StatusOK, 238 | }, 239 | // Testing prompt functionality 240 | { 241 | name: "Invalid prompt arguments", 242 | method: http.MethodPost, 243 | formData: `prompt_name=test_prompt&prompt_args=invalid_json`, 244 | wantStatus: http.StatusInternalServerError, 245 | }, 246 | { 247 | name: "Valid prompt with empty args", 248 | method: http.MethodPost, 249 | formData: 
`prompt_name=test_prompt&prompt_args={}`, 250 | wantStatus: http.StatusOK, 251 | }, 252 | { 253 | name: "Prompt not found", 254 | method: http.MethodPost, 255 | formData: `prompt_name=unknown_prompt&prompt_args={"key":"value"}`, 256 | wantStatus: http.StatusInternalServerError, 257 | }, 258 | // Resource handling test cases 259 | { 260 | name: "Message with valid attached resources", 261 | method: http.MethodPost, 262 | formData: `message=Check these files&attached_resources=["file:///test.txt","workspace:///sample.go"]`, 263 | wantStatus: http.StatusOK, 264 | }, 265 | { 266 | name: "Invalid JSON in attached resources", 267 | method: http.MethodPost, 268 | formData: `message=Bad JSON&attached_resources=[invalid"json]`, 269 | wantStatus: http.StatusBadRequest, 270 | }, 271 | { 272 | name: "Resource not found", 273 | method: http.MethodPost, 274 | formData: `message=Missing resource&attached_resources=["unknown:///file.txt"]`, 275 | wantStatus: http.StatusInternalServerError, 276 | }, 277 | { 278 | name: "Error reading resource", 279 | method: http.MethodPost, 280 | formData: `message=Error case&attached_resources=["error:///resource"]`, 281 | wantStatus: http.StatusInternalServerError, 282 | }, 283 | { 284 | name: "Empty attached resources array", 285 | method: http.MethodPost, 286 | formData: `message=No attachments&attached_resources=[]`, 287 | wantStatus: http.StatusOK, 288 | }, 289 | // Test cases for error paths 290 | { 291 | name: "Store error when adding message", 292 | method: http.MethodPost, 293 | formData: "message=Hello", 294 | store: &mockStore{err: fmt.Errorf("database error")}, 295 | wantStatus: http.StatusInternalServerError, 296 | }, 297 | { 298 | name: "Continue chat with pending tool call", 299 | method: http.MethodPost, 300 | formData: "chat_id=existing-chat&message=Hello", 301 | store: &mockStore{ 302 | messages: map[string][]models.Message{ 303 | "existing-chat": { 304 | { 305 | ID: "last-msg", 306 | Role: models.RoleAssistant, 307 | Contents: []models.Content{ 308 | { 309 | Type: models.ContentTypeCallTool, 310 | ToolName: "test_tool", 311 | ToolInput: json.RawMessage(`{"param":"value"}`), 312 | CallToolID: "tool-call-1", 313 | }, 314 | }, 315 | }, 316 | }, 317 | }, 318 | }, 319 | wantStatus: http.StatusOK, 320 | }, 321 | { 322 | name: "Tool not found", 323 | method: http.MethodPost, 324 | formData: "chat_id=existing-chat&message=Hello", 325 | store: &mockStore{ 326 | messages: map[string][]models.Message{ 327 | "existing-chat": { 328 | { 329 | ID: "last-msg", 330 | Role: models.RoleAssistant, 331 | Contents: []models.Content{ 332 | { 333 | Type: models.ContentTypeCallTool, 334 | ToolName: "unknown_tool", // Tool that doesn't exist 335 | ToolInput: json.RawMessage(`{"param":"value"}`), 336 | CallToolID: "tool-call-1", 337 | }, 338 | }, 339 | }, 340 | }, 341 | }, 342 | }, 343 | wantStatus: http.StatusOK, 344 | }, 345 | } 346 | 347 | for _, tt := range tests { 348 | t.Run(tt.name, func(t *testing.T) { 349 | // Use custom store and LLM if provided in the test case 350 | currentStore := store 351 | if tt.store != nil { 352 | currentStore = tt.store 353 | } 354 | 355 | currentLLM := llm 356 | if tt.llm != nil { 357 | currentLLM = tt.llm 358 | } 359 | 360 | testMain, err := handlers.NewMain(currentLLM, currentLLM, currentStore, 361 | []handlers.MCPClient{mcpClient}, slog.Default()) 362 | if err != nil { 363 | t.Fatal(err) 364 | } 365 | 366 | form := strings.NewReader(tt.formData) 367 | req := httptest.NewRequest(tt.method, "/chat", form) 368 | req.Header.Set("Content-Type", 
"application/x-www-form-urlencoded") 369 | w := httptest.NewRecorder() 370 | 371 | testMain.HandleChats(w, req) 372 | 373 | if w.Code != tt.wantStatus { 374 | t.Errorf("HandleChats() status = %v, want %v", w.Code, tt.wantStatus) 375 | } 376 | }) 377 | } 378 | } 379 | 380 | func TestHandleRefreshTitle(t *testing.T) { 381 | // Test success case first 382 | t.Run("Success", func(t *testing.T) { 383 | llm := &mockLLM{} 384 | store := &mockStore{ 385 | chats: []models.Chat{ 386 | {ID: "1", Title: "Old Title"}, 387 | }, 388 | messages: map[string][]models.Message{ 389 | "1": { 390 | { 391 | ID: "msg1", 392 | Role: models.RoleUser, 393 | Contents: []models.Content{ 394 | { 395 | Type: models.ContentTypeText, 396 | Text: "First user message", 397 | }, 398 | }, 399 | }, 400 | }, 401 | }, 402 | } 403 | mcpClient := &mockMCPClient{ 404 | serverInfo: mcp.Info{ 405 | Name: "Test Server", 406 | }, 407 | } 408 | 409 | main, err := handlers.NewMain(llm, llm, store, []handlers.MCPClient{mcpClient}, slog.Default()) 410 | if err != nil { 411 | t.Fatal(err) 412 | } 413 | 414 | form := strings.NewReader("chat_id=1") 415 | req := httptest.NewRequest(http.MethodPost, "/refresh-title", form) 416 | req.Header.Set("Content-Type", "application/x-www-form-urlencoded") 417 | w := httptest.NewRecorder() 418 | 419 | main.HandleRefreshTitle(w, req) 420 | 421 | if w.Code != http.StatusOK { 422 | t.Errorf("HandleRefreshTitle() status = %v, want %v", w.Code, http.StatusOK) 423 | } 424 | 425 | if !strings.Contains(w.Body.String(), "Test Chat") { 426 | t.Errorf("HandleRefreshTitle() body = %v, want to contain %v", w.Body.String(), "Test Chat") 427 | } 428 | 429 | // Verify chat title was updated 430 | if store.chats[0].Title != "Test Chat" { 431 | t.Errorf("Chat title not updated, got %s, want %s", store.chats[0].Title, "Test Chat") 432 | } 433 | }) 434 | 435 | // Test various error cases 436 | tests := []struct { 437 | name string 438 | method string 439 | chatID string 440 | messages map[string][]models.Message 441 | err error 442 | titleGenErr bool 443 | wantStatus int 444 | }{ 445 | { 446 | name: "Invalid method", 447 | method: http.MethodGet, 448 | chatID: "1", 449 | wantStatus: http.StatusMethodNotAllowed, 450 | }, 451 | { 452 | name: "Missing chat_id", 453 | method: http.MethodPost, 454 | chatID: "", 455 | wantStatus: http.StatusBadRequest, 456 | }, 457 | { 458 | name: "No messages", 459 | method: http.MethodPost, 460 | chatID: "1", 461 | messages: map[string][]models.Message{"1": {}}, 462 | wantStatus: http.StatusNotFound, 463 | }, 464 | { 465 | name: "No user messages", 466 | method: http.MethodPost, 467 | chatID: "1", 468 | messages: map[string][]models.Message{ 469 | "1": { 470 | { 471 | ID: "msg3", 472 | Role: models.RoleAssistant, 473 | Contents: []models.Content{ 474 | { 475 | Type: models.ContentTypeText, 476 | Text: "Assistant message", 477 | }, 478 | }, 479 | }, 480 | }, 481 | }, 482 | wantStatus: http.StatusInternalServerError, 483 | }, 484 | { 485 | name: "Store error", 486 | method: http.MethodPost, 487 | chatID: "1", 488 | messages: map[string][]models.Message{ 489 | "1": { 490 | { 491 | ID: "msg1", 492 | Role: models.RoleUser, 493 | Contents: []models.Content{ 494 | { 495 | Type: models.ContentTypeText, 496 | Text: "Hello", 497 | }, 498 | }, 499 | }, 500 | }, 501 | }, 502 | err: fmt.Errorf("store error"), 503 | wantStatus: http.StatusInternalServerError, 504 | }, 505 | { 506 | name: "Title generator error", 507 | method: http.MethodPost, 508 | chatID: "1", 509 | messages: map[string][]models.Message{ 510 
| "1": { 511 | { 512 | ID: "msg1", 513 | Role: models.RoleUser, 514 | Contents: []models.Content{ 515 | { 516 | Type: models.ContentTypeText, 517 | Text: "Hello", 518 | }, 519 | }, 520 | }, 521 | }, 522 | }, 523 | titleGenErr: true, 524 | wantStatus: http.StatusInternalServerError, 525 | }, 526 | } 527 | 528 | for _, tt := range tests { 529 | t.Run(tt.name, func(t *testing.T) { 530 | llm := &mockLLM{} 531 | if tt.titleGenErr { 532 | llm = &mockLLM{ 533 | err: fmt.Errorf("title generation failed"), 534 | } 535 | } 536 | 537 | store := &mockStore{ 538 | chats: []models.Chat{{ID: "1", Title: "Old Title"}}, 539 | messages: tt.messages, 540 | err: tt.err, 541 | } 542 | mcpClient := &mockMCPClient{ 543 | serverInfo: mcp.Info{ 544 | Name: "Test Server", 545 | }, 546 | } 547 | 548 | main, err := handlers.NewMain(llm, llm, store, []handlers.MCPClient{mcpClient}, slog.Default()) 549 | if err != nil { 550 | t.Fatal(err) 551 | } 552 | 553 | form := strings.NewReader("chat_id=" + tt.chatID) 554 | req := httptest.NewRequest(tt.method, "/refresh-title", form) 555 | req.Header.Set("Content-Type", "application/x-www-form-urlencoded") 556 | w := httptest.NewRecorder() 557 | 558 | main.HandleRefreshTitle(w, req) 559 | 560 | if w.Code != tt.wantStatus { 561 | t.Errorf("HandleRefreshTitle() status = %v, want %v", w.Code, tt.wantStatus) 562 | } 563 | }) 564 | } 565 | } 566 | 567 | func TestMCPToolInteractions(t *testing.T) { 568 | // Test tool call functionality 569 | llm := &mockLLM{ 570 | responses: []string{"I'll use a tool"}, 571 | } 572 | store := &mockStore{ 573 | messages: map[string][]models.Message{}, 574 | } 575 | 576 | // Setup MCP client with tool support 577 | mcpClient := &mockMCPClient{ 578 | serverInfo: mcp.Info{ 579 | Name: "Test Server", 580 | }, 581 | toolServerSupported: true, 582 | tools: []mcp.Tool{ 583 | {Name: "test_tool"}, 584 | }, 585 | callToolResult: mcp.CallToolResult{ 586 | Content: []mcp.Content{ 587 | { 588 | Type: mcp.ContentTypeText, 589 | Text: "Tool execution result", 590 | }, 591 | }, 592 | IsError: false, 593 | }, 594 | } 595 | 596 | main, err := handlers.NewMain(llm, llm, store, []handlers.MCPClient{mcpClient}, slog.Default()) 597 | if err != nil { 598 | t.Fatal(err) 599 | } 600 | 601 | // Create a chat with a message that will trigger tool call 602 | form := strings.NewReader("message=Use the test_tool") 603 | req := httptest.NewRequest(http.MethodPost, "/chat", form) 604 | req.Header.Set("Content-Type", "application/x-www-form-urlencoded") 605 | w := httptest.NewRecorder() 606 | 607 | main.HandleChats(w, req) 608 | 609 | if w.Code != http.StatusOK { 610 | t.Errorf("HandleChats() status = %v, want %v", w.Code, http.StatusOK) 611 | } 612 | } 613 | 614 | func (m mockLLM) Chat(_ context.Context, _ []models.Message, _ []mcp.Tool) iter.Seq2[models.Content, error] { 615 | return func(yield func(models.Content, error) bool) { 616 | if m.err != nil { 617 | yield(models.Content{}, m.err) 618 | return 619 | } 620 | for _, resp := range m.responses { 621 | if !yield(models.Content{ 622 | Type: models.ContentTypeText, 623 | Text: resp, 624 | }, nil) { 625 | return 626 | } 627 | } 628 | } 629 | } 630 | 631 | func (m mockLLM) GenerateTitle(_ context.Context, _ string) (string, error) { 632 | if m.err != nil { 633 | return "", m.err 634 | } 635 | return "Test Chat", nil 636 | } 637 | 638 | func (m *mockStore) Chats(_ context.Context) ([]models.Chat, error) { 639 | m.Lock() 640 | defer m.Unlock() 641 | if m.err != nil { 642 | return nil, m.err 643 | } 644 | // Return a copy to avoid race 
conditions on the slice 645 | chatsCopy := make([]models.Chat, len(m.chats)) 646 | copy(chatsCopy, m.chats) 647 | return chatsCopy, nil 648 | } 649 | 650 | func (m *mockStore) AddChat(_ context.Context, chat models.Chat) (string, error) { 651 | m.Lock() 652 | defer m.Unlock() 653 | if m.err != nil { 654 | return "", m.err 655 | } 656 | m.chats = append(m.chats, chat) 657 | return chat.ID, nil 658 | } 659 | 660 | func (m *mockStore) UpdateChat(_ context.Context, chat models.Chat) error { 661 | m.Lock() 662 | defer m.Unlock() 663 | idx := slices.IndexFunc(m.chats, func(c models.Chat) bool { return c.ID == chat.ID }) 664 | if idx == -1 { 665 | return fmt.Errorf("chat not found") 666 | } 667 | m.chats[idx] = chat 668 | return m.err 669 | } 670 | 671 | func (m *mockStore) Messages(_ context.Context, chatID string) ([]models.Message, error) { 672 | m.Lock() 673 | defer m.Unlock() 674 | if m.err != nil { 675 | return nil, m.err 676 | } 677 | // Return a copy to avoid race conditions on the slice 678 | messagesCopy := make([]models.Message, len(m.messages[chatID])) 679 | copy(messagesCopy, m.messages[chatID]) 680 | return messagesCopy, nil 681 | } 682 | 683 | func (m *mockStore) AddMessage(_ context.Context, chatID string, msg models.Message) (string, error) { 684 | m.Lock() 685 | defer m.Unlock() 686 | if m.err != nil { 687 | return "", m.err 688 | } 689 | m.messages[chatID] = append(m.messages[chatID], msg) 690 | return msg.ID, nil 691 | } 692 | 693 | func (m *mockStore) UpdateMessage(_ context.Context, chatID string, msg models.Message) error { 694 | m.Lock() 695 | defer m.Unlock() 696 | if m.err != nil { 697 | return m.err 698 | } 699 | 700 | // Find and update the message 701 | for i, existingMsg := range m.messages[chatID] { 702 | if existingMsg.ID == msg.ID { 703 | m.messages[chatID][i] = msg 704 | return nil 705 | } 706 | } 707 | 708 | // If no matching message found, add it 709 | m.messages[chatID] = append(m.messages[chatID], msg) 710 | return nil 711 | } 712 | 713 | func (m *mockMCPClient) ServerInfo() mcp.Info { 714 | return m.serverInfo 715 | } 716 | 717 | func (m *mockMCPClient) ToolServerSupported() bool { 718 | return m.toolServerSupported 719 | } 720 | 721 | func (m *mockMCPClient) ResourceServerSupported() bool { 722 | return m.resourceServerSupported 723 | } 724 | 725 | func (m *mockMCPClient) PromptServerSupported() bool { 726 | return m.promptServerSupported 727 | } 728 | 729 | func (m *mockMCPClient) ListTools(_ context.Context, _ mcp.ListToolsParams) (mcp.ListToolsResult, error) { 730 | if m.err != nil { 731 | return mcp.ListToolsResult{}, m.err 732 | } 733 | return mcp.ListToolsResult{Tools: m.tools}, nil 734 | } 735 | 736 | func (m *mockMCPClient) ListResources(_ context.Context, _ mcp.ListResourcesParams) (mcp.ListResourcesResult, error) { 737 | if m.err != nil { 738 | return mcp.ListResourcesResult{}, m.err 739 | } 740 | return mcp.ListResourcesResult{Resources: m.resources}, nil 741 | } 742 | 743 | func (m *mockMCPClient) ReadResource(_ context.Context, params mcp.ReadResourceParams) (mcp.ReadResourceResult, error) { 744 | if m.err != nil { 745 | return mcp.ReadResourceResult{}, m.err 746 | } 747 | 748 | if m.readResourceFunc != nil { 749 | return m.readResourceFunc(params.URI) 750 | } 751 | 752 | return mcp.ReadResourceResult{ 753 | Contents: []mcp.ResourceContents{ 754 | { 755 | URI: params.URI, 756 | MimeType: "text/plain", 757 | Text: "Mock resource content", 758 | }, 759 | }, 760 | }, nil 761 | } 762 | 763 | func (m *mockMCPClient) ListPrompts(_ context.Context, _ 
mcp.ListPromptsParams) (mcp.ListPromptResult, error) { 764 | if m.err != nil { 765 | return mcp.ListPromptResult{}, m.err 766 | } 767 | return mcp.ListPromptResult{Prompts: m.prompts}, nil 768 | } 769 | 770 | func (m *mockMCPClient) GetPrompt(_ context.Context, _ mcp.GetPromptParams) (mcp.GetPromptResult, error) { 771 | if m.err != nil { 772 | return mcp.GetPromptResult{}, m.err 773 | } 774 | return m.getPromptResult, nil 775 | } 776 | 777 | func (m *mockMCPClient) CallTool(_ context.Context, _ mcp.CallToolParams) (mcp.CallToolResult, error) { 778 | if m.err != nil { 779 | return mcp.CallToolResult{}, m.err 780 | } 781 | return m.callToolResult, nil 782 | } 783 | -------------------------------------------------------------------------------- /internal/models/chat.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "strings" 8 | "time" 9 | 10 | "github.com/MegaGrindStone/go-mcp" 11 | "github.com/yuin/goldmark" 12 | highlighting "github.com/yuin/goldmark-highlighting" 13 | "github.com/yuin/goldmark/extension" 14 | "github.com/yuin/goldmark/renderer/html" 15 | ) 16 | 17 | // Chat represents a conversation container in the chat system. It provides basic identification and 18 | // labeling capabilities for organizing message threads. 19 | type Chat struct { 20 | ID string 21 | Title string 22 | } 23 | 24 | // Message represents an individual communication entry within a chat. It contains the core components 25 | // of a chat message including its unique identifier, the participant's role, the actual content, and 26 | // the precise time when the message was created. 27 | type Message struct { 28 | ID string 29 | Role Role 30 | Contents []Content 31 | Timestamp time.Time 32 | } 33 | 34 | // Content is a message content with its type. 35 | type Content struct { 36 | Type ContentType 37 | 38 | // Text would be filled if Type is ContentTypeText. 39 | Text string 40 | 41 | // ResourceContents would be filled if Type is ContentTypeResource. 42 | ResourceContents []mcp.ResourceContents 43 | 44 | // ToolName would be filled if Type is ContentTypeCallTool. 45 | ToolName string 46 | // ToolInput would be filled if Type is ContentTypeCallTool. 47 | ToolInput json.RawMessage 48 | 49 | // ToolResult would be filled if Type is ContentTypeToolResult. The value would be either tool result or error. 50 | ToolResult json.RawMessage 51 | 52 | // CallToolID would be filled if Type is ContentTypeCallTool or ContentTypeToolResult. 53 | CallToolID string 54 | // CallToolFailed is a flag indicating if the call tool failed. 55 | // This flag would be set to true if the call tool failed and Type is ContentTypeToolResult. 56 | CallToolFailed bool 57 | } 58 | 59 | // Role represents the role of a message participant. 60 | type Role string 61 | 62 | // ContentType represents the type of content in messages. 63 | type ContentType string 64 | 65 | const ( 66 | // RoleUser represents a user message. A message with this role would only contain text or resource content. 67 | RoleUser Role = "user" 68 | // RoleAssistant represents an assistant message. A message with this role would contain 69 | // all types of content but resource. 70 | RoleAssistant Role = "assistant" 71 | 72 | // ContentTypeText represents text content. 73 | ContentTypeText ContentType = "text" 74 | // ContentTypeResource represents a resource content. 
75 | ContentTypeResource ContentType = "resource"
76 | // ContentTypeCallTool represents a call to a tool.
77 | ContentTypeCallTool ContentType = "call_tool"
78 | // ContentTypeToolResult represents the result of a tool call.
79 | ContentTypeToolResult ContentType = "tool_result"
80 | )
81 | 
82 | var mimeTypeToLanguage = map[string]string{
83 | "text/x-go": "go",
84 | "text/golang": "go",
85 | "application/json": "json",
86 | "text/javascript": "javascript",
87 | "text/html": "html",
88 | "text/css": "css",
89 | }
90 | 
91 | // RenderContents renders contents into a markdown string.
92 | func RenderContents(contents []Content) (string, error) {
93 | var sb strings.Builder
94 | for _, content := range contents {
95 | switch content.Type {
96 | case ContentTypeText:
97 | if content.Text == "" {
98 | continue
99 | }
100 | sb.WriteString(content.Text)
101 | case ContentTypeResource:
102 | if len(content.ResourceContents) == 0 {
103 | continue
104 | }
105 | for _, resource := range content.ResourceContents {
106 | sb.WriteString(" \n\n<details>\n")
107 | sb.WriteString(fmt.Sprintf("Resource: %s\n\n", resource.URI))
108 | 
109 | if resource.MimeType != "" {
110 | sb.WriteString(fmt.Sprintf("MIME Type: `%s`\n\n", resource.MimeType))
111 | }
112 | 
113 | if resource.Text != "" {
114 | // Use map for language determination
115 | language := "text"
116 | if lang, exists := mimeTypeToLanguage[resource.MimeType]; exists {
117 | language = lang
118 | }
119 | 
120 | sb.WriteString(fmt.Sprintf("```%s\n%s\n```\n", language, resource.Text))
121 | } else if resource.Blob != "" {
122 | // Handle binary content
123 | if strings.HasPrefix(resource.MimeType, "image/") {
124 | // Display images inline
125 | sb.WriteString(fmt.Sprintf("<img src=\"data:%s;base64,%s\" alt=\"%s\">\n",
126 | resource.MimeType, resource.Blob, resource.URI))
127 | } else {
128 | // Provide download link for other binary content
129 | sb.WriteString(fmt.Sprintf("<a href=\"data:%s;base64,%s\" download=\"%s\">Download %s</a>\n",
130 | resource.MimeType, resource.Blob, resource.URI, resource.URI))
131 | }
132 | }
133 | 
134 | sb.WriteString("\n</details>\n\n")
135 | }
136 | case ContentTypeCallTool:
137 | sb.WriteString(" \n\n<details>\n")
138 | sb.WriteString(fmt.Sprintf("Calling Tool: %s\n\n", content.ToolName))
139 | sb.WriteString("Input:\n")
140 | 
141 | var prettyJSON bytes.Buffer
142 | input := string(content.ToolInput)
143 | if err := json.Indent(&prettyJSON, content.ToolInput, "", " "); err == nil {
144 | input = prettyJSON.String()
145 | }
146 | 
147 | sb.WriteString(fmt.Sprintf("```json\n%s\n```\n", input))
148 | case ContentTypeToolResult:
149 | sb.WriteString("\n\n")
150 | sb.WriteString("Result:\n")
151 | 
152 | var prettyJSON bytes.Buffer
153 | result := string(content.ToolResult)
154 | if err := json.Indent(&prettyJSON, content.ToolResult, "", " "); err == nil {
155 | result = prettyJSON.String()
156 | }
157 | sb.WriteString(fmt.Sprintf("```json\n%s\n```\n", result))
158 | sb.WriteString("\n</details>\n\n")
\n\n") 159 | } 160 | } 161 | md := goldmark.New( 162 | goldmark.WithExtensions( 163 | extension.GFM, 164 | highlighting.NewHighlighting( 165 | highlighting.WithStyle("rose-pine"), 166 | ), 167 | ), 168 | goldmark.WithRendererOptions( 169 | html.WithHardWraps(), // To render newlines. 170 | html.WithUnsafe(), // To render details tag. 171 | ), 172 | ) 173 | 174 | var buf bytes.Buffer 175 | if err := md.Convert([]byte(sb.String()), &buf); err != nil { 176 | return "", fmt.Errorf("failed to convert markdown: %w", err) 177 | } 178 | 179 | return buf.String(), nil 180 | } 181 | 182 | // String returns a string representation of the Content. 183 | // 184 | // The reason for this function is to make sure the json.RawMessage fields of c is 185 | // rendered as a string, make it easier to debug. 186 | func (c Content) String() string { 187 | type content struct { 188 | Type ContentType 189 | Text string 190 | ToolName string 191 | ToolInput string 192 | ToolResult string 193 | CallToolID string 194 | CallToolFailed bool 195 | } 196 | nc := content{ 197 | Type: c.Type, 198 | Text: c.Text, 199 | ToolName: c.ToolName, 200 | ToolInput: string(c.ToolInput), 201 | ToolResult: string(c.ToolResult), 202 | CallToolID: c.CallToolID, 203 | CallToolFailed: c.CallToolFailed, 204 | } 205 | return fmt.Sprintf("%+v", nc) 206 | } 207 | -------------------------------------------------------------------------------- /internal/services/anthropic.go: -------------------------------------------------------------------------------- 1 | package services 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/base64" 7 | "encoding/json" 8 | "errors" 9 | "fmt" 10 | "io" 11 | "iter" 12 | "net/http" 13 | "strings" 14 | 15 | "github.com/MegaGrindStone/go-mcp" 16 | "github.com/MegaGrindStone/mcp-web-ui/internal/models" 17 | "github.com/tmaxmax/go-sse" 18 | ) 19 | 20 | // Anthropic provides an interface to the Anthropic API for large language model interactions. It implements 21 | // the LLM interface and handles streaming chat completions using Claude models. 22 | type Anthropic struct { 23 | apiKey string 24 | model string 25 | maxTokens int 26 | systemPrompt string 27 | 28 | params LLMParameters 29 | 30 | client *http.Client 31 | } 32 | 33 | type anthropicChatRequest struct { 34 | Model string `json:"model"` 35 | Messages []anthropicMessage `json:"messages"` 36 | System string `json:"system"` 37 | MaxTokens int `json:"max_tokens"` 38 | Tools []anthropicTool `json:"tools"` 39 | Stream bool `json:"stream"` 40 | 41 | StopSequences []string `json:"stop_sequences,omitempty"` 42 | Temperature *float32 `json:"temperature,omitempty"` 43 | TopK *int `json:"top_k,omitempty"` 44 | TopP *float32 `json:"top_p,omitempty"` 45 | } 46 | 47 | type anthropicMessage struct { 48 | Role string `json:"role"` 49 | Content []anthropicMessageContent `json:"content"` 50 | } 51 | 52 | type anthropicMessageContent struct { 53 | Type string `json:"type"` 54 | 55 | // For text type. 56 | Text string `json:"text,omitempty"` 57 | 58 | // For image and document type. 59 | Source *anthropicResourceContent `json:"source,omitempty"` 60 | 61 | // For tool_use type. 62 | ID string `json:"id,omitempty"` 63 | Name string `json:"name,omitempty"` 64 | Input json.RawMessage `json:"input,omitempty"` 65 | 66 | // For tool_result type. 
67 | ToolUseID string `json:"tool_use_id,omitempty"` 68 | Content json.RawMessage `json:"content,omitempty"` 69 | IsError bool `json:"is_error,omitempty"` 70 | } 71 | 72 | type anthropicResourceContent struct { 73 | Type string `json:"type"` 74 | MediaType string `json:"media_type"` 75 | Data string `json:"data"` 76 | } 77 | 78 | type anthropicContentBlockStart struct { 79 | Type string 80 | ContentBlock struct { 81 | Type string `json:"type"` 82 | ID string `json:"id"` 83 | Name string `json:"name"` 84 | Input json.RawMessage `json:"input"` 85 | } `json:"content_block"` 86 | } 87 | 88 | type anthropicContentBlockDelta struct { 89 | Type string `json:"type"` 90 | Delta struct { 91 | Type string `json:"type"` 92 | Text string `json:"text"` 93 | PartialJSON string `json:"partial_json"` 94 | } `json:"delta"` 95 | } 96 | 97 | type anthropicError struct { 98 | Type string `json:"type"` 99 | Error struct { 100 | Type string `json:"type"` 101 | Message string `json:"message"` 102 | } `json:"error"` 103 | } 104 | 105 | type anthropicTool struct { 106 | Name string `json:"name"` 107 | Description string `json:"description"` 108 | InputSchema json.RawMessage `json:"input_schema"` 109 | } 110 | 111 | const ( 112 | anthropicAPIEndpoint = "https://api.anthropic.com/v1" 113 | ) 114 | 115 | // NewAnthropic creates a new Anthropic instance with the specified API key, model name, and maximum 116 | // token limit. It initializes an HTTP client for API communication and returns a configured Anthropic 117 | // instance ready for chat interactions. 118 | func NewAnthropic(apiKey, model, systemPrompt string, maxTokens int, params LLMParameters) Anthropic { 119 | return Anthropic{ 120 | apiKey: apiKey, 121 | model: model, 122 | maxTokens: maxTokens, 123 | systemPrompt: systemPrompt, 124 | params: params, 125 | client: &http.Client{}, 126 | } 127 | } 128 | 129 | // Chat streams responses from the Anthropic API for a given sequence of messages. It processes system 130 | // messages separately and returns an iterator that yields response chunks and potential errors. The 131 | // context can be used to cancel ongoing requests. Refer to models.Message for message structure details. 
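Before the implementation, it may help to see how a caller drains this iterator. The following is a minimal sketch; the `llm`, `history`, and `tools` values are hypothetical stand-ins, not part of this file:

```go
// Hypothetical caller of Anthropic.Chat; assumes llm was built with NewAnthropic.
for content, err := range llm.Chat(ctx, history, tools) {
	if err != nil {
		log.Printf("stream error: %v", err) // the stream ends after an error
		break
	}
	switch content.Type {
	case models.ContentTypeText:
		fmt.Print(content.Text) // an incremental text delta
	case models.ContentTypeCallTool:
		// A complete tool call; ToolInput holds the accumulated JSON arguments.
		fmt.Printf("\ncall %s with %s\n", content.ToolName, content.ToolInput)
	}
}
```

Because `Chat` returns an `iter.Seq2`, a plain `range` loop (Go 1.23+) is all a caller needs; returning `false` from the loop body by breaking stops the underlying request.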
132 | func (a Anthropic) Chat( 133 | ctx context.Context, 134 | messages []models.Message, 135 | tools []mcp.Tool, 136 | ) iter.Seq2[models.Content, error] { 137 | return func(yield func(models.Content, error) bool) { 138 | resp, err := a.doRequest(ctx, messages, tools, true) 139 | if err != nil { 140 | if errors.Is(err, context.Canceled) { 141 | return 142 | } 143 | yield(models.Content{}, fmt.Errorf("error sending request: %w", err)) 144 | return 145 | } 146 | defer resp.Body.Close() 147 | 148 | isToolUse := false 149 | inputJSON := "" 150 | toolContent := models.Content{ 151 | Type: models.ContentTypeCallTool, 152 | } 153 | for ev, err := range sse.Read(resp.Body, nil) { 154 | if err != nil { 155 | yield(models.Content{}, fmt.Errorf("error reading response: %w", err)) 156 | return 157 | } 158 | switch ev.Type { 159 | case "error": 160 | var e anthropicError 161 | if err := json.Unmarshal([]byte(ev.Data), &e); err != nil { 162 | yield(models.Content{}, fmt.Errorf("error unmarshaling error: %w", err)) 163 | return 164 | } 165 | yield(models.Content{}, fmt.Errorf("anthropic error %s: %s", e.Error.Type, e.Error.Message)) 166 | return 167 | case "message_stop": 168 | return 169 | case "content_block_start": 170 | var res anthropicContentBlockStart 171 | if err := json.Unmarshal([]byte(ev.Data), &res); err != nil { 172 | yield(models.Content{}, fmt.Errorf("error unmarshaling block start: %w", err)) 173 | return 174 | } 175 | if res.ContentBlock.Type != "tool_use" { 176 | continue 177 | } 178 | isToolUse = true 179 | toolContent.ToolName = res.ContentBlock.Name 180 | toolContent.CallToolID = res.ContentBlock.ID 181 | case "content_block_delta": 182 | var res anthropicContentBlockDelta 183 | if err := json.Unmarshal([]byte(ev.Data), &res); err != nil { 184 | yield(models.Content{}, fmt.Errorf("error unmarshaling block delta: %w", err)) 185 | return 186 | } 187 | if isToolUse { 188 | inputJSON += res.Delta.PartialJSON 189 | continue 190 | } 191 | if !yield(models.Content{ 192 | Type: models.ContentTypeText, 193 | Text: res.Delta.Text, 194 | }, nil) { 195 | return 196 | } 197 | case "content_block_stop": 198 | if !isToolUse { 199 | continue 200 | } 201 | 202 | if inputJSON == "" { 203 | inputJSON = "{}" 204 | } 205 | toolContent.ToolInput = json.RawMessage(inputJSON) 206 | if !yield(toolContent, nil) { 207 | return 208 | } 209 | isToolUse = false 210 | inputJSON = "" 211 | default: 212 | } 213 | } 214 | } 215 | } 216 | 217 | // GenerateTitle generates a title for a given message using the Anthropic API. It sends a single message to the 218 | // Anthropic API and returns the first response content as the title. The context can be used to cancel ongoing 219 | // requests. 
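A caller-side sketch of the method below; the fallback behavior is hypothetical, not something this package prescribes:

```go
// Hypothetical usage of GenerateTitle for a freshly created chat.
title, err := llm.GenerateTitle(ctx, firstUserMessage)
if err != nil {
	title = "New Chat" // fall back rather than failing chat creation
}
chat.Title = title
```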
220 | func (a Anthropic) GenerateTitle(ctx context.Context, message string) (string, error) {
221 | messages := []models.Message{
222 | {
223 | Role: "user",
224 | Contents: []models.Content{
225 | {
226 | Type: models.ContentTypeText,
227 | Text: message,
228 | },
229 | },
230 | },
231 | }
232 | resp, err := a.doRequest(ctx, messages, nil, false)
233 | if err != nil {
234 | return "", fmt.Errorf("error sending request: %w", err)
235 | }
236 | defer resp.Body.Close()
237 | 
238 | if resp.StatusCode != http.StatusOK {
239 | body, _ := io.ReadAll(resp.Body)
240 | return "", fmt.Errorf("unexpected status code: %d, body: %s", resp.StatusCode, string(body))
241 | }
242 | 
243 | var msg anthropicMessage
244 | if err := json.NewDecoder(resp.Body).Decode(&msg); err != nil {
245 | return "", fmt.Errorf("error decoding response: %w", err)
246 | }
247 | 
248 | if len(msg.Content) == 0 {
249 | return "", errors.New("empty response content")
250 | }
251 | 
252 | return msg.Content[0].Text, nil
253 | }
254 | 
255 | func (a Anthropic) doRequest(
256 | ctx context.Context,
257 | messages []models.Message,
258 | tools []mcp.Tool,
259 | stream bool,
260 | ) (*http.Response, error) {
261 | msgs, err := a.convertMessages(messages)
262 | if err != nil {
263 | return nil, err
264 | }
265 | 
266 | aTools := make([]anthropicTool, len(tools))
267 | for i, tool := range tools {
268 | aTools[i] = anthropicTool{
269 | Name: tool.Name,
270 | Description: tool.Description,
271 | InputSchema: tool.InputSchema,
272 | }
273 | }
274 | 
275 | // Filter out invalid stop sequences by trimming whitespace,
276 | // because Anthropic doesn't support whitespace in stop sequences.
277 | var validStopSequences []string
278 | if a.params.Stop != nil {
279 | for _, seq := range a.params.Stop {
280 | // Trim all whitespace and check if anything remains
281 | trimmed := strings.TrimSpace(seq)
282 | if trimmed != "" {
283 | validStopSequences = append(validStopSequences, trimmed)
284 | }
285 | }
286 | }
287 | 
288 | reqBody := anthropicChatRequest{
289 | Model: a.model,
290 | Messages: msgs,
291 | System: a.systemPrompt,
292 | MaxTokens: a.maxTokens,
293 | Tools: aTools,
294 | Stream: stream,
295 | 
296 | StopSequences: validStopSequences,
297 | Temperature: a.params.Temperature,
298 | TopK: a.params.TopK,
299 | TopP: a.params.TopP,
300 | }
301 | 
302 | jsonBody, err := json.Marshal(reqBody)
303 | if err != nil {
304 | return nil, fmt.Errorf("error marshaling request: %w", err)
305 | }
306 | 
307 | req, err := http.NewRequestWithContext(ctx, http.MethodPost,
308 | anthropicAPIEndpoint+"/messages", bytes.NewBuffer(jsonBody))
309 | if err != nil {
310 | return nil, fmt.Errorf("error creating request: %w", err)
311 | }
312 | 
313 | req.Header.Set("Content-Type", "application/json")
314 | req.Header.Set("x-api-key", a.apiKey)
315 | req.Header.Set("anthropic-version", "2023-06-01")
316 | 
317 | resp, err := a.client.Do(req)
318 | if err != nil {
319 | return nil, err
320 | }
321 | if resp.StatusCode != http.StatusOK {
322 | body, _ := io.ReadAll(resp.Body); resp.Body.Close() // close the body; it is not returned on this path
323 | return nil, fmt.Errorf("unexpected status code: %d, body: %s, request: %s", resp.StatusCode, string(body), jsonBody)
324 | }
325 | 
326 | return resp, nil
327 | }
328 | 
329 | func (a Anthropic) convertMessages(messages []models.Message) ([]anthropicMessage, error) {
330 | var msgs []anthropicMessage
331 | 
332 | for _, msg := range messages {
333 | if msg.Role == models.RoleUser {
334 | userMsg, err := a.processUserMessage(msg)
335 | if err != nil {
336 | return nil, err
337 | }
338 | msgs =
append(msgs, userMsg) 339 | continue 340 | } 341 | 342 | otherMsgs, err := a.processOtherRoleMessage(msg) 343 | if err != nil { 344 | return nil, err 345 | } 346 | msgs = append(msgs, otherMsgs...) 347 | } 348 | 349 | return msgs, nil 350 | } 351 | 352 | func (a Anthropic) processUserMessage(msg models.Message) (anthropicMessage, error) { 353 | contents := make([]anthropicMessageContent, 0, len(msg.Contents)) 354 | 355 | for _, ct := range msg.Contents { 356 | switch ct.Type { 357 | case models.ContentTypeText: 358 | if ct.Text != "" { 359 | contents = append(contents, anthropicMessageContent{ 360 | Type: "text", 361 | Text: ct.Text, 362 | }) 363 | } 364 | case models.ContentTypeResource: 365 | contents = append(contents, a.processResourceContents(ct.ResourceContents)...) 366 | case models.ContentTypeCallTool, models.ContentTypeToolResult: 367 | return anthropicMessage{}, fmt.Errorf("content type %s is not supported for user messages", ct.Type) 368 | } 369 | } 370 | 371 | return anthropicMessage{ 372 | Role: string(msg.Role), 373 | Content: contents, 374 | }, nil 375 | } 376 | 377 | func (a Anthropic) processOtherRoleMessage(msg models.Message) ([]anthropicMessage, error) { 378 | var msgs []anthropicMessage 379 | contents := make([]anthropicMessageContent, 0, len(msg.Contents)) 380 | 381 | for _, ct := range msg.Contents { 382 | switch ct.Type { 383 | case models.ContentTypeText: 384 | if ct.Text != "" { 385 | contents = append(contents, anthropicMessageContent{ 386 | Type: "text", 387 | Text: ct.Text, 388 | }) 389 | } 390 | case models.ContentTypeCallTool: 391 | contents = append(contents, anthropicMessageContent{ 392 | Type: "tool_use", 393 | ID: ct.CallToolID, 394 | Name: ct.ToolName, 395 | Input: ct.ToolInput, 396 | }) 397 | msgs = append(msgs, anthropicMessage{ 398 | Role: string(msg.Role), 399 | Content: contents, 400 | }) 401 | contents = make([]anthropicMessageContent, 0, len(msg.Contents)) 402 | case models.ContentTypeToolResult: 403 | msgs = append(msgs, anthropicMessage{ 404 | Role: "user", 405 | Content: []anthropicMessageContent{ 406 | { 407 | Type: "tool_result", 408 | ToolUseID: ct.CallToolID, 409 | IsError: ct.CallToolFailed, 410 | Content: ct.ToolResult, 411 | }, 412 | }, 413 | }) 414 | case models.ContentTypeResource: 415 | return nil, fmt.Errorf("content type %s is not supported for assistant messages", ct.Type) 416 | } 417 | } 418 | 419 | if len(contents) > 0 { 420 | msgs = append(msgs, anthropicMessage{ 421 | Role: string(msg.Role), 422 | Content: contents, 423 | }) 424 | } 425 | 426 | return msgs, nil 427 | } 428 | 429 | func (a Anthropic) processResourceContents(resources []mcp.ResourceContents) []anthropicMessageContent { 430 | var contents []anthropicMessageContent 431 | 432 | for _, resource := range resources { 433 | switch { 434 | case strings.HasPrefix(resource.MimeType, "image/"): 435 | blobData := resource.Blob 436 | if !isBase64(blobData) { 437 | blobData = base64.StdEncoding.EncodeToString([]byte(blobData)) 438 | } 439 | contents = append(contents, anthropicMessageContent{ 440 | Type: "image", 441 | Source: &anthropicResourceContent{ 442 | Type: "base64", 443 | MediaType: resource.MimeType, 444 | Data: blobData, 445 | }, 446 | }) 447 | case resource.MimeType == "application/pdf": 448 | blobData := resource.Blob 449 | if !isBase64(blobData) { 450 | blobData = base64.StdEncoding.EncodeToString([]byte(blobData)) 451 | } 452 | contents = append(contents, anthropicMessageContent{ 453 | Type: "document", 454 | Source: &anthropicResourceContent{ 455 | Type: 
"base64", 456 | MediaType: resource.MimeType, 457 | Data: blobData, 458 | }, 459 | }) 460 | default: 461 | // Anthropic only supports images and PDFs, so treat others as text 462 | data := resource.Text 463 | if data == "" { 464 | data = resource.Blob 465 | } 466 | contents = append(contents, anthropicMessageContent{ 467 | Type: "text", 468 | Text: fmt.Sprintf("[Document of type %s]\n%s", resource.MimeType, data), 469 | }) 470 | } 471 | } 472 | 473 | return contents 474 | } 475 | -------------------------------------------------------------------------------- /internal/services/bolt.go: -------------------------------------------------------------------------------- 1 | package services 2 | 3 | import ( 4 | "cmp" 5 | "context" 6 | "encoding/json" 7 | "fmt" 8 | "slices" 9 | "strconv" 10 | "strings" 11 | 12 | "github.com/MegaGrindStone/mcp-web-ui/internal/models" 13 | bolt "go.etcd.io/bbolt" 14 | ) 15 | 16 | // BoltDB implements the Store interface using a BoltDB backend for persistent storage of chats and 17 | // messages. It provides atomic operations for managing chat histories and their associated messages 18 | // through a key-value storage model. 19 | type BoltDB struct { 20 | db *bolt.DB 21 | } 22 | 23 | // NewBoltDB creates a new BoltDB instance with the specified file path. It initializes the database 24 | // with required buckets and returns an error if the database cannot be opened or initialized. The 25 | // database file is created with 0600 permissions if it doesn't exist. 26 | func NewBoltDB(path string) (BoltDB, error) { 27 | db, err := bolt.Open(path, 0600, nil) 28 | if err != nil { 29 | return BoltDB{}, fmt.Errorf("failed to open bolt db: %w", err) 30 | } 31 | 32 | err = db.Update(func(tx *bolt.Tx) error { 33 | _, err := tx.CreateBucketIfNotExists([]byte("chats")) 34 | return err 35 | }) 36 | 37 | return BoltDB{db: db}, err 38 | } 39 | 40 | func messageBucketName(chatID string) []byte { 41 | return []byte(fmt.Sprintf("chat-%s", chatID)) 42 | } 43 | 44 | // Chats retrieves all stored chat records from the database in reverse chronological order. It 45 | // returns a slice of Chat models or an error if the database operation fails. 46 | func (b BoltDB) Chats(context.Context) ([]models.Chat, error) { 47 | var chats []models.Chat 48 | err := b.db.View(func(tx *bolt.Tx) error { 49 | b := tx.Bucket([]byte("chats")) 50 | if b == nil { 51 | return nil 52 | } 53 | 54 | return b.ForEach(func(_, v []byte) error { 55 | var chat models.Chat 56 | if err := json.Unmarshal(v, &chat); err != nil { 57 | return fmt.Errorf("failed to unmarshal chat: %w", err) 58 | } 59 | chats = append(chats, chat) 60 | return nil 61 | }) 62 | }) 63 | if err != nil { 64 | return nil, err 65 | } 66 | slices.SortFunc(chats, func(a, b models.Chat) int { 67 | aID := 0 68 | bID := 0 69 | 70 | aIDArr := strings.Split(a.ID, "-") 71 | if len(aIDArr) > 1 { 72 | aID, _ = strconv.Atoi(aIDArr[0]) 73 | } 74 | bIDArr := strings.Split(b.ID, "-") 75 | if len(bIDArr) > 1 { 76 | bID, _ = strconv.Atoi(bIDArr[0]) 77 | } 78 | 79 | return cmp.Compare(bID, aID) 80 | }) 81 | return chats, nil 82 | } 83 | 84 | // AddChat stores a new chat record in the database and creates an associated message bucket. It 85 | // generates a unique ID for the chat by combining a sequence number with the chat's original ID, 86 | // and returns the new ID or an error if the operation fails. 
87 | func (b BoltDB) AddChat(_ context.Context, chat models.Chat) (string, error) { 88 | var newID string 89 | err := b.db.Update(func(tx *bolt.Tx) error { 90 | b := tx.Bucket([]byte("chats")) 91 | if b == nil { 92 | return nil 93 | } 94 | 95 | idPrefix, err := b.NextSequence() 96 | if err != nil { 97 | return fmt.Errorf("failed to get next sequence: %w", err) 98 | } 99 | newID = fmt.Sprintf("%d-%s", idPrefix, chat.ID) 100 | chat.ID = newID 101 | 102 | _, err = tx.CreateBucketIfNotExists(messageBucketName(chat.ID)) 103 | if err != nil { 104 | return fmt.Errorf("failed to create message bucket: %w", err) 105 | } 106 | 107 | v, err := json.Marshal(chat) 108 | if err != nil { 109 | return fmt.Errorf("failed to marshal chat: %w", err) 110 | } 111 | 112 | return b.Put([]byte(newID), v) 113 | }) 114 | 115 | return newID, err 116 | } 117 | 118 | // UpdateChat modifies an existing chat record in the database. If the chat doesn't exist, the 119 | // operation is silently ignored. Returns an error if the marshaling or database operation fails. 120 | func (b BoltDB) UpdateChat(_ context.Context, chat models.Chat) error { 121 | return b.db.Update(func(tx *bolt.Tx) error { 122 | b := tx.Bucket([]byte("chats")) 123 | if b == nil { 124 | return nil 125 | } 126 | 127 | v := b.Get([]byte(chat.ID)) 128 | if v == nil { 129 | return nil 130 | } 131 | 132 | v, err := json.Marshal(chat) 133 | if err != nil { 134 | return fmt.Errorf("failed to marshal chat: %w", err) 135 | } 136 | 137 | return b.Put([]byte(chat.ID), v) 138 | }) 139 | } 140 | 141 | // Messages retrieves all messages associated with the specified chat ID. It returns the messages 142 | // in their stored order or an error if the database operation fails. 143 | func (b BoltDB) Messages(_ context.Context, chatID string) ([]models.Message, error) { 144 | var messages []models.Message 145 | err := b.db.View(func(tx *bolt.Tx) error { 146 | b := tx.Bucket(messageBucketName(chatID)) 147 | if b == nil { 148 | return nil 149 | } 150 | 151 | return b.ForEach(func(_, v []byte) error { 152 | var message models.Message 153 | if err := json.Unmarshal(v, &message); err != nil { 154 | return fmt.Errorf("failed to unmarshal message: %w", err) 155 | } 156 | messages = append(messages, message) 157 | return nil 158 | }) 159 | }) 160 | if err != nil { 161 | return nil, err 162 | } 163 | slices.SortFunc(messages, func(a, b models.Message) int { 164 | aID := 0 165 | bID := 0 166 | 167 | aIDArr := strings.Split(a.ID, "-") 168 | if len(aIDArr) > 1 { 169 | aID, _ = strconv.Atoi(aIDArr[0]) 170 | } 171 | bIDArr := strings.Split(b.ID, "-") 172 | if len(bIDArr) > 1 { 173 | bID, _ = strconv.Atoi(bIDArr[0]) 174 | } 175 | 176 | return cmp.Compare(aID, bID) 177 | }) 178 | return messages, nil 179 | } 180 | 181 | // AddMessage stores a new message in the specified chat's message bucket. It generates a unique 182 | // ID for the message by combining a sequence number with the message's original ID, and returns 183 | // the new ID or an error if the operation fails. 
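One subtlety worth noting before the implementation: bbolt iterates keys in byte order, so the numeric re-sort done by Messages above is not redundant. A small illustration with made-up keys:

```go
// Byte-wise key order, which is how bbolt returns sequence-prefixed keys:
keys := []string{"1-a", "10-b", "2-c"}
sort.Strings(keys) // ["1-a", "10-b", "2-c"]; "10" sorts before "2"
// Messages therefore parses the numeric prefix and re-sorts, recovering
// the true insertion order: 1-a, 2-c, 10-b.
```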
184 | func (b BoltDB) AddMessage(_ context.Context, chatID string, message models.Message) (string, error) {
185 | var newID string
186 | err := b.db.Update(func(tx *bolt.Tx) error {
187 | b := tx.Bucket(messageBucketName(chatID))
188 | if b == nil {
189 | return nil
190 | }
191 | 
192 | idPrefix, err := b.NextSequence()
193 | if err != nil {
194 | return fmt.Errorf("failed to get next sequence: %w", err)
195 | }
196 | newID = fmt.Sprintf("%d-%s", idPrefix, message.ID)
197 | message.ID = newID
198 | 
199 | v, err := json.Marshal(message)
200 | if err != nil {
201 | return fmt.Errorf("failed to marshal message: %w", err)
202 | }
203 | 
204 | return b.Put([]byte(newID), v)
205 | })
206 | 
207 | return newID, err
208 | }
209 | 
210 | // UpdateMessage writes a message to the specified chat's message bucket, inserting it if the
211 | // ID isn't present yet. If the chat's bucket doesn't exist, the operation is silently ignored.
212 | // Returns an error if the marshaling or database operation fails.
213 | func (b BoltDB) UpdateMessage(_ context.Context, chatID string, message models.Message) error {
214 | return b.db.Update(func(tx *bolt.Tx) error {
215 | b := tx.Bucket(messageBucketName(chatID))
216 | if b == nil {
217 | return nil
218 | }
219 | 
220 | msgID := message.ID
221 | 
222 | v, err := json.Marshal(message)
223 | if err != nil {
224 | return fmt.Errorf("failed to marshal message: %w", err)
225 | }
226 | 
227 | return b.Put([]byte(msgID), v)
228 | })
229 | }
230 | 
--------------------------------------------------------------------------------
/internal/services/helper.go:
--------------------------------------------------------------------------------
1 | package services
2 | 
3 | import "encoding/base64"
4 | 
5 | func isBase64(s string) bool {
6 | _, err := base64.StdEncoding.DecodeString(s)
7 | return err == nil
8 | }
9 | 
--------------------------------------------------------------------------------
/internal/services/ollama.go:
--------------------------------------------------------------------------------
1 | package services
2 | 
3 | import (
4 | "context"
5 | "encoding/base64"
6 | "encoding/json"
7 | "errors"
8 | "fmt"
9 | "iter"
10 | "log/slog"
11 | "net/http"
12 | "net/url"
13 | "slices"
14 | "strings"
15 | 
16 | "github.com/MegaGrindStone/go-mcp"
17 | "github.com/MegaGrindStone/mcp-web-ui/internal/models"
18 | "github.com/ollama/ollama/api"
19 | )
20 | 
21 | // Ollama provides an implementation of the LLM interface for interacting with Ollama's language models.
22 | // It manages connections to an Ollama server instance and handles streaming chat completions.
23 | type Ollama struct {
24 | host string
25 | model string
26 | systemPrompt string
27 | 
28 | params LLMParameters
29 | 
30 | client *api.Client
31 | 
32 | logger *slog.Logger
33 | }
34 | 
35 | // NewOllama creates a new Ollama instance with the specified host URL and model name. The host
36 | // parameter should be a valid URL pointing to an Ollama server. If the provided host URL is invalid,
37 | // the function will panic.
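A construction sketch for the constructor below; the host and model are placeholders, and in the real app they come from configuration:

```go
// Hypothetical setup; note that an unparseable host URL makes NewOllama panic.
logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
llm := NewOllama("http://localhost:11434", "llama3.1",
	"You are a helpful assistant.", LLMParameters{}, logger)
```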
38 | func NewOllama(host, model, systemPrompt string, params LLMParameters, logger *slog.Logger) Ollama { 39 | u, err := url.Parse(host) 40 | if err != nil { 41 | panic(err) 42 | } 43 | 44 | return Ollama{ 45 | host: host, 46 | model: model, 47 | systemPrompt: systemPrompt, 48 | params: params, 49 | client: api.NewClient(u, &http.Client{}), 50 | logger: logger.With(slog.String("module", "ollama")), 51 | } 52 | } 53 | 54 | func ollamaMessages(messages []models.Message) ([]api.Message, error) { 55 | msgs := make([]api.Message, 0, len(messages)) 56 | for _, msg := range messages { 57 | if msg.Role == models.RoleUser { 58 | // Process user messages with potential multiple contents 59 | content := "" 60 | var images []api.ImageData 61 | 62 | for _, ct := range msg.Contents { 63 | switch ct.Type { 64 | case models.ContentTypeText: 65 | content += ct.Text 66 | case models.ContentTypeResource: 67 | // Process resources (extract images, convert others to text) 68 | textContent, extractedImages, err := processResourceContentsForOllama(ct.ResourceContents) 69 | if err != nil { 70 | return nil, err 71 | } 72 | content += textContent 73 | images = append(images, extractedImages...) 74 | case models.ContentTypeCallTool, models.ContentTypeToolResult: 75 | return nil, fmt.Errorf("content type %s is not supported for user messages", ct.Type) 76 | } 77 | } 78 | 79 | // Create user message with combined content and images 80 | msgs = append(msgs, api.Message{ 81 | Role: string(msg.Role), 82 | Content: content, 83 | Images: images, 84 | }) 85 | continue 86 | } 87 | 88 | for _, ct := range msg.Contents { 89 | switch ct.Type { 90 | case models.ContentTypeText: 91 | if ct.Text != "" { 92 | msgs = append(msgs, api.Message{ 93 | Role: string(msg.Role), 94 | Content: ct.Text, 95 | }) 96 | } 97 | case models.ContentTypeCallTool: 98 | args := make(map[string]any) 99 | if err := json.Unmarshal(ct.ToolInput, &args); err != nil { 100 | return nil, fmt.Errorf("error unmarshaling tool input: %w", err) 101 | } 102 | msgs = append(msgs, api.Message{ 103 | Role: string(msg.Role), 104 | ToolCalls: []api.ToolCall{ 105 | { 106 | Function: api.ToolCallFunction{ 107 | Name: ct.ToolName, 108 | Arguments: args, 109 | }, 110 | }, 111 | }, 112 | }) 113 | case models.ContentTypeToolResult: 114 | msgs = append(msgs, api.Message{ 115 | Role: "tool", 116 | Content: string(ct.ToolResult), 117 | }) 118 | case models.ContentTypeResource: 119 | return nil, fmt.Errorf("content type %s is not supported for assistant messages", ct.Type) 120 | } 121 | } 122 | } 123 | return msgs, nil 124 | } 125 | 126 | func processResourceContentsForOllama(resources []mcp.ResourceContents) (string, []api.ImageData, error) { 127 | var textContent string 128 | var images []api.ImageData 129 | 130 | for _, resource := range resources { 131 | switch { 132 | case strings.HasPrefix(resource.MimeType, "image/"): 133 | // Process images - convert to ImageData for Ollama 134 | imageData, err := processImageForOllama(resource) 135 | if err != nil { 136 | return "", nil, err 137 | } 138 | images = append(images, imageData) 139 | default: 140 | // Convert other resources to text descriptions 141 | description := convertResourceToText(resource) 142 | if textContent != "" && description != "" { 143 | textContent += "\n\n" 144 | } 145 | textContent += description 146 | } 147 | } 148 | 149 | return textContent, images, nil 150 | } 151 | 152 | func processImageForOllama(resource mcp.ResourceContents) (api.ImageData, error) { 153 | // If blob is already binary data, use it 
directly
154 | if !isBase64(resource.Blob) {
155 | return api.ImageData(resource.Blob), nil
156 | }
157 | 
158 | // Decode base64 data
159 | decodedData, err := base64.StdEncoding.DecodeString(resource.Blob)
160 | if err != nil {
161 | return nil, fmt.Errorf("error decoding base64 image: %w", err)
162 | }
163 | 
164 | return api.ImageData(decodedData), nil
165 | }
166 | 
167 | func convertResourceToText(resource mcp.ResourceContents) string {
168 | if resource.Text != "" {
169 | return fmt.Sprintf("[Document of type %s]\n%s", resource.MimeType, resource.Text)
170 | }
171 | 
172 | // For binary data that isn't an image (e.g., PDF), provide base64 data
173 | if resource.Blob != "" {
174 | data := resource.Blob
175 | if !isBase64(resource.Blob) {
176 | data = base64.StdEncoding.EncodeToString([]byte(resource.Blob))
177 | }
178 | return fmt.Sprintf("[Document of type %s]\n%s", resource.MimeType, data)
179 | }
180 | 
181 | return ""
182 | }
183 | 
184 | // Chat implements the LLM interface by streaming responses from the Ollama model. It accepts a context
185 | // for cancellation and a slice of messages representing the conversation history. The function returns
186 | // an iterator that yields response contents and potential errors. The response is streamed
187 | // incrementally, allowing for real-time processing of model outputs.
188 | func (o Ollama) Chat(
189 | ctx context.Context,
190 | messages []models.Message,
191 | tools []mcp.Tool,
192 | ) iter.Seq2[models.Content, error] {
193 | return func(yield func(models.Content, error) bool) {
194 | msgs, err := ollamaMessages(messages)
195 | if err != nil {
196 | yield(models.Content{}, fmt.Errorf("error creating ollama messages: %w", err))
197 | return
198 | }
199 | 
200 | msgs = slices.Insert(msgs, 0, api.Message{
201 | Role: "system",
202 | Content: o.systemPrompt,
203 | })
204 | 
205 | oTools := make([]api.Tool, len(tools))
206 | for i, tool := range tools {
207 | var params struct {
208 | Type string `json:"type"`
209 | Required []string `json:"required"`
210 | Properties map[string]struct {
211 | Type string `json:"type"`
212 | Description string `json:"description"`
213 | Enum []string `json:"enum,omitempty"`
214 | } `json:"properties"`
215 | }
216 | if err := json.Unmarshal([]byte(tool.InputSchema), &params); err != nil {
217 | yield(models.Content{}, fmt.Errorf("error unmarshaling tool input schema: %w", err))
218 | return
219 | }
220 | oTool := api.Tool{
221 | Type: "function",
222 | Function: api.ToolFunction{
223 | Name: tool.Name,
224 | Description: tool.Description,
225 | Parameters: params,
226 | },
227 | }
228 | 
229 | if err := json.Unmarshal([]byte(tool.InputSchema), &oTool.Function.Parameters); err != nil {
230 | yield(models.Content{}, fmt.Errorf("error unmarshaling tool input schema: %w", err))
231 | return
232 | }
233 | oTools[i] = oTool
234 | }
235 | 
236 | req := o.chatRequest(msgs, oTools, true)
237 | 
238 | reqJSON, err := json.Marshal(req)
239 | if err == nil {
240 | o.logger.Debug("Request", slog.String("req", string(reqJSON)))
241 | }
242 | 
243 | ctx, cancel := context.WithCancel(ctx)
244 | defer cancel()
245 | 
246 | if err := o.client.Chat(ctx, &req, func(res api.ChatResponse) error {
247 | if res.Message.Content != "" {
248 | if !yield(models.Content{
249 | Type: models.ContentTypeText,
250 | Text: res.Message.Content,
251 | }, nil) {
252 | cancel()
253 | return nil
254 | }
255 | }
256 | if len(res.Message.ToolCalls) > 0 {
257 | args, err := json.Marshal(res.Message.ToolCalls[0].Function.Arguments)
258 | if err != nil {
259 | return fmt.Errorf("error marshaling tool arguments: %w", err)
260 | }
261 | if len(res.Message.ToolCalls) > 1 {
262 | o.logger.Warn("Received multiple tool calls, but only the first one is supported",
263 | slog.Int("count", len(res.Message.ToolCalls)),
264 | slog.String("toolCalls", fmt.Sprintf("%+v", res.Message.ToolCalls)),
265 | )
266 | }
267 | if !yield(models.Content{
268 | Type: models.ContentTypeCallTool,
269 | ToolName: res.Message.ToolCalls[0].Function.Name,
270 | ToolInput: args,
271 | }, nil) {
272 | cancel()
273 | }
274 | }
275 | return nil
276 | }); err != nil {
277 | if errors.Is(err, context.Canceled) {
278 | return
279 | }
280 | yield(models.Content{}, fmt.Errorf("error sending request: %w", err))
281 | return
282 | }
283 | }
284 | }
285 | 
286 | // GenerateTitle generates a title for a given message using the Ollama API. It sends a single message to the
287 | // Ollama API and returns the first response content as the title. The context can be used to cancel ongoing
288 | // requests.
289 | func (o Ollama) GenerateTitle(ctx context.Context, message string) (string, error) {
290 | msgs := []api.Message{
291 | {
292 | Role: "system",
293 | Content: o.systemPrompt,
294 | },
295 | {
296 | Role: "user",
297 | Content: message,
298 | },
299 | }
300 | 
301 | req := o.chatRequest(msgs, nil, false)
302 | 
303 | var title string
304 | 
305 | if err := o.client.Chat(ctx, &req, func(res api.ChatResponse) error {
306 | title = res.Message.Content
307 | return nil
308 | }); err != nil {
309 | return "", fmt.Errorf("error sending request: %w", err)
310 | }
311 | 
312 | return title, nil
313 | }
314 | 
315 | func (o Ollama) chatRequest(messages []api.Message, tools []api.Tool, stream bool) api.ChatRequest {
316 | req := api.ChatRequest{
317 | Model: o.model,
318 | Messages: messages,
319 | Stream: &stream,
320 | Tools: tools,
321 | }
322 | 
323 | opts := make(map[string]any)
324 | 
325 | if o.params.Temperature != nil {
326 | opts["temperature"] = *o.params.Temperature
327 | }
328 | if o.params.Seed != nil {
329 | opts["seed"] = *o.params.Seed
330 | }
331 | if o.params.Stop != nil {
332 | opts["stop"] = o.params.Stop
333 | }
334 | if o.params.TopK != nil {
335 | opts["top_k"] = *o.params.TopK
336 | }
337 | if o.params.TopP != nil {
338 | opts["top_p"] = *o.params.TopP
339 | }
340 | if o.params.MinP != nil {
341 | opts["min_p"] = *o.params.MinP
342 | }
343 | 
344 | req.Options = opts
345 | 
346 | return req
347 | }
348 | 
--------------------------------------------------------------------------------
/internal/services/openai.go:
--------------------------------------------------------------------------------
1 | package services
2 | 
3 | import (
4 | "context"
5 | "encoding/base64"
6 | "encoding/json"
7 | "errors"
8 | "fmt"
9 | "io"
10 | "iter"
11 | "log/slog"
12 | "slices"
13 | "strings"
14 | 
15 | "github.com/MegaGrindStone/go-mcp"
16 | "github.com/MegaGrindStone/mcp-web-ui/internal/models"
17 | goopenai "github.com/sashabaranov/go-openai"
18 | )
19 | 
20 | // OpenAI provides an implementation of the LLM interface for interacting with OpenAI's language models.
21 | type OpenAI struct {
22 | model string
23 | systemPrompt string
24 | 
25 | params LLMParameters
26 | 
27 | client *goopenai.Client
28 | 
29 | logger *slog.Logger
30 | }
31 | 
32 | // NewOpenAI creates a new OpenAI instance with the specified API key, base URL, model name, and system prompt.
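Because the endpoint is applied only when non-empty, the same constructor covers both the official API and any OpenAI-compatible server. A sketch with placeholder values:

```go
// Hypothetical constructions; apiKey, systemPrompt, params, logger as elsewhere.
official := NewOpenAI(apiKey, "gpt-4o-mini", systemPrompt, "", params, logger)
// Pointing at an OpenAI-compatible server instead:
local := NewOpenAI(apiKey, "local-model", systemPrompt,
	"http://localhost:8080/v1", params, logger)
```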
33 | func NewOpenAI( 34 | apiKey, model, systemPrompt, endpoint string, 35 | params LLMParameters, 36 | logger *slog.Logger, 37 | ) OpenAI { 38 | var client *goopenai.Client 39 | if endpoint == "" { 40 | client = goopenai.NewClient(apiKey) 41 | } else { 42 | cfg := goopenai.DefaultConfig(apiKey) 43 | cfg.BaseURL = endpoint 44 | client = goopenai.NewClientWithConfig(cfg) 45 | } 46 | 47 | return OpenAI{ 48 | model: model, 49 | systemPrompt: systemPrompt, 50 | params: params, 51 | client: client, 52 | logger: logger.With(slog.String("module", "openai")), 53 | } 54 | } 55 | 56 | func openAIMessages(messages []models.Message) ([]goopenai.ChatCompletionMessage, error) { 57 | msgs := make([]goopenai.ChatCompletionMessage, 0, len(messages)) 58 | for _, msg := range messages { 59 | if msg.Role == models.RoleUser { 60 | // Process user message with potential resources 61 | userMsg, err := processUserMessageForOpenAI(msg) 62 | if err != nil { 63 | return nil, err 64 | } 65 | msgs = append(msgs, userMsg) 66 | continue 67 | } 68 | 69 | // Handle assistant and other roles 70 | for _, ct := range msg.Contents { 71 | switch ct.Type { 72 | case models.ContentTypeText: 73 | if ct.Text != "" { 74 | msgs = append(msgs, goopenai.ChatCompletionMessage{ 75 | Role: string(msg.Role), 76 | Content: ct.Text, 77 | }) 78 | } 79 | case models.ContentTypeCallTool: 80 | msgs = append(msgs, goopenai.ChatCompletionMessage{ 81 | Role: string(msg.Role), 82 | ToolCalls: []goopenai.ToolCall{ 83 | { 84 | Type: "function", 85 | ID: ct.CallToolID, 86 | Function: goopenai.FunctionCall{ 87 | Name: ct.ToolName, 88 | Arguments: string(ct.ToolInput), 89 | }, 90 | }, 91 | }, 92 | }) 93 | case models.ContentTypeToolResult: 94 | msgs = append(msgs, goopenai.ChatCompletionMessage{ 95 | Role: "tool", 96 | Content: string(ct.ToolResult), 97 | ToolCallID: ct.CallToolID, 98 | }) 99 | case models.ContentTypeResource: 100 | return nil, fmt.Errorf("content type %s is not supported for assistant messages", ct.Type) 101 | } 102 | } 103 | } 104 | return msgs, nil 105 | } 106 | 107 | func processUserMessageForOpenAI(msg models.Message) (goopenai.ChatCompletionMessage, error) { 108 | // Check if we have any resource contents 109 | hasResources := false 110 | for _, ct := range msg.Contents { 111 | if ct.Type == models.ContentTypeResource { 112 | hasResources = true 113 | break 114 | } 115 | } 116 | 117 | // If no resources, combine all text contents 118 | if !hasResources { 119 | var textParts []string 120 | for _, ct := range msg.Contents { 121 | if ct.Type == models.ContentTypeText && ct.Text != "" { 122 | textParts = append(textParts, ct.Text) 123 | } 124 | } 125 | return goopenai.ChatCompletionMessage{ 126 | Role: string(msg.Role), 127 | Content: strings.Join(textParts, "\n\n"), 128 | }, nil 129 | } 130 | 131 | // If we have resources, we need to use MultiContent 132 | var contentParts []goopenai.ChatMessagePart 133 | textContent := "" 134 | 135 | for _, ct := range msg.Contents { 136 | switch ct.Type { 137 | case models.ContentTypeText: 138 | if ct.Text != "" { 139 | if textContent != "" { 140 | textContent += "\n\n" 141 | } 142 | textContent += ct.Text 143 | } 144 | case models.ContentTypeResource: 145 | for _, resource := range ct.ResourceContents { 146 | if strings.HasPrefix(resource.MimeType, "image/") { 147 | // Process image for OpenAI 148 | imageURL := processImageForOpenAI(resource) 149 | 150 | contentParts = append(contentParts, goopenai.ChatMessagePart{ 151 | Type: goopenai.ChatMessagePartTypeImageURL, 152 | ImageURL: 
&goopenai.ChatMessageImageURL{ 153 | URL: imageURL, 154 | Detail: goopenai.ImageURLDetailAuto, 155 | }, 156 | }) 157 | } else { 158 | // For non-image resources, convert to text 159 | resourceText := convertResourceToTextForOpenAI(resource) 160 | if resourceText != "" { 161 | if textContent != "" { 162 | textContent += "\n\n" 163 | } 164 | textContent += resourceText 165 | } 166 | } 167 | } 168 | case models.ContentTypeCallTool, models.ContentTypeToolResult: 169 | return goopenai.ChatCompletionMessage{}, fmt.Errorf("content type %s is not supported for user messages", ct.Type) 170 | } 171 | } 172 | 173 | // Add text content as a part if we have any 174 | if textContent != "" { 175 | contentParts = append(contentParts, goopenai.ChatMessagePart{ 176 | Type: goopenai.ChatMessagePartTypeText, 177 | Text: textContent, 178 | }) 179 | } 180 | 181 | return goopenai.ChatCompletionMessage{ 182 | Role: string(msg.Role), 183 | MultiContent: contentParts, 184 | }, nil 185 | } 186 | 187 | func processImageForOpenAI(resource mcp.ResourceContents) string { 188 | // Format should be: data:image/jpeg;base64, 189 | mimeType := resource.MimeType 190 | if mimeType == "" { 191 | mimeType = "image/png" // Default 192 | } 193 | 194 | var imageData string 195 | if isBase64(resource.Blob) { 196 | imageData = resource.Blob 197 | } else { 198 | imageData = base64.StdEncoding.EncodeToString([]byte(resource.Blob)) 199 | } 200 | 201 | return fmt.Sprintf("data:%s;base64,%s", mimeType, imageData) 202 | } 203 | 204 | func convertResourceToTextForOpenAI(resource mcp.ResourceContents) string { 205 | if resource.Text != "" { 206 | return fmt.Sprintf("[Document of type %s]\n%s", resource.MimeType, resource.Text) 207 | } 208 | 209 | // For binary data that isn't an image (e.g., PDF), provide base64 data 210 | if resource.Blob != "" { 211 | data := resource.Blob 212 | if !isBase64(resource.Blob) { 213 | data = base64.StdEncoding.EncodeToString([]byte(resource.Blob)) 214 | } 215 | return fmt.Sprintf("[Document of type %s]\n%s", resource.MimeType, data) 216 | } 217 | 218 | return "" 219 | } 220 | 221 | // Chat is a wrapper around the OpenAI chat completion API. 
222 | func (o OpenAI) Chat(
223 | ctx context.Context,
224 | messages []models.Message,
225 | tools []mcp.Tool,
226 | ) iter.Seq2[models.Content, error] {
227 | return func(yield func(models.Content, error) bool) {
228 | msgs, err := openAIMessages(messages)
229 | if err != nil {
230 | yield(models.Content{}, fmt.Errorf("error creating openai messages: %w", err))
231 | return
232 | }
233 | 
234 | msgs = slices.Insert(msgs, 0, goopenai.ChatCompletionMessage{
235 | Role: "system",
236 | Content: o.systemPrompt,
237 | })
238 | 
239 | oTools := make([]goopenai.Tool, len(tools))
240 | for i, tool := range tools {
241 | oTools[i] = goopenai.Tool{
242 | Type: "function",
243 | Function: &goopenai.FunctionDefinition{
244 | Name: tool.Name,
245 | Description: tool.Description,
246 | Parameters: tool.InputSchema,
247 | },
248 | }
249 | }
250 | 
251 | req := o.chatRequest(msgs, oTools, true)
252 | 
253 | reqJSON, err := json.Marshal(req)
254 | if err == nil {
255 | o.logger.Debug("Request", slog.String("req", string(reqJSON)))
256 | }
257 | 
258 | ctx, cancel := context.WithCancel(ctx)
259 | defer cancel()
260 | 
261 | stream, err := o.client.CreateChatCompletionStream(ctx, req)
262 | if err != nil {
263 | yield(models.Content{}, fmt.Errorf("error sending request: %w", err))
264 | return
265 | }
266 | 
267 | toolUse := false
268 | toolArgs := ""
269 | callToolContent := models.Content{
270 | Type: models.ContentTypeCallTool,
271 | }
272 | for {
273 | response, err := stream.Recv()
274 | if err != nil {
275 | if errors.Is(err, io.EOF) {
276 | break
277 | }
278 | if errors.Is(err, context.Canceled) {
279 | return
280 | }
281 | yield(models.Content{}, fmt.Errorf("error receiving response: %w", err))
282 | return
283 | }
284 | 
285 | if len(response.Choices) == 0 {
286 | continue
287 | }
288 | 
289 | res := response.Choices[0].Delta
290 | if res.Content != "" {
291 | if !yield(models.Content{
292 | Type: models.ContentTypeText,
293 | Text: res.Content,
294 | }, nil) {
295 | return
296 | }
297 | }
298 | if len(res.ToolCalls) > 0 {
299 | if len(res.ToolCalls) > 1 {
300 | o.logger.Warn("Received multiple tool calls, but only the first one is supported",
301 | slog.Int("count", len(res.ToolCalls)),
302 | slog.String("toolCalls", fmt.Sprintf("%+v", res.ToolCalls)),
303 | )
304 | }
305 | toolArgs += res.ToolCalls[0].Function.Arguments
306 | if !toolUse {
307 | toolUse = true
308 | callToolContent.ToolName = res.ToolCalls[0].Function.Name
309 | callToolContent.CallToolID = res.ToolCalls[0].ID
310 | }
311 | }
312 | }
313 | if toolUse {
314 | if toolArgs == "" {
315 | toolArgs = "{}"
316 | }
317 | o.logger.Debug("Call Tool",
318 | slog.String("name", callToolContent.ToolName),
319 | slog.String("args", toolArgs),
320 | )
321 | callToolContent.ToolInput = json.RawMessage(toolArgs)
322 | yield(callToolContent, nil)
323 | }
324 | }
325 | }
326 | 
327 | // GenerateTitle is a wrapper around the OpenAI chat completion API.
328 | func (o OpenAI) GenerateTitle(ctx context.Context, message string) (string, error) { 329 | msgs := []goopenai.ChatCompletionMessage{ 330 | { 331 | Role: goopenai.ChatMessageRoleSystem, 332 | Content: o.systemPrompt, 333 | }, 334 | { 335 | Role: goopenai.ChatMessageRoleUser, 336 | Content: message, 337 | }, 338 | } 339 | 340 | req := o.chatRequest(msgs, nil, false) 341 | 342 | resp, err := o.client.CreateChatCompletion(ctx, req) 343 | if err != nil { 344 | return "", fmt.Errorf("error sending request: %w", err) 345 | } 346 | 347 | if len(resp.Choices) == 0 { 348 | return "", errors.New("no choices found") 349 | } 350 | 351 | return resp.Choices[0].Message.Content, nil 352 | } 353 | 354 | func (o OpenAI) chatRequest( 355 | messages []goopenai.ChatCompletionMessage, 356 | tools []goopenai.Tool, 357 | stream bool, 358 | ) goopenai.ChatCompletionRequest { 359 | req := goopenai.ChatCompletionRequest{ 360 | Model: o.model, 361 | Messages: messages, 362 | Stream: stream, 363 | Tools: tools, 364 | } 365 | 366 | if o.params.Temperature != nil { 367 | req.Temperature = *o.params.Temperature 368 | } 369 | if o.params.TopP != nil { 370 | req.TopP = *o.params.TopP 371 | } 372 | if o.params.Stop != nil { 373 | req.Stop = o.params.Stop 374 | } 375 | if o.params.PresencePenalty != nil { 376 | req.PresencePenalty = *o.params.PresencePenalty 377 | } 378 | if o.params.Seed != nil { 379 | req.Seed = o.params.Seed 380 | } 381 | if o.params.FrequencyPenalty != nil { 382 | req.FrequencyPenalty = *o.params.FrequencyPenalty 383 | } 384 | if o.params.LogitBias != nil { 385 | req.LogitBias = o.params.LogitBias 386 | } 387 | if o.params.Logprobs != nil { 388 | req.LogProbs = *o.params.Logprobs 389 | } 390 | if o.params.TopLogprobs != nil { 391 | req.TopLogProbs = *o.params.TopLogprobs 392 | } 393 | 394 | return req 395 | } 396 | -------------------------------------------------------------------------------- /internal/services/openrouter.go: -------------------------------------------------------------------------------- 1 | package services 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/base64" 7 | "encoding/json" 8 | "errors" 9 | "fmt" 10 | "io" 11 | "iter" 12 | "log/slog" 13 | "net/http" 14 | "slices" 15 | "strings" 16 | 17 | "github.com/MegaGrindStone/go-mcp" 18 | "github.com/MegaGrindStone/mcp-web-ui/internal/models" 19 | "github.com/tmaxmax/go-sse" 20 | ) 21 | 22 | // OpenRouter provides an implementation of the LLM interface for interacting with OpenRouter's language models. 
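For orientation, the request that the types below marshal into can be built roughly like this (field values are made up):

```go
// Hypothetical request body; see openRouterChatRequest below for all fields.
req := openRouterChatRequest{
	Model:  "anthropic/claude-3.5-sonnet",
	Stream: true,
	Messages: []openRouterMessageRequest{
		{Role: "system", Content: "You are a helpful assistant."},
		{Role: "user", Content: "Hello"},
	},
}
```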
23 | type OpenRouter struct { 24 | apiKey string 25 | model string 26 | systemPrompt string 27 | 28 | params LLMParameters 29 | 30 | client *http.Client 31 | 32 | logger *slog.Logger 33 | } 34 | 35 | type openRouterChatRequest struct { 36 | Model string `json:"model"` 37 | Messages []openRouterMessageRequest `json:"messages"` 38 | Tools []openRouterTool `json:"tools,omitempty"` 39 | Stream bool `json:"stream"` 40 | 41 | Temperature *float32 `json:"temperature,omitempty"` 42 | TopP *float32 `json:"top_p,omitempty"` 43 | TopK *int `json:"top_k,omitempty"` 44 | FrequencyPenalty *float32 `json:"frequency_penalty,omitempty"` 45 | PresencePenalty *float32 `json:"presence_penalty,omitempty"` 46 | RepetitionPenalty *float32 `json:"repetition_penalty,omitempty"` 47 | MinP *float32 `json:"min_p,omitempty"` 48 | TopA *float32 `json:"top_a,omitempty"` 49 | Seed *int `json:"seed,omitempty"` 50 | MaxTokens *int `json:"max_tokens,omitempty"` 51 | LogitBias map[string]int `json:"logit_bias,omitempty"` 52 | Logprobs *bool `json:"logprobs,omitempty"` 53 | TopLogprobs *int `json:"top_logprobs,omitempty"` 54 | Stop []string `json:"stop,omitempty"` 55 | IncludeReasoning *bool `json:"include_reasoning,omitempty"` 56 | } 57 | 58 | type openRouterMessageRequest struct { 59 | Role string `json:"role"` 60 | Content any `json:"content,omitempty"` 61 | ToolCalls []openRouterToolCalls `json:"tool_calls,omitempty"` 62 | ToolCallID string `json:"tool_call_id,omitempty"` 63 | } 64 | 65 | type openRouterUserContent struct { 66 | Type string `json:"type"` 67 | 68 | // For text type. 69 | Text string `json:"text,omitempty"` 70 | 71 | // For image_url type. 72 | ImageURL *openRouterImageContent `json:"image_url,omitempty"` 73 | } 74 | 75 | type openRouterImageContent struct { 76 | URL string `json:"url"` 77 | } 78 | 79 | type openRouterMessage struct { 80 | Role string `json:"role"` 81 | Content string `json:"content,omitempty"` 82 | ToolCalls []openRouterToolCalls `json:"tool_calls,omitempty"` 83 | ToolCallID string `json:"tool_call_id,omitempty"` 84 | } 85 | 86 | type openRouterToolCalls struct { 87 | ID string `json:"id"` 88 | Type string `json:"type"` 89 | Function openRouterToolCallFunction `json:"function"` 90 | } 91 | 92 | type openRouterToolCallFunction struct { 93 | Name string `json:"name"` 94 | Arguments string `json:"arguments"` 95 | } 96 | 97 | type openRouterTool struct { 98 | Type string `json:"type"` 99 | Function openRouterToolFunction `json:"function"` 100 | } 101 | 102 | type openRouterToolFunction struct { 103 | Name string `json:"name"` 104 | Description string `json:"description"` 105 | Parameters json.RawMessage `json:"parameters,omitempty"` 106 | } 107 | 108 | type openRouterStreamingResponse struct { 109 | Choices []openRouterStreamingChoice `json:"choices"` 110 | } 111 | 112 | type openRouterStreamingErrorResponse struct { 113 | Error openRouterStreamingError `json:"error"` 114 | } 115 | 116 | type openRouterStreamingChoice struct { 117 | Delta openRouterMessage `json:"delta"` 118 | FinishReason string `json:"finish_reason"` 119 | NativeFinishReason string `json:"native_finish_reason"` 120 | } 121 | 122 | type openRouterStreamingError struct { 123 | Code int `json:"code"` 124 | Message string `json:"message"` 125 | Metadata map[string]any `json:"metadata"` 126 | } 127 | 128 | type openRouterResponse struct { 129 | Choices []openRouterChoice `json:"choices"` 130 | } 131 | 132 | type openRouterChoice struct { 133 | Message openRouterMessage `json:"message"` 134 | } 135 | 136 | const ( 137 | 
openRouterAPIEndpoint = "https://openrouter.ai/api/v1"
138 | 
139 | openRouterRequestContentTypeText = "text"
140 | openRouterRequestContentTypeImageURL = "image_url"
141 | )
142 | 
143 | // NewOpenRouter creates a new OpenRouter instance with the specified API key, model name, and system prompt.
144 | func NewOpenRouter(apiKey, model, systemPrompt string, params LLMParameters, logger *slog.Logger) OpenRouter {
145 | return OpenRouter{
146 | apiKey: apiKey,
147 | model: model,
148 | systemPrompt: systemPrompt,
149 | params: params,
150 | client: &http.Client{},
151 | logger: logger.With(slog.String("module", "openrouter")),
152 | }
153 | }
154 | 
155 | // Chat streams responses from the OpenRouter API for a given sequence of messages. It processes system
156 | // messages separately and returns an iterator that yields response chunks and potential errors. The
157 | // context can be used to cancel ongoing requests. Refer to models.Message for message structure details.
158 | func (o OpenRouter) Chat(
159 | ctx context.Context,
160 | messages []models.Message,
161 | tools []mcp.Tool,
162 | ) iter.Seq2[models.Content, error] {
163 | return func(yield func(models.Content, error) bool) {
164 | resp, err := o.doRequest(ctx, messages, tools, true)
165 | if err != nil {
166 | if errors.Is(err, context.Canceled) {
167 | return
168 | }
169 | yield(models.Content{}, fmt.Errorf("error sending request: %w", err))
170 | return
171 | }
172 | defer resp.Body.Close()
173 | 
174 | toolUse := false
175 | toolArgs := ""
176 | callToolContent := models.Content{
177 | Type: models.ContentTypeCallTool,
178 | }
179 | for ev, err := range sse.Read(resp.Body, nil) {
180 | if err != nil {
181 | yield(models.Content{}, fmt.Errorf("error reading response: %w", err))
182 | return
183 | }
184 | 
185 | o.logger.Debug("Received event",
186 | slog.String("event", ev.Data),
187 | )
188 | 
189 | if ev.Data == "[DONE]" {
190 | break
191 | }
192 | 
193 | // Before we try to unmarshal the response into the expected format, we try to unmarshal it into
194 | // the streaming error format.
195 | var resErr openRouterStreamingErrorResponse
196 | if err := json.Unmarshal([]byte(ev.Data), &resErr); err == nil {
197 | if resErr.Error.Code != 0 {
198 | o.logger.Error("Received streaming error response",
199 | slog.String("error", fmt.Sprintf("%+v", resErr)),
200 | )
201 | yield(models.Content{}, fmt.Errorf("openrouter error: %+v", resErr.Error))
202 | return
203 | }
204 | }
205 | 
206 | var res openRouterStreamingResponse
207 | if err := json.Unmarshal([]byte(ev.Data), &res); err != nil {
208 | yield(models.Content{}, fmt.Errorf("error unmarshaling response: %w", err))
209 | return
210 | }
211 | 
212 | if len(res.Choices) == 0 {
213 | continue
214 | }
215 | 
216 | choice := res.Choices[0]
217 | 
218 | if len(choice.Delta.ToolCalls) > 0 {
219 | if len(choice.Delta.ToolCalls) > 1 {
220 | o.logger.Warn("Received multiple tool calls, but only the first one is supported",
221 | slog.Int("count", len(choice.Delta.ToolCalls)),
222 | slog.String("toolCalls", fmt.Sprintf("%+v", choice.Delta.ToolCalls)),
223 | )
224 | }
225 | toolArgs += choice.Delta.ToolCalls[0].Function.Arguments
226 | if !toolUse {
227 | toolUse = true
228 | callToolContent.ToolName = choice.Delta.ToolCalls[0].Function.Name
229 | callToolContent.CallToolID = choice.Delta.ToolCalls[0].ID
230 | }
231 | }
232 | 
233 | if choice.Delta.Content != "" {
234 | if !yield(models.Content{
235 | Type: models.ContentTypeText,
236 | Text: choice.Delta.Content,
237 | }, nil) {
238 | break
239 | }
240 | }
241 | }
242 | if toolUse {
243 | if toolArgs == "" {
244 | toolArgs = "{}"
245 | }
246 | o.logger.Debug("Call Tool",
247 | slog.String("name", callToolContent.ToolName),
248 | slog.String("args", toolArgs),
249 | )
250 | callToolContent.ToolInput = json.RawMessage(toolArgs)
251 | yield(callToolContent, nil)
252 | }
253 | }
254 | }
255 | 
256 | // GenerateTitle generates a title for a given message using the OpenRouter API. It sends a single message to the
257 | // OpenRouter API and returns the first response content as the title. The context can be used to cancel ongoing
258 | // requests.
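The non-streaming body that the method below decodes has, roughly, this shape (the payload is hypothetical; the field path follows the openRouterResponse types above):

```go
// GenerateTitle decodes a body like:
//   {"choices":[{"message":{"role":"assistant","content":"Go profiling tips"}}]}
// and returns res.Choices[0].Message.Content as the chat title.
```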
256 | // GenerateTitle generates a title for a given message using the OpenRouter API. It sends a single message to the
257 | // OpenRouter API and returns the first response content as the title. The context can be used to cancel ongoing
258 | // requests.
259 | func (o OpenRouter) GenerateTitle(ctx context.Context, message string) (string, error) {
260 | 	msgs := []models.Message{
261 | 		{
262 | 			Role: models.RoleUser,
263 | 			Contents: []models.Content{
264 | 				{
265 | 					Type: models.ContentTypeText,
266 | 					Text: message,
267 | 				},
268 | 			},
269 | 		},
270 | 	}
271 |
272 | 	resp, err := o.doRequest(ctx, msgs, nil, false)
273 | 	if err != nil {
274 | 		return "", fmt.Errorf("error sending request: %w", err)
275 | 	}
276 | 	defer resp.Body.Close()
277 |
278 | 	if resp.StatusCode != http.StatusOK {
279 | 		body, _ := io.ReadAll(resp.Body)
280 | 		return "", fmt.Errorf("unexpected status code: %d, body: %s", resp.StatusCode, string(body))
281 | 	}
282 |
283 | 	var res openRouterResponse
284 | 	if err := json.NewDecoder(resp.Body).Decode(&res); err != nil {
285 | 		return "", fmt.Errorf("error decoding response: %w", err)
286 | 	}
287 |
288 | 	if len(res.Choices) == 0 {
289 | 		return "", errors.New("no choices found")
290 | 	}
291 |
292 | 	return res.Choices[0].Message.Content, nil
293 | }
294 |
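// Illustrative only (not part of the original source): doRequest below converts the
// internal message model into OpenRouter's OpenAI-compatible wire format. Assuming the
// request structs carry the standard OpenAI-style JSON tags, a tool-call round trip would
// serialize roughly as follows (values invented for illustration):
//
//	{"role": "assistant", "tool_calls": [{"id": "call_1", "type": "function",
//	  "function": {"name": "get_weather", "arguments": "{\"city\":\"Paris\"}"}}]}
//	{"role": "tool", "tool_call_id": "call_1", "content": "22C, sunny"}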
295 | func (o OpenRouter) doRequest(
296 | 	ctx context.Context,
297 | 	messages []models.Message,
298 | 	tools []mcp.Tool,
299 | 	stream bool,
300 | ) (*http.Response, error) {
301 | 	msgs := make([]openRouterMessageRequest, 0, len(messages))
302 | 	// Process messages
303 | 	for _, msg := range messages {
304 | 		if msg.Role == models.RoleUser {
305 | 			// Process user message with potential resources
306 | 			userMsgs, err := o.processUserMessageForOpenRouter(msg)
307 | 			if err != nil {
308 | 				return nil, err
309 | 			}
310 | 			msgs = append(msgs, userMsgs)
311 | 			continue
312 | 		}
313 |
314 | 		// Handle assistant and tool messages
315 | 		for _, ct := range msg.Contents {
316 | 			switch ct.Type {
317 | 			case models.ContentTypeText:
318 | 				if ct.Text != "" {
319 | 					msgs = append(msgs, openRouterMessageRequest{
320 | 						Role:    string(msg.Role),
321 | 						Content: ct.Text,
322 | 					})
323 | 				}
324 | 			case models.ContentTypeCallTool:
325 | 				msgs = append(msgs, openRouterMessageRequest{
326 | 					Role: "assistant",
327 | 					ToolCalls: []openRouterToolCalls{
328 | 						{
329 | 							ID:   ct.CallToolID,
330 | 							Type: "function",
331 | 							Function: openRouterToolCallFunction{
332 | 								Name:      ct.ToolName,
333 | 								Arguments: string(ct.ToolInput),
334 | 							},
335 | 						},
336 | 					},
337 | 				})
338 | 			case models.ContentTypeToolResult:
339 | 				msgs = append(msgs, openRouterMessageRequest{
340 | 					Role:       "tool",
341 | 					ToolCallID: ct.CallToolID,
342 | 					Content:    string(ct.ToolResult),
343 | 				})
344 | 			case models.ContentTypeResource:
345 | 				return nil, fmt.Errorf("content type %s is not supported for assistant messages", ct.Type)
346 | 			}
347 | 		}
348 | 	}
349 |
350 | 	msgs = slices.Insert(msgs, 0, openRouterMessageRequest{
351 | 		Role:    "system",
352 | 		Content: o.systemPrompt,
353 | 	})
354 |
355 | 	oTools := make([]openRouterTool, len(tools))
356 | 	for i, tool := range tools {
357 | 		parameters := tool.InputSchema
358 | 		// Check whether the parameters represent an empty object, and drop them if so.
359 | 		// Some Google models reject a tool schema whose OBJECT type has an empty "properties"
360 | 		// map, failing the request with HTTP 400 and a message like:
361 | 		// GenerateContentRequest.parameters.properties: should be non-empty for OBJECT type
362 | 		if len(parameters) > 0 {
363 | 			var obj map[string]any
364 | 			if err := json.Unmarshal(parameters, &obj); err == nil {
365 | 				if props, ok := obj["properties"].(map[string]any); ok && len(props) == 0 {
366 | 					parameters = nil
367 | 				}
368 | 			}
369 | 		}
370 |
371 | 		oTools[i] = openRouterTool{
372 | 			Type: "function",
373 | 			Function: openRouterToolFunction{
374 | 				Name:        tool.Name,
375 | 				Description: tool.Description,
376 | 				Parameters:  parameters,
377 | 			},
378 | 		}
379 | 	}
380 |
381 | 	reqBody := openRouterChatRequest{
382 | 		Model:    o.model,
383 | 		Messages: msgs,
384 | 		Stream:   stream,
385 | 		Tools:    oTools,
386 |
387 | 		Temperature:       o.params.Temperature,
388 | 		TopP:              o.params.TopP,
389 | 		TopK:              o.params.TopK,
390 | 		FrequencyPenalty:  o.params.FrequencyPenalty,
391 | 		PresencePenalty:   o.params.PresencePenalty,
392 | 		RepetitionPenalty: o.params.RepetitionPenalty,
393 | 		MinP:              o.params.MinP,
394 | 		TopA:              o.params.TopA,
395 | 		Seed:              o.params.Seed,
396 | 		MaxTokens:         o.params.MaxTokens,
397 | 		LogitBias:         o.params.LogitBias,
398 | 		Logprobs:          o.params.Logprobs,
399 | 		TopLogprobs:       o.params.TopLogprobs,
400 | 		Stop:              o.params.Stop,
401 | 		IncludeReasoning:  o.params.IncludeReasoning,
402 | 	}
403 |
404 | 	jsonBody, err := json.Marshal(reqBody)
405 | 	if err != nil {
406 | 		return nil, fmt.Errorf("error marshaling request: %w", err)
407 | 	}
408 |
409 | 	o.logger.Debug("Request Body", slog.String("body", string(jsonBody)))
410 |
411 | 	req, err := http.NewRequestWithContext(ctx, http.MethodPost,
412 | 		openRouterAPIEndpoint+"/chat/completions", bytes.NewBuffer(jsonBody))
413 | 	if err != nil {
414 | 		return nil, fmt.Errorf("error creating request: %w", err)
415 | 	}
416 |
417 | 	req.Header.Set("Content-Type", "application/json")
418 | 	req.Header.Set("Authorization", "Bearer "+o.apiKey)
419 | 	req.Header.Set("HTTP-Referer", "https://github.com/MegaGrindStone/mcp-web-ui/")
420 | 	req.Header.Set("X-Title", "MCP Web UI")
421 |
422 | 	resp, err := o.client.Do(req)
423 | 	if err != nil {
424 | 		return nil, err
425 | 	}
426 | 	if resp.StatusCode != http.StatusOK {
427 | 		body, _ := io.ReadAll(resp.Body)
428 | 		return nil, fmt.Errorf("unexpected status code: %d, body: %s, request: %s", resp.StatusCode, string(body), jsonBody)
429 | 	}
430 |
431 | 	return resp, nil
432 | }
433 |
434 | func (o OpenRouter) processUserMessageForOpenRouter(msg models.Message) (openRouterMessageRequest, error) {
435 | 	var contents []openRouterUserContent
436 |
437 | 	for _, ct := range msg.Contents {
438 | 		switch ct.Type {
439 | 		case models.ContentTypeText:
440 | 			if ct.Text != "" {
441 | 				contents = append(contents, openRouterUserContent{
442 | 					Type: openRouterRequestContentTypeText,
443 | 					Text: ct.Text,
444 | 				})
445 | 			}
446 | 		case models.ContentTypeResource:
447 | 			for _, resource := range ct.ResourceContents {
448 | 				if strings.HasPrefix(resource.MimeType, "image/") {
449 | 					// Process image for OpenRouter
450 | 					imageURL := processImageForOpenRouter(resource)
451 |
452 | 					contents = append(contents, openRouterUserContent{
453 | 						Type: openRouterRequestContentTypeImageURL,
454 | 						ImageURL: &openRouterImageContent{
455 | 							URL: imageURL,
456 | 						},
457 | 					})
458 | 					continue
459 | 				}
460 |
461 | 				// For non-image resources, convert to text
462 | 				resourceText := convertResourceToTextForOpenRouter(resource)
463 | 				contents = append(contents, openRouterUserContent{
464 | 					Type: openRouterRequestContentTypeText,
465 | 					Text: resourceText,
466 | 				})
467 | 			}
468 | 		case models.ContentTypeCallTool, models.ContentTypeToolResult:
469 | 			return openRouterMessageRequest{}, fmt.Errorf("content type %s is not supported for user messages", ct.Type)
470 | 		}
471 | 	}
472 |
473 | 	return openRouterMessageRequest{
474 | 		Role:    string(msg.Role),
475 | 		Content: contents,
476 | 	}, nil
477 | }
478 |
479 | func processImageForOpenRouter(resource mcp.ResourceContents) string {
480 | 	mimeType := resource.MimeType
481 | 	if mimeType == "" {
482 | 		mimeType = "image/png" // Default
483 | 	}
484 |
485 | 	var imageData string
486 | 	if isBase64(resource.Blob) {
487 | 		imageData = resource.Blob
488 | 	} else {
489 | 		imageData = base64.StdEncoding.EncodeToString([]byte(resource.Blob))
490 | 	}
491 |
492 | 	return fmt.Sprintf("data:%s;base64,%s", mimeType, imageData) // e.g. "data:image/png;base64,iVBORw0..."
493 | }
494 |
495 | func convertResourceToTextForOpenRouter(resource mcp.ResourceContents) string {
496 | 	if resource.Text != "" {
497 | 		return fmt.Sprintf("[Document of type %s]\n%s", resource.MimeType, resource.Text)
498 | 	}
499 |
500 | 	if resource.Blob != "" {
501 | 		data := resource.Blob
502 | 		if !isBase64(resource.Blob) {
503 | 			data = base64.StdEncoding.EncodeToString([]byte(resource.Blob))
504 | 		}
505 | 		return fmt.Sprintf("[Document of type %s]\n%s", resource.MimeType, data)
506 | 	}
507 |
508 | 	return ""
509 | }
--------------------------------------------------------------------------------
/internal/services/parameter.go:
--------------------------------------------------------------------------------
1 | package services
2 |
3 | // LLMParameters contains the optional configuration parameters for LLM services.
4 | //
5 | // Not all parameters are supported by every LLM provider; consult the corresponding
6 | // provider's documentation to see which ones apply.
7 | //
8 | // These parameters are taken from the OpenRouter documentation:
9 | // https://openrouter.ai/docs/api-reference/parameters
10 | // For more information about what these parameters do, please refer to it.
11 | type LLMParameters struct {
12 | 	Temperature       *float32       `yaml:"temperature"`
13 | 	TopP              *float32       `yaml:"topP"`
14 | 	TopK              *int           `yaml:"topK"`
15 | 	FrequencyPenalty  *float32       `yaml:"frequencyPenalty"`
16 | 	PresencePenalty   *float32       `yaml:"presencePenalty"`
17 | 	RepetitionPenalty *float32       `yaml:"repetitionPenalty"`
18 | 	MinP              *float32       `yaml:"minP"`
19 | 	TopA              *float32       `yaml:"topA"`
20 | 	Seed              *int           `yaml:"seed"`
21 | 	MaxTokens         *int           `yaml:"maxTokens"`
22 | 	LogitBias         map[string]int `yaml:"logitBias"`
23 | 	Logprobs          *bool          `yaml:"logprobs"`
24 | 	TopLogprobs       *int           `yaml:"topLogprobs"`
25 | 	Stop              []string       `yaml:"stop"`
26 | 	IncludeReasoning  *bool          `yaml:"includeReasoning"`
27 | }
28 |
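// Illustrative only (not part of the original source): assuming these fields are decoded
// straight from the server's YAML config, a parameters block might look like the
// following. The surrounding key structure is hypothetical; see config.example.yaml for
// the authoritative layout.
//
//	temperature: 0.7
//	topP: 0.9
//	maxTokens: 4096
//	stop:
//	  - "Observation:"
//	includeReasoning: true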
--------------------------------------------------------------------------------
/static/css/styles.css:
--------------------------------------------------------------------------------
1 | .auto-expand {
2 |   overflow-y: hidden;
3 |   transition: height 0.1s ease-out;
4 | }
5 |
--------------------------------------------------------------------------------
/static/js/home.js:
--------------------------------------------------------------------------------
1 | // Global variable for attached resources
2 | let attachedResources = [];
3 |
4 | function showServerModal(serverName) {
5 |   const modalText = document.getElementById('serverModalText');
6 |   modalText.textContent = `Server: ${serverName}`;
7 |   const modal = new bootstrap.Modal(document.getElementById('serverModal'));
8 |   modal.show();
9 | }
10 |
11 | function handleKeyPress(event, formID) {
12 |   if (event.key === 'Enter') {
13 |     if (!event.shiftKey) {
14 |       event.preventDefault();
15 |       htmx.trigger(formID, 'submit');
16 |     }
17 |   }
18 |   // Auto-expand height
19 |   adjustHeight(event.target);
20 | }
21 |
22 | function adjustHeight(element) {
23 |   element.style.height = 'auto';
24 |   element.style.height = (element.scrollHeight) + 'px';
25 | }
26 |
27 | function showPromptModal(promptIndex) {
28 |   const promptData = promptsList[promptIndex];
29 |   if (!promptData) {
30 |     console.error('Prompt not found at index:', promptIndex);
31 |     return;
32 |   }
33 |
34 |   // Set the modal title and description
35 |   document.getElementById('promptModalLabel').textContent = `Prompt: ${promptData.name}`;
36 |   const descElem = document.getElementById('promptDescription');
37 |   descElem.textContent = promptData.description || '';
38 |
39 |   // Get the form element where we'll add the inputs
40 |   const argContainer = document.getElementById('promptArguments');
41 |   argContainer.innerHTML = '';
42 |
43 |   // Create input fields for each argument
44 |   if (promptData.arguments && promptData.arguments.length > 0) {
45 |     promptData.arguments.forEach(arg => {
46 |       const formGroup = document.createElement('div');
47 |       formGroup.className = 'mb-3';
48 |
49 |       const label = document.createElement('label');
50 |       label.htmlFor = `arg-${arg.name}`;
51 |       label.className = 'form-label';
52 |       label.textContent = arg.name;
53 |       if (arg.required) {
54 |         label.textContent += ' *';
55 |       }
56 |
57 |       const input = document.createElement('input');
58 |       input.type = 'text';
59 |       input.className = 'form-control';
60 |       input.id = `arg-${arg.name}`;
61 |       input.name = arg.name;
62 |       input.required = arg.required;
63 |
64 |       formGroup.appendChild(label);
65 |       formGroup.appendChild(input);
66 |
67 |       if (arg.description) {
68 |         const helpText = document.createElement('div');
69 |         helpText.className = 'form-text';
70 |         helpText.textContent = arg.description;
71 |         formGroup.appendChild(helpText);
72 |       }
73 |
74 |       argContainer.appendChild(formGroup);
75 |     });
76 |   } else {
77 |     // If there are no arguments, show a message
78 |     const noArgsMsg = document.createElement('p');
79 |     noArgsMsg.textContent = 'This prompt has no arguments.';
80 |     argContainer.appendChild(noArgsMsg);
81 |   }
82 |
83 |   // Set up the "Use Prompt" button handler
84 |   document.getElementById('usePromptBtn').onclick = function() {
85 |     // Collect prompt data and arguments
86 |     const args = {};
87 |     const promptName = promptData.name;
88 |
89 |     if (promptData.arguments) {
90 |       promptData.arguments.forEach(arg => {
91 |         const input = document.getElementById(`arg-${arg.name}`);
92 |         if (input) {
93 |           args[arg.name] = input.value;
94 |         }
95 |       });
96 |     }
97 |
98 |     // Determine which form to use
99 |     const isWelcomePage = document.getElementById('chat-form-welcome') !== null;
100 |     const formId = isWelcomePage ? 'chat-form-welcome' : 'chat-form-chatbox';
101 |     const form = document.getElementById(formId);
102 |
103 |     // Get the textarea and temporarily remove required attribute
104 |     const textarea = form.querySelector('textarea[name="message"]');
105 |     textarea.removeAttribute('required');
106 |
107 |     // Clear the message field and set prompt data
108 |     textarea.value = '';
109 |     form.querySelector('input[name="prompt_name"]').value = promptName;
110 |     form.querySelector('input[name="prompt_args"]').value = JSON.stringify(args);
111 |
112 |     // Submit the form
113 |     htmx.trigger(form, 'submit');
114 |
115 |     // Close the modal
116 |     bootstrap.Modal.getInstance(document.getElementById('promptModal')).hide();
117 |   };
118 |
119 |   // Show the modal
120 |   new bootstrap.Modal(document.getElementById('promptModal')).show();
121 | }
122 |
123 | function attachResource(resource) {
124 |   // Add resource to the tracking array if not already present
125 |   if (!attachedResources.some(r => r.uri === resource.uri)) {
126 |     attachedResources.push(resource);
127 |     updateAttachedResourcesDisplay();
128 |   }
129 | }
130 |
131 | function removeResource(uri) {
132 |   // Remove the resource from the tracking array
133 |   attachedResources = attachedResources.filter(r => r.uri !== uri);
134 |   updateAttachedResourcesDisplay();
135 | }
136 |
137 | function clearAttachedResources() {
138 |   attachedResources = [];
139 |   updateAttachedResourcesDisplay();
140 | }
141 |
142 | function updateAttachedResourcesDisplay() {
143 |   // Get the container element
144 |   const container = document.getElementById('attached-resources-container');
145 |   const list = document.getElementById('attached-resources-list');
146 |
147 |   if (!container || !list) return;
148 |
149 |   // Show/hide the container based on whether there are resources
150 |   container.style.display = attachedResources.length > 0 ? 'block' : 'none';
151 |
152 |   // Clear the list
153 |   list.innerHTML = '';
154 |
155 |   // Add badges for each resource
156 |   attachedResources.forEach(resource => {
157 |     const badge = document.createElement('div');
158 |
159 |     // Create display text with name and URI
160 |     let displayText = resource.name || 'Resource';
161 |     if (resource.uri) {
162 |       displayText += ` (${resource.uri})`;
163 |     }
164 |
165 |     badge.className = 'badge bg-secondary text-white d-flex align-items-center p-2 me-1 mb-1';
166 |     badge.innerHTML = `
167 |       ${displayText}
168 |       <!-- remove button (reconstructed; the original markup was lost in extraction) -->
169 |       <button type="button" class="btn-close btn-close-white ms-2" aria-label="Remove" onclick="removeResource('${resource.uri}')"></button>
170 |     `;
171 |     list.appendChild(badge);
172 |   });
173 |
174 |   // Update the hidden form input
175 |   const isWelcomePage = document.getElementById('chat-form-welcome') !== null;
176 |   const formId = isWelcomePage ? 'chat-form-welcome' : 'chat-form-chatbox';
177 |   const form = document.getElementById(formId);
178 |
179 |   if (form) {
180 |     const input = form.querySelector('input[name="attached_resources"]');
181 |     if (input) {
182 |       input.value = JSON.stringify(attachedResources.map(r => r.uri));
183 |     }
184 |   }
185 | }
186 |
187 | function showResourceModal(resourceIndex) {
188 |   const resourceData = resourcesList[resourceIndex];
189 |   if (!resourceData) {
190 |     console.error('Resource not found at index:', resourceIndex);
191 |     return;
192 |   }
193 |
194 |   // Set the modal content
195 |   document.getElementById('resourceName').textContent = resourceData.name || 'Unnamed Resource';
196 |   document.getElementById('resourceDescription').textContent = resourceData.description || 'No description available';
197 |   document.getElementById('resourceUri').textContent = resourceData.uri || '';
198 |   document.getElementById('resourceMimeType').textContent = resourceData.mimeType || 'Unknown';
199 |
200 |   // Set up the "Use Resource" button handler
201 |   document.getElementById('useResourceBtn').onclick = function() {
202 |     attachResource(resourceData);
203 |     bootstrap.Modal.getInstance(document.getElementById('resourceModal')).hide();
204 |   };
205 |
206 |   // Show the modal
207 |   new bootstrap.Modal(document.getElementById('resourceModal')).show();
208 | }
209 |
210 | document.addEventListener('DOMContentLoaded', function() {
211 |   // Fix any trailing commas in arrays
212 |   for (let i = 0; i < promptsList.length; i++) {
213 |     if (promptsList[i].arguments && promptsList[i].arguments.length > 0) {
214 |       // Remove any undefined items that may have been created by trailing commas
215 |       promptsList[i].arguments = promptsList[i].arguments.filter(item => item !== undefined);
216 |     }
217 |   }
218 |
219 |   // Clear any prompt data on page load
220 |   const isWelcomePage = document.getElementById('chat-form-welcome') !== null;
221 |   const formId = isWelcomePage ? 'chat-form-welcome' : 'chat-form-chatbox';
222 |   const form = document.getElementById(formId);
223 |
224 |   // Clear any attached resources on page load
225 |   clearAttachedResources();
226 |
227 |   if (form) {
228 |     const promptNameInput = form.querySelector('input[name="prompt_name"]');
229 |     if (promptNameInput) promptNameInput.value = '';
230 |
231 |     const promptArgsInput = form.querySelector('input[name="prompt_args"]');
232 |     if (promptArgsInput) promptArgsInput.value = '';
233 |   }
234 | });
235 |
236 | document.addEventListener('htmx:beforeRequest', function(event) {
237 |   const form = event.detail.elt;
238 |
239 |   // Only process if this is one of our chat forms
240 |   if (form.id === 'chat-form-welcome' || form.id === 'chat-form-chatbox') {
241 |     // Set up a one-time event listener for after the request completes
242 |     form.addEventListener('htmx:afterRequest', function afterRequest() {
243 |       // Restore the required attribute and clear prompt fields
244 |       const textarea = form.querySelector('textarea[name="message"]');
245 |       if (textarea) textarea.setAttribute('required', '');
246 |
247 |       const promptNameInput = form.querySelector('input[name="prompt_name"]');
248 |       if (promptNameInput) promptNameInput.value = '';
249 |
250 |       const promptArgsInput = form.querySelector('input[name="prompt_args"]');
251 |       if (promptArgsInput) promptArgsInput.value = '';
252 |
253 |       // Clear attached resources after submission
254 |       clearAttachedResources();
255 |
256 |       // Remove this event listener
257 |       form.removeEventListener('htmx:afterRequest', afterRequest);
258 |     }, { once: true });
259 |   }
260 | });
261 |
--------------------------------------------------------------------------------
/templates/layout/base.html:
--------------------------------------------------------------------------------
[The HTML markup of this template was lost in extraction. Recoverable content: a base
layout that defines {{block "title" .}}MCP Web UI{{end}} in the document head and
{{block "content" .}}{{end}} in the body.]
--------------------------------------------------------------------------------
/templates/pages/home.html:
--------------------------------------------------------------------------------
1 | {{template "base.html" .}}
2 |
3 | {{define "content"}}
[The page markup below this point was lost in extraction. Recoverable structure: a
sidebar with a "Chats" heading and a "New Chat" button; the chat list rendered via
{{range .Chats}}{{template "chat_title" .}}{{end}}; an "MCP" panel that includes
{{template "list_servers.html" .}}, {{template "list_tools.html" .}},
{{template "list_resources.html" .}}, and {{template "list_prompts.html" .}}; and a
main pane that renders {{template "chatbox" .}} when .CurrentChatID is set and
{{template "welcome" .}} otherwise. The Bootstrap modal blocks at the end of the file
(the server, prompt, and resource modals referenced from home.js) and the page's inline
scripts were stripped entirely.]
{{end}}
--------------------------------------------------------------------------------
/templates/partials/ai_message.html:
--------------------------------------------------------------------------------
{{define "ai_message"}}
[Markup lost in extraction. Recoverable content: an "AI" avatar, a message bubble
rendering {{.Content}}, a "Loading... AI is thinking..." indicator shown when
{{if (eq .StreamingState "loading")}}, and a timestamp via {{.Timestamp.Format "15:04"}}.]
{{end}}
--------------------------------------------------------------------------------
/templates/partials/chat_title.html:
--------------------------------------------------------------------------------
{{define "chat_title"}}
[Markup lost in extraction. Recoverable content: a chat-list entry whose controls were
stripped, plus an additional block rendered only inside {{if .Title}} ... {{end}}.]
{{end}}
--------------------------------------------------------------------------------
/templates/partials/chatbox.html:
--------------------------------------------------------------------------------
{{define "chatbox"}}
[Markup lost in extraction. Recoverable logic: the message history renders each entry with
{{range .Messages}}{{if eq .Role "user"}}{{template "user_message" .}}{{else}}{{template "ai_message" .}}{{end}}{{end}};
the chat input form (id "chat-form-chatbox", per home.js) and an inline script block were
stripped.]
{{end}}
--------------------------------------------------------------------------------
/templates/partials/list_prompts.html:
--------------------------------------------------------------------------------
[Markup lost in extraction. Recoverable logic: a "Prompts" panel with a heading button,
listing each entry via {{range $index, $prompt := .Prompts}} ... {{$prompt.Name}} ...
{{end}}, plus a stripped script block (presumably the one defining the promptsList global
consumed by home.js).]
--------------------------------------------------------------------------------
/templates/partials/list_resources.html:
--------------------------------------------------------------------------------
[Markup lost in extraction. Recoverable logic: a "Resources" panel with a heading button,
listing each entry via {{range $index, $resource := .Resources}} ... {{$resource.Name}} ...
{{end}}, plus a stripped script block (presumably defining the resourcesList global used by
home.js).]
--------------------------------------------------------------------------------
/templates/partials/list_servers.html:
--------------------------------------------------------------------------------
[Markup lost in extraction. Recoverable logic: a "Servers" panel with a heading button,
listing each entry via {{range .Servers}} ... {{.Name}} {{.Version}} ... {{end}}, plus a
stripped script block.]
--------------------------------------------------------------------------------
/templates/partials/list_tools.html:
--------------------------------------------------------------------------------
[Markup lost in extraction. Recoverable logic: a "Tools" panel with a heading button,
listing each entry via {{range .Tools}} ... {{.Name}} ... {{end}}.]
--------------------------------------------------------------------------------
/templates/partials/user_message.html:
--------------------------------------------------------------------------------
{{define "user_message"}}
[Markup lost in extraction. Recoverable content: a message bubble rendering {{.Content}},
a timestamp via {{.Timestamp.Format "15:04"}}, and a "You" avatar.]
{{end}}
--------------------------------------------------------------------------------
/templates/partials/welcome.html:
--------------------------------------------------------------------------------
{{define "welcome"}}
[Markup lost in extraction. Recoverable content: a "Hello there!" greeting followed by the
chat input form (id "chat-form-welcome", per home.js) whose markup was stripped.]
{{end}}
--------------------------------------------------------------------------------