├── .github
├── CODEOWNERS
├── dependabot.yml
└── workflows
│ ├── build.yml
│ ├── dependabot-sync.yml
│ ├── goreleaser.yml
│ ├── lint-sync.yml
│ ├── lint.yml
│ └── nightly.yml
├── .gitignore
├── .golangci.yml
├── .goreleaser.yml
├── LICENSE
├── README.md
├── anim.go
├── config.go
├── config_template.yml
├── config_test.go
├── db.go
├── db_test.go
├── error.go
├── examples.go
├── examples.md
├── examples
├── conversations.tape
├── demo.tape
└── v1.5.tape
├── features.md
├── flag.go
├── flag_test.go
├── go.mod
├── go.sum
├── internal
├── anthropic
│ ├── anthropic.go
│ └── format.go
├── cache
│ ├── cache.go
│ ├── cache_test.go
│ ├── convo.go
│ └── expiring.go
├── cohere
│ ├── cohere.go
│ └── format.go
├── copilot
│ └── copilot.go
├── google
│ ├── format.go
│ ├── google.go
│ └── http.go
├── ollama
│ ├── format.go
│ └── ollama.go
├── openai
│ ├── format.go
│ └── openai.go
├── proto
│ ├── proto.go
│ ├── proto_test.go
│ └── testdata
│ │ └── TestStringer.golden
└── stream
│ └── stream.go
├── load.go
├── load_test.go
├── main.go
├── main_test.go
├── mcp.go
├── messages.go
├── messages_test.go
├── mods.go
├── mods_errors.go
├── mods_test.go
├── sha.go
├── stream.go
├── styles.go
├── sync.go
└── term.go
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @caarlos0
2 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | updates:
4 | - package-ecosystem: "gomod"
5 | directory: "/"
6 | schedule:
7 | interval: "weekly"
8 | day: "monday"
9 | time: "05:00"
10 | timezone: "America/New_York"
11 | labels:
12 | - "dependencies"
13 | commit-message:
14 | prefix: "chore"
15 | include: "scope"
16 |
17 | - package-ecosystem: "github-actions"
18 | directory: "/"
19 | schedule:
20 | interval: "weekly"
21 | day: "monday"
22 | time: "05:00"
23 | timezone: "America/New_York"
24 | labels:
25 | - "dependencies"
26 | commit-message:
27 | prefix: "chore"
28 | include: "scope"
29 |
30 | - package-ecosystem: "docker"
31 | directory: "/"
32 | schedule:
33 | interval: "weekly"
34 | day: "monday"
35 | time: "05:00"
36 | timezone: "America/New_York"
37 | labels:
38 | - "dependencies"
39 | commit-message:
40 | prefix: "chore"
41 | include: "scope"
42 |
--------------------------------------------------------------------------------
/.github/workflows/build.yml:
--------------------------------------------------------------------------------
1 | name: build
2 |
3 | on: [push, pull_request]
4 |
5 | jobs:
6 | build:
7 | strategy:
8 | matrix:
9 | os: [ubuntu-latest, macos-latest, windows-latest]
10 | runs-on: ${{ matrix.os }}
11 | env:
12 | GO111MODULE: "on"
13 | steps:
14 | - name: Install Go
15 | uses: actions/setup-go@v5
16 | with:
17 | go-version: stable
18 | check-latest: true
19 |
20 | - name: Checkout code
21 | uses: actions/checkout@v4
22 |
23 | - name: Download Go modules
24 | run: go mod download
25 |
26 | - name: Build
27 | run: go build -v ./...
28 |
29 | - name: Test
30 | run: go test -v -cover -timeout=30s ./...
31 |
32 | snapshot:
33 | uses: charmbracelet/meta/.github/workflows/snapshot.yml@main
34 | secrets:
35 | goreleaser_key: ${{ secrets.GORELEASER_KEY }}
36 |
37 | dependabot:
38 | needs: [build]
39 | runs-on: ubuntu-latest
40 | permissions:
41 | pull-requests: write
42 | contents: write
43 | if: ${{ github.actor == 'dependabot[bot]' && github.event_name == 'pull_request'}}
44 | steps:
45 | - id: metadata
46 | uses: dependabot/fetch-metadata@v2
47 | with:
48 | github-token: "${{ secrets.GITHUB_TOKEN }}"
49 | - run: |
50 | gh pr review --approve "$PR_URL"
51 | gh pr merge --squash --auto "$PR_URL"
52 | env:
53 | PR_URL: ${{github.event.pull_request.html_url}}
54 | GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
55 |
--------------------------------------------------------------------------------
/.github/workflows/dependabot-sync.yml:
--------------------------------------------------------------------------------
1 | name: dependabot-sync
2 | on:
3 | schedule:
4 | - cron: "0 0 * * 0" # every Sunday at midnight
5 | workflow_dispatch: # allows manual triggering
6 |
7 | permissions:
8 | contents: write
9 | pull-requests: write
10 |
11 | jobs:
12 | dependabot-sync:
13 | uses: charmbracelet/meta/.github/workflows/dependabot-sync.yml@main
14 | with:
15 | repo_name: ${{ github.event.repository.name }}
16 | secrets:
17 | gh_token: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
18 |
--------------------------------------------------------------------------------
/.github/workflows/goreleaser.yml:
--------------------------------------------------------------------------------
1 | # yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json
2 |
3 | name: goreleaser
4 |
5 | on:
6 | push:
7 | tags:
8 | - v*.*.*
9 |
10 | concurrency:
11 | group: goreleaser
12 | cancel-in-progress: true
13 |
14 | jobs:
15 | goreleaser:
16 | uses: charmbracelet/meta/.github/workflows/goreleaser.yml@main
17 | secrets:
18 | docker_username: ${{ secrets.DOCKERHUB_USERNAME }}
19 | docker_token: ${{ secrets.DOCKERHUB_TOKEN }}
20 | gh_pat: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
21 | goreleaser_key: ${{ secrets.GORELEASER_KEY }}
22 | aur_key: ${{ secrets.AUR_KEY }}
23 | fury_token: ${{ secrets.FURY_TOKEN }}
24 | nfpm_gpg_key: ${{ secrets.NFPM_GPG_KEY }}
25 | nfpm_passphrase: ${{ secrets.NFPM_PASSPHRASE }}
26 | macos_sign_p12: ${{ secrets.MACOS_SIGN_P12 }}
27 | macos_sign_password: ${{ secrets.MACOS_SIGN_PASSWORD }}
28 | macos_notary_issuer_id: ${{ secrets.MACOS_NOTARY_ISSUER_ID }}
29 | macos_notary_key_id: ${{ secrets.MACOS_NOTARY_KEY_ID }}
30 | macos_notary_key: ${{ secrets.MACOS_NOTARY_KEY }}
31 |
--------------------------------------------------------------------------------
/.github/workflows/lint-sync.yml:
--------------------------------------------------------------------------------
1 | name: lint-sync
2 | on:
3 | schedule:
4 | # every Sunday at midnight
5 | - cron: "0 0 * * 0"
6 | workflow_dispatch: # allows manual triggering
7 |
8 | permissions:
9 | contents: write
10 | pull-requests: write
11 |
12 | jobs:
13 | lint:
14 | uses: charmbracelet/meta/.github/workflows/lint-sync.yml@main
15 |
--------------------------------------------------------------------------------
/.github/workflows/lint.yml:
--------------------------------------------------------------------------------
1 | name: lint
2 | on:
3 | push:
4 | pull_request:
5 |
6 | jobs:
7 | lint:
8 | uses: charmbracelet/meta/.github/workflows/lint.yml@main
9 |
--------------------------------------------------------------------------------
/.github/workflows/nightly.yml:
--------------------------------------------------------------------------------
1 | name: nightly
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 |
8 | jobs:
9 | nightly:
10 | uses: charmbracelet/meta/.github/workflows/nightly.yml@main
11 | secrets:
12 | docker_username: ${{ secrets.DOCKERHUB_USERNAME }}
13 | docker_token: ${{ secrets.DOCKERHUB_TOKEN }}
14 | goreleaser_key: ${{ secrets.GORELEASER_KEY }}
15 | macos_sign_p12: ${{ secrets.MACOS_SIGN_P12 }}
16 | macos_sign_password: ${{ secrets.MACOS_SIGN_PASSWORD }}
17 | macos_notary_issuer_id: ${{ secrets.MACOS_NOTARY_ISSUER_ID }}
18 | macos_notary_key_id: ${{ secrets.MACOS_NOTARY_KEY_ID }}
19 | macos_notary_key: ${{ secrets.MACOS_NOTARY_KEY }}
20 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | mods
2 | .envrc
3 | completions/
4 | manpages/
5 | dist/
6 |
--------------------------------------------------------------------------------
/.golangci.yml:
--------------------------------------------------------------------------------
1 | version: "2"
2 | run:
3 | tests: false
4 | linters:
5 | enable:
6 | - bodyclose
7 | - exhaustive
8 | - goconst
9 | - godot
10 | - gomoddirectives
11 | - goprintffuncname
12 | - gosec
13 | - misspell
14 | - nakedret
15 | - nestif
16 | - nilerr
17 | - noctx
18 | - nolintlint
19 | - prealloc
20 | - revive
21 | - rowserrcheck
22 | - sqlclosecheck
23 | - tparallel
24 | - unconvert
25 | - unparam
26 | - whitespace
27 | - wrapcheck
28 | exclusions:
29 | generated: lax
30 | presets:
31 | - common-false-positives
32 | issues:
33 | max-issues-per-linter: 0
34 | max-same-issues: 0
35 | formatters:
36 | enable:
37 | - gofumpt
38 | - goimports
39 | exclusions:
40 | generated: lax
41 |
--------------------------------------------------------------------------------
/.goreleaser.yml:
--------------------------------------------------------------------------------
1 | # The lines beneath this are called `modelines`. See `:help modeline`
2 | # Feel free to remove those if you don't want/use them.
3 | # yaml-language-server: $schema=https://goreleaser.com/static/schema-pro.json
4 | # vim: set ts=2 sw=2 tw=0 fo=cnqoj
5 | version: 2
6 | includes:
7 | - from_url:
8 | url: charmbracelet/meta/main/goreleaser-mods.yaml
9 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Charmbracelet, Inc
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Mods
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 | AI for the command line, built for pipelines.
11 |
12 | 
13 |
14 | Large Language Models (LLM) based AI is useful to ingest command output and
15 | format results in Markdown, JSON, and other text based formats. Mods is a
16 | tool to add a sprinkle of AI in your command line and make your pipelines
17 | artificially intelligent.
18 |
19 | It works great with LLMs running locally through [LocalAI]. You can also use
20 | [OpenAI], [Cohere], [Groq], or [Azure OpenAI].
21 |
22 | [LocalAI]: https://github.com/go-skynet/LocalAI
23 | [OpenAI]: https://platform.openai.com/account/api-keys
24 | [Cohere]: https://dashboard.cohere.com/api-keys
25 | [Groq]: https://console.groq.com/keys
26 | [Azure OpenAI]: https://azure.microsoft.com/en-us/products/cognitive-services/openai-service
27 |
28 | ### Installation
29 |
30 | Use a package manager:
31 |
32 | ```bash
33 | # macOS or Linux
34 | brew install charmbracelet/tap/mods
35 |
36 | # Windows (with Winget)
37 | winget install charmbracelet.mods
38 |
39 | # Arch Linux (btw)
40 | yay -S mods
41 |
42 | # Nix
43 | nix-shell -p mods
44 | ```
45 |
46 |
47 | Debian/Ubuntu
48 |
49 | ```bash
50 | sudo mkdir -p /etc/apt/keyrings
51 | curl -fsSL https://repo.charm.sh/apt/gpg.key | sudo gpg --dearmor -o /etc/apt/keyrings/charm.gpg
52 | echo "deb [signed-by=/etc/apt/keyrings/charm.gpg] https://repo.charm.sh/apt/ * *" | sudo tee /etc/apt/sources.list.d/charm.list
53 | sudo apt update && sudo apt install mods
54 | ```
55 |
56 |
57 |
58 |
59 | Fedora/RHEL
60 |
61 | ```bash
62 | echo '[charm]
63 | name=Charm
64 | baseurl=https://repo.charm.sh/yum/
65 | enabled=1
66 | gpgcheck=1
67 | gpgkey=https://repo.charm.sh/yum/gpg.key' | sudo tee /etc/yum.repos.d/charm.repo
68 | sudo yum install mods
69 | ```
70 |
71 |
72 |
73 | Or, download it:
74 |
75 | - [Packages][releases] are available in Debian and RPM formats
76 | - [Binaries][releases] are available for Linux, macOS, and Windows
77 |
78 | [releases]: https://github.com/charmbracelet/mods/releases
79 |
80 | Or, just install it with `go`:
81 |
82 | ```sh
83 | go install github.com/charmbracelet/mods@latest
84 | ```
85 |
86 |
87 | Shell Completions
88 |
89 | All the packages and archives come with pre-generated completion files for Bash,
90 | ZSH, Fish, and PowerShell.
91 |
92 | If you built it from source, you can generate them with:
93 |
94 | ```bash
95 | mods completion bash -h
96 | mods completion zsh -h
97 | mods completion fish -h
98 | mods completion powershell -h
99 | ```
100 |
101 | If you use a package (like Homebrew, Debs, etc), the completions should be set
102 | up automatically, given your shell is configured properly.
103 |
104 |
105 |
106 | ## What Can It Do?
107 |
108 | Mods works by reading standard in and prefacing it with a prompt supplied in
109 | the `mods` arguments. It sends the input text to an LLM and prints out the
110 | result, optionally asking the LLM to format the response as Markdown. This
111 | gives you a way to "question" the output of a command. Mods will also work on
112 | standard in or an argument supplied prompt individually.
113 |
114 | Be sure to check out the [examples](examples.md) and a list of all the
115 | [features](features.md).
116 |
117 | Mods works with OpenAI compatible endpoints. By default, Mods is configured to
118 | support OpenAI's official API and a LocalAI installation running on port 8080.
119 | You can configure additional endpoints in your settings file by running
120 | `mods --settings`.
121 |
122 | ## Saved Conversations
123 |
124 | Conversations are saved locally by default. Each conversation has a SHA-1
125 | identifier and a title (like `git`!).
126 |
127 |
128 |
129 |
130 |
131 | Check the [`./features.md`](./features.md) for more details.
132 |
133 | ## Usage
134 |
135 | - `-m`, `--model`: Specify Large Language Model to use
136 | - `-M`, `--ask-model`: Ask which model to use via interactive prompt
137 | - `-f`, `--format`: Ask the LLM to format the response in a given format
138 | - `--format-as`: Specify the format for the output (used with `--format`)
139 | - `-P`, `--prompt`: Include the prompt from the arguments and stdin, truncate stdin to specified number of lines
140 | - `-p`, `--prompt-args`: Include the prompt from the arguments in the response
141 | - `-q`, `--quiet`: Only output errors to standard err
142 | - `-r`, `--raw`: Print raw response without syntax highlighting
143 | - `--settings`: Open settings
144 | - `-x`, `--http-proxy`: Use HTTP proxy to connect to the API endpoints
145 | - `--max-retries`: Maximum number of retries
146 | - `--max-tokens`: Specify maximum tokens with which to respond
147 | - `--no-limit`: Do not limit the response tokens
148 | - `--role`: Specify the role to use (See [custom roles](#custom-roles))
149 | - `--word-wrap`: Wrap output at width (defaults to 80)
150 | - `--reset-settings`: Restore settings to default
151 | - `--theme`: Theme to use in the forms; valid choices are: `charm`, `catppuccin`, `dracula`, and `base16`
152 | - `--status-text`: Text to show while generating
153 |
154 | #### Conversations
155 |
156 | - `-t`, `--title`: Set the title for the conversation.
157 | - `-l`, `--list`: List saved conversations.
158 | - `-c`, `--continue`: Continue from last response or specific title or SHA-1.
159 | - `-C`, `--continue-last`: Continue the last conversation.
160 | - `-s`, `--show`: Show saved conversation for the given title or SHA-1.
161 | - `-S`, `--show-last`: Show previous conversation.
162 | - `--delete-older-than=`: Deletes conversations older than given duration (`10d`, `1mo`).
163 | - `--delete`: Deletes the saved conversations for the given titles or SHA-1s.
164 | - `--no-cache`: Do not save conversations.
165 |
166 | #### MCP
167 |
168 | - `--mcp-list`: List all available MCP servers
169 | - `--mcp-list-tools`: List all available tools from enabled MCP servers
170 | - `--mcp-disable`: Disable specific MCP servers
171 |
172 | #### Advanced
173 |
174 | - `--fanciness`: Level of fanciness
175 | - `--temp`: Sampling temperature
176 | - `--topp`: Top P value
177 | - `--topk`: Top K value
178 |
179 | ## Custom Roles
180 |
181 | Roles allow you to set system prompts. Here is an example of a `shell` role:
182 |
183 | ```yaml
184 | roles:
185 | shell:
186 | - you are a shell expert
187 | - you do not explain anything
188 | - you simply output one liners to solve the problems you're asked
189 | - you do not provide any explanation whatsoever, ONLY the command
190 | ```
191 |
192 | Then, use the custom role in `mods`:
193 |
194 | ```sh
195 | mods --role shell list files in the current directory
196 | ```
197 |
198 | ## Setup
199 |
200 | ### Open AI
201 |
202 | Mods uses GPT-4 by default. It will fall back to GPT-3.5 Turbo.
203 |
204 | Set the `OPENAI_API_KEY` environment variable. If you don't have one yet, you
205 | can grab it from the [OpenAI website](https://platform.openai.com/account/api-keys).
206 |
207 | Alternatively, set the `AZURE_OPENAI_KEY` environment variable to use Azure
208 | OpenAI. Grab a key from [Azure](https://azure.microsoft.com/en-us/products/cognitive-services/openai-service).
209 |
210 | ### Cohere
211 |
212 | Cohere provides enterprise optimized models.
213 |
214 | Set the `COHERE_API_KEY` environment variable. If you don't have one yet, you can
215 | get it from the [Cohere dashboard](https://dashboard.cohere.com/api-keys).
216 |
217 | ### Local AI
218 |
219 | Local AI allows you to run models locally. Mods works with the GPT4ALL-J model
220 | as setup in [this tutorial](https://github.com/go-skynet/LocalAI#example-use-gpt4all-j-model).
221 |
222 | ### Groq
223 |
224 | Groq provides models powered by their LPU inference engine.
225 |
226 | Set the `GROQ_API_KEY` environment variable. If you don't have one yet, you can
227 | get it from the [Groq console](https://console.groq.com/keys).
228 |
229 | ### Gemini
230 |
231 | Mods supports using Gemini models from Google.
232 |
233 | Set the `GOOGLE_API_KEY` environment variable. If you don't have one yet,
234 | you can get it from the [Google AI Studio](https://aistudio.google.com/apikey).
235 |
236 | ## Whatcha Think?
237 |
238 | We’d love to hear your thoughts on this project. Feel free to drop us a note.
239 |
240 | - [Twitter](https://twitter.com/charmcli)
241 | - [The Fediverse](https://mastodon.social/@charmcli)
242 | - [Discord](https://charm.sh/chat)
243 |
244 | ## License
245 |
246 | [MIT](https://github.com/charmbracelet/mods/raw/main/LICENSE)
247 |
248 | ---
249 |
250 | Part of [Charm](https://charm.sh).
251 |
252 |
253 |
254 |
255 | Charm热爱开源 • Charm loves open source
256 |
--------------------------------------------------------------------------------
/anim.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "math/rand"
5 | "strings"
6 | "time"
7 |
8 | "github.com/charmbracelet/bubbles/spinner"
9 | tea "github.com/charmbracelet/bubbletea"
10 | "github.com/charmbracelet/lipgloss"
11 | "github.com/lucasb-eyer/go-colorful"
12 | "github.com/muesli/termenv"
13 | )
14 |
15 | const (
16 | charCyclingFPS = time.Second / 22
17 | colorCycleFPS = time.Second / 5
18 | maxCyclingChars = 120
19 | )
20 |
21 | var charRunes = []rune("0123456789abcdefABCDEF~!@#$£€%^&*()+=_")
22 |
23 | type charState int
24 |
25 | const (
26 | charInitialState charState = iota
27 | charCyclingState
28 | charEndOfLifeState
29 | )
30 |
31 | // cyclingChar is a single animated character.
32 | type cyclingChar struct {
33 | finalValue rune // if < 0 cycle forever
34 | currentValue rune
35 | initialDelay time.Duration
36 | lifetime time.Duration
37 | }
38 |
39 | func (c cyclingChar) randomRune() rune {
40 | return (charRunes)[rand.Intn(len(charRunes))] //nolint:gosec
41 | }
42 |
43 | func (c cyclingChar) state(start time.Time) charState {
44 | now := time.Now()
45 | if now.Before(start.Add(c.initialDelay)) {
46 | return charInitialState
47 | }
48 | if c.finalValue > 0 && now.After(start.Add(c.initialDelay)) {
49 | return charEndOfLifeState
50 | }
51 | return charCyclingState
52 | }
53 |
54 | type stepCharsMsg struct{}
55 |
56 | func stepChars() tea.Cmd {
57 | return tea.Tick(charCyclingFPS, func(time.Time) tea.Msg {
58 | return stepCharsMsg{}
59 | })
60 | }
61 |
62 | type colorCycleMsg struct{}
63 |
64 | func cycleColors() tea.Cmd {
65 | return tea.Tick(colorCycleFPS, func(time.Time) tea.Msg {
66 | return colorCycleMsg{}
67 | })
68 | }
69 |
70 | // anim is the model that manages the animation that displays while the
71 | // output is being generated.
72 | type anim struct {
73 | start time.Time
74 | cyclingChars []cyclingChar
75 | labelChars []cyclingChar
76 | ramp []lipgloss.Style
77 | label []rune
78 | ellipsis spinner.Model
79 | ellipsisStarted bool
80 | styles styles
81 | }
82 |
83 | func newAnim(cyclingCharsSize uint, label string, r *lipgloss.Renderer, s styles) anim {
84 | // #nosec G115
85 | n := int(cyclingCharsSize)
86 | if n > maxCyclingChars {
87 | n = maxCyclingChars
88 | }
89 |
90 | gap := " "
91 | if n == 0 {
92 | gap = ""
93 | }
94 |
95 | c := anim{
96 | start: time.Now(),
97 | label: []rune(gap + label),
98 | ellipsis: spinner.New(spinner.WithSpinner(spinner.Ellipsis)),
99 | styles: s,
100 | }
101 |
102 | // If we're in truecolor mode (and there are enough cycling characters)
103 | // color the cycling characters with a gradient ramp.
104 | const minRampSize = 3
105 | if n >= minRampSize && r.ColorProfile() == termenv.TrueColor {
106 | // Note: double capacity for color cycling as we'll need to reverse and
107 | // append the ramp for seamless transitions.
108 | c.ramp = make([]lipgloss.Style, n, n*2) //nolint:mnd
109 | ramp := makeGradientRamp(n)
110 | for i, color := range ramp {
111 | c.ramp[i] = r.NewStyle().Foreground(color)
112 | }
113 | c.ramp = append(c.ramp, reverse(c.ramp)...) // reverse and append for color cycling
114 | }
115 |
116 | makeDelay := func(a int32, b time.Duration) time.Duration {
117 | return time.Duration(rand.Int31n(a)) * (time.Millisecond * b) //nolint:gosec
118 | }
119 |
120 | makeInitialDelay := func() time.Duration {
121 | return makeDelay(8, 60) //nolint:mnd
122 | }
123 |
124 | // Initial characters that cycle forever.
125 | c.cyclingChars = make([]cyclingChar, n)
126 |
127 | for i := 0; i < n; i++ {
128 | c.cyclingChars[i] = cyclingChar{
129 | finalValue: -1, // cycle forever
130 | initialDelay: makeInitialDelay(),
131 | }
132 | }
133 |
134 | // Label text that only cycles for a little while.
135 | c.labelChars = make([]cyclingChar, len(c.label))
136 |
137 | for i, r := range c.label {
138 | c.labelChars[i] = cyclingChar{
139 | finalValue: r,
140 | initialDelay: makeInitialDelay(),
141 | lifetime: makeDelay(5, 180), //nolint:mnd
142 | }
143 | }
144 |
145 | return c
146 | }
147 |
148 | // Init initializes the animation.
149 | func (anim) Init() tea.Cmd {
150 | return tea.Batch(stepChars(), cycleColors())
151 | }
152 |
153 | // Update handles messages.
154 | func (a anim) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
155 | var cmd tea.Cmd
156 | switch msg.(type) {
157 | case stepCharsMsg:
158 | a.updateChars(&a.cyclingChars)
159 | a.updateChars(&a.labelChars)
160 |
161 | if !a.ellipsisStarted {
162 | var eol int
163 | for _, c := range a.labelChars {
164 | if c.state(a.start) == charEndOfLifeState {
165 | eol++
166 | }
167 | }
168 | if eol == len(a.label) {
169 | // If our entire label has reached end of life, start the
170 | // ellipsis "spinner" after a short pause.
171 | a.ellipsisStarted = true
172 | cmd = tea.Tick(time.Millisecond*220, func(time.Time) tea.Msg { //nolint:mnd
173 | return a.ellipsis.Tick()
174 | })
175 | }
176 | }
177 |
178 | return a, tea.Batch(stepChars(), cmd)
179 | case colorCycleMsg:
180 | const minColorCycleSize = 2
181 | if len(a.ramp) < minColorCycleSize {
182 | return a, nil
183 | }
184 | a.ramp = append(a.ramp[1:], a.ramp[0])
185 | return a, cycleColors()
186 | case spinner.TickMsg:
187 | var cmd tea.Cmd
188 | a.ellipsis, cmd = a.ellipsis.Update(msg)
189 | return a, cmd
190 | default:
191 | return a, nil
192 | }
193 | }
194 |
195 | func (a *anim) updateChars(chars *[]cyclingChar) {
196 | for i, c := range *chars {
197 | switch c.state(a.start) {
198 | case charInitialState:
199 | (*chars)[i].currentValue = '.'
200 | case charCyclingState:
201 | (*chars)[i].currentValue = c.randomRune()
202 | case charEndOfLifeState:
203 | (*chars)[i].currentValue = c.finalValue
204 | }
205 | }
206 | }
207 |
208 | // View renders the animation.
209 | func (a anim) View() string {
210 | var b strings.Builder
211 |
212 | for i, c := range a.cyclingChars {
213 | if len(a.ramp) > i {
214 | b.WriteString(a.ramp[i].Render(string(c.currentValue)))
215 | continue
216 | }
217 | b.WriteRune(c.currentValue)
218 | }
219 |
220 | for _, c := range a.labelChars {
221 | b.WriteRune(c.currentValue)
222 | }
223 |
224 | return b.String() + a.ellipsis.View()
225 | }
226 |
227 | func makeGradientRamp(length int) []lipgloss.Color {
228 | const startColor = "#F967DC"
229 | const endColor = "#6B50FF"
230 | var (
231 | c = make([]lipgloss.Color, length)
232 | start, _ = colorful.Hex(startColor)
233 | end, _ = colorful.Hex(endColor)
234 | )
235 | for i := 0; i < length; i++ {
236 | step := start.BlendLuv(end, float64(i)/float64(length))
237 | c[i] = lipgloss.Color(step.Hex())
238 | }
239 | return c
240 | }
241 |
242 | func makeGradientText(baseStyle lipgloss.Style, str string) string {
243 | const minSize = 3
244 | if len(str) < minSize {
245 | return str
246 | }
247 | b := strings.Builder{}
248 | runes := []rune(str)
249 | for i, c := range makeGradientRamp(len(str)) {
250 | b.WriteString(baseStyle.Foreground(c).Render(string(runes[i])))
251 | }
252 | return b.String()
253 | }
254 |
255 | func reverse[T any](in []T) []T {
256 | out := make([]T, len(in))
257 | copy(out, in[:])
258 | for i, j := 0, len(out)-1; i < j; i, j = i+1, j-1 {
259 | out[i], out[j] = out[j], out[i]
260 | }
261 | return out
262 | }
263 |
--------------------------------------------------------------------------------
/config.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "os"
7 | "path/filepath"
8 | "text/template"
9 | "time"
10 |
11 | _ "embed"
12 |
13 | "github.com/adrg/xdg"
14 | "github.com/caarlos0/duration"
15 | "github.com/caarlos0/env/v9"
16 | "github.com/charmbracelet/x/exp/strings"
17 | "github.com/muesli/termenv"
18 | "github.com/spf13/cobra"
19 | flag "github.com/spf13/pflag"
20 | "gopkg.in/yaml.v3"
21 | )
22 |
23 | //go:embed config_template.yml
24 | var configTemplate string
25 |
26 | const (
27 | defaultMarkdownFormatText = "Format the response as markdown without enclosing backticks."
28 | defaultJSONFormatText = "Format the response as json without enclosing backticks."
29 | )
30 |
31 | var help = map[string]string{
32 | "api": "OpenAI compatible REST API (openai, localai, anthropic, ...)",
33 | "apis": "Aliases and endpoints for OpenAI compatible REST API",
34 | "http-proxy": "HTTP proxy to use for API requests",
35 | "model": "Default model (gpt-3.5-turbo, gpt-4, ggml-gpt4all-j...)",
36 | "ask-model": "Ask which model to use via interactive prompt",
37 | "max-input-chars": "Default character limit on input to model",
38 | "format": "Ask for the response to be formatted as markdown unless otherwise set",
39 | "format-text": "Text to append when using the -f flag",
40 | "role": "System role to use",
41 | "roles": "List of predefined system messages that can be used as roles",
42 | "list-roles": "List the roles defined in your configuration file",
43 | "prompt": "Include the prompt from the arguments and stdin, truncate stdin to specified number of lines",
44 | "prompt-args": "Include the prompt from the arguments in the response",
45 | "raw": "Render output as raw text when connected to a TTY",
46 | "quiet": "Quiet mode (hide the spinner while loading and stderr messages for success)",
47 | "help": "Show help and exit",
48 | "version": "Show version and exit",
49 | "max-retries": "Maximum number of times to retry API calls",
50 | "no-limit": "Turn off the client-side limit on the size of the input into the model",
51 | "word-wrap": "Wrap formatted output at specific width (default is 80)",
52 | "max-tokens": "Maximum number of tokens in response",
53 | "temp": "Temperature (randomness) of results, from 0.0 to 2.0",
54 | "stop": "Up to 4 sequences where the API will stop generating further tokens",
55 | "topp": "TopP, an alternative to temperature that narrows response, from 0.0 to 1.0",
56 | "topk": "TopK, only sample from the top K options for each subsequent token",
57 | "fanciness": "Your desired level of fanciness",
58 | "status-text": "Text to show while generating",
59 | "settings": "Open settings in your $EDITOR",
60 | "dirs": "Print the directories in which mods store its data",
61 | "reset-settings": "Backup your old settings file and reset everything to the defaults",
62 | "continue": "Continue from the last response or a given save title",
63 | "continue-last": "Continue from the last response",
64 | "no-cache": "Disables caching of the prompt/response",
65 | "title": "Saves the current conversation with the given title",
66 | "list": "Lists saved conversations",
67 | "delete": "Deletes one or more saved conversations with the given titles or IDs",
68 | "delete-older-than": "Deletes all saved conversations older than the specified duration; valid values are " + strings.EnglishJoin(duration.ValidUnits(), true),
69 | "show": "Show a saved conversation with the given title or ID",
70 | "theme": "Theme to use in the forms; valid choices are charm, catppuccin, dracula, and base16",
71 | "show-last": "Show the last saved conversation",
72 | "editor": "Edit the prompt in your $EDITOR; only taken into account if no other args and if STDIN is a TTY",
73 | "mcp-servers": "MCP Servers configurations",
74 | "mcp-disable": "Disable specific MCP servers",
75 | "mcp-list": "List all available MCP servers",
76 | "mcp-list-tools": "List all available tools from enabled MCP servers",
77 | }
78 |
79 | // Model represents the LLM model used in the API call.
80 | type Model struct {
81 | Name string
82 | API string
83 | MaxChars int64 `yaml:"max-input-chars"`
84 | Aliases []string `yaml:"aliases"`
85 | Fallback string `yaml:"fallback"`
86 | }
87 |
88 | // API represents an API endpoint and its models.
89 | type API struct {
90 | Name string
91 | APIKey string `yaml:"api-key"`
92 | APIKeyEnv string `yaml:"api-key-env"`
93 | APIKeyCmd string `yaml:"api-key-cmd"`
94 | Version string `yaml:"version"` // XXX: not used anywhere
95 | BaseURL string `yaml:"base-url"`
96 | Models map[string]Model `yaml:"models"`
97 | User string `yaml:"user"`
98 | }
99 |
100 | // APIs is a type alias to allow custom YAML decoding.
101 | type APIs []API
102 |
103 | // UnmarshalYAML implements sorted API YAML decoding.
104 | func (apis *APIs) UnmarshalYAML(node *yaml.Node) error {
105 | for i := 0; i < len(node.Content); i += 2 {
106 | var api API
107 | if err := node.Content[i+1].Decode(&api); err != nil {
108 | return fmt.Errorf("error decoding YAML file: %s", err)
109 | }
110 | api.Name = node.Content[i].Value
111 | *apis = append(*apis, api)
112 | }
113 | return nil
114 | }
115 |
// FormatText is a map[format]formatting_text.
type FormatText map[string]string

// UnmarshalYAML conforms with yaml.Unmarshaler. It accepts either the
// legacy form (a single string, stored under the "markdown" key) or the
// newer form (a mapping of format name to formatting text).
func (ft *FormatText) UnmarshalYAML(unmarshal func(interface{}) error) error {
	// Legacy shape first: a bare string applies to markdown output.
	var single string
	if err := unmarshal(&single); err == nil {
		*ft = FormatText{"markdown": single}
		return nil
	}

	// Otherwise it must be a per-format mapping.
	var perFormat map[string]string
	if err := unmarshal(&perFormat); err != nil {
		return err
	}
	*ft = FormatText(perFormat)
	return nil
}
136 |
// Config holds the main configuration and is mapped to the YAML settings file.
// Tagged fields are read from the YAML settings file and/or MODS_-prefixed
// environment variables (see ensureConfig).
type Config struct {
	API                 string     `yaml:"default-api" env:"API"`
	Model               string     `yaml:"default-model" env:"MODEL"`
	Format              bool       `yaml:"format" env:"FORMAT"`
	FormatText          FormatText `yaml:"format-text"`
	FormatAs            string     `yaml:"format-as" env:"FORMAT_AS"`
	Raw                 bool       `yaml:"raw" env:"RAW"`
	Quiet               bool       `yaml:"quiet" env:"QUIET"`
	MaxTokens           int64      `yaml:"max-tokens" env:"MAX_TOKENS"`
	MaxCompletionTokens int64      `yaml:"max-completion-tokens" env:"MAX_COMPLETION_TOKENS"`
	MaxInputChars       int64      `yaml:"max-input-chars" env:"MAX_INPUT_CHARS"`
	Temperature         float64    `yaml:"temp" env:"TEMP"`
	Stop                []string   `yaml:"stop" env:"STOP"`
	TopP                float64    `yaml:"topp" env:"TOPP"`
	TopK                int64      `yaml:"topk" env:"TOPK"`
	NoLimit             bool       `yaml:"no-limit" env:"NO_LIMIT"`
	CachePath           string     `yaml:"cache-path" env:"CACHE_PATH"`
	NoCache             bool       `yaml:"no-cache" env:"NO_CACHE"`
	IncludePromptArgs   bool       `yaml:"include-prompt-args" env:"INCLUDE_PROMPT_ARGS"`
	IncludePrompt       int        `yaml:"include-prompt" env:"INCLUDE_PROMPT"`
	MaxRetries          int        `yaml:"max-retries" env:"MAX_RETRIES"`
	WordWrap            int        `yaml:"word-wrap" env:"WORD_WRAP"`
	Fanciness           uint       `yaml:"fanciness" env:"FANCINESS"`
	StatusText          string     `yaml:"status-text" env:"STATUS_TEXT"`
	HTTPProxy           string     `yaml:"http-proxy" env:"HTTP_PROXY"`
	APIs                APIs       `yaml:"apis"`
	System              string     `yaml:"system"`
	Role                string     `yaml:"role" env:"ROLE"`

	// The fields below carry no yaml/env tags, so they are never populated
	// from the settings file or environment — presumably they are wired to
	// command-line flags elsewhere in the package (TODO confirm).
	AskModel        bool
	Roles           map[string][]string
	ShowHelp        bool
	ResetSettings   bool
	Prefix          string
	Version         bool
	Settings        bool
	Dirs            bool
	Theme           string
	SettingsPath    string // absolute path of mods.yml; set by ensureConfig
	ContinueLast    bool
	Continue        string
	Title           string
	ShowLast        bool
	Show            string
	List            bool
	ListRoles       bool
	Delete          []string
	DeleteOlderThan time.Duration
	User            string

	// MCP (Model Context Protocol) server settings.
	MCPServers   map[string]MCPServerConfig `yaml:"mcp-servers"`
	MCPList      bool
	MCPListTools bool
	MCPDisable   []string

	// Unexported, package-internal state.
	openEditor                                         bool
	cacheReadFromID, cacheWriteToID, cacheWriteToTitle string
}
195 |
// MCPServerConfig holds configuration for an MCP server.
type MCPServerConfig struct {
	Command string   `yaml:"command"` // executable used to launch the server
	Env     []string `yaml:"env"`     // extra environment entries, as KEY=VALUE strings
	Args    []string `yaml:"args"`    // arguments passed to Command
}
202 |
203 | func ensureConfig() (Config, error) {
204 | var c Config
205 | sp, err := xdg.ConfigFile(filepath.Join("mods", "mods.yml"))
206 | if err != nil {
207 | return c, modsError{err, "Could not find settings path."}
208 | }
209 | c.SettingsPath = sp
210 |
211 | dir := filepath.Dir(sp)
212 | if dirErr := os.MkdirAll(dir, 0o700); dirErr != nil { //nolint:mnd
213 | return c, modsError{dirErr, "Could not create cache directory."}
214 | }
215 |
216 | if dirErr := writeConfigFile(sp); dirErr != nil {
217 | return c, dirErr
218 | }
219 | content, err := os.ReadFile(sp)
220 | if err != nil {
221 | return c, modsError{err, "Could not read settings file."}
222 | }
223 | if err := yaml.Unmarshal(content, &c); err != nil {
224 | return c, modsError{err, "Could not parse settings file."}
225 | }
226 |
227 | if err := env.ParseWithOptions(&c, env.Options{Prefix: "MODS_"}); err != nil {
228 | return c, modsError{err, "Could not parse environment into settings file."}
229 | }
230 |
231 | if c.CachePath == "" {
232 | c.CachePath = filepath.Join(xdg.DataHome, "mods")
233 | }
234 |
235 | if err := os.MkdirAll(c.CachePath, 0o700); err != nil { //nolint:mnd
236 | return c, modsError{err, "Could not create cache directory."}
237 | }
238 |
239 | if c.WordWrap == 0 {
240 | c.WordWrap = 80
241 | }
242 |
243 | return c, nil
244 | }
245 |
246 | func writeConfigFile(path string) error {
247 | if _, err := os.Stat(path); errors.Is(err, os.ErrNotExist) {
248 | return createConfigFile(path)
249 | } else if err != nil {
250 | return modsError{err, "Could not stat path."}
251 | }
252 | return nil
253 | }
254 |
255 | func createConfigFile(path string) error {
256 | tmpl := template.Must(template.New("config").Parse(configTemplate))
257 |
258 | f, err := os.Create(path)
259 | if err != nil {
260 | return modsError{err, "Could not create configuration file."}
261 | }
262 | defer func() { _ = f.Close() }()
263 |
264 | m := struct {
265 | Config Config
266 | Help map[string]string
267 | }{
268 | Config: defaultConfig(),
269 | Help: help,
270 | }
271 | if err := tmpl.Execute(f, m); err != nil {
272 | return modsError{err, "Could not render template."}
273 | }
274 | return nil
275 | }
276 |
277 | func defaultConfig() Config {
278 | return Config{
279 | FormatAs: "markdown",
280 | FormatText: FormatText{
281 | "markdown": defaultMarkdownFormatText,
282 | "json": defaultJSONFormatText,
283 | },
284 | }
285 | }
286 |
287 | func useLine() string {
288 | appName := filepath.Base(os.Args[0])
289 |
290 | if stdoutRenderer().ColorProfile() == termenv.TrueColor {
291 | appName = makeGradientText(stdoutStyles().AppName, appName)
292 | }
293 |
294 | return fmt.Sprintf(
295 | "%s %s",
296 | appName,
297 | stdoutStyles().CliArgs.Render("[OPTIONS] [PREFIX TERM]"),
298 | )
299 | }
300 |
301 | func usageFunc(cmd *cobra.Command) error {
302 | fmt.Printf(
303 | "Usage:\n %s\n\n",
304 | useLine(),
305 | )
306 | fmt.Println("Options:")
307 | cmd.Flags().VisitAll(func(f *flag.Flag) {
308 | if f.Hidden {
309 | return
310 | }
311 | if f.Shorthand == "" {
312 | fmt.Printf(
313 | " %-44s %s\n",
314 | stdoutStyles().Flag.Render("--"+f.Name),
315 | stdoutStyles().FlagDesc.Render(f.Usage),
316 | )
317 | } else {
318 | fmt.Printf(
319 | " %s%s %-40s %s\n",
320 | stdoutStyles().Flag.Render("-"+f.Shorthand),
321 | stdoutStyles().FlagComma,
322 | stdoutStyles().Flag.Render("--"+f.Name),
323 | stdoutStyles().FlagDesc.Render(f.Usage),
324 | )
325 | }
326 | })
327 | if cmd.HasExample() {
328 | fmt.Printf(
329 | "\nExample:\n %s\n %s\n",
330 | stdoutStyles().Comment.Render("# "+cmd.Example),
331 | cheapHighlighting(stdoutStyles(), examples[cmd.Example]),
332 | )
333 | }
334 |
335 | return nil
336 | }
337 |
--------------------------------------------------------------------------------
/config_template.yml:
--------------------------------------------------------------------------------
1 | # {{ index .Help "api" }}
2 | default-api: openai
3 | # {{ index .Help "model" }}
4 | default-model: gpt-4o
5 | # {{ index .Help "format-text" }}
6 | format-text:
7 | markdown: '{{ index .Config.FormatText "markdown" }}'
8 | json: '{{ index .Config.FormatText "json" }}'
9 | # {{ index .Help "mcp-servers" }}
10 | mcp-servers:
11 | # Example: GitHub MCP via Docker:
12 | # github:
13 | # command: docker
14 | # env:
15 | # - GITHUB_PERSONAL_ACCESS_TOKEN=xxxyyy
16 | # args:
17 | # - run
18 | # - "-i"
19 | # - "--rm"
20 | # - "-e"
21 | # - GITHUB_PERSONAL_ACCESS_TOKEN
22 | # - "ghcr.io/github/github-mcp-server"
23 | # {{ index .Help "roles" }}
24 | roles:
25 | "default": []
26 | # Example, a role called `shell`:
27 | # shell:
28 | # - you are a shell expert
29 | # - you do not explain anything
30 | # - you simply output one liners to solve the problems you're asked
31 | # - you do not provide any explanation whatsoever, ONLY the command
32 | # {{ index .Help "format" }}
33 | format: false
34 | # {{ index .Help "role" }}
35 | role: "default"
36 | # {{ index .Help "raw" }}
37 | raw: false
38 | # {{ index .Help "quiet" }}
39 | quiet: false
40 | # {{ index .Help "temp" }}
41 | temp: 1.0
42 | # {{ index .Help "topp" }}
43 | topp: 1.0
44 | # {{ index .Help "topk" }}
45 | topk: 50
46 | # {{ index .Help "no-limit" }}
47 | no-limit: false
48 | # {{ index .Help "word-wrap" }}
49 | word-wrap: 80
50 | # {{ index .Help "prompt-args" }}
51 | include-prompt-args: false
52 | # {{ index .Help "prompt" }}
53 | include-prompt: 0
54 | # {{ index .Help "max-retries" }}
55 | max-retries: 5
56 | # {{ index .Help "fanciness" }}
57 | fanciness: 10
58 | # {{ index .Help "status-text" }}
59 | status-text: Generating
60 | # {{ index .Help "theme" }}
61 | theme: charm
62 | # {{ index .Help "max-input-chars" }}
63 | max-input-chars: 12250
64 | # {{ index .Help "max-tokens" }}
65 | # max-tokens: 100
66 | # {{ index .Help "max-completion-tokens" }}
67 | max-completion-tokens: 100
68 | # {{ index .Help "apis" }}
69 | apis:
70 | openai:
71 | base-url: https://api.openai.com/v1
72 | api-key:
73 | api-key-env: OPENAI_API_KEY
74 | # api-key-cmd: rbw get -f OPENAI_API_KEY chat.openai.com
75 | models: # https://platform.openai.com/docs/models
76 | gpt-4.5-preview: #128k https://platform.openai.com/docs/models/gpt-4.5-preview
77 | aliases: ["gpt-4.5", "gpt4.5"]
78 | max-input-chars: 392000
79 | fallback: gpt-4
80 | gpt-4.5-preview-2025-02-27:
81 | max-input-chars: 392000
82 | fallback: gpt-4
83 | gpt-4o-mini:
84 | aliases: ["4o-mini"]
85 | max-input-chars: 392000
86 | fallback: gpt-4o
87 | gpt-4o:
88 | aliases: ["4o"]
89 | max-input-chars: 392000
90 | fallback: gpt-4
91 | gpt-4:
92 | aliases: ["4"]
93 | max-input-chars: 24500
94 | fallback: gpt-3.5-turbo
95 | gpt-4-1106-preview:
96 | aliases: ["128k"]
97 | max-input-chars: 392000
98 | fallback: gpt-4
99 | gpt-4-32k:
100 | aliases: ["32k"]
101 | max-input-chars: 98000
102 | fallback: gpt-4
103 | gpt-3.5-turbo:
104 | aliases: ["35t"]
105 | max-input-chars: 12250
106 | fallback: gpt-3.5
107 | gpt-3.5-turbo-1106:
108 | aliases: ["35t-1106"]
109 | max-input-chars: 12250
110 | fallback: gpt-3.5-turbo
111 | gpt-3.5-turbo-16k:
112 | aliases: ["35t16k"]
113 | max-input-chars: 44500
114 | fallback: gpt-3.5
115 | gpt-3.5:
116 | aliases: ["35"]
117 | max-input-chars: 12250
118 | fallback:
119 | o1:
120 | aliases: ["o1"]
121 | max-input-chars: 200000
122 | o1-preview:
123 | aliases: ["o1-preview"]
124 | max-input-chars: 128000
125 | o1-mini:
126 | aliases: ["o1-mini"]
127 | max-input-chars: 128000
128 | o3-mini:
129 | aliases: ["o3m", "o3-mini"]
130 | max-input-chars: 200000
131 | copilot:
132 | base-url: https://api.githubcopilot.com
133 | models:
134 | gpt-4o-2024-05-13:
135 | aliases: ["4o-2024", "4o", "gpt-4o"]
136 | max-input-chars: 392000
137 | gpt-4:
138 | aliases: ["4"]
139 | max-input-chars: 24500
140 | gpt-3.5-turbo:
141 | aliases: ["35t"]
142 | max-input-chars: 12250
143 | o1-preview-2024-09-12:
144 | aliases: ["o1-preview", "o1p"]
145 | max-input-chars: 128000
146 | claude-3.5-sonnet:
147 | aliases: ["claude3.5-sonnet", "sonnet-3.5", "claude-3-5-sonnet"]
148 | max-input-chars: 680000
149 | o1-preview:
150 | aliases: ["o1-preview"]
151 | max-input-chars: 128000
152 | o1-mini:
153 | aliases: ["o1-mini", "o1m", "o1-mini-2024-09-12"]
154 | max-input-chars: 128000
155 | o3-mini:
156 | aliases: ["o3m", "o3-mini"]
157 | max-input-chars: 128000
158 | gemini-2.0-flash-001:
159 | aliases: ["gm2f", "flash-2", "gemini-2-flash"]
160 | max-input-chars: 4194304
161 | anthropic:
162 | base-url: https://api.anthropic.com/v1
163 | api-key:
164 | api-key-env: ANTHROPIC_API_KEY
165 | models: # https://docs.anthropic.com/en/docs/about-claude/models
166 | claude-sonnet-4-20250514:
167 | aliases: ["claude-sonnet-4", "sonnet-4"]
168 | max-input-chars: 680000
169 | claude-3-7-sonnet-latest:
170 | aliases: ["claude3.7-sonnet", "claude-3-7-sonnet", "sonnet-3.7"]
171 | max-input-chars: 680000
172 | claude-3-7-sonnet-20250219:
173 | max-input-chars: 680000
174 | claude-3-5-sonnet-latest:
175 | aliases: ["claude3.5-sonnet", "claude-3-5-sonnet", "sonnet-3.5"]
176 | max-input-chars: 680000
177 | claude-3-5-sonnet-20241022:
178 | max-input-chars: 680000
179 | claude-3-5-sonnet-20240620:
180 | max-input-chars: 680000
181 | claude-3-opus-20240229:
182 | aliases: ["claude3-opus", "opus"]
183 | max-input-chars: 680000
184 | cohere:
185 | base-url: https://api.cohere.com/v1
186 | models:
187 | command-r-plus:
188 | max-input-chars: 128000
189 | command-r:
190 | max-input-chars: 128000
191 | google:
192 | models: # https://ai.google.dev/gemini-api/docs/models/gemini
193 | gemini-1.5-pro-latest:
194 | aliases: ["gmp", "gemini", "gemini-1.5-pro"]
195 | max-input-chars: 392000
196 | gemini-1.5-flash-latest:
197 | aliases: ["gmf", "flash", "gemini-1.5-flash"]
198 | max-input-chars: 392000
199 | gemini-2.0-flash-001:
200 | aliases: ["gm2f", "flash-2", "gemini-2-flash"]
201 | max-input-chars: 4194304
202 | gemini-2.0-flash-lite:
203 | aliases: ["gm2fl", "flash-2-lite", "gemini-2-flash-lite"]
204 | max-input-chars: 4194304
205 |
206 | ollama:
207 | base-url: http://localhost:11434
208 | models: # https://ollama.com/library
209 | "llama3.2:3b":
210 | aliases: ["llama3.2"]
211 | max-input-chars: 650000
212 | "llama3.2:1b":
213 | aliases: ["llama3.2_1b"]
214 | max-input-chars: 650000
215 | "llama3:70b":
216 | aliases: ["llama3"]
217 | max-input-chars: 650000
218 | perplexity:
219 | base-url: https://api.perplexity.ai
220 | api-key:
221 | api-key-env: PERPLEXITY_API_KEY
222 | models: # https://docs.perplexity.ai/guides/model-cards
223 | llama-3.1-sonar-small-128k-online:
224 | aliases: ["llam31-small"]
225 | max-input-chars: 127072
226 | llama-3.1-sonar-large-128k-online:
227 | aliases: ["llam31-large"]
228 | max-input-chars: 127072
229 | llama-3.1-sonar-huge-128k-online:
230 | aliases: ["llam31-huge"]
231 | max-input-chars: 127072
232 |
233 | groq:
234 | base-url: https://api.groq.com/openai/v1
235 | api-key:
236 | api-key-env: GROQ_API_KEY
237 | models: # https://console.groq.com/docs/models
238 | # Production models
239 | gemma2-9b-it:
240 | aliases: ["gemma2", "gemma"]
241 | max-input-chars: 24500 # 8,192
242 | llama-3.3-70b-versatile:
243 | aliases: ["llama3.3", "llama3.3-70b", "llama3.3-versatile"]
244 | max-input-chars: 392000 # 128K
245 | max-completion-tokens: 98000 # 32,768
246 | llama-3.1-8b-instant:
247 | aliases: ["llama3.1-8b", "llama3.1-instant"]
248 | max-input-chars: 392000 # 128K
249 | max-completion-tokens: 24500 # 8,192
250 | llama-guard-3-8b:
251 | aliases: ["llama-guard"]
252 | max-input-chars: 24500 # 8,192
253 | llama3-70b-8192:
254 | aliases: ["llama3", "llama3-70b"]
255 | max-input-chars: 24500 # 8,192
256 | fallback: llama3-8b-8192
257 | llama3-8b-8192:
258 | aliases: ["llama3-8b"]
259 | max-input-chars: 24500 # 8,192
260 | mixtral-8x7b-32768:
261 | aliases: ["mixtral"]
262 | max-input-chars: 98000 # 32,768
263 | meta-llama/llama-4-scout-17b-16e-instruct:
264 | aliases: ["llama4-scout"]
265 | max-input-chars: 392000 # 128K
266 | meta-llama/llama-4-maverick-17b-128e-instruct:
267 | aliases: ["llama4", "llama4-maverick"]
268 | max-input-chars: 392000 # 128K
269 | # Preview models
270 | mistral-saba-24b:
271 | aliases: ["saba", "mistral-saba", "saba-24b"]
272 | max-input-chars: 98000 # 32K
273 | qwen-2.5-coder-32b:
274 | aliases: ["qwen-coder", "qwen2.5-coder", "qwen-2.5-coder"]
275 | max-input-chars: 392000 # 128K
276 | deepseek-r1-distill-qwen-32b:
277 | aliases: ["deepseek-r1", "r1-qwen", "deepseek-qwen"]
278 | max-input-chars: 392000 # 128K
279 | max-completion-tokens: 49152 # 16,384
280 | deepseek-r1-distill-llama-70b-specdec:
281 | aliases: ["deepseek-r1-specdec", "r1-llama-specdec"]
282 | max-input-chars: 392000 # 128K
283 | max-completion-tokens: 49152 # 16,384
284 | deepseek-r1-distill-llama-70b:
285 | aliases: ["deepseek-r1-llama", "r1-llama"]
286 | max-input-chars: 392000 # 128K
287 | llama-3.3-70b-specdec:
288 | aliases: ["llama3.3-specdec"]
289 | max-input-chars: 24500 # 8,192
290 | llama-3.2-1b-preview:
291 | aliases: ["llama3.2-1b"]
292 | max-input-chars: 392000 # 128K
293 | max-completion-tokens: 24500 # 8,192
294 | llama-3.2-3b-preview:
295 | aliases: ["llama3.2-3b"]
296 | max-input-chars: 392000 # 128K
297 | max-completion-tokens: 24500 # 8,192
298 | llama-3.2-11b-vision-preview:
299 | aliases: ["llama3.2-vision", "llama3.2-11b-vision"]
300 | max-input-chars: 392000 # 128K
301 | max-completion-tokens: 24500 # 8,192
302 | llama-3.2-90b-vision-preview:
303 | aliases: ["llama3.2-90b-vision"]
304 | max-input-chars: 392000 # 128K
305 | max-completion-tokens: 24500 # 8,192
306 |
307 | cerebras:
308 | base-url: https://api.cerebras.ai/v1
309 | api-key:
310 | api-key-env: CEREBRAS_API_KEY
311 | models: # https://inference-docs.cerebras.ai/introduction
312 | llama3.1-8b:
313 | aliases: ["llama3.1-8b-cerebras"]
314 | max-input-chars: 24500
315 | llama3.1-70b:
316 | aliases: ["llama3.1-cerebras", "llama3.1-70b-cerebras"]
317 | max-input-chars: 24500
318 |
319 | sambanova:
320 | base-url: https://api.sambanova.ai/v1
321 | api-key:
322 | api-key-env: SAMBANOVA_API_KEY
323 | models: # https://docs.sambanova.ai/cloud/docs/get-started/supported-models
324 | # Preview models
325 | DeepSeek-R1:
326 | aliases: ["deepseek-r1-sambanova", "deepseek-r1-preview"]
327 | max-input-chars: 24500 # 8k tokens
328 | # Production models
329 | DeepSeek-R1-Distill-Llama-70B:
330 | aliases: ["deepseek-r1-llama-sambanova", "deepseek-r1-distill"]
331 | max-input-chars: 98000 # 32k tokens
332 | Llama-3.1-Tulu-3-405B:
333 | aliases: ["llama3.1-tulu", "tulu-405b"]
334 | max-input-chars: 49000 # 16k tokens
335 | Meta-Llama-3.3-70B-Instruct:
336 | aliases: ["llama3.3-sambanova", "llama3.3-70b-sambanova"]
337 | max-input-chars: 392000 # 128k tokens
338 | Meta-Llama-3.2-3B-Instruct:
339 | aliases: ["llama3.2-3b-sambanova"]
340 | max-input-chars: 24500 # 8k tokens
341 | Meta-Llama-3.2-1B-Instruct:
342 | aliases: ["llama3.2-1b-sambanova"]
343 | max-input-chars: 49000 # 16k tokens
344 | Meta-Llama-3.1-405B-Instruct:
345 | aliases: ["llama3.1-405b-sambanova"]
346 | max-input-chars: 49000 # 16k tokens
347 | Meta-Llama-3.1-70B-Instruct:
348 | aliases: ["llama3.1-70b-sambanova"]
349 | max-input-chars: 392000 # 128k tokens
350 | Meta-Llama-3.1-8B-Instruct:
351 | aliases: ["llama3.1-8b-sambanova"]
352 | max-input-chars: 49000 # 16k tokens
353 | Meta-Llama-Guard-3-8B:
354 | aliases: ["llama-guard-sambanova"]
355 | max-input-chars: 24500 # 8k tokens
356 | Llama-3.2-90B-Vision-Instruct:
357 | aliases: ["llama3.2-vision-90b", "llama3.2-90b-vision-sambanova"]
358 | max-input-chars: 12250 # 4k tokens
359 | Llama-3.2-11B-Vision-Instruct:
360 | aliases: ["llama3.2-vision-11b", "llama3.2-11b-vision-sambanova"]
361 | max-input-chars: 12250 # 4k tokens
362 | Qwen2.5-72B-Instruct:
363 | aliases: ["qwen2.5-sambanova", "qwen2.5-72b"]
364 | max-input-chars: 49000 # 16k tokens
365 | Qwen2.5-Coder-32B-Instruct:
366 | aliases: ["qwen2.5-coder-sambanova", "qwen-coder-sambanova"]
367 | max-input-chars: 49000 # 16k tokens
368 | QwQ-32B-Preview:
369 | aliases: ["qwq-sambanova", "qwq-32b"]
370 | max-input-chars: 49000 # 16k tokens
371 |
372 | localai:
373 | # LocalAI setup instructions: https://github.com/go-skynet/LocalAI#example-use-gpt4all-j-model
374 | base-url: http://localhost:8080
375 | models:
376 | ggml-gpt4all-j:
377 | aliases: ["local", "4all"]
378 | max-input-chars: 12250
379 | fallback:
380 | azure:
381 | # Set to 'azure-ad' to use Active Directory
382 | # Azure OpenAI setup: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource
383 | base-url: https://YOUR_RESOURCE_NAME.openai.azure.com
384 | api-key:
385 | api-key-env: AZURE_OPENAI_KEY
386 | models:
387 | gpt-4:
388 | aliases: ["az4"]
389 | max-input-chars: 24500
390 | fallback: gpt-35-turbo
391 | gpt-35-turbo:
392 | aliases: ["az35t"]
393 | max-input-chars: 12250
394 | fallback: gpt-35
395 | gpt-35:
396 | aliases: ["az35"]
397 | max-input-chars: 12250
398 | fallback:
399 | o1-preview:
400 | aliases: ["o1-preview"]
401 | max-input-chars: 128000
402 | o1-mini:
403 | aliases: ["o1-mini"]
404 | max-input-chars: 128000
405 | runpod:
406 | # https://docs.runpod.io/serverless/workers/vllm/openai-compatibility
407 | base-url: https://api.runpod.ai/v2/${YOUR_ENDPOINT}/openai/v1
408 | api-key:
409 | api-key-env: RUNPOD_API_KEY
410 | models:
411 | openchat/openchat-3.5-1210:
412 | aliases: ["openchat"]
413 | max-input-chars: 8192
414 | mistral:
415 | base-url: https://api.mistral.ai/v1
416 | api-key:
417 | api-key-env: MISTRAL_API_KEY
418 | models: # https://docs.mistral.ai/getting-started/models/
419 | mistral-large-latest:
420 | aliases: ["mistral-large"]
421 | max-input-chars: 384000
422 | open-mistral-nemo:
423 | aliases: ["mistral-nemo"]
424 | max-input-chars: 384000
425 | # DeepSeek
426 | # https://api-docs.deepseek.com
427 | deepseek:
428 | base-url: https://api.deepseek.com/
429 | api-key:
430 | api-key-env: DEEPSEEK_API_KEY
431 | models:
432 | deepseek-chat:
433 | aliases: ["chat"]
434 | max-input-chars: 384000
435 | deepseek-reasoner:
436 | aliases: ["r1"]
437 | max-input-chars: 384000
438 | # GitHub Models
439 | # https://github.com/marketplace/models
440 | github-models:
441 | base-url: https://models.github.ai/inference
442 | api-key:
443 | api-key-env: GITHUB_PERSONAL_ACCESS_TOKEN
444 | models:
445 | openai/gpt-4.1:
446 | max-input-chars: 392000
447 | openai/o3-mini:
448 | max-input-chars: 392000
449 | openai/o4-mini:
450 | max-input-chars: 392000
451 | openai/text-embedding-3-large:
452 | max-input-chars: 392000
453 | openai/text-embedding-3-small:
454 | max-input-chars: 392000
455 | ai21-labs/AI21-Jamba-1.5-Large:
456 | max-input-chars: 392000
457 | ai21-labs/AI21-Jamba-1.5-Mini:
458 | max-input-chars: 392000
459 | cohere/cohere-command-a:
460 | max-input-chars: 392000
461 | cohere/Cohere-command-r:
462 | max-input-chars: 392000
463 | cohere/Cohere-command-r-08-2024:
464 | max-input-chars: 392000
465 | cohere/Cohere-command-r-plus:
466 | max-input-chars: 392000
467 | cohere/Cohere-command-r-plus-08-2024:
468 | max-input-chars: 392000
469 | cohere/Cohere-embed-v3-english:
470 | max-input-chars: 392000
471 | cohere/Cohere-embed-v3-multilingual:
472 | max-input-chars: 392000
473 | core42/jais-30b-chat:
474 | max-input-chars: 392000
475 | deepseek/DeepSeek-R1:
476 | max-input-chars: 392000
477 | deepseek/DeepSeek-V3-0324:
478 | max-input-chars: 392000
479 | meta/Llama-3.2-11B-Vision-Instruct:
480 | max-input-chars: 392000
481 | meta/Llama-3.2-90B-Vision-Instruct:
482 | max-input-chars: 392000
483 | meta/Llama-3.3-70B-Instruct:
484 | max-input-chars: 392000
485 | meta/Llama-4-Maverick-17B-128E-Instruct-FP8:
486 | max-input-chars: 392000
487 | meta/Llama-4-Scout-17B-16E-Instruct:
488 | max-input-chars: 392000
489 | meta/Meta-Llama-3.1-405B-Instruct:
490 | max-input-chars: 392000
491 | meta/Meta-Llama-3.1-70B-Instruct:
492 | max-input-chars: 392000
493 | meta/Meta-Llama-3.1-8B-Instruct:
494 | max-input-chars: 392000
495 | meta/Meta-Llama-3-70B-Instruct:
496 | max-input-chars: 392000
497 | meta/Meta-Llama-3-8B-Instruct:
498 | max-input-chars: 392000
499 | mistral-ai/Codestral-2501:
500 | max-input-chars: 392000
501 | mistral-ai/Ministral-3B:
502 | max-input-chars: 392000
503 | mistral-ai/Mistral-Large-2411:
504 | max-input-chars: 392000
505 | mistral-ai/mistral-medium-2505:
506 | max-input-chars: 392000
507 | mistral-ai/Mistral-Nemo:
508 | max-input-chars: 392000
509 | mistral-ai/mistral-small-2503:
510 | max-input-chars: 392000
511 | xai/grok-3:
512 | max-input-chars: 392000
513 | xai/grok-3-mini:
514 | max-input-chars: 392000
515 | microsoft/MAI-DS-R1:
516 | max-input-chars: 392000
517 | microsoft/Phi-3.5-mini-instruct:
518 | max-input-chars: 392000
519 | microsoft/Phi-3.5-MoE-instruct:
520 | max-input-chars: 392000
521 | microsoft/Phi-3.5-vision-instruct:
522 | max-input-chars: 392000
523 | microsoft/Phi-3-medium-128k-instruct:
524 | max-input-chars: 392000
525 | microsoft/Phi-3-medium-4k-instruct:
526 | max-input-chars: 392000
527 | microsoft/Phi-3-mini-128k-instruct:
528 | max-input-chars: 392000
529 | microsoft/Phi-3-mini-4k-instruct:
530 | max-input-chars: 392000
531 | microsoft/Phi-3-small-128k-instruct:
532 | max-input-chars: 392000
533 | microsoft/Phi-3-small-8k-instruct:
534 | max-input-chars: 392000
535 | microsoft/Phi-4:
536 | max-input-chars: 392000
537 | microsoft/Phi-4-mini-instruct:
538 | max-input-chars: 392000
539 | microsoft/Phi-4-mini-reasoning:
540 | max-input-chars: 392000
541 | microsoft/Phi-4-multimodal-instruct:
542 | max-input-chars: 392000
543 | microsoft/Phi-4-reasoning:
544 | max-input-chars: 392000
545 |
--------------------------------------------------------------------------------
/config_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/stretchr/testify/require"
7 | "gopkg.in/yaml.v3"
8 | )
9 |
10 | func TestConfig(t *testing.T) {
11 | t.Run("old format text", func(t *testing.T) {
12 | var cfg Config
13 | require.NoError(t, yaml.Unmarshal([]byte("format-text: as markdown"), &cfg))
14 | require.Equal(t, FormatText(map[string]string{
15 | "markdown": "as markdown",
16 | }), cfg.FormatText)
17 | })
18 | t.Run("new format text", func(t *testing.T) {
19 | var cfg Config
20 | require.NoError(t, yaml.Unmarshal([]byte("format-text:\n markdown: as markdown\n json: as json"), &cfg))
21 | require.Equal(t, FormatText(map[string]string{
22 | "markdown": "as markdown",
23 | "json": "as json",
24 | }), cfg.FormatText)
25 | })
26 | }
27 |
--------------------------------------------------------------------------------
/db.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "time"
7 |
8 | "github.com/jmoiron/sqlx"
9 | "modernc.org/sqlite"
10 | )
11 |
12 | var (
13 | errNoMatches = errors.New("no conversations found")
14 | errManyMatches = errors.New("multiple conversations matched the input")
15 | )
16 |
17 | func handleSqliteErr(err error) error {
18 | sqerr := &sqlite.Error{}
19 | if errors.As(err, &sqerr) {
20 | return fmt.Errorf(
21 | "%w: %s",
22 | sqerr,
23 | sqlite.ErrorCodeString[sqerr.Code()],
24 | )
25 | }
26 | return err
27 | }
28 |
// openDB opens the conversation database at the given data source name,
// verifies connectivity, and applies the schema migrations in order.
func openDB(ds string) (*convoDB, error) {
	db, err := sqlx.Open("sqlite", ds)
	if err != nil {
		return nil, fmt.Errorf(
			"could not create db: %w",
			handleSqliteErr(err),
		)
	}
	// Open is lazy; Ping forces a real connection so setup failures
	// surface here rather than on the first query.
	if err := db.Ping(); err != nil {
		return nil, fmt.Errorf(
			"could not ping db: %w",
			handleSqliteErr(err),
		)
	}
	// Base schema: one row per conversation, keyed by id.
	if _, err := db.Exec(`
	CREATE TABLE
		IF NOT EXISTS conversations (
			id string NOT NULL PRIMARY KEY,
			title string NOT NULL,
			updated_at datetime NOT NULL DEFAULT (strftime ('%Y-%m-%d %H:%M:%f', 'now')),
			CHECK (id <> ''),
			CHECK (title <> '')
		)
	`); err != nil {
		return nil, fmt.Errorf("could not migrate db: %w", err)
	}
	// Lookup indexes matching the two ways conversations are searched
	// (by id prefix and by title).
	if _, err := db.Exec(`
	CREATE INDEX IF NOT EXISTS idx_conv_id ON conversations (id)
	`); err != nil {
		return nil, fmt.Errorf("could not migrate db: %w", err)
	}
	if _, err := db.Exec(`
	CREATE INDEX IF NOT EXISTS idx_conv_title ON conversations (title)
	`); err != nil {
		return nil, fmt.Errorf("could not migrate db: %w", err)
	}

	// Later schema additions: the model and api columns are added
	// conditionally so pre-existing databases get migrated in place.
	if !hasColumn(db, "model") {
		if _, err := db.Exec(`
		ALTER TABLE conversations ADD COLUMN model string
		`); err != nil {
			return nil, fmt.Errorf("could not migrate db: %w", err)
		}
	}
	if !hasColumn(db, "api") {
		if _, err := db.Exec(`
		ALTER TABLE conversations ADD COLUMN api string
		`); err != nil {
			return nil, fmt.Errorf("could not migrate db: %w", err)
		}
	}

	return &convoDB{db: db}, nil
}
83 |
84 | func hasColumn(db *sqlx.DB, col string) bool {
85 | var count int
86 | if err := db.Get(&count, `
87 | SELECT count(*)
88 | FROM pragma_table_info('conversations') c
89 | WHERE c.name = $1
90 | `, col); err != nil {
91 | return false
92 | }
93 | return count > 0
94 | }
95 |
// convoDB wraps the sqlx database handle that stores saved conversations.
type convoDB struct {
	db *sqlx.DB
}
99 |
// Conversation in the database.
type Conversation struct {
	ID        string    `db:"id"`
	Title     string    `db:"title"`
	UpdatedAt time.Time `db:"updated_at"`
	// API and Model are pointers because their columns were added by a
	// later migration without NOT NULL, so older rows may hold NULL.
	API   *string `db:"api"`
	Model *string `db:"model"`
}
108 |
// Close closes the underlying database handle.
func (c *convoDB) Close() error {
	return c.db.Close() //nolint: wrapcheck
}
112 |
113 | func (c *convoDB) Save(id, title, api, model string) error {
114 | res, err := c.db.Exec(c.db.Rebind(`
115 | UPDATE conversations
116 | SET
117 | title = ?,
118 | api = ?,
119 | model = ?,
120 | updated_at = CURRENT_TIMESTAMP
121 | WHERE
122 | id = ?
123 | `), title, api, model, id)
124 | if err != nil {
125 | return fmt.Errorf("Save: %w", err)
126 | }
127 |
128 | rows, err := res.RowsAffected()
129 | if err != nil {
130 | return fmt.Errorf("Save: %w", err)
131 | }
132 |
133 | if rows > 0 {
134 | return nil
135 | }
136 |
137 | if _, err := c.db.Exec(c.db.Rebind(`
138 | INSERT INTO
139 | conversations (id, title, api, model)
140 | VALUES
141 | (?, ?, ?, ?)
142 | `), id, title, api, model); err != nil {
143 | return fmt.Errorf("Save: %w", err)
144 | }
145 |
146 | return nil
147 | }
148 |
149 | func (c *convoDB) Delete(id string) error {
150 | if _, err := c.db.Exec(c.db.Rebind(`
151 | DELETE FROM conversations
152 | WHERE
153 | id = ?
154 | `), id); err != nil {
155 | return fmt.Errorf("Delete: %w", err)
156 | }
157 | return nil
158 | }
159 |
160 | func (c *convoDB) ListOlderThan(t time.Duration) ([]Conversation, error) {
161 | var convos []Conversation
162 | if err := c.db.Select(&convos, c.db.Rebind(`
163 | SELECT
164 | *
165 | FROM
166 | conversations
167 | WHERE
168 | updated_at < ?
169 | `), time.Now().Add(-t)); err != nil {
170 | return nil, fmt.Errorf("ListOlderThan: %w", err)
171 | }
172 | return convos, nil
173 | }
174 |
175 | func (c *convoDB) FindHEAD() (*Conversation, error) {
176 | var convo Conversation
177 | if err := c.db.Get(&convo, `
178 | SELECT
179 | *
180 | FROM
181 | conversations
182 | ORDER BY
183 | updated_at DESC
184 | LIMIT
185 | 1
186 | `); err != nil {
187 | return nil, fmt.Errorf("FindHead: %w", err)
188 | }
189 | return &convo, nil
190 | }
191 |
192 | func (c *convoDB) findByExactTitle(result *[]Conversation, in string) error {
193 | if err := c.db.Select(result, c.db.Rebind(`
194 | SELECT
195 | *
196 | FROM
197 | conversations
198 | WHERE
199 | title = ?
200 | `), in); err != nil {
201 | return fmt.Errorf("findByExactTitle: %w", err)
202 | }
203 | return nil
204 | }
205 |
206 | func (c *convoDB) findByIDOrTitle(result *[]Conversation, in string) error {
207 | if err := c.db.Select(result, c.db.Rebind(`
208 | SELECT
209 | *
210 | FROM
211 | conversations
212 | WHERE
213 | id glob ?
214 | OR title = ?
215 | `), in+"*", in); err != nil {
216 | return fmt.Errorf("findByIDOrTitle: %w", err)
217 | }
218 | return nil
219 | }
220 |
// Completions returns shell-completion candidates for the typed prefix in.
// Each row has the form "<value><TAB><description>" (char(9) is a tab): the
// first UNION branch completes by conversation id — showing the id cut to
// sha1short characters while the typed prefix is still shorter than that,
// the full id otherwise — and the second branch completes by title, with
// the shortened id as the description.
func (c *convoDB) Completions(in string) ([]string, error) {
	var result []string
	if err := c.db.Select(&result, c.db.Rebind(`
	SELECT
		printf (
			'%s%c%s',
			CASE
				WHEN length (?) < ? THEN substr (id, 1, ?)
				ELSE id
			END,
			char(9),
			title
		)
	FROM
		conversations
	WHERE
		id glob ?
	UNION
	SELECT
		printf ("%s%c%s", title, char(9), substr (id, 1, ?))
	FROM
		conversations
	WHERE
		title glob ?
	`), in, sha1short, sha1short, in+"*", sha1short, in+"*"); err != nil {
		return result, fmt.Errorf("Completions: %w", err)
	}
	return result, nil
}
250 |
251 | func (c *convoDB) Find(in string) (*Conversation, error) {
252 | var conversations []Conversation
253 | var err error
254 |
255 | if len(in) < sha1minLen {
256 | err = c.findByExactTitle(&conversations, in)
257 | } else {
258 | err = c.findByIDOrTitle(&conversations, in)
259 | }
260 | if err != nil {
261 | return nil, fmt.Errorf("Find %q: %w", in, err)
262 | }
263 |
264 | if len(conversations) > 1 {
265 | return nil, fmt.Errorf("%w: %s", errManyMatches, in)
266 | }
267 | if len(conversations) == 1 {
268 | return &conversations[0], nil
269 | }
270 | return nil, fmt.Errorf("%w: %s", errNoMatches, in)
271 | }
272 |
273 | func (c *convoDB) List() ([]Conversation, error) {
274 | var convos []Conversation
275 | if err := c.db.Select(&convos, `
276 | SELECT
277 | *
278 | FROM
279 | conversations
280 | ORDER BY
281 | updated_at DESC
282 | `); err != nil {
283 | return convos, fmt.Errorf("List: %w", err)
284 | }
285 | return convos, nil
286 | }
287 |
--------------------------------------------------------------------------------
/db_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "testing"
6 | "time"
7 |
8 | "github.com/stretchr/testify/require"
9 | )
10 |
// testDB opens an in-memory conversation database for a single test and
// registers a cleanup that closes it when the test finishes.
func testDB(tb testing.TB) *convoDB {
	db, err := openDB(":memory:")
	require.NoError(tb, err)
	tb.Cleanup(func() {
		require.NoError(tb, db.Close())
	})
	return db
}
19 |
// TestConvoDB exercises the conversation database end to end: saving,
// updating, finding (by ID prefix and by title), listing, deleting, and
// shell completions. Each subtest gets its own in-memory database.
func TestConvoDB(t *testing.T) {
	// Fixed SHA-1-style conversation ID reused across subtests.
	const testid = "df31ae23ab8b75b5643c2f846c570997edc71333"

	t.Run("list-empty", func(t *testing.T) {
		db := testDB(t)
		list, err := db.List()
		require.NoError(t, err)
		require.Empty(t, list)
	})

	t.Run("save", func(t *testing.T) {
		db := testDB(t)

		require.NoError(t, db.Save(testid, "message 1", "openai", "gpt-4o"))

		// A 4-char ID prefix is enough to find the conversation.
		convo, err := db.Find("df31")
		require.NoError(t, err)
		require.Equal(t, testid, convo.ID)
		require.Equal(t, "message 1", convo.Title)

		list, err := db.List()
		require.NoError(t, err)
		require.Len(t, list, 1)
	})

	t.Run("save no id", func(t *testing.T) {
		db := testDB(t)
		require.Error(t, db.Save("", "message 1", "openai", "gpt-4o"))
	})

	t.Run("save no message", func(t *testing.T) {
		db := testDB(t)
		require.Error(t, db.Save(newConversationID(), "", "openai", "gpt-4o"))
	})

	t.Run("update", func(t *testing.T) {
		db := testDB(t)

		// Saving the same ID twice updates the title instead of adding
		// a row; the sleep keeps the updated_at timestamps apart.
		require.NoError(t, db.Save(testid, "message 1", "openai", "gpt-4o"))
		time.Sleep(100 * time.Millisecond)
		require.NoError(t, db.Save(testid, "message 2", "openai", "gpt-4o"))

		convo, err := db.Find("df31")
		require.NoError(t, err)
		require.Equal(t, testid, convo.ID)
		require.Equal(t, "message 2", convo.Title)

		list, err := db.List()
		require.NoError(t, err)
		require.Len(t, list, 1)
	})

	t.Run("find head single", func(t *testing.T) {
		db := testDB(t)

		require.NoError(t, db.Save(testid, "message 2", "openai", "gpt-4o"))

		head, err := db.FindHEAD()
		require.NoError(t, err)
		require.Equal(t, testid, head.ID)
		require.Equal(t, "message 2", head.Title)
	})

	t.Run("find head multiple", func(t *testing.T) {
		db := testDB(t)

		// HEAD is the most recently updated conversation; the sleep
		// guarantees distinct updated_at values between the two saves.
		require.NoError(t, db.Save(testid, "message 2", "openai", "gpt-4o"))
		time.Sleep(time.Millisecond * 100)
		nextConvo := newConversationID()
		require.NoError(t, db.Save(nextConvo, "another message", "openai", "gpt-4o"))

		head, err := db.FindHEAD()
		require.NoError(t, err)
		require.Equal(t, nextConvo, head.ID)
		require.Equal(t, "another message", head.Title)

		list, err := db.List()
		require.NoError(t, err)
		require.Len(t, list, 2)
	})

	t.Run("find by title", func(t *testing.T) {
		db := testDB(t)

		require.NoError(t, db.Save(newConversationID(), "message 1", "openai", "gpt-4o"))
		require.NoError(t, db.Save(testid, "message 2", "openai", "gpt-4o"))

		convo, err := db.Find("message 2")
		require.NoError(t, err)
		require.Equal(t, testid, convo.ID)
		require.Equal(t, "message 2", convo.Title)
	})

	t.Run("find match nothing", func(t *testing.T) {
		db := testDB(t)
		require.NoError(t, db.Save(testid, "message 1", "openai", "gpt-4o"))
		// Titles must match exactly; a partial title matches nothing.
		_, err := db.Find("message")
		require.ErrorIs(t, err, errNoMatches)
	})

	t.Run("find match many", func(t *testing.T) {
		db := testDB(t)
		const testid2 = "df31ae23ab9b75b5641c2f846c571000edc71315"
		require.NoError(t, db.Save(testid, "message 1", "openai", "gpt-4o"))
		require.NoError(t, db.Save(testid2, "message 2", "openai", "gpt-4o"))
		// Both IDs share the "df31ae" prefix, so the lookup is ambiguous.
		_, err := db.Find("df31ae")
		require.ErrorIs(t, err, errManyMatches)
	})

	t.Run("delete", func(t *testing.T) {
		db := testDB(t)

		// Deleting an ID that does not exist is not an error.
		require.NoError(t, db.Save(testid, "message 1", "openai", "gpt-4o"))
		require.NoError(t, db.Delete(newConversationID()))

		list, err := db.List()
		require.NoError(t, err)
		require.NotEmpty(t, list)

		for _, item := range list {
			require.NoError(t, db.Delete(item.ID))
		}

		list, err = db.List()
		require.NoError(t, err)
		require.Empty(t, list)
	})

	t.Run("completions", func(t *testing.T) {
		db := testDB(t)

		const testid1 = "fc5012d8c67073ea0a46a3c05488a0e1d87df74b"
		const title1 = "some title"
		const testid2 = "6c33f71694bf41a18c844a96d1f62f153e5f6f44"
		const title2 = "football teams"
		require.NoError(t, db.Save(testid1, title1, "openai", "gpt-4o"))
		require.NoError(t, db.Save(testid2, title2, "openai", "gpt-4o"))

		// "f" matches testid1 by ID prefix and title2 by title.
		results, err := db.Completions("f")
		require.NoError(t, err)
		require.Equal(t, []string{
			fmt.Sprintf("%s\t%s", testid1[:sha1short], title1),
			fmt.Sprintf("%s\t%s", title2, testid2[:sha1short]),
		}, results)

		// A long enough ID prefix completes to the full ID.
		results, err = db.Completions(testid1[:8])
		require.NoError(t, err)
		require.Equal(t, []string{
			fmt.Sprintf("%s\t%s", testid1, title1),
		}, results)
	})
}
172 |
--------------------------------------------------------------------------------
/error.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import "fmt"
4 |
// newUserErrorf builds a user-facing error from a printf-style format.
// It exists mostly so that user-facing messages can start with a
// capital letter without linters complaining about capitalized errors.
func newUserErrorf(format string, a ...any) error {
	err := fmt.Errorf(format, a...)
	return err
}
10 |
// modsError wraps an error together with a human-readable reason that
// adds context for the user.
type modsError struct {
	err    error
	reason string
}

// Error implements the error interface by delegating to the wrapped
// error.
func (m modsError) Error() string { return m.err.Error() }

// Reason returns the additional context attached to the error.
func (m modsError) Reason() string { return m.reason }
24 |
--------------------------------------------------------------------------------
/examples.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "math/rand"
5 | "regexp"
6 | )
7 |
// examples maps a short description to a shell one-liner demonstrating
// that use of mods; randomExample picks one description at random.
var examples = map[string]string{
	"Write new sections for a readme": `cat README.md | mods "write a new section to this README documenting a pdf sharing feature"`,
	"Editorialize your video files":   `ls ~/vids | mods -f "summarize each of these titles, group them by decade" | glow`,
	"Let GPT pick something to watch": `ls ~/vids | mods "Pick 5 action packed shows from the 80s from this list" | gum choose | xargs vlc`,
}
13 |
14 | func randomExample() string {
15 | keys := make([]string, 0, len(examples))
16 | for k := range examples {
17 | keys = append(keys, k)
18 | }
19 | desc := keys[rand.Intn(len(keys))] //nolint:gosec
20 | return desc
21 | }
22 |
23 | func cheapHighlighting(s styles, code string) string {
24 | code = regexp.
25 | MustCompile(`"([^"\\]|\\.)*"`).
26 | ReplaceAllStringFunc(code, func(x string) string {
27 | return s.Quote.Render(x)
28 | })
29 | code = regexp.
30 | MustCompile(`\|`).
31 | ReplaceAllStringFunc(code, func(x string) string {
32 | return s.Pipe.Render(x)
33 | })
34 | return code
35 | }
36 |
--------------------------------------------------------------------------------
/examples.md:
--------------------------------------------------------------------------------
1 | # Mods Examples
2 |
3 | ### Improve Your Code
4 |
5 | Piping source code to Mods and giving it an instruction on what to do with it
6 | gives you a lot of options for refactoring, enhancing or debugging code.
7 |
8 | `mods -f "what are your thoughts on improving this code?" < main.go | glow`
9 |
10 | 
11 |
12 | ### Come Up With Product Features
13 |
14 | Mods can also come up with entirely new features based on source code (or a
15 | README file).
16 |
17 | `mods -f "come up with 10 new features for this tool." < main.go | glow`
18 |
19 | 
20 |
21 | ### Help Write Docs
22 |
23 | Mods can quickly give you a first draft for new documentation.
24 |
25 | `mods "write a new section to this readme for a feature that sends you a free rabbit if you hit r" < README.md | glow`
26 |
27 | 
28 |
29 | ### Organize Your Videos
30 |
31 | The file system can be an amazing source of input for Mods. If you have music
32 | or video files, Mods can parse the output of `ls` and offer really good
33 | editorialization of your content.
34 |
35 | `ls ~/vids | mods -f "organize these by decade and summarize each" | glow`
36 |
37 | 
38 |
39 | ### Make Recommendations
40 |
41 | Mods is really good at generating recommendations based on what you have as
42 | well, both for similar content but also content in an entirely different media
43 | (like getting music recommendations based on movies you have).
44 |
45 | `ls ~/vids | mods -f "recommend me 10 shows based on these, make them obscure" | glow`
46 |
47 | `ls ~/vids | mods -f "recommend me 10 albums based on these shows, do not include any soundtrack music or music from the show" | glow`
48 |
49 | 
50 |
51 | ### Read Your Fortune
52 |
53 | It's easy to let your downloads folder grow into a chaotic never-ending pit of
54 | files, but with Mods you can use that to your advantage!
55 |
56 | `ls ~/Downloads | mods -f "tell my fortune based on these files" | glow`
57 |
58 | 
59 |
60 | ### Understand APIs
61 |
62 | Mods can parse and understand the output of an API call with `curl` and convert
63 | it to something human readable.
64 |
65 | `curl "https://api.open-meteo.com/v1/forecast?latitude=29.00&longitude=-90.00&current_weather=true&hourly=temperature_2m,relativehumidity_2m,windspeed_10m" 2>/dev/null | mods -f "summarize this weather data for a human." | glow`
66 |
67 | 
68 |
69 | ### Read The Comments (so you don't have to)
70 |
71 | Just like with APIs, Mods can read through raw HTML and summarize the contents.
72 |
73 | `curl "https://news.ycombinator.com/item?id=30048332" 2>/dev/null | mods -f "what are the authors of these comments saying?" | glow`
74 |
75 | 
76 |
--------------------------------------------------------------------------------
/examples/conversations.tape:
--------------------------------------------------------------------------------
1 | Output conversations.gif
2 |
3 | Set Height 900
4 | Set Width 1600
5 | Set Framerate 24
6 | Set FontSize 28
7 |
8 | Sleep 500ms
9 | Type "mods --list"
10 | Sleep 500ms
11 | Enter
12 | Sleep 3s
13 |
14 | Type "mods --show "
15 | Sleep 500ms
16 |
17 | Hide
18 | Type@25ms "8a0d428"
19 | Show
20 |
21 | Sleep 500ms
22 |
23 | Enter
24 |
25 | Sleep 5s
26 |
--------------------------------------------------------------------------------
/examples/demo.tape:
--------------------------------------------------------------------------------
1 | Output mods.gif
2 |
3 | Set Height 900
4 | Set Width 1600
5 | Set Framerate 24
6 |
7 | Hide
8 | Type `export OPENAI_API_KEY=$(pass OPENAI_API_KEY)`
9 | Enter
10 | Ctrl+L
11 | Sleep 500ms
12 | Show
13 |
14 | Sleep 500ms
15 |
16 | Type@25ms `curl -s `
17 |
18 | Sleep 500ms
19 |
20 | # Simulate pasting link.
21 | Hide
22 | Type@5ms `https://api.github.com/orgs/charmbracelet/repos`
23 | Show
24 |
25 | Sleep 500ms
26 |
27 | Type@25ms ` | mods -f "rate this github org and summarize each repository"`
28 |
29 | Enter
30 |
31 | Sleep 30s
32 |
--------------------------------------------------------------------------------
/examples/v1.5.tape:
--------------------------------------------------------------------------------
1 | Output mods.gif
2 |
3 | Set Height 900
4 | Set Width 1600
5 | Set Framerate 24
6 |
7 | Type@150ms `mods -M`
8 | Sleep 500ms
9 | Enter
10 | Sleep 250ms
11 |
12 | Down
13 | Sleep 150ms
14 | Down
15 | Sleep 150ms
16 | Down
17 | Sleep 150ms
18 | Up
19 | Sleep 150ms
20 | Down
21 | Sleep 150ms
22 | Down
23 | Sleep 150ms
24 | Up
25 | Sleep 150ms
26 | Up
27 | Sleep 150ms
28 |
29 | Sleep 150ms
30 | Type@150ms `/`
31 | Sleep 250ms
32 | Type@150ms `gro`
33 | Sleep 500ms
34 | Enter
35 | Sleep 250ms
36 |
37 | Down
38 | Sleep 150ms
39 | Down
40 | Sleep 150ms
41 | Enter
42 | Sleep 250ms
43 |
44 | Type@150ms `Hello world`
45 | Sleep 500ms
46 | Enter
47 |
48 | Sleep 3s
49 |
--------------------------------------------------------------------------------
/features.md:
--------------------------------------------------------------------------------
1 | # Mods Features
2 |
3 | ## Regular usage
4 |
5 | By default:
6 |
7 | - all messages go to `STDERR`
8 | - all prompts are saved with the first line of the prompt as the title
9 | - glamour is used by default if `STDOUT` is a TTY
10 |
11 | ### Basic
12 |
13 | The most basic usage is:
14 |
15 | ```bash
16 | mods 'first 2 primes'
17 | ```
18 |
19 | ### Pipe from
20 |
21 | You can also pipe to it, in which case `STDIN` will not be a TTY:
22 |
23 | ```bash
24 | echo 'as json' | mods 'first 2 primes'
25 | ```
26 |
27 | In this case, `mods` should read `STDIN` and append it to the prompt.
28 |
29 | ### Pipe to
30 |
31 | You may also pipe the output to another program, in which case `STDOUT` will not
32 | be a TTY:
33 |
34 | ```bash
35 | echo 'as json' | mods 'first 2 primes' | jq .
36 | ```
37 |
38 | In this case, the "Generating" animation will go to `STDERR`, but the response
39 | will be streamed to `STDOUT`.
40 |
41 | ### Custom title
42 |
43 | You can set a custom title:
44 |
45 | ```bash
46 | mods --title='title' 'first 2 primes'
47 | ```
48 |
49 | ### Continue latest
50 |
51 | You can continue the latest conversation and save it with a new title using
52 | `--continue=title`:
53 |
54 | ```bash
55 | mods 'first 2 primes'
56 | mods --continue='primes as json' 'format as json'
57 | ```
58 |
59 | ### Untitled continue latest
60 |
61 | ```bash
62 | mods 'first 2 primes'
63 | mods --continue-last 'format as json'
64 | ```
65 |
66 | ### Continue from specific conversation, save with a new title
67 |
68 | ```bash
69 | mods --title='naturals' 'first 5 natural numbers'
70 | mods --continue='naturals' --title='naturals.json' 'format as json'
71 | ```
72 |
73 | ### Conversation branching
74 |
75 | You can use the `--continue` and `--title` to branch out conversations, for
76 | instance:
77 |
78 | ```bash
79 | mods --title='naturals' 'first 5 natural numbers'
80 | mods --continue='naturals' --title='naturals.json' 'format as json'
81 | mods --continue='naturals' --title='naturals.yaml' 'format as yaml'
82 | ```
83 |
84 | With this you'll end up with 3 conversations: `naturals`, `naturals.json`, and
85 | `naturals.yaml`.
86 |
87 | ## List conversations
88 |
89 | You can list your previous conversations with:
90 |
91 | ```bash
92 | mods --list
93 | # or
94 | mods -l
95 | ```
96 |
97 | ## Show a previous conversation
98 |
99 | You can also show a previous conversation by ID or title, e.g.:
100 |
101 | ```bash
102 | mods --show='naturals'
103 | mods -s='a2e2'
104 | ```
105 |
106 | For titles, the match should be exact.
107 | For IDs, only the first 4 chars are needed. If it matches multiple
108 | conversations, you can add more chars until it matches a single one again.
109 |
110 | ## Delete a conversation
111 |
112 | You can also delete conversations by title or ID, same as `--show`, different
113 | flag:
114 |
115 | ```bash
116 | mods --delete='naturals' --delete='a2e2'
117 | ```
118 |
119 | Keep in mind that these operations are not reversible.
120 | You can repeat the delete flag to delete multiple conversations at once.
121 |
--------------------------------------------------------------------------------
/flag.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "regexp"
5 | "strings"
6 | "time"
7 |
8 | "github.com/caarlos0/duration"
9 | )
10 |
// Precompiled patterns for pulling the offending flag name out of
// pflag's error strings; compiled once instead of on every parse error.
var (
	unknownShorthandRE = regexp.MustCompile(`unknown shorthand flag: '.*' in (-\w)`)
	invalidArgumentRE  = regexp.MustCompile(`invalid argument ".*" for "(.*)" flag: .*`)
)

// newFlagParseError inspects a flag-parsing error and extracts a
// user-facing reason format string (with a single %s verb for the flag
// name) plus the flag that caused it. Unrecognized error shapes keep
// the raw message as the reason and an empty flag.
func newFlagParseError(err error) flagParseError {
	var reason, flag string
	s := err.Error()
	switch {
	case strings.HasPrefix(s, "flag needs an argument:"):
		reason = "Flag %s needs an argument."
		// The message ends either in "--name" (3 split parts) or in
		// "'x' in -x" (2 split parts); the bare flag name comes last.
		ps := strings.Split(s, "-")
		switch len(ps) {
		case 2: //nolint:mnd
			flag = "-" + ps[len(ps)-1]
		case 3: //nolint:mnd
			flag = "--" + ps[len(ps)-1]
		}
	case strings.HasPrefix(s, "unknown flag:"):
		reason = "Flag %s is missing."
		flag = strings.TrimPrefix(s, "unknown flag: ")
	case strings.HasPrefix(s, "unknown shorthand flag:"):
		reason = "Short flag %s is missing."
		parts := unknownShorthandRE.FindStringSubmatch(s)
		if len(parts) > 1 {
			flag = parts[1]
		}
	case strings.HasPrefix(s, "invalid argument"):
		reason = "Flag %s have an invalid argument."
		parts := invalidArgumentRE.FindStringSubmatch(s)
		if len(parts) > 1 {
			flag = parts[1]
		}
	default:
		reason = s
	}
	return flagParseError{
		err:    err,
		reason: reason,
		flag:   flag,
	}
}

// flagParseError wraps a flag-parsing error together with the
// user-facing reason format and the flag name it refers to.
type flagParseError struct {
	err    error
	reason string
	flag   string
}

// Error returns the underlying error message.
func (f flagParseError) Error() string {
	return f.err.Error()
}

// ReasonFormat returns the reason as a printf format whose single %s
// verb takes the flag name.
func (f flagParseError) ReasonFormat() string {
	return f.reason
}

// Flag returns the flag that caused the error, or "" when it could not
// be determined.
func (f flagParseError) Flag() string {
	return f.flag
}
68 |
69 | func newDurationFlag(val time.Duration, p *time.Duration) *durationFlag {
70 | *p = val
71 | return (*durationFlag)(p)
72 | }
73 |
74 | type durationFlag time.Duration
75 |
76 | func (d *durationFlag) Set(s string) error {
77 | v, err := duration.Parse(s)
78 | *d = durationFlag(v)
79 | //nolint: wrapcheck
80 | return err
81 | }
82 |
83 | func (d *durationFlag) String() string {
84 | return time.Duration(*d).String()
85 | }
86 |
87 | func (*durationFlag) Type() string {
88 | return "duration"
89 | }
90 |
--------------------------------------------------------------------------------
/flag_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "errors"
5 | "testing"
6 |
7 | "github.com/stretchr/testify/require"
8 | )
9 |
// flagParseErrorTests pairs raw pflag error strings with the flag name
// and reason format newFlagParseError is expected to extract from them.
var flagParseErrorTests = []struct {
	in     string // raw error message, as produced by pflag
	flag   string // expected extracted flag name
	reason string // expected user-facing reason format
}{
	{
		"unknown flag: --nope",
		"--nope",
		"Flag %s is missing.",
	},
	{
		"flag needs an argument: --delete",
		"--delete",
		"Flag %s needs an argument.",
	},
	{
		"flag needs an argument: 'd' in -d",
		"-d",
		"Flag %s needs an argument.",
	},
	{
		`invalid argument "20dd" for "--delete-older-than" flag: time: unknown unit "dd" in duration "20dd"`,
		"--delete-older-than",
		"Flag %s have an invalid argument.",
	},
	{
		`invalid argument "sdfjasdl" for "--max-tokens" flag: strconv.ParseInt: parsing "sdfjasdl": invalid syntax`,
		"--max-tokens",
		"Flag %s have an invalid argument.",
	},
	{
		`invalid argument "nope" for "-r, --raw" flag: strconv.ParseBool: parsing "nope": invalid syntax`,
		"-r, --raw",
		"Flag %s have an invalid argument.",
	},
}
46 |
47 | func TestFlagParseError(t *testing.T) {
48 | for _, tf := range flagParseErrorTests {
49 | t.Run(tf.in, func(t *testing.T) {
50 | err := newFlagParseError(errors.New(tf.in))
51 | require.Equal(t, tf.flag, err.Flag())
52 | require.Equal(t, tf.reason, err.ReasonFormat())
53 | require.Equal(t, tf.in, err.Error())
54 | })
55 | }
56 | }
57 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/charmbracelet/mods
2 |
3 | go 1.24.0
4 |
5 | require (
6 | github.com/adrg/xdg v0.5.3
7 | github.com/anthropics/anthropic-sdk-go v1.2.1
8 | github.com/atotto/clipboard v0.1.4
9 | github.com/caarlos0/duration v0.0.0-20240108180406-5d492514f3c7
10 | github.com/caarlos0/env/v9 v9.0.0
11 | github.com/caarlos0/go-shellwords v1.0.12
12 | github.com/caarlos0/timea.go v1.2.0
13 | github.com/charmbracelet/bubbles v0.21.0
14 | github.com/charmbracelet/bubbletea v1.3.5
15 | github.com/charmbracelet/glamour v0.10.0
16 | github.com/charmbracelet/huh v0.7.0
17 | github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834
18 | github.com/charmbracelet/x/editor v0.1.0
19 | github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91
20 | github.com/charmbracelet/x/exp/ordered v0.1.0
21 | github.com/charmbracelet/x/exp/strings v0.0.0-20240722160745-212f7b056ed0
22 | github.com/cohere-ai/cohere-go/v2 v2.14.1
23 | github.com/jmoiron/sqlx v1.4.0
24 | github.com/lucasb-eyer/go-colorful v1.2.0
25 | github.com/mark3labs/mcp-go v0.31.0
26 | github.com/mattn/go-isatty v0.0.20
27 | github.com/muesli/mango-cobra v1.2.0
28 | github.com/muesli/roff v0.1.0
29 | github.com/muesli/termenv v0.16.0
30 | github.com/ollama/ollama v0.9.0
31 | github.com/openai/openai-go v1.2.0
32 | github.com/spf13/cobra v1.9.1
33 | github.com/spf13/pflag v1.0.6
34 | github.com/stretchr/testify v1.10.0
35 | gopkg.in/yaml.v3 v3.0.1
36 | modernc.org/sqlite v1.37.1
37 | )
38 |
39 | require (
40 | github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 // indirect
41 | github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
42 | github.com/alecthomas/chroma/v2 v2.14.0 // indirect
43 | github.com/aws/aws-sdk-go-v2 v1.30.3 // indirect
44 | github.com/aws/smithy-go v1.20.3 // indirect
45 | github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
46 | github.com/aymanbagabas/go-udiff v0.2.0 // indirect
47 | github.com/aymerick/douceur v0.2.0 // indirect
48 | github.com/catppuccin/go v0.3.0 // indirect
49 | github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
50 | github.com/charmbracelet/x/ansi v0.8.0 // indirect
51 | github.com/charmbracelet/x/cellbuf v0.0.13 // indirect
52 | github.com/charmbracelet/x/exp/slice v0.0.0-20250327172914-2fdc97757edf // indirect
53 | github.com/charmbracelet/x/term v0.2.1 // indirect
54 | github.com/davecgh/go-spew v1.1.1 // indirect
55 | github.com/dlclark/regexp2 v1.11.4 // indirect
56 | github.com/dustin/go-humanize v1.0.1 // indirect
57 | github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
58 | github.com/google/uuid v1.6.0 // indirect
59 | github.com/gorilla/css v1.0.1 // indirect
60 | github.com/inconshreveable/mousetrap v1.1.0 // indirect
61 | github.com/mattn/go-localereader v0.0.1 // indirect
62 | github.com/mattn/go-runewidth v0.0.16 // indirect
63 | github.com/microcosm-cc/bluemonday v1.0.27 // indirect
64 | github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
65 | github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
66 | github.com/muesli/cancelreader v0.2.2 // indirect
67 | github.com/muesli/mango v0.1.0 // indirect
68 | github.com/muesli/mango-pflag v0.1.0 // indirect
69 | github.com/muesli/reflow v0.3.0 // indirect
70 | github.com/ncruces/go-strftime v0.1.9 // indirect
71 | github.com/pmezard/go-difflib v1.0.0 // indirect
72 | github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
73 | github.com/rivo/uniseg v0.4.7 // indirect
74 | github.com/spf13/cast v1.7.1 // indirect
75 | github.com/tidwall/gjson v1.14.4 // indirect
76 | github.com/tidwall/match v1.1.1 // indirect
77 | github.com/tidwall/pretty v1.2.1 // indirect
78 | github.com/tidwall/sjson v1.2.5 // indirect
79 | github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
80 | github.com/yosida95/uritemplate/v3 v3.0.2 // indirect
81 | github.com/yuin/goldmark v1.7.8 // indirect
82 | github.com/yuin/goldmark-emoji v1.0.5 // indirect
83 | golang.org/x/crypto v0.36.0 // indirect
84 | golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect
85 | golang.org/x/net v0.38.0 // indirect
86 | golang.org/x/sync v0.14.0 // indirect
87 | golang.org/x/sys v0.33.0 // indirect
88 | golang.org/x/term v0.31.0 // indirect
89 | golang.org/x/text v0.24.0 // indirect
90 | modernc.org/libc v1.65.7 // indirect
91 | modernc.org/mathutil v1.7.1 // indirect
92 | modernc.org/memory v1.11.0 // indirect
93 | )
94 |
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
2 | filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
3 | github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ=
4 | github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ=
5 | github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc=
6 | github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
7 | github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
8 | github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
9 | github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
10 | github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
11 | github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
12 | github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
13 | github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
14 | github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
15 | github.com/alecthomas/assert/v2 v2.7.0 h1:QtqSACNS3tF7oasA8CU6A6sXZSBDqnm7RfpLl9bZqbE=
16 | github.com/alecthomas/assert/v2 v2.7.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
17 | github.com/alecthomas/chroma/v2 v2.14.0 h1:R3+wzpnUArGcQz7fCETQBzO5n9IMNi13iIs46aU4V9E=
18 | github.com/alecthomas/chroma/v2 v2.14.0/go.mod h1:QolEbTfmUHIMVpBqxeDnNBj2uoeI4EbYP4i6n68SG4I=
19 | github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc=
20 | github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
21 | github.com/anthropics/anthropic-sdk-go v1.2.1 h1:zwRsDe3+KEJNDwKdbtum4P3UsQ9Uc8y/WmBE+V2WElk=
22 | github.com/anthropics/anthropic-sdk-go v1.2.1/go.mod h1:AapDW22irxK2PSumZiQXYUFvsdQgkwIWlpESweWZI/c=
23 | github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
24 | github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
25 | github.com/aws/aws-sdk-go-v2 v1.30.3 h1:jUeBtG0Ih+ZIFH0F4UkmL9w3cSpaMv9tYYDbzILP8dY=
26 | github.com/aws/aws-sdk-go-v2 v1.30.3/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc=
27 | github.com/aws/smithy-go v1.20.3 h1:ryHwveWzPV5BIof6fyDvor6V3iUL7nTfiTKXHiW05nE=
28 | github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
29 | github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
30 | github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
31 | github.com/aymanbagabas/go-udiff v0.2.0 h1:TK0fH4MteXUDspT88n8CKzvK0X9O2xu9yQjWpi6yML8=
32 | github.com/aymanbagabas/go-udiff v0.2.0/go.mod h1:RE4Ex0qsGkTAJoQdQQCA0uG+nAzJO/pI/QwceO5fgrA=
33 | github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
34 | github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
35 | github.com/caarlos0/duration v0.0.0-20240108180406-5d492514f3c7 h1:kJP/C2eL9DCKrCOlX6lPVmAUAb6U4u9xllgws1kP9ds=
36 | github.com/caarlos0/duration v0.0.0-20240108180406-5d492514f3c7/go.mod h1:mSkwb/eZEwOJJJ4tqAKiuhLIPe0e9+FKhlU0oMCpbf8=
37 | github.com/caarlos0/env/v9 v9.0.0 h1:SI6JNsOA+y5gj9njpgybykATIylrRMklbs5ch6wO6pc=
38 | github.com/caarlos0/env/v9 v9.0.0/go.mod h1:ye5mlCVMYh6tZ+vCgrs/B95sj88cg5Tlnc0XIzgZ020=
39 | github.com/caarlos0/go-shellwords v1.0.12 h1:HWrUnu6lGbWfrDcFiHcZiwOLzHWjjrPVehULaTFgPp8=
40 | github.com/caarlos0/go-shellwords v1.0.12/go.mod h1:bYeeX1GrTLPl5cAMYEzdm272qdsQAZiaHgeF0KTk1Gw=
41 | github.com/caarlos0/timea.go v1.2.0 h1:JkjyWSUheN4nGO/OmYVGKbEv4ozHP/zuTZWD5Ih3Gog=
42 | github.com/caarlos0/timea.go v1.2.0/go.mod h1:p4uopjR7K+y0Oxh7j0vLh3vSo58jjzOgXHKcyKwQjuY=
43 | github.com/catppuccin/go v0.3.0 h1:d+0/YicIq+hSTo5oPuRi5kOpqkVA5tAsU6dNhvRu+aY=
44 | github.com/catppuccin/go v0.3.0/go.mod h1:8IHJuMGaUUjQM82qBrGNBv7LFq6JI3NnQCF6MOlZjpc=
45 | github.com/charmbracelet/bubbles v0.21.0 h1:9TdC97SdRVg/1aaXNVWfFH3nnLAwOXr8Fn6u6mfQdFs=
46 | github.com/charmbracelet/bubbles v0.21.0/go.mod h1:HF+v6QUR4HkEpz62dx7ym2xc71/KBHg+zKwJtMw+qtg=
47 | github.com/charmbracelet/bubbletea v1.3.5 h1:JAMNLTbqMOhSwoELIr0qyP4VidFq72/6E9j7HHmRKQc=
48 | github.com/charmbracelet/bubbletea v1.3.5/go.mod h1:TkCnmH+aBd4LrXhXcqrKiYwRs7qyQx5rBgH5fVY3v54=
49 | github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs=
50 | github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk=
51 | github.com/charmbracelet/glamour v0.10.0 h1:MtZvfwsYCx8jEPFJm3rIBFIMZUfUJ765oX8V6kXldcY=
52 | github.com/charmbracelet/glamour v0.10.0/go.mod h1:f+uf+I/ChNmqo087elLnVdCiVgjSKWuXa/l6NU2ndYk=
53 | github.com/charmbracelet/huh v0.7.0 h1:W8S1uyGETgj9Tuda3/JdVkc3x7DBLZYPZc4c+/rnRdc=
54 | github.com/charmbracelet/huh v0.7.0/go.mod h1:UGC3DZHlgOKHvHC07a5vHag41zzhpPFj34U92sOmyuk=
55 | github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834 h1:ZR7e0ro+SZZiIZD7msJyA+NjkCNNavuiPBLgerbOziE=
56 | github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834/go.mod h1:aKC/t2arECF6rNOnaKaVU6y4t4ZeHQzqfxedE/VkVhA=
57 | github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2llXn7xE=
58 | github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q=
59 | github.com/charmbracelet/x/cellbuf v0.0.13 h1:/KBBKHuVRbq1lYx5BzEHBAFBP8VcQzJejZ/IA3iR28k=
60 | github.com/charmbracelet/x/cellbuf v0.0.13/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs=
61 | github.com/charmbracelet/x/conpty v0.1.0 h1:4zc8KaIcbiL4mghEON8D72agYtSeIgq8FSThSPQIb+U=
62 | github.com/charmbracelet/x/conpty v0.1.0/go.mod h1:rMFsDJoDwVmiYM10aD4bH2XiRgwI7NYJtQgl5yskjEQ=
63 | github.com/charmbracelet/x/editor v0.1.0 h1:p69/dpvlwRTs9uYiPeAWruwsHqTFzHhTvQOd/WVSX98=
64 | github.com/charmbracelet/x/editor v0.1.0/go.mod h1:oivrEbcP/AYt/Hpvk5pwDXXrQ933gQS6UzL6fxqAGSA=
65 | github.com/charmbracelet/x/errors v0.0.0-20240508181413-e8d8b6e2de86 h1:JSt3B+U9iqk37QUU2Rvb6DSBYRLtWqFqfxf8l5hOZUA=
66 | github.com/charmbracelet/x/errors v0.0.0-20240508181413-e8d8b6e2de86/go.mod h1:2P0UgXMEa6TsToMSuFqKFQR+fZTO9CNGUNokkPatT/0=
67 | github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 h1:payRxjMjKgx2PaCWLZ4p3ro9y97+TVLZNaRZgJwSVDQ=
68 | github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U=
69 | github.com/charmbracelet/x/exp/ordered v0.1.0 h1:55/qLwjIh0gL0Vni+QAWk7T/qRVP6sBf+2agPBgnOFE=
70 | github.com/charmbracelet/x/exp/ordered v0.1.0/go.mod h1:5UHwmG+is5THxMyCJHNPCn2/ecI07aKNrW+LcResjJ8=
71 | github.com/charmbracelet/x/exp/slice v0.0.0-20250327172914-2fdc97757edf h1:rLG0Yb6MQSDKdB52aGX55JT1oi0P0Kuaj7wi1bLUpnI=
72 | github.com/charmbracelet/x/exp/slice v0.0.0-20250327172914-2fdc97757edf/go.mod h1:B3UgsnsBZS/eX42BlaNiJkD1pPOUa+oF1IYC6Yd2CEU=
73 | github.com/charmbracelet/x/exp/strings v0.0.0-20240722160745-212f7b056ed0 h1:qko3AQ4gK1MTS/de7F5hPGx6/k1u0w4TeYmBFwzYVP4=
74 | github.com/charmbracelet/x/exp/strings v0.0.0-20240722160745-212f7b056ed0/go.mod h1:pBhA0ybfXv6hDjQUZ7hk1lVxBiUbupdw5R31yPUViVQ=
75 | github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
76 | github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
77 | github.com/charmbracelet/x/termios v0.1.1 h1:o3Q2bT8eqzGnGPOYheoYS8eEleT5ZVNYNy8JawjaNZY=
78 | github.com/charmbracelet/x/termios v0.1.1/go.mod h1:rB7fnv1TgOPOyyKRJ9o+AsTU/vK5WHJ2ivHeut/Pcwo=
79 | github.com/charmbracelet/x/xpty v0.1.2 h1:Pqmu4TEJ8KeA9uSkISKMU3f+C1F6OGBn8ABuGlqCbtI=
80 | github.com/charmbracelet/x/xpty v0.1.2/go.mod h1:XK2Z0id5rtLWcpeNiMYBccNNBrP2IJnzHI0Lq13Xzq4=
81 | github.com/cohere-ai/cohere-go/v2 v2.14.1 h1:fXNrV02rfrP9ieI+S6mHV6Nt2Z0uEDPkK3rnc5bJWCM=
82 | github.com/cohere-ai/cohere-go/v2 v2.14.1/go.mod h1:MuiJkCxlR18BDV2qQPbz2Yb/OCVphT1y6nD2zYaKeR0=
83 | github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
84 | github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=
85 | github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
86 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
87 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
88 | github.com/dlclark/regexp2 v1.11.4 h1:rPYF9/LECdNymJufQKmri9gV604RvvABwgOA8un7yAo=
89 | github.com/dlclark/regexp2 v1.11.4/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
90 | github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
91 | github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
92 | github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
93 | github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
94 | github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
95 | github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
96 | github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
97 | github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
98 | github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
99 | github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
100 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
101 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
102 | github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs=
103 | github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
104 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
105 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
106 | github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
107 | github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
108 | github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
109 | github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
110 | github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
111 | github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
112 | github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
113 | github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=
114 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
115 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
116 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
117 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
118 | github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
119 | github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
120 | github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
121 | github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
122 | github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
123 | github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
124 | github.com/mark3labs/mcp-go v0.31.0 h1:4UxSV8aM770OPmTvaVe/b1rA2oZAjBMhGBfUgOGut+4=
125 | github.com/mark3labs/mcp-go v0.31.0/go.mod h1:rXqOudj/djTORU/ThxYx8fqEVj/5pvTuuebQ2RC7uk4=
126 | github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
127 | github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
128 | github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
129 | github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
130 | github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
131 | github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
132 | github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
133 | github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
134 | github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
135 | github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk=
136 | github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA=
137 | github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
138 | github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE=
139 | github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
140 | github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
141 | github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
142 | github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
143 | github.com/muesli/mango v0.1.0 h1:DZQK45d2gGbql1arsYA4vfg4d7I9Hfx5rX/GCmzsAvI=
144 | github.com/muesli/mango v0.1.0/go.mod h1:5XFpbC8jY5UUv89YQciiXNlbi+iJgt29VDC5xbzrLL4=
145 | github.com/muesli/mango-cobra v1.2.0 h1:DQvjzAM0PMZr85Iv9LIMaYISpTOliMEg+uMFtNbYvWg=
146 | github.com/muesli/mango-cobra v1.2.0/go.mod h1:vMJL54QytZAJhCT13LPVDfkvCUJ5/4jNUKF/8NC2UjA=
147 | github.com/muesli/mango-pflag v0.1.0 h1:UADqbYgpUyRoBja3g6LUL+3LErjpsOwaC9ywvBWe7Sg=
148 | github.com/muesli/mango-pflag v0.1.0/go.mod h1:YEQomTxaCUp8PrbhFh10UfbhbQrM/xJ4i2PB8VTLLW0=
149 | github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s=
150 | github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8=
151 | github.com/muesli/roff v0.1.0 h1:YD0lalCotmYuF5HhZliKWlIx7IEhiXeSfq7hNjFqGF8=
152 | github.com/muesli/roff v0.1.0/go.mod h1:pjAHQM9hdUUwm/krAfrLGgJkXJ+YuhtsfZ42kieB2Ig=
153 | github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc=
154 | github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk=
155 | github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
156 | github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
157 | github.com/ollama/ollama v0.9.0 h1:GvdGhi8G/QMnFrY0TMLDy1bXua+Ify8KTkFe4ZY/OZs=
158 | github.com/ollama/ollama v0.9.0/go.mod h1:aio9yQ7nc4uwIbn6S0LkGEPgn8/9bNQLL1nHuH+OcD0=
159 | github.com/openai/openai-go v1.2.0 h1:6pcZcz1u/hYeSn6KXil3AKXks3+wKPTWKgpuq8eQbU0=
160 | github.com/openai/openai-go v1.2.0/go.mod h1:g461MYGXEXBVdV5SaR/5tNzNbSfwTBBefwc+LlDCK0Y=
161 | github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
162 | github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
163 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
164 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
165 | github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
166 | github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
167 | github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
168 | github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
169 | github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
170 | github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
171 | github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
172 | github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
173 | github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
174 | github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
175 | github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
176 | github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
177 | github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
178 | github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
179 | github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
180 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
181 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
182 | github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
183 | github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM=
184 | github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
185 | github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
186 | github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
187 | github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
188 | github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
189 | github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
190 | github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
191 | github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
192 | github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
193 | github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
194 | github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4=
195 | github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4=
196 | github.com/yuin/goldmark v1.7.1/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E=
197 | github.com/yuin/goldmark v1.7.8 h1:iERMLn0/QJeHFhxSt3p6PeN9mGnvIKSpG9YYorDMnic=
198 | github.com/yuin/goldmark v1.7.8/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E=
199 | github.com/yuin/goldmark-emoji v1.0.5 h1:EMVWyCGPlXJfUXBXpuMu+ii3TIaxbVBnEX9uaDC4cIk=
200 | github.com/yuin/goldmark-emoji v1.0.5/go.mod h1:tTkZEbwu5wkPmgTcitqddVxY9osFZiavD+r4AzQrh1U=
201 | golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
202 | golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
203 | golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM=
204 | golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
205 | golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
206 | golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
207 | golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
208 | golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
209 | golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
210 | golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
211 | golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
212 | golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
213 | golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
214 | golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
215 | golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
216 | golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
217 | golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
218 | golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
219 | golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
220 | golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
221 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
222 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
223 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
224 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
225 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
226 | modernc.org/cc/v4 v4.26.1 h1:+X5NtzVBn0KgsBCBe+xkDC7twLb/jNVj9FPgiwSQO3s=
227 | modernc.org/cc/v4 v4.26.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
228 | modernc.org/ccgo/v4 v4.28.0 h1:rjznn6WWehKq7dG4JtLRKxb52Ecv8OUGah8+Z/SfpNU=
229 | modernc.org/ccgo/v4 v4.28.0/go.mod h1:JygV3+9AV6SmPhDasu4JgquwU81XAKLd3OKTUDNOiKE=
230 | modernc.org/fileutil v1.3.1 h1:8vq5fe7jdtEvoCf3Zf9Nm0Q05sH6kGx0Op2CPx1wTC8=
231 | modernc.org/fileutil v1.3.1/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
232 | modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
233 | modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
234 | modernc.org/libc v1.65.7 h1:Ia9Z4yzZtWNtUIuiPuQ7Qf7kxYrxP1/jeHZzG8bFu00=
235 | modernc.org/libc v1.65.7/go.mod h1:011EQibzzio/VX3ygj1qGFt5kMjP0lHb0qCW5/D/pQU=
236 | modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
237 | modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
238 | modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
239 | modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
240 | modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
241 | modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
242 | modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
243 | modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
244 | modernc.org/sqlite v1.37.1 h1:EgHJK/FPoqC+q2YBXg7fUmES37pCHFc97sI7zSayBEs=
245 | modernc.org/sqlite v1.37.1/go.mod h1:XwdRtsE1MpiBcL54+MbKcaDvcuej+IYSMfLN6gSKV8g=
246 | modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
247 | modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
248 | modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
249 | modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
250 |
--------------------------------------------------------------------------------
/internal/anthropic/anthropic.go:
--------------------------------------------------------------------------------
1 | // Package anthropic implements [stream.Stream] for Anthropic.
2 | package anthropic
3 |
4 | import (
5 | "context"
6 | "net/http"
7 | "strings"
8 |
9 | "github.com/anthropics/anthropic-sdk-go"
10 | "github.com/anthropics/anthropic-sdk-go/option"
11 | "github.com/anthropics/anthropic-sdk-go/packages/ssestream"
12 | "github.com/charmbracelet/mods/internal/proto"
13 | "github.com/charmbracelet/mods/internal/stream"
14 | )
15 |
// Compile-time assertion that *Client implements the stream.Client interface.
var _ stream.Client = &Client{}

// Client is a client for the Anthropic API.
type Client struct {
	*anthropic.Client // embedded SDK client; its methods are promoted onto Client
}
22 |
23 | // Request implements stream.Client.
24 | func (c *Client) Request(ctx context.Context, request proto.Request) stream.Stream {
25 | system, messages := fromProtoMessages(request.Messages)
26 | body := anthropic.MessageNewParams{
27 | Model: anthropic.Model(request.Model),
28 | Messages: messages,
29 | System: system,
30 | Tools: fromMCPTools(request.Tools),
31 | StopSequences: request.Stop,
32 | }
33 |
34 | if request.MaxTokens != nil {
35 | body.MaxTokens = *request.MaxTokens
36 | } else {
37 | body.MaxTokens = 4096
38 | }
39 |
40 | if request.Temperature != nil {
41 | body.Temperature = anthropic.Float(*request.Temperature)
42 | }
43 |
44 | if request.TopP != nil {
45 | body.TopP = anthropic.Float(*request.TopP)
46 | }
47 |
48 | s := &Stream{
49 | stream: c.Messages.NewStreaming(ctx, body),
50 | request: body,
51 | toolCall: request.ToolCaller,
52 | messages: request.Messages,
53 | }
54 |
55 | s.factory = func() *ssestream.Stream[anthropic.MessageStreamEventUnion] {
56 | return c.Messages.NewStreaming(ctx, s.request)
57 | }
58 | return s
59 | }
60 |
// Config represents the configuration for the Anthropic API client.
type Config struct {
	AuthToken          string       // API key used to authenticate requests
	BaseURL            string       // optional base URL override; empty uses the SDK default
	HTTPClient         *http.Client // HTTP client used for API calls
	EmptyMessagesLimit uint         // NOTE(review): not referenced in this file — confirm it is used elsewhere
}
68 |
69 | // DefaultConfig returns the default configuration for the Anthropic API client.
70 | func DefaultConfig(authToken string) Config {
71 | return Config{
72 | AuthToken: authToken,
73 | HTTPClient: &http.Client{},
74 | }
75 | }
76 |
77 | // New anthropic client with the given configuration.
78 | func New(config Config) *Client {
79 | opts := []option.RequestOption{
80 | option.WithAPIKey(config.AuthToken),
81 | option.WithHTTPClient(config.HTTPClient),
82 | }
83 | if config.BaseURL != "" {
84 | opts = append(opts, option.WithBaseURL(strings.TrimSuffix(config.BaseURL, "/v1")))
85 | }
86 | client := anthropic.NewClient(opts...)
87 | return &Client{
88 | Client: &client,
89 | }
90 | }
91 |
// Stream represents a stream for chat completion.
type Stream struct {
	done     bool                                                        // set when the current SSE stream is exhausted; Next restarts on the next call
	stream   *ssestream.Stream[anthropic.MessageStreamEventUnion]        // current underlying SSE stream
	request  anthropic.MessageNewParams                                  // request body; grows as assistant/tool messages accumulate
	factory  func() *ssestream.Stream[anthropic.MessageStreamEventUnion] // builds a fresh stream from the current request
	message  anthropic.Message                                           // assistant message accumulated from stream events
	toolCall func(name string, data []byte) (string, error)              // callback used to execute a tool call
	messages []proto.Message                                             // proto-level transcript, updated as the stream progresses
}
102 |
103 | // CallTools implements stream.Stream.
104 | func (s *Stream) CallTools() []proto.ToolCallStatus {
105 | var statuses []proto.ToolCallStatus
106 | for _, block := range s.message.Content {
107 | switch call := block.AsAny().(type) {
108 | case anthropic.ToolUseBlock:
109 | msg, status := stream.CallTool(
110 | call.ID,
111 | call.Name,
112 | []byte(call.JSON.Input.Raw()),
113 | s.toolCall,
114 | )
115 | resp := anthropic.NewUserMessage(
116 | anthropic.NewToolResultBlock(
117 | call.ID,
118 | msg.Content,
119 | status.Err != nil,
120 | ),
121 | )
122 | s.request.Messages = append(s.request.Messages, resp)
123 | s.messages = append(s.messages, msg)
124 | statuses = append(statuses, status)
125 | }
126 | }
127 | return statuses
128 | }
129 |
130 | // Close implements stream.Stream.
131 | func (s *Stream) Close() error { return s.stream.Close() } //nolint:wrapcheck
132 |
133 | // Current implements stream.Stream.
134 | func (s *Stream) Current() (proto.Chunk, error) {
135 | event := s.stream.Current()
136 | if err := s.message.Accumulate(event); err != nil {
137 | return proto.Chunk{}, err //nolint:wrapcheck
138 | }
139 | switch eventVariant := event.AsAny().(type) {
140 | case anthropic.ContentBlockDeltaEvent:
141 | switch deltaVariant := eventVariant.Delta.AsAny().(type) {
142 | case anthropic.TextDelta:
143 | return proto.Chunk{
144 | Content: deltaVariant.Text,
145 | }, nil
146 | }
147 | }
148 | return proto.Chunk{}, stream.ErrNoContent
149 | }
150 |
// Err implements stream.Stream. It reports any error from the underlying SSE stream.
func (s *Stream) Err() error { return s.stream.Err() } //nolint:wrapcheck
153 |
// Messages implements stream.Stream. It returns the accumulated proto-level transcript.
func (s *Stream) Messages() []proto.Message { return s.messages }
156 |
157 | // Next implements stream.Stream.
158 | func (s *Stream) Next() bool {
159 | if s.done {
160 | s.done = false
161 | s.stream = s.factory()
162 | s.message = anthropic.Message{}
163 | }
164 |
165 | if s.stream.Next() {
166 | return true
167 | }
168 |
169 | s.done = true
170 | s.request.Messages = append(s.request.Messages, s.message.ToParam())
171 | s.messages = append(s.messages, toProtoMessage(s.message.ToParam()))
172 |
173 | return false
174 | }
175 |
--------------------------------------------------------------------------------
/internal/anthropic/format.go:
--------------------------------------------------------------------------------
1 | package anthropic
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 |
7 | "github.com/anthropics/anthropic-sdk-go"
8 | "github.com/charmbracelet/mods/internal/proto"
9 | "github.com/mark3labs/mcp-go/mcp"
10 | )
11 |
12 | func fromMCPTools(mcps map[string][]mcp.Tool) []anthropic.ToolUnionParam {
13 | var tools []anthropic.ToolUnionParam
14 | for name, serverTools := range mcps {
15 | for _, tool := range serverTools {
16 | tools = append(tools, anthropic.ToolUnionParam{
17 | OfTool: &anthropic.ToolParam{
18 | InputSchema: anthropic.ToolInputSchemaParam{
19 | Properties: tool.InputSchema.Properties,
20 | },
21 | Name: fmt.Sprintf("%s_%s", name, tool.Name),
22 | Description: anthropic.String(tool.Description),
23 | },
24 | })
25 | }
26 | }
27 | return tools
28 | }
29 |
30 | func fromProtoMessages(input []proto.Message) (system []anthropic.TextBlockParam, messages []anthropic.MessageParam) {
31 | for _, msg := range input {
32 | switch msg.Role {
33 | case proto.RoleSystem:
34 | // system is not a role in anthropic, must set it as the system part of the request.
35 | system = append(system, *anthropic.NewTextBlock(msg.Content).OfText)
36 | case proto.RoleTool:
37 | for _, call := range msg.ToolCalls {
38 | block := anthropic.NewToolResultBlock(call.ID, msg.Content, call.IsError)
39 | // tool is not a role in anthropic, must be a user message.
40 | messages = append(messages, anthropic.NewUserMessage(block))
41 | break
42 | }
43 | case proto.RoleUser:
44 | block := anthropic.NewTextBlock(msg.Content)
45 | messages = append(messages, anthropic.NewUserMessage(block))
46 | case proto.RoleAssistant:
47 | blocks := []anthropic.ContentBlockParamUnion{
48 | anthropic.NewTextBlock(msg.Content),
49 | }
50 | for _, tool := range msg.ToolCalls {
51 | block := anthropic.ContentBlockParamUnion{
52 | OfToolUse: &anthropic.ToolUseBlockParam{
53 | ID: tool.ID,
54 | Name: tool.Function.Name,
55 | Input: json.RawMessage(tool.Function.Arguments),
56 | },
57 | }
58 | blocks = append(blocks, block)
59 | }
60 | messages = append(messages, anthropic.NewAssistantMessage(blocks...))
61 | }
62 | }
63 | return system, messages
64 | }
65 |
66 | func toProtoMessage(in anthropic.MessageParam) proto.Message {
67 | msg := proto.Message{
68 | Role: string(in.Role),
69 | }
70 |
71 | for _, block := range in.Content {
72 | if txt := block.OfText; txt != nil {
73 | msg.Content += txt.Text
74 | }
75 |
76 | if call := block.OfToolResult; call != nil {
77 | msg.ToolCalls = append(msg.ToolCalls, proto.ToolCall{
78 | ID: call.ToolUseID,
79 | IsError: call.IsError.Value,
80 | })
81 | }
82 |
83 | if call := block.OfToolUse; call != nil {
84 | msg.ToolCalls = append(msg.ToolCalls, proto.ToolCall{
85 | ID: call.ID,
86 | Function: proto.Function{
87 | Name: call.Name,
88 | Arguments: call.Input.(json.RawMessage),
89 | },
90 | })
91 | }
92 | }
93 |
94 | return msg
95 | }
96 |
--------------------------------------------------------------------------------
/internal/cache/cache.go:
--------------------------------------------------------------------------------
1 | // Package cache provides a simple in-file cache implementation.
2 | package cache
3 |
4 | import (
5 | "errors"
6 | "fmt"
7 | "io"
8 | "os"
9 | "path/filepath"
10 | )
11 |
// Type represents the type of cache being used.
type Type string

// Cache types for different purposes.
const (
	ConversationCache Type = "conversations"
	TemporaryCache    Type = "temp"
)

// cacheExt is the file extension used for every cached entry.
const cacheExt = ".gob"

// errInvalidID is returned when an empty cache ID is supplied.
var errInvalidID = errors.New("invalid id")

// Cache is a generic cache implementation that stores data in files.
type Cache[T any] struct {
	baseDir string // root directory for all cache types
	cType   Type   // subdirectory for this cache's entries
}

// New creates a new cache instance with the specified base directory and
// cache type, creating the backing directory if needed.
func New[T any](baseDir string, cacheType Type) (*Cache[T], error) {
	target := filepath.Join(baseDir, string(cacheType))
	if err := os.MkdirAll(target, os.ModePerm); err != nil { //nolint:gosec
		return nil, fmt.Errorf("create cache directory: %w", err)
	}
	c := Cache[T]{baseDir: baseDir, cType: cacheType}
	return &c, nil
}

// dir returns the directory holding this cache's files.
func (c *Cache[T]) dir() string {
	return filepath.Join(c.baseDir, string(c.cType))
}
46 |
47 | func (c *Cache[T]) Read(id string, readFn func(io.Reader) error) error {
48 | if id == "" {
49 | return fmt.Errorf("read: %w", errInvalidID)
50 | }
51 | file, err := os.Open(filepath.Join(c.dir(), id+cacheExt))
52 | if err != nil {
53 | return fmt.Errorf("read: %w", err)
54 | }
55 | defer file.Close() //nolint:errcheck
56 |
57 | if err := readFn(file); err != nil {
58 | return fmt.Errorf("read: %w", err)
59 | }
60 | return nil
61 | }
62 |
63 | func (c *Cache[T]) Write(id string, writeFn func(io.Writer) error) error {
64 | if id == "" {
65 | return fmt.Errorf("write: %w", errInvalidID)
66 | }
67 |
68 | file, err := os.Create(filepath.Join(c.dir(), id+cacheExt))
69 | if err != nil {
70 | return fmt.Errorf("write: %w", err)
71 | }
72 | defer file.Close() //nolint:errcheck
73 |
74 | if err := writeFn(file); err != nil {
75 | return fmt.Errorf("write: %w", err)
76 | }
77 |
78 | return nil
79 | }
80 |
81 | // Delete removes a cached item by its ID.
82 | func (c *Cache[T]) Delete(id string) error {
83 | if id == "" {
84 | return fmt.Errorf("delete: %w", errInvalidID)
85 | }
86 | if err := os.Remove(filepath.Join(c.dir(), id+cacheExt)); err != nil {
87 | return fmt.Errorf("delete: %w", err)
88 | }
89 | return nil
90 | }
91 |
--------------------------------------------------------------------------------
/internal/cache/cache_test.go:
--------------------------------------------------------------------------------
1 | package cache
2 |
3 | import (
4 | "io"
5 | "os"
6 | "testing"
7 | "time"
8 |
9 | "github.com/charmbracelet/mods/internal/proto"
10 | "github.com/stretchr/testify/require"
11 | )
12 |
13 | func TestCache(t *testing.T) {
14 | t.Run("read non-existent", func(t *testing.T) {
15 | cache, err := NewConversations(t.TempDir())
16 | require.NoError(t, err)
17 | err = cache.Read("super-fake", &[]proto.Message{})
18 | require.ErrorIs(t, err, os.ErrNotExist)
19 | })
20 |
21 | t.Run("write", func(t *testing.T) {
22 | cache, err := NewConversations(t.TempDir())
23 | require.NoError(t, err)
24 | messages := []proto.Message{
25 | {
26 | Role: proto.RoleUser,
27 | Content: "first 4 natural numbers",
28 | },
29 | {
30 | Role: proto.RoleAssistant,
31 | Content: "1, 2, 3, 4",
32 | },
33 | }
34 | require.NoError(t, cache.Write("fake", &messages))
35 |
36 | result := []proto.Message{}
37 | require.NoError(t, cache.Read("fake", &result))
38 |
39 | require.ElementsMatch(t, messages, result)
40 | })
41 |
42 | t.Run("delete", func(t *testing.T) {
43 | cache, err := NewConversations(t.TempDir())
44 | require.NoError(t, err)
45 | cache.Write("fake", &[]proto.Message{})
46 | require.NoError(t, cache.Delete("fake"))
47 | require.ErrorIs(t, cache.Read("fake", nil), os.ErrNotExist)
48 | })
49 |
50 | t.Run("invalid id", func(t *testing.T) {
51 | t.Run("write", func(t *testing.T) {
52 | cache, err := NewConversations(t.TempDir())
53 | require.NoError(t, err)
54 | require.ErrorIs(t, cache.Write("", nil), errInvalidID)
55 | })
56 | t.Run("delete", func(t *testing.T) {
57 | cache, err := NewConversations(t.TempDir())
58 | require.NoError(t, err)
59 | require.ErrorIs(t, cache.Delete(""), errInvalidID)
60 | })
61 | t.Run("read", func(t *testing.T) {
62 | cache, err := NewConversations(t.TempDir())
63 | require.NoError(t, err)
64 | require.ErrorIs(t, cache.Read("", nil), errInvalidID)
65 | })
66 | })
67 | }
68 |
69 | func TestExpiringCache(t *testing.T) {
70 | t.Run("write and read", func(t *testing.T) {
71 | cache, err := NewExpiring[string](t.TempDir())
72 | require.NoError(t, err)
73 |
74 | // Write a value with expiry
75 | data := "test data"
76 | expiresAt := time.Now().Add(time.Hour).Unix()
77 | err = cache.Write("test", expiresAt, func(w io.Writer) error {
78 | _, err := w.Write([]byte(data))
79 | return err
80 | })
81 | require.NoError(t, err)
82 |
83 | // Read it back
84 | var result string
85 | err = cache.Read("test", func(r io.Reader) error {
86 | b, err := io.ReadAll(r)
87 | if err != nil {
88 | return err
89 | }
90 | result = string(b)
91 | return nil
92 | })
93 | require.NoError(t, err)
94 | require.Equal(t, data, result)
95 | })
96 |
97 | t.Run("expired token", func(t *testing.T) {
98 | cache, err := NewExpiring[string](t.TempDir())
99 | require.NoError(t, err)
100 |
101 | // Write a value that's already expired
102 | data := "test data"
103 | expiresAt := time.Now().Add(-time.Hour).Unix() // expired 1 hour ago
104 | err = cache.Write("test", expiresAt, func(w io.Writer) error {
105 | _, err := w.Write([]byte(data))
106 | return err
107 | })
108 | require.NoError(t, err)
109 |
110 | // Try to read it
111 | err = cache.Read("test", func(r io.Reader) error {
112 | return nil
113 | })
114 | require.Error(t, err)
115 | require.True(t, os.IsNotExist(err))
116 | })
117 |
118 | t.Run("overwrite token", func(t *testing.T) {
119 | cache, err := NewExpiring[string](t.TempDir())
120 | require.NoError(t, err)
121 |
122 | // Write initial value
123 | data1 := "test data 1"
124 | expiresAt1 := time.Now().Add(time.Hour).Unix()
125 | err = cache.Write("test", expiresAt1, func(w io.Writer) error {
126 | _, err := w.Write([]byte(data1))
127 | return err
128 | })
129 | require.NoError(t, err)
130 |
131 | // Write new value
132 | data2 := "test data 2"
133 | expiresAt2 := time.Now().Add(2 * time.Hour).Unix()
134 | err = cache.Write("test", expiresAt2, func(w io.Writer) error {
135 | _, err := w.Write([]byte(data2))
136 | return err
137 | })
138 | require.NoError(t, err)
139 |
140 | // Read it back - should get the new value
141 | var result string
142 | err = cache.Read("test", func(r io.Reader) error {
143 | b, err := io.ReadAll(r)
144 | if err != nil {
145 | return err
146 | }
147 | result = string(b)
148 | return nil
149 | })
150 | require.NoError(t, err)
151 | require.Equal(t, data2, result)
152 | })
153 | }
154 |
--------------------------------------------------------------------------------
/internal/cache/convo.go:
--------------------------------------------------------------------------------
1 | package cache
2 |
3 | import (
4 | "bytes"
5 | "encoding/gob"
6 | "errors"
7 | "fmt"
8 | "io"
9 |
10 | "github.com/charmbracelet/mods/internal/proto"
11 | )
12 |
13 | // Conversations is the conversation cache.
14 | type Conversations struct {
15 | cache *Cache[[]proto.Message]
16 | }
17 |
18 | // NewConversations creates a new conversation cache.
19 | func NewConversations(dir string) (*Conversations, error) {
20 | cache, err := New[[]proto.Message](dir, ConversationCache)
21 | if err != nil {
22 | return nil, err
23 | }
24 | return &Conversations{
25 | cache: cache,
26 | }, nil
27 | }
28 |
// Read loads the conversation stored under id into messages.
func (c *Conversations) Read(id string, messages *[]proto.Message) error {
	return c.cache.Read(id, func(r io.Reader) error {
		return decode(r, messages)
	})
}
34 |
// Write persists the conversation under id, gob-encoding the messages.
func (c *Conversations) Write(id string, messages *[]proto.Message) error {
	return c.cache.Write(id, func(w io.Writer) error {
		return encode(w, messages)
	})
}
40 |
41 | // Delete a conversation.
42 | func (c *Conversations) Delete(id string) error {
43 | return c.cache.Delete(id)
44 | }
45 |
// init registers the concrete type returned by errors.New with gob so that
// error values can be encoded/decoded.
// NOTE(review): presumably error values can appear inside cached message
// payloads — confirm against the writers of this cache.
func init() {
	gob.Register(errors.New(""))
}
49 |
50 | func encode(w io.Writer, messages *[]proto.Message) error {
51 | if err := gob.NewEncoder(w).Encode(messages); err != nil {
52 | return fmt.Errorf("encode: %w", err)
53 | }
54 | return nil
55 | }
56 |
// decode decodes the given reader using gob.
// we use a teereader in case the user tries to read a message in the old
// format (from before MCP), and if so convert between types to avoid encoding
// errors.
func decode(r io.Reader, messages *[]proto.Message) error {
	// tr captures every byte the first decoder consumes, so the legacy
	// decoder below can re-read the same stream from the beginning.
	var tr bytes.Buffer
	if err1 := gob.NewDecoder(io.TeeReader(r, &tr)).Decode(messages); err1 != nil {
		// New-format decode failed; retry with the pre-MCP shape.
		var noCalls []noCallMessage
		if err2 := gob.NewDecoder(&tr).Decode(&noCalls); err2 != nil {
			// Neither format decoded: report the primary (new-format) error.
			return fmt.Errorf("decode: %w", err1)
		}
		// NOTE(review): if the first Decode partially populated *messages
		// before failing, this append could duplicate entries — confirm gob
		// leaves the target untouched on error.
		for _, msg := range noCalls {
			*messages = append(*messages, proto.Message{
				Role:    msg.Role,
				Content: msg.Content,
			})
		}
	}
	return nil
}
77 |
78 | // noCallMessage compatibility with messages with no tool calls.
79 | type noCallMessage struct {
80 | Content string
81 | Role string
82 | }
83 |
--------------------------------------------------------------------------------
/internal/cache/expiring.go:
--------------------------------------------------------------------------------
1 | package cache
2 |
3 | import (
4 | "fmt"
5 | "io"
6 | "os"
7 | "path/filepath"
8 | "strconv"
9 | "strings"
10 | "time"
11 | )
12 |
13 | // ExpiringCache is a cache implementation that supports expiration of cached items.
14 | type ExpiringCache[T any] struct {
15 | cache *Cache[T]
16 | }
17 |
18 | // NewExpiring creates a new cache instance that supports item expiration.
19 | func NewExpiring[T any](path string) (*ExpiringCache[T], error) {
20 | cache, err := New[T](path, TemporaryCache)
21 | if err != nil {
22 | return nil, fmt.Errorf("create expiring cache: %w", err)
23 | }
24 | return &ExpiringCache[T]{cache: cache}, nil
25 | }
26 |
// getCacheFilename builds the on-disk name for an entry: "<id>.<unix-expiry>".
// The expiry is encoded in the filename so reads can check it without
// opening the file.
func (c *ExpiringCache[T]) getCacheFilename(id string, expiresAt int64) string {
	return fmt.Sprintf("%s.%d", id, expiresAt)
}
30 |
31 | func (c *ExpiringCache[T]) Read(id string, readFn func(io.Reader) error) error {
32 | pattern := fmt.Sprintf("%s.*", id)
33 | matches, err := filepath.Glob(filepath.Join(c.cache.dir(), pattern))
34 | if err != nil {
35 | return fmt.Errorf("failed to read read expiring cache: %w", err)
36 | }
37 |
38 | if len(matches) == 0 {
39 | return fmt.Errorf("item not found")
40 | }
41 |
42 | filename := filepath.Base(matches[0])
43 | parts := strings.Split(filename, ".")
44 | expectedFilenameParts := 2 // name and expiration timestamp
45 |
46 | if len(parts) != expectedFilenameParts {
47 | return fmt.Errorf("invalid cache filename")
48 | }
49 |
50 | expiresAt, err := strconv.ParseInt(parts[1], 10, 64)
51 | if err != nil {
52 | return fmt.Errorf("invalid expiration timestamp")
53 | }
54 |
55 | if expiresAt < time.Now().Unix() {
56 | if err := os.Remove(matches[0]); err != nil {
57 | return fmt.Errorf("failed to remove expired cache file: %w", err)
58 | }
59 | return os.ErrNotExist
60 | }
61 |
62 | file, err := os.Open(matches[0])
63 | if err != nil {
64 | return fmt.Errorf("failed to open expiring cache file: %w", err)
65 | }
66 | defer func() {
67 | if cerr := file.Close(); cerr != nil {
68 | err = cerr
69 | }
70 | }()
71 |
72 | return readFn(file)
73 | }
74 |
75 | func (c *ExpiringCache[T]) Write(id string, expiresAt int64, writeFn func(io.Writer) error) error {
76 | pattern := fmt.Sprintf("%s.*", id)
77 | oldFiles, _ := filepath.Glob(filepath.Join(c.cache.dir(), pattern))
78 | for _, file := range oldFiles {
79 | if err := os.Remove(file); err != nil {
80 | return fmt.Errorf("failed to remove old cache file: %w", err)
81 | }
82 | }
83 |
84 | filename := c.getCacheFilename(id, expiresAt)
85 | file, err := os.Create(filepath.Join(c.cache.dir(), filename))
86 | if err != nil {
87 | return fmt.Errorf("failed to create expiring cache file: %w", err)
88 | }
89 | defer func() {
90 | if cerr := file.Close(); cerr != nil {
91 | err = cerr
92 | }
93 | }()
94 |
95 | return writeFn(file)
96 | }
97 |
98 | // Delete removes an expired cached item by its ID.
99 | func (c *ExpiringCache[T]) Delete(id string) error {
100 | pattern := fmt.Sprintf("%s.*", id)
101 | matches, err := filepath.Glob(filepath.Join(c.cache.dir(), pattern))
102 | if err != nil {
103 | return fmt.Errorf("failed to delete expiring cache: %w", err)
104 | }
105 |
106 | for _, match := range matches {
107 | if err := os.Remove(match); err != nil {
108 | return fmt.Errorf("failed to delete expiring cache file: %w", err)
109 | }
110 | }
111 |
112 | return nil
113 | }
114 |
--------------------------------------------------------------------------------
/internal/cohere/cohere.go:
--------------------------------------------------------------------------------
1 | // Package cohere implements [stream.Stream] for Cohere.
2 | package cohere
3 |
4 | import (
5 | "context"
6 | "errors"
7 | "fmt"
8 | "io"
9 | "net/http"
10 |
11 | "github.com/charmbracelet/mods/internal/proto"
12 | "github.com/charmbracelet/mods/internal/stream"
13 | cohere "github.com/cohere-ai/cohere-go/v2"
14 | "github.com/cohere-ai/cohere-go/v2/client"
15 | "github.com/cohere-ai/cohere-go/v2/core"
16 | "github.com/cohere-ai/cohere-go/v2/option"
17 | )
18 |
19 | var _ stream.Client = &Client{}
20 |
// Config represents the configuration for the Cohere API client.
type Config struct {
	AuthToken  string
	BaseURL    string
	HTTPClient *http.Client
}
27 |
// DefaultConfig returns the default configuration for the Cohere API client.
func DefaultConfig(authToken string) Config {
	return Config{
		AuthToken:  authToken,
		BaseURL:    "",
		HTTPClient: &http.Client{},
	}
}
36 |
37 | // Client cohere client.
38 | type Client struct {
39 | *client.Client
40 | }
41 |
42 | // New creates a new [Client] with the given [Config].
43 | func New(config Config) *Client {
44 | opts := []option.RequestOption{
45 | client.WithToken(config.AuthToken),
46 | client.WithHTTPClient(config.HTTPClient),
47 | }
48 |
49 | if config.BaseURL != "" {
50 | opts = append(opts, client.WithBaseURL(config.BaseURL))
51 | }
52 |
53 | return &Client{
54 | Client: client.NewClient(opts...),
55 | }
56 | }
57 |
// Request implements stream.Client.
// The final proto message becomes the prompt; everything before it is sent
// as chat history (see fromProtoMessages).
func (c *Client) Request(ctx context.Context, request proto.Request) stream.Stream {
	s := &Stream{}
	history, message := fromProtoMessages(request.Messages)
	body := &cohere.ChatStreamRequest{
		Model:         cohere.String(request.Model),
		Message:       message,
		ChatHistory:   history,
		Temperature:   request.Temperature,
		P:             request.TopP,
		StopSequences: request.Stop,
	}

	if request.MaxTokens != nil {
		body.MaxTokens = cohere.Int(int(*request.MaxTokens))
	}

	s.request = body
	s.done = false
	// Accumulates the assistant's reply as chunks arrive (see Current).
	s.message = &cohere.Message{
		Role:    "CHATBOT",
		Chatbot: &cohere.ChatMessage{},
	}
	// A connection error is recorded on the stream and surfaced via Err/Next.
	s.stream, s.err = c.ChatStream(ctx, s.request)
	return s
}
84 |
85 | // Stream is a cohere stream.
86 | type Stream struct {
87 | stream *core.Stream[cohere.StreamedChatResponse]
88 | request *cohere.ChatStreamRequest
89 | err error
90 | done bool
91 | message *cohere.Message
92 | }
93 |
94 | // CallTools implements stream.Stream.
95 | // Not supported.
96 | func (s *Stream) CallTools() []proto.ToolCallStatus { return nil }
97 |
98 | // Close implements stream.Stream.
99 | func (s *Stream) Close() error {
100 | s.done = true
101 | return s.stream.Close() //nolint:wrapcheck
102 | }
103 |
// Current implements stream.Stream.
// It receives the next event from the Cohere stream, accumulating generated
// text into s.message so Messages can return the full reply afterwards.
// Events other than "text-generation" are skipped via stream.ErrNoContent.
func (s *Stream) Current() (proto.Chunk, error) {
	resp, err := s.stream.Recv()
	if errors.Is(err, io.EOF) {
		return proto.Chunk{}, stream.ErrNoContent
	}
	if err != nil {
		return proto.Chunk{}, fmt.Errorf("cohere: %w", err)
	}
	switch resp.EventType {
	case "text-generation":
		s.message.Chatbot.Message += resp.TextGeneration.Text
		return proto.Chunk{
			Content: resp.TextGeneration.Text,
		}, nil
	}
	return proto.Chunk{}, stream.ErrNoContent
}
122 |
123 | // Err implements stream.Stream.
124 | func (s *Stream) Err() error { return s.err }
125 |
126 | // Messages implements stream.Stream.
127 | func (s *Stream) Messages() []proto.Message {
128 | return toProtoMessages(append(s.request.ChatHistory, &cohere.Message{
129 | Role: "USER",
130 | User: &cohere.ChatMessage{
131 | Message: s.request.Message,
132 | },
133 | }, s.message))
134 | }
135 |
136 | // Next implements stream.Stream.
137 | func (s *Stream) Next() bool {
138 | if s.err != nil {
139 | return false
140 | }
141 | return !s.done
142 | }
143 |
--------------------------------------------------------------------------------
/internal/cohere/format.go:
--------------------------------------------------------------------------------
1 | package cohere
2 |
3 | import (
4 | "github.com/charmbracelet/mods/internal/proto"
5 | cohere "github.com/cohere-ai/cohere-go/v2"
6 | )
7 |
8 | func fromProtoMessages(input []proto.Message) (history []*cohere.Message, message string) {
9 | var messages []*cohere.Message //nolint:prealloc
10 | for _, msg := range input {
11 | messages = append(messages, &cohere.Message{
12 | Role: fromProtoRole(msg.Role),
13 | Chatbot: &cohere.ChatMessage{
14 | Message: msg.Content,
15 | },
16 | })
17 | }
18 | if len(messages) > 1 {
19 | history = messages[:len(messages)-1]
20 | }
21 | message = messages[len(messages)-1].User.Message
22 | return history, message
23 | }
24 |
// toProtoMessages converts Cohere messages back into proto messages. Each
// Cohere role stores its text in a role-specific pointer field, and only the
// field matching the role is read.
// NOTE(review): assumes the field matching Role is non-nil — a malformed
// message would panic here; confirm upstream construction.
func toProtoMessages(input []*cohere.Message) []proto.Message {
	var messages []proto.Message
	for _, in := range input {
		switch in.Role {
		case "USER":
			messages = append(messages, proto.Message{
				Role:    proto.RoleUser,
				Content: in.User.Message,
			})
		case "SYSTEM":
			messages = append(messages, proto.Message{
				Role:    proto.RoleSystem,
				Content: in.System.Message,
			})
		case "CHATBOT":
			messages = append(messages, proto.Message{
				Role:    proto.RoleAssistant,
				Content: in.Chatbot.Message,
			})
		case "TOOL":
			// not supported yet
		}
	}
	return messages
}
50 |
51 | func fromProtoRole(role string) string {
52 | switch role {
53 | case proto.RoleSystem:
54 | return "SYSTEM"
55 | case proto.RoleAssistant:
56 | return "CHATBOT"
57 | default:
58 | return "USER"
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/internal/copilot/copilot.go:
--------------------------------------------------------------------------------
1 | // Package copilot provides a client for GitHub Copilot's API.
2 | package copilot
3 |
4 | import (
5 | "context"
6 | "encoding/json"
7 | "fmt"
8 | "io"
9 | "net/http"
10 | "os"
11 | "path/filepath"
12 | "runtime"
13 | "strings"
14 | "time"
15 |
16 | "github.com/charmbracelet/mods/internal/cache"
17 | )
18 |
const (
	copilotChatAuthURL   = "https://api.github.com/copilot_internal/v2/token"
	copilotEditorVersion = "vscode/1.95.3"
	copilotUserAgent     = "curl/7.81.0" // Necessary to bypass the user-agent check
)
24 |
25 | // AccessToken response from GitHub Copilot's token endpoint.
26 | type AccessToken struct {
27 | Token string `json:"token"`
28 | ExpiresAt int64 `json:"expires_at"`
29 | Endpoints struct {
30 | API string `json:"api"` // Can change in Github Enterprise instances
31 | OriginTracker string `json:"origin-tracker"`
32 | Proxy string `json:"proxy"`
33 | Telemetry string `json:"telemetry"`
34 | } `json:"endpoints"`
35 | ErrorDetails *struct {
36 | URL string `json:"url,omitempty"`
37 | Message string `json:"message,omitempty"`
38 | Title string `json:"title,omitempty"`
39 | NotificationID string `json:"notification_id,omitempty"`
40 | } `json:"error_details,omitempty"`
41 | }
42 |
43 | // Client copilot client.
44 | type Client struct {
45 | client *http.Client
46 | cache string
47 | AccessToken *AccessToken
48 | }
49 |
50 | // New new copilot client.
51 | func New(cacheDir string) *Client {
52 | return &Client{
53 | client: &http.Client{},
54 | cache: cacheDir,
55 | }
56 | }
57 |
// Do does the request.
// It sets the headers Copilot's API requires and transparently obtains (or
// refreshes) the access token before sending.
func (c *Client) Do(req *http.Request) (*http.Response, error) {
	req.Header.Set("Accept", "application/json")
	req.Header.Set("Editor-Version", copilotEditorVersion)
	req.Header.Set("User-Agent", copilotUserAgent)

	// A token is stale once its expiry timestamp has passed.
	isTokenExpired := c.AccessToken != nil && c.AccessToken.ExpiresAt < time.Now().Unix()

	if c.AccessToken == nil || isTokenExpired {
		accessToken, err := c.Auth()
		if err != nil {
			return nil, fmt.Errorf("failed to get access token: %w", err)
		}
		c.AccessToken = &accessToken
	}

	if c.AccessToken != nil {
		req.Header.Set("Authorization", "Bearer "+c.AccessToken.Token)
	}

	httpResp, err := c.client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to make request: %w", err)
	}

	return httpResp, nil
}
85 |
86 | func getCopilotRefreshToken() (string, error) {
87 | configPath := filepath.Join(os.Getenv("HOME"), ".config/github-copilot")
88 | if runtime.GOOS == "windows" {
89 | configPath = filepath.Join(os.Getenv("LOCALAPPDATA"), "github-copilot")
90 | }
91 |
92 | // Check both possible config file locations
93 | configFiles := []string{
94 | filepath.Join(configPath, "hosts.json"),
95 | filepath.Join(configPath, "apps.json"),
96 | }
97 |
98 | // Try to get token from config files
99 | for _, path := range configFiles {
100 | token, err := extractCopilotTokenFromFile(path)
101 | if err == nil && token != "" {
102 | return token, nil
103 | }
104 | }
105 |
106 | return "", fmt.Errorf("no token found in %s", strings.Join(configFiles, ", "))
107 | }
108 |
109 | func extractCopilotTokenFromFile(path string) (string, error) {
110 | bytes, err := os.ReadFile(path)
111 | if err != nil {
112 | return "", fmt.Errorf("failed to read Copilot configuration file at %s: %w", path, err)
113 | }
114 |
115 | var config map[string]json.RawMessage
116 | if err := json.Unmarshal(bytes, &config); err != nil {
117 | return "", fmt.Errorf("failed to parse Copilot configuration file at %s: %w", path, err)
118 | }
119 |
120 | for key, value := range config {
121 | if key == "github.com" || strings.HasPrefix(key, "github.com:") {
122 | var tokenData map[string]string
123 | if err := json.Unmarshal(value, &tokenData); err != nil {
124 | continue
125 | }
126 | if token, exists := tokenData["oauth_token"]; exists {
127 | return token, nil
128 | }
129 | }
130 | }
131 |
132 | return "", fmt.Errorf("no token found in %s", path)
133 | }
134 |
135 | // Auth authenticates the user and retrieves an access token.
136 | func (c *Client) Auth() (AccessToken, error) {
137 | cache, err := cache.NewExpiring[AccessToken](c.cache)
138 | if err == nil {
139 | var token AccessToken
140 | err = cache.Read("copilot", func(r io.Reader) error {
141 | return json.NewDecoder(r).Decode(&token)
142 | })
143 | if err == nil && token.ExpiresAt > time.Now().Unix() {
144 | return token, nil
145 | }
146 | }
147 |
148 | refreshToken, err := getCopilotRefreshToken()
149 | if err != nil {
150 | return AccessToken{}, fmt.Errorf("failed to get refresh token: %w", err)
151 | }
152 |
153 | tokenReq, err := http.NewRequestWithContext(context.TODO(), http.MethodGet, copilotChatAuthURL, nil)
154 | if err != nil {
155 | return AccessToken{}, fmt.Errorf("failed to create token request: %w", err)
156 | }
157 |
158 | tokenReq.Header.Set("Authorization", "token "+refreshToken)
159 | tokenReq.Header.Set("Accept", "application/json")
160 | tokenReq.Header.Set("Editor-Version", copilotEditorVersion)
161 | tokenReq.Header.Set("User-Agent", copilotUserAgent)
162 |
163 | tokenResp, err := c.client.Do(tokenReq)
164 | if err != nil {
165 | return AccessToken{}, fmt.Errorf("failed to get access token: %w", err)
166 | }
167 | defer func() {
168 | if closeErr := tokenResp.Body.Close(); closeErr != nil && err == nil {
169 | err = fmt.Errorf("error closing response body: %w", closeErr)
170 | }
171 | }()
172 |
173 | var tokenResponse AccessToken
174 | if err := json.NewDecoder(tokenResp.Body).Decode(&tokenResponse); err != nil {
175 | return AccessToken{}, fmt.Errorf("failed to decode token response: %w", err)
176 | }
177 |
178 | if tokenResponse.ErrorDetails != nil {
179 | return AccessToken{}, fmt.Errorf("token error: %s", tokenResponse.ErrorDetails.Message)
180 | }
181 |
182 | if cache != nil {
183 | if err := cache.Write("copilot", tokenResponse.ExpiresAt, func(w io.Writer) error {
184 | return json.NewEncoder(w).Encode(tokenResponse)
185 | }); err != nil {
186 | return AccessToken{}, fmt.Errorf("failed to cache token: %w", err)
187 | }
188 | }
189 |
190 | return tokenResponse, nil
191 | }
192 |
--------------------------------------------------------------------------------
/internal/google/format.go:
--------------------------------------------------------------------------------
1 | package google
2 |
3 | import "github.com/charmbracelet/mods/internal/proto"
4 |
5 | func fromProtoMessages(input []proto.Message) []Content {
6 | result := make([]Content, 0, len(input))
7 | for _, in := range input {
8 | switch in.Role {
9 | case proto.RoleSystem, proto.RoleUser:
10 | result = append(result, Content{
11 | Role: proto.RoleUser,
12 | Parts: []Part{{Text: in.Content}},
13 | })
14 | }
15 | }
16 | return result
17 | }
18 |
--------------------------------------------------------------------------------
/internal/google/google.go:
--------------------------------------------------------------------------------
1 | // Package google implements [stream.Stream] for Google.
2 | package google
3 |
4 | import (
5 | "bufio"
6 | "bytes"
7 | "context"
8 | "encoding/json"
9 | "fmt"
10 | "net/http"
11 |
12 | "github.com/charmbracelet/mods/internal/proto"
13 | "github.com/charmbracelet/mods/internal/stream"
14 | "github.com/openai/openai-go"
15 | )
16 |
17 | var _ stream.Client = &Client{}
18 |
19 | const emptyMessagesLimit uint = 300
20 |
21 | var (
22 | googleHeaderData = []byte("data: ")
23 | errorPrefix = []byte(`event: error`)
24 | )
25 |
26 | // Config represents the configuration for the Google API client.
27 | type Config struct {
28 | BaseURL string
29 | HTTPClient *http.Client
30 | }
31 |
// DefaultConfig returns the default configuration for the Google API client.
// NOTE(review): the API key is embedded in the URL query string, so it can
// leak into logs and proxies — consider moving it to a request header.
func DefaultConfig(model, authToken string) Config {
	return Config{
		BaseURL:    fmt.Sprintf("https://generativelanguage.googleapis.com/v1beta/models/%s:streamGenerateContent?alt=sse&key=%s", model, authToken),
		HTTPClient: &http.Client{},
	}
}
39 |
40 | // Part is a datatype containing media that is part of a multi-part Content message.
41 | type Part struct {
42 | Text string `json:"text,omitempty"`
43 | }
44 |
45 | // Content is the base structured datatype containing multi-part content of a message.
46 | type Content struct {
47 | Parts []Part `json:"parts,omitempty"`
48 | Role string `json:"role,omitempty"`
49 | }
50 |
51 | // GenerationConfig are the options for model generation and outputs. Not all parameters are configurable for every model.
52 | type GenerationConfig struct {
53 | StopSequences []string `json:"stopSequences,omitempty"`
54 | ResponseMimeType string `json:"responseMimeType,omitempty"`
55 | CandidateCount uint `json:"candidateCount,omitempty"`
56 | MaxOutputTokens uint `json:"maxOutputTokens,omitempty"`
57 | Temperature float64 `json:"temperature,omitempty"`
58 | TopP float64 `json:"topP,omitempty"`
59 | TopK int64 `json:"topK,omitempty"`
60 | }
61 |
62 | // MessageCompletionRequest represents the valid parameters and value options for the request.
63 | type MessageCompletionRequest struct {
64 | Contents []Content `json:"contents,omitempty"`
65 | GenerationConfig GenerationConfig `json:"generationConfig,omitempty"`
66 | }
67 |
68 | // RequestBuilder is an interface for building HTTP requests for the Google API.
69 | type RequestBuilder interface {
70 | Build(ctx context.Context, method, url string, body any, header http.Header) (*http.Request, error)
71 | }
72 |
73 | // NewRequestBuilder creates a new HTTPRequestBuilder.
74 | func NewRequestBuilder() *HTTPRequestBuilder {
75 | return &HTTPRequestBuilder{
76 | marshaller: &JSONMarshaller{},
77 | }
78 | }
79 |
80 | // Client is a client for the Google API.
81 | type Client struct {
82 | config Config
83 |
84 | requestBuilder RequestBuilder
85 | }
86 |
// Request implements stream.Client.
// It builds Gemini's streamGenerateContent payload from the proto request
// and starts an SSE stream; setup errors are surfaced via Stream.Err.
func (c *Client) Request(ctx context.Context, request proto.Request) stream.Stream {
	stream := new(Stream)
	body := MessageCompletionRequest{
		Contents: fromProtoMessages(request.Messages),
		GenerationConfig: GenerationConfig{
			ResponseMimeType: "",
			CandidateCount:   1,
			StopSequences:    request.Stop,
			MaxOutputTokens:  4096, // default cap; overridden below when the caller sets MaxTokens
		},
	}

	// Optional tuning knobs: only forwarded when the caller provided them.
	if request.Temperature != nil {
		body.GenerationConfig.Temperature = *request.Temperature
	}
	if request.TopP != nil {
		body.GenerationConfig.TopP = *request.TopP
	}
	if request.TopK != nil {
		body.GenerationConfig.TopK = *request.TopK
	}

	if request.MaxTokens != nil {
		body.GenerationConfig.MaxOutputTokens = uint(*request.MaxTokens) //nolint:gosec
	}

	req, err := c.newRequest(ctx, http.MethodPost, c.config.BaseURL, withBody(body))
	if err != nil {
		stream.err = err
		return stream
	}

	// On failure a non-nil zero Stream is returned, so recording err is safe.
	stream, err = googleSendRequestStream(c, req)
	if err != nil {
		stream.err = err
	}
	return stream
}
126 |
127 | // New creates a new Client with the given configuration.
128 | func New(config Config) *Client {
129 | return &Client{
130 | config: config,
131 | requestBuilder: NewRequestBuilder(),
132 | }
133 | }
134 |
135 | func (c *Client) newRequest(ctx context.Context, method, url string, setters ...requestOption) (*http.Request, error) {
136 | // Default Options
137 | args := &requestOptions{
138 | body: MessageCompletionRequest{},
139 | header: make(http.Header),
140 | }
141 | for _, setter := range setters {
142 | setter(args)
143 | }
144 | req, err := c.requestBuilder.Build(ctx, method, url, args.body, args.header)
145 | if err != nil {
146 | return new(http.Request), err
147 | }
148 | return req, nil
149 | }
150 |
// handleErrorResp decodes an error response body into an openai.Error,
// falling back to a synthetic error that carries the HTTP status code when
// the body is not valid JSON.
func (c *Client) handleErrorResp(resp *http.Response) error {
	var errRes openai.Error
	if err := json.NewDecoder(resp.Body).Decode(&errRes); err != nil {
		return &openai.Error{
			StatusCode: resp.StatusCode,
			Message:    err.Error(),
		}
	}
	errRes.StatusCode = resp.StatusCode
	return &errRes
}
163 |
164 | // Candidate represents a response candidate generated from the model.
165 | type Candidate struct {
166 | Content Content `json:"content,omitempty"`
167 | FinishReason string `json:"finishReason,omitempty"`
168 | TokenCount uint `json:"tokenCount,omitempty"`
169 | Index uint `json:"index,omitempty"`
170 | }
171 |
172 | // CompletionMessageResponse represents a response to an Google completion message.
173 | type CompletionMessageResponse struct {
174 | Candidates []Candidate `json:"candidates,omitempty"`
175 | }
176 |
177 | // Stream struct represents a stream of messages from the Google API.
178 | type Stream struct {
179 | isFinished bool
180 |
181 | reader *bufio.Reader
182 | response *http.Response
183 | err error
184 | unmarshaler Unmarshaler
185 |
186 | httpHeader
187 | }
188 |
189 | // CallTools implements stream.Stream.
190 | func (s *Stream) CallTools() []proto.ToolCallStatus {
191 | panic("unimplemented")
192 | }
193 |
194 | // Err implements stream.Stream.
195 | func (s *Stream) Err() error { return s.err }
196 |
197 | // Messages implements stream.Stream.
198 | func (s *Stream) Messages() []proto.Message {
199 | panic("unimplemented")
200 | }
201 |
202 | // Next implements stream.Stream.
203 | func (s *Stream) Next() bool {
204 | return !s.isFinished
205 | }
206 |
207 | // Close closes the stream.
208 | func (s *Stream) Close() error {
209 | return s.response.Body.Close() //nolint:wrapcheck
210 | }
211 |
// Current implements stream.Stream.
// It reads SSE lines until a "data: " payload arrives, then decodes it into
// a chunk. An "event: error" line switches to error mode, in which the next
// data line is returned as the error text.
//
//nolint:gocognit
func (s *Stream) Current() (proto.Chunk, error) {
	var (
		emptyMessagesCount uint
		hasError           bool
	)

	for {
		rawLine, readErr := s.reader.ReadBytes('\n')
		if readErr != nil {
			return proto.Chunk{}, fmt.Errorf("googleStreamReader.processLines: %w", readErr)
		}

		noSpaceLine := bytes.TrimSpace(rawLine)

		if bytes.HasPrefix(noSpaceLine, errorPrefix) {
			hasError = true
			// NOTE: Continue to the next event to get the error data.
			continue
		}

		if !bytes.HasPrefix(noSpaceLine, googleHeaderData) || hasError {
			if hasError {
				noSpaceLine = bytes.TrimPrefix(noSpaceLine, googleHeaderData)
				return proto.Chunk{}, fmt.Errorf("googleStreamReader.processLines: %s", noSpaceLine)
			}
			// Tolerate blank/keep-alive lines, but give up if the server
			// sends nothing useful for too long.
			emptyMessagesCount++
			if emptyMessagesCount > emptyMessagesLimit {
				return proto.Chunk{}, ErrTooManyEmptyStreamMessages
			}
			continue
		}

		noPrefixLine := bytes.TrimPrefix(noSpaceLine, googleHeaderData)

		var chunk CompletionMessageResponse
		unmarshalErr := s.unmarshaler.Unmarshal(noPrefixLine, &chunk)
		if unmarshalErr != nil {
			return proto.Chunk{}, fmt.Errorf("googleStreamReader.processLines: %w", unmarshalErr)
		}
		if len(chunk.Candidates) == 0 {
			return proto.Chunk{}, stream.ErrNoContent
		}
		parts := chunk.Candidates[0].Content.Parts
		if len(parts) == 0 {
			return proto.Chunk{}, stream.ErrNoContent
		}

		// Only the first candidate's first part is surfaced.
		return proto.Chunk{
			Content: chunk.Candidates[0].Content.Parts[0].Text,
		}, nil
	}
}
267 |
// googleSendRequestStream performs the streaming HTTP request and wraps the
// response body in a Stream. Non-2xx/3xx responses are converted to errors
// via handleErrorResp; a zero (non-nil) Stream accompanies any error so
// callers can record it.
func googleSendRequestStream(client *Client, req *http.Request) (*Stream, error) {
	req.Header.Set("content-type", "application/json")

	resp, err := client.config.HTTPClient.Do(req) //nolint:bodyclose // body is closed in stream.Close()
	if err != nil {
		return new(Stream), err
	}
	if isFailureStatusCode(resp) {
		return new(Stream), client.handleErrorResp(resp)
	}
	return &Stream{
		reader:      bufio.NewReader(resp.Body),
		response:    resp,
		unmarshaler: &JSONUnmarshaler{},
		httpHeader:  httpHeader(resp.Header),
	}, nil
}
285 |
--------------------------------------------------------------------------------
/internal/google/http.go:
--------------------------------------------------------------------------------
1 | package google
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "encoding/json"
7 | "errors"
8 | "fmt"
9 | "io"
10 | "net/http"
11 | )
12 |
13 | type httpHeader http.Header
14 |
15 | // ErrTooManyEmptyStreamMessages represents an error when a stream has sent too many empty messages.
16 | var ErrTooManyEmptyStreamMessages = errors.New("stream has sent too many empty messages")
17 |
18 | // Marshaller is an interface for marshalling values to bytes.
19 | type Marshaller interface {
20 | Marshal(value any) ([]byte, error)
21 | }
22 |
// JSONMarshaller is a marshaller that marshals values to JSON.
type JSONMarshaller struct{}

// Marshal encodes value as JSON, annotating any failure with the call site.
func (jm *JSONMarshaller) Marshal(value any) ([]byte, error) {
	out, err := json.Marshal(value)
	if err == nil {
		return out, nil
	}
	return out, fmt.Errorf("JSONMarshaller.Marshal: %w", err)
}
34 |
// HTTPRequestBuilder is an implementation of RequestBuilder that builds HTTP requests.
type HTTPRequestBuilder struct {
	marshaller Marshaller
}
39 |
40 | // Build builds an HTTP request.
41 | func (b *HTTPRequestBuilder) Build(
42 | ctx context.Context,
43 | method string,
44 | url string,
45 | body any,
46 | header http.Header,
47 | ) (req *http.Request, err error) {
48 | var bodyReader io.Reader
49 | if body != nil {
50 | if v, ok := body.(io.Reader); ok {
51 | bodyReader = v
52 | } else {
53 | var reqBytes []byte
54 | reqBytes, err = b.marshaller.Marshal(body)
55 | if err != nil {
56 | return
57 | }
58 | bodyReader = bytes.NewBuffer(reqBytes)
59 | }
60 | }
61 | req, err = http.NewRequestWithContext(ctx, method, url, bodyReader)
62 | if err != nil {
63 | return
64 | }
65 | if header != nil {
66 | req.Header = header
67 | }
68 | return
69 | }
70 |
71 | type requestOptions struct {
72 | body MessageCompletionRequest
73 | header http.Header
74 | }
75 |
76 | type requestOption func(*requestOptions)
77 |
78 | func withBody(body MessageCompletionRequest) requestOption {
79 | return func(args *requestOptions) {
80 | args.body = body
81 | }
82 | }
83 |
84 | // ErrorAccumulator is an interface for accumulating errors.
85 | type ErrorAccumulator interface {
86 | Write(p []byte) error
87 | Bytes() []byte
88 | }
89 |
90 | // Unmarshaler is an interface for unmarshalling bytes.
91 | type Unmarshaler interface {
92 | Unmarshal(data []byte, v any) error
93 | }
94 |
95 | func isFailureStatusCode(resp *http.Response) bool {
96 | return resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusBadRequest
97 | }
98 |
// JSONUnmarshaler is an unmarshaler that unmarshals JSON data.
type JSONUnmarshaler struct{}

// Unmarshal decodes JSON data into v, annotating any failure with the call site.
func (jm *JSONUnmarshaler) Unmarshal(data []byte, v any) error {
	if err := json.Unmarshal(data, v); err != nil {
		return fmt.Errorf("JSONUnmarshaler.Unmarshal: %w", err)
	}
	return nil
}
110 |
--------------------------------------------------------------------------------
/internal/ollama/format.go:
--------------------------------------------------------------------------------
1 | package ollama
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 | "strconv"
7 |
8 | "github.com/charmbracelet/mods/internal/proto"
9 | "github.com/mark3labs/mcp-go/mcp"
10 | "github.com/ollama/ollama/api"
11 | )
12 |
// fromMCPTools flattens the MCP server→tools map into Ollama tool
// definitions. Tool names are namespaced as "<server>_<tool>" so calls can
// be routed back to the originating server.
func fromMCPTools(mcps map[string][]mcp.Tool) []api.Tool {
	var tools []api.Tool
	for name, serverTools := range mcps {
		for _, tool := range serverTools {
			t := api.Tool{
				Type:  "function",
				Items: nil,
				Function: api.ToolFunction{
					Name:        fmt.Sprintf("%s_%s", name, tool.Name),
					Description: tool.Description,
				},
			}
			// Best-effort: a tool with an unparsable schema keeps empty
			// parameters rather than failing the whole conversion.
			_ = json.Unmarshal(tool.RawInputSchema, &t.Function.Parameters)
			tools = append(tools, t)
		}
	}
	return tools
}
31 |
32 | func fromProtoMessages(input []proto.Message) []api.Message {
33 | messages := make([]api.Message, 0, len(input))
34 | for _, msg := range input {
35 | messages = append(messages, fromProtoMessage(msg))
36 | }
37 | return messages
38 | }
39 |
40 | func fromProtoMessage(input proto.Message) api.Message {
41 | m := api.Message{
42 | Content: input.Content,
43 | Role: input.Role,
44 | }
45 | for _, call := range input.ToolCalls {
46 | var args api.ToolCallFunctionArguments
47 | _ = json.Unmarshal(call.Function.Arguments, &args)
48 | idx, _ := strconv.Atoi(call.ID)
49 | m.ToolCalls = append(m.ToolCalls, api.ToolCall{
50 | Function: api.ToolCallFunction{
51 | Index: idx,
52 | Name: call.Function.Name,
53 | Arguments: args,
54 | },
55 | })
56 | }
57 | return m
58 | }
59 |
60 | func toProtoMessage(in api.Message) proto.Message {
61 | msg := proto.Message{
62 | Role: in.Role,
63 | Content: in.Content,
64 | }
65 | for _, call := range in.ToolCalls {
66 | msg.ToolCalls = append(msg.ToolCalls, proto.ToolCall{
67 | ID: strconv.Itoa(call.Function.Index),
68 | Function: proto.Function{
69 | Arguments: []byte(call.Function.Arguments.String()),
70 | Name: call.Function.Name,
71 | },
72 | })
73 | }
74 | return msg
75 | }
76 |
--------------------------------------------------------------------------------
/internal/ollama/ollama.go:
--------------------------------------------------------------------------------
1 | // Package ollama implements [stream.Stream] for Ollama.
2 | package ollama
3 |
4 | import (
5 | "context"
6 | "net/http"
7 | "net/url"
8 | "strconv"
9 |
10 | "github.com/charmbracelet/mods/internal/proto"
11 | "github.com/charmbracelet/mods/internal/stream"
12 | "github.com/ollama/ollama/api"
13 | )
14 |
15 | var _ stream.Client = &Client{}
16 |
// Config represents the configuration for the Ollama API client.
type Config struct {
	BaseURL            string       // base endpoint, e.g. "http://localhost:11434/"
	HTTPClient         *http.Client // client used for all requests
	EmptyMessagesLimit uint         // NOTE(review): not referenced in this file — confirm it is used elsewhere
}
23 |
24 | // DefaultConfig returns the default configuration for the Ollama API client.
25 | func DefaultConfig() Config {
26 | return Config{
27 | BaseURL: "http://localhost:11434/",
28 | HTTPClient: &http.Client{},
29 | }
30 | }
31 |
// Client ollama client. It embeds the official api.Client and adds the
// stream.Client implementation via Request.
type Client struct {
	*api.Client
}
36 |
37 | // New creates a new [Client] with the given [Config].
38 | func New(config Config) (*Client, error) {
39 | u, err := url.Parse(config.BaseURL)
40 | if err != nil {
41 | return nil, err //nolint:wrapcheck
42 | }
43 | client := api.NewClient(u, config.HTTPClient)
44 | return &Client{
45 | Client: client,
46 | }, nil
47 | }
48 |
// Request implements stream.Client. It translates the generic proto request
// into an Ollama chat request and returns a restartable stream: factory
// (re)launches the Chat call in a goroutine that feeds respCh.
func (c *Client) Request(ctx context.Context, request proto.Request) stream.Stream {
	b := true
	s := &Stream{
		toolCall: request.ToolCaller,
	}
	body := api.ChatRequest{
		Model:    request.Model,
		Messages: fromProtoMessages(request.Messages),
		Stream:   &b,
		Tools:    fromMCPTools(request.Tools),
		Options:  map[string]any{},
	}

	// Only the first stop sequence is forwarded; extra entries are dropped.
	if len(request.Stop) > 0 {
		body.Options["stop"] = request.Stop[0]
	}
	// NOTE(review): MaxTokens is mapped to "num_ctx" (context window), not
	// "num_predict" (response length) — confirm this is intentional.
	if request.MaxTokens != nil {
		body.Options["num_ctx"] = *request.MaxTokens
	}
	if request.Temperature != nil {
		body.Options["temperature"] = *request.Temperature
	}
	if request.TopP != nil {
		body.Options["top_p"] = *request.TopP
	}
	s.request = body
	s.messages = request.Messages
	s.factory = func() {
		s.done = false
		s.err = nil
		s.respCh = make(chan api.ChatResponse)
		go func() {
			// NOTE(review): s.err is written here and read by Next without
			// synchronization — looks like a data race; confirm.
			if err := c.Chat(ctx, &s.request, s.fn); err != nil {
				s.err = err
			}
		}()
	}
	s.factory()
	return s
}
90 |
// Stream ollama stream. It carries the in-flight request, the channel fed
// by the Chat callback, and the accumulated message/conversation state.
type Stream struct {
	request  api.ChatRequest
	err      error  // last error from the Chat goroutine
	done     bool   // set once the final chunk of a round arrived
	factory  func() // restarts the Chat call (used for tool-call rounds)
	respCh   chan api.ChatResponse
	message  api.Message // message accumulated from streamed chunks
	toolCall func(name string, data []byte) (string, error)
	messages []proto.Message // whole conversation so far
}
102 |
// fn is the api.Client.Chat callback: it forwards each streamed response
// to respCh, blocking until Current receives it.
func (s *Stream) fn(resp api.ChatResponse) error {
	s.respCh <- resp
	return nil
}
107 |
108 | // CallTools implements stream.Stream.
109 | func (s *Stream) CallTools() []proto.ToolCallStatus {
110 | statuses := make([]proto.ToolCallStatus, 0, len(s.message.ToolCalls))
111 | for _, call := range s.message.ToolCalls {
112 | msg, status := stream.CallTool(
113 | strconv.Itoa(call.Function.Index),
114 | call.Function.Name,
115 | []byte(call.Function.Arguments.String()),
116 | s.toolCall,
117 | )
118 | s.request.Messages = append(s.request.Messages, fromProtoMessage(msg))
119 | s.messages = append(s.messages, msg)
120 | statuses = append(statuses, status)
121 | }
122 | return statuses
123 | }
124 |
// Close implements stream.Stream.
// NOTE(review): respCh is closed from the receiving side; if the Chat
// goroutine is still blocked sending in fn, that send will panic. Confirm
// callers only Close once the stream is fully drained.
func (s *Stream) Close() error {
	close(s.respCh)
	s.done = true
	return nil
}
131 |
// Current implements stream.Stream. It performs a non-blocking receive on
// respCh: if a chunk is available it is folded into the accumulated
// message, otherwise ErrNoContent is returned and the caller retries.
func (s *Stream) Current() (proto.Chunk, error) {
	select {
	case resp := <-s.respCh:
		chunk := proto.Chunk{
			Content: resp.Message.Content,
		}
		s.message.Content += resp.Message.Content
		s.message.ToolCalls = append(s.message.ToolCalls, resp.Message.ToolCalls...)
		if resp.Done {
			s.done = true
		}
		return chunk, nil
	default:
		return proto.Chunk{}, stream.ErrNoContent
	}
}
149 |
// Err implements stream.Stream, reporting the last error recorded by the
// background Chat goroutine.
func (s *Stream) Err() error { return s.err }

// Messages implements stream.Stream, returning the whole conversation.
func (s *Stream) Messages() []proto.Message { return s.messages }
155 |
// Next implements stream.Stream. It reports true unless an error occurred.
// When a round just finished (done), it restarts the Chat call via factory
// and commits the accumulated message to the conversation and request
// before the next round begins.
func (s *Stream) Next() bool {
	if s.err != nil {
		return false
	}
	if s.done {
		s.done = false
		s.factory()
		s.messages = append(s.messages, toProtoMessage(s.message))
		s.request.Messages = append(s.request.Messages, s.message)
		s.message = api.Message{}
	}
	return true
}
170 |
--------------------------------------------------------------------------------
/internal/openai/format.go:
--------------------------------------------------------------------------------
1 | package openai
2 |
3 | import (
4 | "fmt"
5 |
6 | "github.com/charmbracelet/mods/internal/proto"
7 | "github.com/mark3labs/mcp-go/mcp"
8 | "github.com/openai/openai-go"
9 | "github.com/openai/openai-go/shared/constant"
10 | )
11 |
12 | func fromMCPTools(mcps map[string][]mcp.Tool) []openai.ChatCompletionToolParam {
13 | var tools []openai.ChatCompletionToolParam
14 | for name, serverTools := range mcps {
15 | for _, tool := range serverTools {
16 | params := map[string]any{
17 | "type": "object",
18 | "properties": tool.InputSchema.Properties,
19 | }
20 | if len(tool.InputSchema.Required) > 0 {
21 | params["required"] = tool.InputSchema.Required
22 | }
23 |
24 | tools = append(tools, openai.ChatCompletionToolParam{
25 | Type: constant.Function("function"),
26 | Function: openai.FunctionDefinitionParam{
27 | Name: fmt.Sprintf("%s_%s", name, tool.Name),
28 | Description: openai.String(tool.Description),
29 | Parameters: params,
30 | },
31 | })
32 | }
33 | }
34 | return tools
35 | }
36 |
37 | func fromProtoMessages(input []proto.Message) []openai.ChatCompletionMessageParamUnion {
38 | var messages []openai.ChatCompletionMessageParamUnion
39 | for _, msg := range input {
40 | switch msg.Role {
41 | case proto.RoleSystem:
42 | messages = append(messages, openai.SystemMessage(msg.Content))
43 | case proto.RoleTool:
44 | for _, call := range msg.ToolCalls {
45 | messages = append(messages, openai.ToolMessage(msg.Content, call.ID))
46 | break
47 | }
48 | case proto.RoleUser:
49 | messages = append(messages, openai.UserMessage(msg.Content))
50 | case proto.RoleAssistant:
51 | m := openai.AssistantMessage(msg.Content)
52 | for _, tool := range msg.ToolCalls {
53 | m.OfAssistant.ToolCalls = append(m.OfAssistant.ToolCalls, openai.ChatCompletionMessageToolCallParam{
54 | ID: tool.ID,
55 | Function: openai.ChatCompletionMessageToolCallFunctionParam{
56 | Arguments: string(tool.Function.Arguments),
57 | Name: tool.Function.Name,
58 | },
59 | })
60 | }
61 | messages = append(messages, m)
62 | }
63 | }
64 | return messages
65 | }
66 |
// toProtoMessage converts an OpenAI union message back into a proto
// message, flattening multi-part text content into a single string and
// carrying over assistant tool calls.
func toProtoMessage(in openai.ChatCompletionMessageParamUnion) proto.Message {
	msg := proto.Message{
		Role: msgRole(in),
	}
	// Content may be a plain string or a list of text parts, depending on
	// which union variant is populated.
	switch content := in.GetContent().AsAny().(type) {
	case *string:
		if content == nil || *content == "" {
			break
		}
		msg.Content = *content
	case *[]openai.ChatCompletionContentPartTextParam:
		if content == nil || len(*content) == 0 {
			break
		}
		// Concatenate all text parts in order.
		for _, c := range *content {
			msg.Content += c.Text
		}
	}
	if msg.Role == proto.RoleAssistant {
		for _, tool := range in.OfAssistant.ToolCalls {
			msg.ToolCalls = append(msg.ToolCalls, proto.ToolCall{
				ID: tool.ID,
				Function: proto.Function{
					Name:      tool.Function.Name,
					Arguments: []byte(tool.Function.Arguments),
				},
			})
		}
	}
	return msg
}
98 |
99 | func msgRole(in openai.ChatCompletionMessageParamUnion) string {
100 | if in.OfSystem != nil {
101 | return proto.RoleSystem
102 | }
103 | if in.OfAssistant != nil {
104 | return proto.RoleAssistant
105 | }
106 | if in.OfUser != nil {
107 | return proto.RoleUser
108 | }
109 | if in.OfTool != nil {
110 | return proto.RoleTool
111 | }
112 | return ""
113 | }
114 |
--------------------------------------------------------------------------------
/internal/openai/openai.go:
--------------------------------------------------------------------------------
1 | // Package openai implements [stream.Stream] for OpenAI.
2 | package openai
3 |
4 | import (
5 | "context"
6 | "net/http"
7 | "strings"
8 |
9 | "github.com/charmbracelet/mods/internal/proto"
10 | "github.com/charmbracelet/mods/internal/stream"
11 | "github.com/openai/openai-go"
12 | "github.com/openai/openai-go/azure"
13 | "github.com/openai/openai-go/option"
14 | "github.com/openai/openai-go/packages/ssestream"
15 | "github.com/openai/openai-go/shared"
16 | )
17 |
18 | var _ stream.Client = &Client{}
19 |
// Client is the openai client. It embeds the official openai.Client and
// adds the stream.Client implementation via Request.
type Client struct {
	*openai.Client
}

// Config represents the configuration for the OpenAI API client.
type Config struct {
	AuthToken string // API key (or Azure AD key when APIType is "azure-ad")
	BaseURL   string // optional custom endpoint
	HTTPClient interface {
		Do(*http.Request) (*http.Response, error)
	} // optional custom HTTP client
	APIType string // "azure-ad" selects Azure options; anything else is standard
}
34 |
35 | // DefaultConfig returns the default configuration for the OpenAI API client.
36 | func DefaultConfig(authToken string) Config {
37 | return Config{
38 | AuthToken: authToken,
39 | }
40 | }
41 |
42 | // New creates a new [Client] with the given [Config].
43 | func New(config Config) *Client {
44 | opts := []option.RequestOption{}
45 |
46 | if config.HTTPClient != nil {
47 | opts = append(opts, option.WithHTTPClient(config.HTTPClient))
48 | }
49 |
50 | if config.APIType == "azure-ad" {
51 | opts = append(opts, azure.WithAPIKey(config.AuthToken))
52 | if config.BaseURL != "" {
53 | opts = append(opts, azure.WithEndpoint(config.BaseURL, "v1"))
54 | }
55 | } else {
56 | opts = append(opts, option.WithAPIKey(config.AuthToken))
57 | if config.BaseURL != "" {
58 | opts = append(opts, option.WithBaseURL(config.BaseURL))
59 | }
60 | }
61 | client := openai.NewClient(opts...)
62 | return &Client{
63 | Client: &client,
64 | }
65 | }
66 |
// Request makes a new request and returns a stream. Sampling and limit
// parameters are skipped for Perplexity "online" models; the JSON response
// format is only applied for the plain OpenAI API.
func (c *Client) Request(ctx context.Context, request proto.Request) stream.Stream {
	body := openai.ChatCompletionNewParams{
		Model:    request.Model,
		User:     openai.String(request.User),
		Messages: fromProtoMessages(request.Messages),
		Tools:    fromMCPTools(request.Tools),
	}

	// Skip these only when the API is perplexity AND the model is "online".
	if request.API != "perplexity" || !strings.Contains(request.Model, "online") {
		if request.Temperature != nil {
			body.Temperature = openai.Float(*request.Temperature)
		}
		if request.TopP != nil {
			body.TopP = openai.Float(*request.TopP)
		}
		body.Stop = openai.ChatCompletionNewParamsStopUnion{
			OfStringArray: request.Stop,
		}
		if request.MaxTokens != nil {
			body.MaxTokens = openai.Int(*request.MaxTokens)
		}
		if request.API == "openai" && request.ResponseFormat != nil && *request.ResponseFormat == "json" {
			body.ResponseFormat = openai.ChatCompletionNewParamsResponseFormatUnion{
				OfJSONObject: &shared.ResponseFormatJSONObjectParam{},
			}
		}
	}

	s := &Stream{
		stream:   c.Chat.Completions.NewStreaming(ctx, body),
		request:  body,
		toolCall: request.ToolCaller,
		messages: request.Messages,
	}
	// factory restarts the stream with the (possibly tool-extended)
	// request for follow-up rounds.
	s.factory = func() *ssestream.Stream[openai.ChatCompletionChunk] {
		return c.Chat.Completions.NewStreaming(ctx, s.request)
	}
	return s
}
107 |
// Stream openai stream. It wraps the SSE stream plus the accumulator and
// conversation state needed to support multi-round tool calling.
type Stream struct {
	done     bool // set when the SSE stream is exhausted
	request  openai.ChatCompletionNewParams
	stream   *ssestream.Stream[openai.ChatCompletionChunk]
	factory  func() *ssestream.Stream[openai.ChatCompletionChunk] // restarts the stream
	message  openai.ChatCompletionAccumulator                     // accumulates the current round
	messages []proto.Message                                      // whole conversation so far
	toolCall func(name string, data []byte) (string, error)
}
118 |
119 | // CallTools implements stream.Stream.
120 | func (s *Stream) CallTools() []proto.ToolCallStatus {
121 | calls := s.message.Choices[0].Message.ToolCalls
122 | statuses := make([]proto.ToolCallStatus, 0, len(calls))
123 | for _, call := range calls {
124 | msg, status := stream.CallTool(
125 | call.ID,
126 | call.Function.Name,
127 | []byte(call.Function.Arguments),
128 | s.toolCall,
129 | )
130 | resp := openai.ToolMessage(
131 | msg.Content,
132 | call.ID,
133 | )
134 | s.request.Messages = append(s.request.Messages, resp)
135 | s.messages = append(s.messages, msg)
136 | statuses = append(statuses, status)
137 | }
138 | return statuses
139 | }
140 |
// Close implements stream.Stream, closing the underlying SSE stream.
func (s *Stream) Close() error { return s.stream.Close() } //nolint:wrapcheck
143 |
144 | // Current implements stream.Stream.
145 | func (s *Stream) Current() (proto.Chunk, error) {
146 | event := s.stream.Current()
147 | s.message.AddChunk(event)
148 | if len(event.Choices) > 0 {
149 | return proto.Chunk{
150 | Content: event.Choices[0].Delta.Content,
151 | }, nil
152 | }
153 | return proto.Chunk{}, stream.ErrNoContent
154 | }
155 |
// Err implements stream.Stream, delegating to the underlying SSE stream.
func (s *Stream) Err() error { return s.stream.Err() } //nolint:wrapcheck

// Messages implements stream.Stream, returning the whole conversation.
func (s *Stream) Messages() []proto.Message { return s.messages }
161 |
// Next implements stream.Stream. It advances the SSE stream; when the
// stream is exhausted it commits the accumulated message to both the
// request (for a possible tool-call round) and the conversation, and arms
// factory so a subsequent Next starts a fresh round.
func (s *Stream) Next() bool {
	if s.done {
		s.done = false
		s.stream = s.factory()
		s.message = openai.ChatCompletionAccumulator{}
	}

	if s.stream.Next() {
		return true
	}

	s.done = true
	if len(s.message.Choices) > 0 {
		msg := s.message.Choices[0].Message.ToParam()
		s.request.Messages = append(s.request.Messages, msg)
		s.messages = append(s.messages, toProtoMessage(msg))
	}

	return false
}
183 |
--------------------------------------------------------------------------------
/internal/proto/proto.go:
--------------------------------------------------------------------------------
1 | // Package proto shared protocol.
2 | package proto
3 |
4 | import (
5 | "errors"
6 | "fmt"
7 | "strings"
8 |
9 | "github.com/mark3labs/mcp-go/mcp"
10 | )
11 |
// Message roles, matching the conventional chat-completion role names.
const (
	RoleSystem    = "system"
	RoleUser      = "user"
	RoleAssistant = "assistant"
	RoleTool      = "tool"
)

// Chunk is a streaming chunk of text.
type Chunk struct {
	Content string
}
24 |
25 | // ToolCallStatus is the status of a tool call.
26 | type ToolCallStatus struct {
27 | Name string
28 | Err error
29 | }
30 |
31 | func (c ToolCallStatus) String() string {
32 | var sb strings.Builder
33 | sb.WriteString(fmt.Sprintf("\n> Ran tool: `%s`\n", c.Name))
34 | if c.Err != nil {
35 | sb.WriteString(">\n> *Failed*:\n> ```\n")
36 | for line := range strings.SplitSeq(c.Err.Error(), "\n") {
37 | sb.WriteString("> " + line)
38 | }
39 | sb.WriteString("\n> ```\n")
40 | }
41 | sb.WriteByte('\n')
42 | return sb.String()
43 | }
44 |
// Message is a message in the conversation.
type Message struct {
	Role      string // one of the Role* constants
	Content   string
	ToolCalls []ToolCall
}

// ToolCall is a tool call in a message.
type ToolCall struct {
	ID       string
	Function Function
	IsError  bool // true when executing the call returned an error
}

// Function is the function signature of a tool call.
type Function struct {
	Name      string
	Arguments []byte // raw JSON-encoded arguments
}

// Request is a chat request.
type Request struct {
	Messages       []Message
	API            string // provider identifier, e.g. "openai"
	Model          string
	User           string
	Tools          map[string][]mcp.Tool // MCP tools keyed by server name
	Temperature    *float64              // nil means provider default
	TopP           *float64              // nil means provider default
	TopK           *int64                // nil means provider default
	Stop           []string
	MaxTokens      *int64  // nil means provider default
	ResponseFormat *string // e.g. "json"; nil means default
	ToolCaller     func(name string, data []byte) (string, error)
}
80 |
// Conversation is a conversation.
type Conversation []Message

// String renders the conversation as Markdown: role-labelled paragraphs
// for system/user/assistant messages, and tool-call status blocks for
// tool messages. Messages with empty content are skipped entirely.
// The exact output format is pinned by a golden test.
func (cc Conversation) String() string {
	var sb strings.Builder
	for _, msg := range cc {
		if msg.Content == "" {
			continue
		}
		switch msg.Role {
		case RoleSystem:
			sb.WriteString("**System**: ")
		case RoleUser:
			sb.WriteString("**User**: ")
		case RoleTool:
			// Tool messages render only their call statuses; the message
			// content becomes the error text for failed calls.
			for _, tool := range msg.ToolCalls {
				s := ToolCallStatus{
					Name: tool.Function.Name,
				}
				if tool.IsError {
					s.Err = errors.New(msg.Content)
				}
				sb.WriteString(s.String())
			}
			continue
		case RoleAssistant:
			sb.WriteString("**Assistant**: ")
		}
		sb.WriteString(msg.Content)
		sb.WriteString("\n\n")
	}
	return sb.String()
}
114 |
--------------------------------------------------------------------------------
/internal/proto/proto_test.go:
--------------------------------------------------------------------------------
1 | package proto
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/charmbracelet/x/exp/golden"
7 | )
8 |
// TestStringer golden-tests Conversation.String over a conversation that
// exercises every role, including a successful tool call.
func TestStringer(t *testing.T) {
	messages := []Message{
		{
			Role:    RoleSystem,
			Content: "you are a medieval king",
		},
		{
			Role:    RoleUser,
			Content: "first 4 natural numbers",
		},
		{
			Role:    RoleAssistant,
			Content: "1, 2, 3, 4",
		},
		{
			Role:    RoleTool,
			Content: `{"the":"result"}`,
			ToolCalls: []ToolCall{
				{
					ID: "aaa",
					Function: Function{
						Name:      "myfunc",
						Arguments: []byte(`{"a":"b"}`),
					},
				},
			},
		},
		{
			Role:    RoleUser,
			Content: "as a json array",
		},
		{
			Role:    RoleAssistant,
			Content: "[ 1, 2, 3, 4 ]",
		},
		{
			Role:    RoleAssistant,
			Content: "something from an assistant",
		},
	}

	golden.RequireEqual(t, []byte(Conversation(messages).String()))
}
52 |
--------------------------------------------------------------------------------
/internal/proto/testdata/TestStringer.golden:
--------------------------------------------------------------------------------
1 | **System**: you are a medieval king
2 |
3 | **User**: first 4 natural numbers
4 |
5 | **Assistant**: 1, 2, 3, 4
6 |
7 |
8 | > Ran tool: `myfunc`
9 |
10 | **User**: as a json array
11 |
12 | **Assistant**: [ 1, 2, 3, 4 ]
13 |
14 | **Assistant**: something from an assistant
15 |
16 |
--------------------------------------------------------------------------------
/internal/stream/stream.go:
--------------------------------------------------------------------------------
1 | // Package stream provides interfaces for streaming conversations.
2 | package stream
3 |
4 | import (
5 | "context"
6 | "errors"
7 |
8 | "github.com/charmbracelet/mods/internal/proto"
9 | )
10 |
// ErrNoContent happens when the client is returning no content.
var ErrNoContent = errors.New("no content")

// Client is a streaming client.
type Client interface {
	Request(context.Context, proto.Request) Stream
}

// Stream is an ongoing stream.
type Stream interface {
	// Next reports whether more content may follow. When it returns
	// false, the caller should run [Stream.CallTools] and then check
	// Next again.
	Next() bool

	// Current returns the current chunk. Implementations accumulate
	// chunks into a message and keep their internal conversation state.
	Current() (proto.Chunk, error)

	// Close closes the underlying stream.
	Close() error

	// Err returns the streaming error, if any.
	Err() error

	// Messages returns the whole conversation.
	Messages() []proto.Message

	// CallTools handles any pending tool calls.
	CallTools() []proto.ToolCallStatus
}
42 |
43 | // CallTool calls a tool using the provided data and caller, and returns the
44 | // resulting [proto.Message] and [proto.ToolCallStatus].
45 | func CallTool(
46 | id, name string,
47 | data []byte,
48 | caller func(name string, data []byte) (string, error),
49 | ) (proto.Message, proto.ToolCallStatus) {
50 | content, err := caller(name, data)
51 | if content == "" && err != nil {
52 | content = err.Error()
53 | }
54 | return proto.Message{
55 | Role: proto.RoleTool,
56 | Content: content,
57 | ToolCalls: []proto.ToolCall{
58 | {
59 | ID: id,
60 | IsError: err != nil,
61 | Function: proto.Function{
62 | Name: name,
63 | Arguments: data,
64 | },
65 | },
66 | },
67 | },
68 | proto.ToolCallStatus{
69 | Name: name,
70 | Err: err,
71 | }
72 | }
73 |
--------------------------------------------------------------------------------
/load.go:
--------------------------------------------------------------------------------
1 | package main
2 |
import (
	"fmt"
	"io"
	"net/http"
	"os"
	"strings"
)
9 |
// loadMsg resolves a prompt argument into its textual content:
//   - http(s):// URLs are fetched and the response body returned;
//   - file:// paths are read from disk;
//   - anything else is returned verbatim.
func loadMsg(msg string) (string, error) {
	if strings.HasPrefix(msg, "https://") || strings.HasPrefix(msg, "http://") {
		resp, err := http.Get(msg) //nolint:gosec,noctx
		if err != nil {
			return "", err //nolint:wrapcheck
		}
		defer func() { _ = resp.Body.Close() }()
		// Reject non-2xx responses instead of silently using an error
		// page's body as if it were the prompt.
		if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices {
			return "", fmt.Errorf("fetching %s: %s", msg, resp.Status)
		}
		bts, err := io.ReadAll(resp.Body)
		if err != nil {
			return "", err //nolint:wrapcheck
		}
		return string(bts), nil
	}

	if strings.HasPrefix(msg, "file://") {
		bts, err := os.ReadFile(strings.TrimPrefix(msg, "file://"))
		if err != nil {
			return "", err //nolint:wrapcheck
		}
		return string(bts), nil
	}

	return msg, nil
}
34 |
--------------------------------------------------------------------------------
/load_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "os"
5 | "path/filepath"
6 | "testing"
7 |
8 | "github.com/stretchr/testify/require"
9 | )
10 |
// TestLoad exercises loadMsg for all three input forms: plain text,
// file:// paths, and http(s) URLs.
// NOTE(review): the URL subtests hit the live GitHub raw endpoint and
// will fail without network access.
func TestLoad(t *testing.T) {
	const content = "just text"
	t.Run("normal msg", func(t *testing.T) {
		msg, err := loadMsg(content)
		require.NoError(t, err)
		require.Equal(t, content, msg)
	})

	t.Run("file", func(t *testing.T) {
		path := filepath.Join(t.TempDir(), "foo.txt")
		require.NoError(t, os.WriteFile(path, []byte(content), 0o644))

		msg, err := loadMsg("file://" + path)
		require.NoError(t, err)
		require.Equal(t, content, msg)
	})

	t.Run("http url", func(t *testing.T) {
		msg, err := loadMsg("http://raw.githubusercontent.com/charmbracelet/mods/main/LICENSE")
		require.NoError(t, err)
		require.Contains(t, msg, "MIT License")
	})

	t.Run("https url", func(t *testing.T) {
		msg, err := loadMsg("https://raw.githubusercontent.com/charmbracelet/mods/main/LICENSE")
		require.NoError(t, err)
		require.Contains(t, msg, "MIT License")
	})
}
40 |
--------------------------------------------------------------------------------
/main_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "strings"
5 | "testing"
6 | )
7 |
// TestIsCompletionCmd table-tests isCompletionCmd: each key is the argv
// tail (the leading "mods" is prepended before the check) and the value is
// the expected result.
func TestIsCompletionCmd(t *testing.T) {
	for args, is := range map[string]bool{
		"":                                     false,
		"something":                            false,
		"something something":                  false,
		"completion for my bash script how to": false,
		"completion bash how to":               false,
		"completion":                           false,
		"completion -h":                        true,
		"completion --help":                    true,
		"completion help":                      true,
		"completion bash":                      true,
		"completion fish":                      true,
		"completion zsh":                       true,
		"completion powershell":                true,
		"completion bash -h":                   true,
		"completion fish -h":                   true,
		"completion zsh -h":                    true,
		"completion powershell -h":             true,
		"completion bash --help":               true,
		"completion fish --help":               true,
		"completion zsh --help":                true,
		"completion powershell --help":         true,
		"__complete":                           true,
		"__complete blah blah blah":            true,
	} {
		t.Run(args, func(t *testing.T) {
			vargs := append([]string{"mods"}, strings.Fields(args)...)
			if b := isCompletionCmd(vargs); b != is {
				t.Errorf("%v: expected %v, got %v", vargs, is, b)
			}
		})
	}
}
42 |
// TestIsManCmd table-tests isManCmd: each key is the argv tail (the
// leading "mods" is prepended before the check) and the value is the
// expected result.
func TestIsManCmd(t *testing.T) {
	for args, is := range map[string]bool{
		"":                    false,
		"something":           false,
		"something something": false,
		"man is no more":      false,
		"mans":                false,
		"man foo":             false,
		"man":                 true,
		"man -h":              true,
		"man --help":          true,
	} {
		t.Run(args, func(t *testing.T) {
			vargs := append([]string{"mods"}, strings.Fields(args)...)
			if b := isManCmd(vargs); b != is {
				t.Errorf("%v: expected %v, got %v", vargs, is, b)
			}
		})
	}
}
63 |
--------------------------------------------------------------------------------
/mcp.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "errors"
7 | "fmt"
8 | "os"
9 | "slices"
10 | "strings"
11 |
12 | "github.com/mark3labs/mcp-go/client"
13 | "github.com/mark3labs/mcp-go/mcp"
14 | )
15 |
16 | func enabledMCPs() map[string]MCPServerConfig {
17 | result := map[string]MCPServerConfig{}
18 | for k, v := range config.MCPServers {
19 | if !isMCPEnabled(k) {
20 | continue
21 | }
22 | result[k] = v
23 | }
24 | return result
25 | }
26 |
27 | func isMCPEnabled(name string) bool {
28 | return !slices.Contains(config.MCPDisable, "*") &&
29 | !slices.Contains(config.MCPDisable, name)
30 | }
31 |
32 | func mcpList() {
33 | for name := range config.MCPServers {
34 | s := name
35 | if isMCPEnabled(name) {
36 | s += stdoutStyles().Timeago.Render(" (enabled)")
37 | }
38 | fmt.Println(s)
39 | }
40 | }
41 |
42 | func mcpListTools(ctx context.Context) error {
43 | for sname, server := range enabledMCPs() {
44 | tools, err := mcpToolsFor(ctx, sname, server)
45 | if err != nil {
46 | return err
47 | }
48 | for _, tool := range tools {
49 | fmt.Print(stdoutStyles().Timeago.Render(sname + " > "))
50 | fmt.Println(tool.Name)
51 | }
52 | }
53 | return nil
54 | }
55 |
56 | func mcpTools(ctx context.Context) (map[string][]mcp.Tool, error) {
57 | result := map[string][]mcp.Tool{}
58 | for sname, server := range enabledMCPs() {
59 | serverTools, err := mcpToolsFor(ctx, sname, server)
60 | if err != nil {
61 | return nil, err
62 | }
63 | result[sname] = append(result[sname], serverTools...)
64 | }
65 | return result, nil
66 | }
67 |
68 | func mcpToolsFor(ctx context.Context, name string, server MCPServerConfig) ([]mcp.Tool, error) {
69 | cli, err := client.NewStdioMCPClient(
70 | server.Command,
71 | append(os.Environ(), server.Env...),
72 | server.Args...,
73 | )
74 | if err != nil {
75 | return nil, fmt.Errorf("could not setup %s: %w", name, err)
76 | }
77 | defer cli.Close() //nolint:errcheck
78 | if _, err := cli.Initialize(ctx, mcp.InitializeRequest{}); err != nil {
79 | return nil, fmt.Errorf("could not setup %s: %w", name, err)
80 | }
81 | tools, err := cli.ListTools(ctx, mcp.ListToolsRequest{})
82 | if err != nil {
83 | return nil, fmt.Errorf("could not setup %s: %w", name, err)
84 | }
85 | return tools.Tools, nil
86 | }
87 |
88 | func toolCall(ctx context.Context, name string, data []byte) (string, error) {
89 | sname, tool, ok := strings.Cut(name, "_")
90 | if !ok {
91 | return "", fmt.Errorf("mcp: invalid tool name: %q", name)
92 | }
93 | server, ok := enabledMCPs()[sname]
94 | if !ok {
95 | return "", fmt.Errorf("mcp: invalid server name: %q", sname)
96 | }
97 | client, err := client.NewStdioMCPClient(
98 | server.Command,
99 | append(os.Environ(), server.Env...),
100 | server.Args...,
101 | )
102 | if err != nil {
103 | return "", fmt.Errorf("mcp: %w", err)
104 | }
105 | defer client.Close() //nolint:errcheck
106 |
107 | // Initialize the client
108 | if _, err = client.Initialize(ctx, mcp.InitializeRequest{}); err != nil {
109 | return "", fmt.Errorf("mcp: %w", err)
110 | }
111 |
112 | var args map[string]any
113 | if len(data) > 0 {
114 | if err := json.Unmarshal(data, &args); err != nil {
115 | return "", fmt.Errorf("mcp: %w: %s", err, string(data))
116 | }
117 | }
118 |
119 | request := mcp.CallToolRequest{}
120 | request.Params.Name = tool
121 | request.Params.Arguments = args
122 | result, err := client.CallTool(context.Background(), request)
123 | if err != nil {
124 | return "", fmt.Errorf("mcp: %w", err)
125 | }
126 |
127 | var sb strings.Builder
128 | for _, content := range result.Content {
129 | switch content := content.(type) {
130 | case mcp.TextContent:
131 | sb.WriteString(content.Text)
132 | default:
133 | sb.WriteString("[Non-text content]")
134 | }
135 | }
136 |
137 | if result.IsError {
138 | return "", errors.New(sb.String())
139 | }
140 | return sb.String(), nil
141 | }
142 |
--------------------------------------------------------------------------------
/messages.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "strings"
5 |
6 | "github.com/charmbracelet/mods/internal/proto"
7 | )
8 |
9 | func lastPrompt(messages []proto.Message) string {
10 | var result string
11 | for _, msg := range messages {
12 | if msg.Role != proto.RoleUser {
13 | continue
14 | }
15 | if msg.Content == "" {
16 | continue
17 | }
18 | result = msg.Content
19 | }
20 | return result
21 | }
22 |
// firstLine returns s up to (but excluding) the first newline; if there is
// no newline, s is returned unchanged.
func firstLine(s string) string {
	if i := strings.Index(s, "\n"); i >= 0 {
		return s[:i]
	}
	return s
}
27 |
--------------------------------------------------------------------------------
/messages_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/charmbracelet/mods/internal/proto"
7 | "github.com/stretchr/testify/require"
8 | )
9 |
// TestLastPrompt verifies lastPrompt returns the most recent user
// message's content, skipping assistant messages, and "" for nil input.
func TestLastPrompt(t *testing.T) {
	t.Run("no prompt", func(t *testing.T) {
		require.Equal(t, "", lastPrompt(nil))
	})

	t.Run("single prompt", func(t *testing.T) {
		require.Equal(t, "single", lastPrompt([]proto.Message{
			{
				Role:    proto.RoleUser,
				Content: "single",
			},
		}))
	})

	t.Run("multiple prompts", func(t *testing.T) {
		require.Equal(t, "last", lastPrompt([]proto.Message{
			{
				Role:    proto.RoleUser,
				Content: "first",
			},
			{
				Role:    proto.RoleAssistant,
				Content: "hallo",
			},
			{
				Role:    proto.RoleUser,
				Content: "middle 1",
			},
			{
				Role:    proto.RoleUser,
				Content: "middle 2",
			},
			{
				Role:    proto.RoleUser,
				Content: "last",
			},
		}))
	})
}
49 |
// TestFirstLine verifies firstLine across no-newline, trailing-newline, and
// multi-line inputs.
func TestFirstLine(t *testing.T) {
	t.Run("single line", func(t *testing.T) {
		require.Equal(t, "line", firstLine("line"))
	})
	t.Run("single line ending with \n", func(t *testing.T) {
		require.Equal(t, "line", firstLine("line\n"))
	})
	t.Run("multiple lines", func(t *testing.T) {
		require.Equal(t, "line", firstLine("line\nsomething else\nline3\nfoo\nends with a double \n\n"))
	})
}
61 |
--------------------------------------------------------------------------------
/mods.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "bufio"
5 | "context"
6 | "errors"
7 | "fmt"
8 | "io"
9 | "maps"
10 | "math"
11 | "net/http"
12 | "net/url"
13 | "os"
14 | "os/exec"
15 | "regexp"
16 | "slices"
17 | "strconv"
18 | "strings"
19 | "sync"
20 | "time"
21 | "unicode"
22 |
23 | "github.com/caarlos0/go-shellwords"
24 | "github.com/charmbracelet/bubbles/viewport"
25 | tea "github.com/charmbracelet/bubbletea"
26 | "github.com/charmbracelet/glamour"
27 | "github.com/charmbracelet/lipgloss"
28 | "github.com/charmbracelet/mods/internal/anthropic"
29 | "github.com/charmbracelet/mods/internal/cache"
30 | "github.com/charmbracelet/mods/internal/cohere"
31 | "github.com/charmbracelet/mods/internal/copilot"
32 | "github.com/charmbracelet/mods/internal/google"
33 | "github.com/charmbracelet/mods/internal/ollama"
34 | "github.com/charmbracelet/mods/internal/openai"
35 | "github.com/charmbracelet/mods/internal/proto"
36 | "github.com/charmbracelet/mods/internal/stream"
37 | "github.com/charmbracelet/x/exp/ordered"
38 | )
39 |
// state enumerates the phases of the Mods request lifecycle.
type state int

const (
	startState        state = iota // before cache/config details are resolved
	configLoadedState              // cache details known; reading stdin
	requestState                   // request in flight, awaiting first chunk
	responseState                  // streaming the response
	doneState                      // finished successfully
	errorState                     // finished with an error
)
50 |
// Mods is the Bubble Tea model that manages reading stdin and querying the
// OpenAI API.
type Mods struct {
	Output        string // accumulated raw response text
	Input         string // the prompt read from stdin
	Styles        styles
	Error         *modsError
	state         state
	retries       int // completion attempts made so far (see retry)
	renderer      *lipgloss.Renderer
	glam          *glamour.TermRenderer
	glamViewport  viewport.Model // scrolls output taller than the window
	glamOutput    string         // markdown-rendered version of Output
	glamHeight    int            // height of glamOutput in terminal lines
	messages      []proto.Message // conversation history sent with requests
	cancelRequest context.CancelFunc
	anim          tea.Model // loading animation, only when not Quiet
	width         int
	height        int

	db    *convoDB             // conversation index (IDs, titles)
	cache *cache.Conversations // conversation message storage
	Config *Config

	content      []string    // chunks pending raw printing in View
	contentMutex *sync.Mutex // guards content
}
78 |
// newMods builds the initial Mods model: a markdown renderer honoring the
// configured word wrap, an empty viewport, and the start state. The glamour
// renderer error is deliberately ignored; a nil renderer would only affect
// pretty output.
func newMods(r *lipgloss.Renderer, cfg *Config, db *convoDB, cache *cache.Conversations) *Mods {
	gr, _ := glamour.NewTermRenderer(
		glamour.WithEnvironmentConfig(),
		glamour.WithWordWrap(cfg.WordWrap),
	)
	vp := viewport.New(0, 0)
	vp.GotoBottom()
	return &Mods{
		Styles:       makeStyles(r),
		glam:         gr,
		state:        startState,
		renderer:     r,
		glamViewport: vp,
		contentMutex: &sync.Mutex{},
		db:           db,
		cache:        cache,
		Config:       cfg,
	}
}
98 |
// completionInput is a tea.Msg that wraps the content read from stdin.
type completionInput struct {
	content string
}

// completionOutput is a tea.Msg that wraps a chunk of content returned from
// the API. A nil stream signals the end of the response (see Update).
type completionOutput struct {
	content string              // the chunk of response text
	stream  stream.Stream       // live response stream; nil when done
	errh    func(error) tea.Msg // converts stream errors into tea messages
}
110 |
// Init implements tea.Model. The first step of the program is resolving
// which conversation to read from and which to write to.
func (m *Mods) Init() tea.Cmd {
	return m.findCacheOpsDetails()
}
115 |
// Update implements tea.Model. It advances the state machine:
// cacheDetailsMsg -> read stdin -> completionInput -> start request ->
// completionOutput chunks until the stream ends (nil stream) or errors.
func (m *Mods) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	var cmds []tea.Cmd
	switch msg := msg.(type) {
	case cacheDetailsMsg:
		// Resolved conversation cache details: copy them into the config.
		m.Config.cacheWriteToID = msg.WriteID
		m.Config.cacheWriteToTitle = msg.Title
		m.Config.cacheReadFromID = msg.ReadID
		m.Config.API = msg.API
		m.Config.Model = msg.Model

		if !m.Config.Quiet {
			// Show a loading animation while waiting for the response.
			m.anim = newAnim(m.Config.Fanciness, m.Config.StatusText, m.renderer, m.Styles)
			cmds = append(cmds, m.anim.Init())
		}
		m.state = configLoadedState
		cmds = append(cmds, m.readStdinCmd)

	case completionInput:
		if msg.content != "" {
			m.Input = removeWhitespace(msg.content)
		}
		// Nothing to ask and nothing to show: exit.
		if m.Input == "" && m.Config.Prefix == "" && m.Config.Show == "" && !m.Config.ShowLast {
			return m, m.quit
		}
		// Management operations don't need a completion request.
		if m.Config.Dirs ||
			len(m.Config.Delete) > 0 ||
			m.Config.DeleteOlderThan != 0 ||
			m.Config.ShowHelp ||
			m.Config.List ||
			m.Config.ListRoles ||
			m.Config.Settings ||
			m.Config.ResetSettings {
			return m, m.quit
		}

		if m.Config.IncludePromptArgs {
			m.appendToOutput(m.Config.Prefix + "\n\n")
		}

		if m.Config.IncludePrompt > 0 {
			// Echo the first IncludePrompt lines of the prompt into the output.
			parts := strings.Split(m.Input, "\n")
			if len(parts) > m.Config.IncludePrompt {
				parts = parts[0:m.Config.IncludePrompt]
			}
			m.appendToOutput(strings.Join(parts, "\n") + "\n")
		}
		m.state = requestState
		cmds = append(cmds, m.startCompletionCmd(msg.content))
	case completionOutput:
		if msg.stream == nil {
			// A nil stream marks the end of the response.
			m.state = doneState
			return m, m.quit
		}
		if msg.content != "" {
			m.appendToOutput(msg.content)
			m.state = responseState
		}
		// Keep pulling chunks from the stream.
		cmds = append(cmds, m.receiveCompletionStreamCmd(completionOutput{
			stream: msg.stream,
			errh:   msg.errh,
		}))
	case modsError:
		m.Error = &msg
		m.state = errorState
		return m, m.quit
	case tea.WindowSizeMsg:
		m.width, m.height = msg.Width, msg.Height
		m.glamViewport.Width = m.width
		m.glamViewport.Height = m.height
		return m, nil
	case tea.KeyMsg:
		switch msg.String() {
		case "q", "ctrl+c":
			m.state = doneState
			return m, m.quit
		}
	}
	// Keep the animation ticking while loading.
	if !m.Config.Quiet && (m.state == configLoadedState || m.state == requestState) {
		var cmd tea.Cmd
		m.anim, cmd = m.anim.Update(msg)
		cmds = append(cmds, cmd)
	}
	if m.viewportNeeded() {
		// Only respond to keypresses when the viewport (i.e. the content) is
		// taller than the window.
		var cmd tea.Cmd
		m.glamViewport, cmd = m.glamViewport.Update(msg)
		cmds = append(cmds, cmd)
	}
	return m, tea.Batch(cmds...)
}
208 |
// viewportNeeded reports whether the rendered output is taller than the
// window, in which case a scrollable viewport is used to display it.
func (m Mods) viewportNeeded() bool {
	return m.glamHeight > m.height
}
212 |
213 | // View implements tea.Model.
214 | func (m *Mods) View() string {
215 | //nolint:exhaustive
216 | switch m.state {
217 | case errorState:
218 | return ""
219 | case requestState:
220 | if !m.Config.Quiet {
221 | return m.anim.View()
222 | }
223 | case responseState:
224 | if !m.Config.Raw && isOutputTTY() {
225 | if m.viewportNeeded() {
226 | return m.glamViewport.View()
227 | }
228 | // We don't need the viewport yet.
229 | return m.glamOutput
230 | }
231 |
232 | if isOutputTTY() && !m.Config.Raw {
233 | return m.Output
234 | }
235 |
236 | m.contentMutex.Lock()
237 | for _, c := range m.content {
238 | fmt.Print(c)
239 | }
240 | m.content = []string{}
241 | m.contentMutex.Unlock()
242 | case doneState:
243 | if !isOutputTTY() {
244 | fmt.Printf("\n")
245 | }
246 | return ""
247 | }
248 | return ""
249 | }
250 |
// quit cancels any in-flight request and tells Bubble Tea to exit.
func (m *Mods) quit() tea.Msg {
	if m.cancelRequest != nil {
		m.cancelRequest()
	}
	return tea.Quit()
}
257 |
258 | func (m *Mods) retry(content string, err modsError) tea.Msg {
259 | m.retries++
260 | if m.retries >= m.Config.MaxRetries {
261 | return err
262 | }
263 | wait := time.Millisecond * 100 * time.Duration(math.Pow(2, float64(m.retries))) //nolint:mnd
264 | time.Sleep(wait)
265 | return completionInput{content}
266 | }
267 |
268 | func (m *Mods) startCompletionCmd(content string) tea.Cmd {
269 | if m.Config.Show != "" || m.Config.ShowLast {
270 | return m.readFromCache()
271 | }
272 |
273 | return func() tea.Msg {
274 | var mod Model
275 | var api API
276 | var ccfg openai.Config
277 | var accfg anthropic.Config
278 | var cccfg cohere.Config
279 | var occfg ollama.Config
280 | var gccfg google.Config
281 |
282 | cfg := m.Config
283 | api, mod, err := m.resolveModel(cfg)
284 | cfg.API = mod.API
285 | if err != nil {
286 | return err
287 | }
288 | if api.Name == "" {
289 | eps := make([]string, 0)
290 | for _, a := range cfg.APIs {
291 | eps = append(eps, m.Styles.InlineCode.Render(a.Name))
292 | }
293 | return modsError{
294 | err: newUserErrorf(
295 | "Your configured API endpoints are: %s",
296 | eps,
297 | ),
298 | reason: fmt.Sprintf(
299 | "The API endpoint %s is not configured.",
300 | m.Styles.InlineCode.Render(cfg.API),
301 | ),
302 | }
303 | }
304 |
305 | switch mod.API {
306 | case "ollama":
307 | occfg = ollama.DefaultConfig()
308 | if api.BaseURL != "" {
309 | occfg.BaseURL = api.BaseURL
310 | }
311 | case "anthropic":
312 | key, err := m.ensureKey(api, "ANTHROPIC_API_KEY", "https://console.anthropic.com/settings/keys")
313 | if err != nil {
314 | return modsError{err, "Anthropic authentication failed"}
315 | }
316 | accfg = anthropic.DefaultConfig(key)
317 | if api.BaseURL != "" {
318 | accfg.BaseURL = api.BaseURL
319 | }
320 | case "google":
321 | key, err := m.ensureKey(api, "GOOGLE_API_KEY", "https://aistudio.google.com/app/apikey")
322 | if err != nil {
323 | return modsError{err, "Google authentication failed"}
324 | }
325 | gccfg = google.DefaultConfig(mod.Name, key)
326 | case "cohere":
327 | key, err := m.ensureKey(api, "COHERE_API_KEY", "https://dashboard.cohere.com/api-keys")
328 | if err != nil {
329 | return modsError{err, "Cohere authentication failed"}
330 | }
331 | cccfg = cohere.DefaultConfig(key)
332 | if api.BaseURL != "" {
333 | ccfg.BaseURL = api.BaseURL
334 | }
335 | case "azure", "azure-ad": //nolint:goconst
336 | key, err := m.ensureKey(api, "AZURE_OPENAI_KEY", "https://aka.ms/oai/access")
337 | if err != nil {
338 | return modsError{err, "Azure authentication failed"}
339 | }
340 | ccfg = openai.Config{
341 | AuthToken: key,
342 | BaseURL: api.BaseURL,
343 | }
344 | if mod.API == "azure-ad" {
345 | ccfg.APIType = "azure-ad"
346 | }
347 | if api.User != "" {
348 | cfg.User = api.User
349 | }
350 | case "copilot":
351 | cli := copilot.New(config.CachePath)
352 | token, err := cli.Auth()
353 | if err != nil {
354 | return modsError{err, "Copilot authentication failed"}
355 | }
356 |
357 | ccfg = openai.Config{
358 | AuthToken: token.Token,
359 | BaseURL: api.BaseURL,
360 | }
361 | ccfg.HTTPClient = cli
362 | ccfg.BaseURL = ordered.First(api.BaseURL, token.Endpoints.API)
363 |
364 | default:
365 | key, err := m.ensureKey(api, "OPENAI_API_KEY", "https://platform.openai.com/account/api-keys")
366 | if err != nil {
367 | return modsError{err, "OpenAI authentication failed"}
368 | }
369 | ccfg = openai.Config{
370 | AuthToken: key,
371 | BaseURL: api.BaseURL,
372 | }
373 | }
374 |
375 | if cfg.HTTPProxy != "" {
376 | proxyURL, err := url.Parse(cfg.HTTPProxy)
377 | if err != nil {
378 | return modsError{err, "There was an error parsing your proxy URL."}
379 | }
380 | httpClient := &http.Client{Transport: &http.Transport{Proxy: http.ProxyURL(proxyURL)}}
381 | ccfg.HTTPClient = httpClient
382 | accfg.HTTPClient = httpClient
383 | cccfg.HTTPClient = httpClient
384 | occfg.HTTPClient = httpClient
385 | }
386 |
387 | if mod.MaxChars == 0 {
388 | mod.MaxChars = cfg.MaxInputChars
389 | }
390 |
391 | // Check if the model is an o1 model and unset the max_tokens parameter
392 | // accordingly, as it's unsupported by o1.
393 | // We do set max_completion_tokens instead, which is supported.
394 | // Release won't have a prefix with a dash, so just putting o1 for match.
395 | if strings.HasPrefix(mod.Name, "o1") {
396 | cfg.MaxTokens = 0
397 | }
398 |
399 | // 1min should be enough - user might not have mcp downloaded yet...
400 | ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
401 | m.cancelRequest = cancel
402 |
403 | tools, err := mcpTools(ctx)
404 | if err != nil {
405 | return modsError{err, "Could not setup MCP"}
406 | }
407 |
408 | if err := m.setupStreamContext(content, mod); err != nil {
409 | return err
410 | }
411 |
412 | request := proto.Request{
413 | Messages: m.messages,
414 | API: mod.API,
415 | Model: mod.Name,
416 | User: cfg.User,
417 | Temperature: &cfg.Temperature,
418 | TopP: &cfg.TopP,
419 | TopK: &cfg.TopK,
420 | Stop: cfg.Stop,
421 | Tools: tools,
422 | ToolCaller: func(name string, data []byte) (string, error) {
423 | ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
424 | m.cancelRequest = cancel
425 | return toolCall(ctx, name, data)
426 | },
427 | }
428 | if cfg.MaxTokens > 0 {
429 | request.MaxTokens = &cfg.MaxTokens
430 | }
431 |
432 | var client stream.Client
433 | switch mod.API {
434 | case "anthropic":
435 | client = anthropic.New(accfg)
436 | case "google":
437 | client = google.New(gccfg)
438 | case "cohere":
439 | client = cohere.New(cccfg)
440 | case "ollama":
441 | client, err = ollama.New(occfg)
442 | default:
443 | client = openai.New(ccfg)
444 | if cfg.Format && config.FormatAs == "json" {
445 | request.ResponseFormat = &config.FormatAs
446 | }
447 | }
448 | if err != nil {
449 | return modsError{err, "Could not setup client"}
450 | }
451 |
452 | stream := client.Request(ctx, request)
453 | return m.receiveCompletionStreamCmd(completionOutput{
454 | stream: stream,
455 | errh: func(err error) tea.Msg {
456 | return m.handleRequestError(err, mod, m.Input)
457 | },
458 | })()
459 | }
460 | }
461 |
462 | func (m Mods) ensureKey(api API, defaultEnv, docsURL string) (string, error) {
463 | key := api.APIKey
464 | if key == "" && api.APIKeyEnv != "" && api.APIKeyCmd == "" {
465 | key = os.Getenv(api.APIKeyEnv)
466 | }
467 | if key == "" && api.APIKeyCmd != "" {
468 | args, err := shellwords.Parse(api.APIKeyCmd)
469 | if err != nil {
470 | return "", modsError{err, "Failed to parse api-key-cmd"}
471 | }
472 | out, err := exec.Command(args[0], args[1:]...).CombinedOutput() //nolint:gosec
473 | if err != nil {
474 | return "", modsError{err, "Cannot exec api-key-cmd"}
475 | }
476 | key = strings.TrimSpace(string(out))
477 | }
478 | if key == "" {
479 | key = os.Getenv(defaultEnv)
480 | }
481 | if key != "" {
482 | return key, nil
483 | }
484 | return "", modsError{
485 | reason: fmt.Sprintf(
486 | "%[1]s required; set the environment variable %[1]s or update %[2]s through %[3]s.",
487 | m.Styles.InlineCode.Render(defaultEnv),
488 | m.Styles.InlineCode.Render("mods.yaml"),
489 | m.Styles.InlineCode.Render("mods --settings"),
490 | ),
491 | err: newUserErrorf(
492 | "You can grab one at %s",
493 | m.Styles.Link.Render(docsURL),
494 | ),
495 | }
496 | }
497 |
// receiveCompletionStreamCmd pulls the next chunk from the response stream.
// Each chunk is returned as a completionOutput, which causes Update to
// schedule this command again, until the stream is exhausted. At the end it
// either runs the model's tool calls (feeding the results back as content)
// or, when there are none, records the final messages and returns a
// stream-less completionOutput to signal completion.
func (m *Mods) receiveCompletionStreamCmd(msg completionOutput) tea.Cmd {
	return func() tea.Msg {
		if msg.stream.Next() {
			chunk, err := msg.stream.Current()
			// ErrNoContent chunks are tolerated; anything else aborts.
			if err != nil && !errors.Is(err, stream.ErrNoContent) {
				_ = msg.stream.Close()
				return msg.errh(err)
			}
			return completionOutput{
				content: chunk.Content,
				stream:  msg.stream,
				errh:    msg.errh,
			}
		}

		// stream is done, check for errors
		if err := msg.stream.Err(); err != nil {
			return msg.errh(err)
		}

		results := msg.stream.CallTools()
		toolMsg := completionOutput{
			stream: msg.stream,
			errh:   msg.errh,
		}
		for _, call := range results {
			toolMsg.content += call.String()
		}
		if len(results) == 0 {
			// No tool calls: the conversation is complete. The nil stream in
			// the returned message tells Update to finish up.
			m.messages = msg.stream.Messages()
			return completionOutput{
				errh: msg.errh,
			}
		}
		return toolMsg
	}
}
535 |
// cacheDetailsMsg carries the resolved conversation cache details: which
// conversation to write to (and under what title), which to read from, and
// the API/model to use for the request.
type cacheDetailsMsg struct {
	WriteID, Title, ReadID, API, Model string
}
539 |
// findCacheOpsDetails resolves which conversation (if any) to read from and
// which to write to, expanding partial IDs and titles against the database,
// and reports the result as a cacheDetailsMsg.
func (m *Mods) findCacheOpsDetails() tea.Cmd {
	return func() tea.Msg {
		// --continue without an explicit --title means we keep writing to
		// the conversation being continued.
		continueLast := m.Config.ContinueLast || (m.Config.Continue != "" && m.Config.Title == "")
		readID := ordered.First(m.Config.Continue, m.Config.Show)
		writeID := ordered.First(m.Config.Title, m.Config.Continue)
		title := writeID
		model := config.Model
		api := config.API

		if readID != "" || continueLast || m.Config.ShowLast {
			found, err := m.findReadID(readID)
			if err != nil {
				return modsError{
					err:    err,
					reason: "Could not find the conversation.",
				}
			}
			if found != nil {
				readID = found.ID
				// Reuse the API and model the conversation was started with.
				if found.Model != nil && found.API != nil {
					model = *found.Model
					api = *found.API
				}
			}
		}

		// if we are continuing last, update the existing conversation
		if continueLast {
			writeID = readID
		}

		if writeID == "" {
			writeID = newConversationID()
		}

		// writeID may be a human title rather than a SHA-1 ID: resolve it,
		// or mint a new ID for a brand-new titled conversation.
		if !sha1reg.MatchString(writeID) {
			convo, err := m.db.Find(writeID)
			if err != nil {
				// its a new conversation with a title
				writeID = newConversationID()
			} else {
				writeID = convo.ID
			}
		}

		return cacheDetailsMsg{
			WriteID: writeID,
			Title:   title,
			ReadID:  readID,
			API:     api,
			Model:   model,
		}
	}
}
594 |
595 | func (m *Mods) findReadID(in string) (*Conversation, error) {
596 | convo, err := m.db.Find(in)
597 | if err == nil {
598 | return convo, nil
599 | }
600 | if errors.Is(err, errNoMatches) && m.Config.Show == "" {
601 | convo, err := m.db.FindHEAD()
602 | if err != nil {
603 | return nil, err
604 | }
605 | return convo, nil
606 | }
607 | return nil, err
608 | }
609 |
610 | func (m *Mods) readStdinCmd() tea.Msg {
611 | if !isInputTTY() {
612 | reader := bufio.NewReader(os.Stdin)
613 | stdinBytes, err := io.ReadAll(reader)
614 | if err != nil {
615 | return modsError{err, "Unable to read stdin."}
616 | }
617 |
618 | return completionInput{increaseIndent(string(stdinBytes))}
619 | }
620 | return completionInput{""}
621 | }
622 |
// readFromCache loads the conversation selected for display from the cache,
// renders it into the output, and finishes with a stream-less
// completionOutput (which tells Update the program is done).
func (m *Mods) readFromCache() tea.Cmd {
	return func() tea.Msg {
		var messages []proto.Message
		if err := m.cache.Read(m.Config.cacheReadFromID, &messages); err != nil {
			return modsError{err, "There was an error loading the conversation."}
		}

		m.appendToOutput(proto.Conversation(messages).String())
		return completionOutput{
			errh: func(err error) tea.Msg {
				return modsError{err: err}
			},
		}
	}
}
638 |
// tabWidth is the number of spaces each tab is expanded to in rendered output.
const tabWidth = 4

// appendToOutput accumulates s into the final output. For raw or non-TTY
// output the chunk is buffered for View to print verbatim; otherwise the
// entire accumulated output is re-rendered as markdown and pushed into the
// viewport, auto-scrolling when the user was already at the bottom.
func (m *Mods) appendToOutput(s string) {
	m.Output += s
	if !isOutputTTY() || m.Config.Raw {
		m.contentMutex.Lock()
		m.content = append(m.content, s)
		m.contentMutex.Unlock()
		return
	}

	wasAtBottom := m.glamViewport.ScrollPercent() == 1.0
	oldHeight := m.glamHeight
	m.glamOutput, _ = m.glam.Render(m.Output)
	m.glamOutput = strings.TrimRightFunc(m.glamOutput, unicode.IsSpace)
	// Expand tabs to spaces; presumably so width/height math stays
	// consistent across terminals — TODO confirm.
	m.glamOutput = strings.ReplaceAll(m.glamOutput, "\t", strings.Repeat(" ", tabWidth))
	m.glamHeight = lipgloss.Height(m.glamOutput)
	m.glamOutput += "\n"
	truncatedGlamOutput := m.renderer.NewStyle().
		MaxWidth(m.width).
		Render(m.glamOutput)
	m.glamViewport.SetContent(truncatedGlamOutput)
	if oldHeight < m.glamHeight && wasAtBottom {
		// If the viewport's at the bottom and we've received a new
		// line of content, follow the output by auto scrolling to
		// the bottom.
		m.glamViewport.GotoBottom()
	}
}
668 |
// removeWhitespace returns s unchanged unless it consists solely of
// whitespace, in which case it returns the empty string.
func removeWhitespace(s string) string {
	if len(strings.TrimSpace(s)) > 0 {
		return s
	}
	return ""
}
676 |
// tokenErrRe matches the OpenAI context-length error message, capturing the
// model's maximum context length and the token count of the request.
var tokenErrRe = regexp.MustCompile(`This model's maximum context length is (\d+) tokens. However, your messages resulted in (\d+) tokens`)

// cutPrompt shortens prompt when msg is a context-length error, trimming
// roughly enough characters to fit the model's context window; for any other
// message (or when trimming isn't possible) the prompt is returned as-is.
func cutPrompt(msg, prompt string) string {
	match := tokenErrRe.FindStringSubmatch(msg)
	if len(match) != 3 { //nolint:mnd
		return prompt
	}

	limit, _ := strconv.Atoi(match[1])
	used, _ := strconv.Atoi(match[2])

	if limit > used {
		return prompt
	}

	// 1 token =~ 4 chars; trim 10 extra chars as a safety margin.
	trim := 10 + (used-limit)*4 //nolint:mnd
	if len(prompt) <= trim {
		return prompt
	}
	return prompt[:len(prompt)-trim]
}
701 |
// increaseIndent prefixes every line of s (including empty ones) with a tab.
func increaseIndent(s string) string {
	lines := strings.Split(s, "\n")
	indented := make([]string, len(lines))
	for i, line := range lines {
		indented[i] = "\t" + line
	}
	return strings.Join(indented, "\n")
}
709 |
// resolveModel finds the API endpoint and model matching cfg.API/cfg.Model,
// resolving model aliases to their canonical name (and updating cfg.Model
// accordingly). It returns a user-facing error when the requested endpoint
// doesn't have the model, or when no endpoint has it at all.
func (m *Mods) resolveModel(cfg *Config) (API, Model, error) {
	for _, api := range cfg.APIs {
		// When an API was explicitly requested, skip all others.
		if api.Name != cfg.API && cfg.API != "" {
			continue
		}
		// Canonicalize: cfg.Model may be an alias of a configured model.
		for name, mod := range api.Models {
			if name == cfg.Model || slices.Contains(mod.Aliases, cfg.Model) {
				cfg.Model = name
				break
			}
		}
		mod, ok := api.Models[cfg.Model]
		if ok {
			mod.Name = cfg.Model
			mod.API = api.Name
			return api, mod, nil
		}
		if cfg.API != "" {
			// The explicitly requested API doesn't know this model.
			return API{}, Model{}, modsError{
				err: newUserErrorf(
					"Available models are: %s",
					strings.Join(slices.Collect(maps.Keys(api.Models)), ", "),
				),
				reason: fmt.Sprintf(
					"The API endpoint %s does not contain the model %s",
					m.Styles.InlineCode.Render(cfg.API),
					m.Styles.InlineCode.Render(cfg.Model),
				),
			}
		}
	}

	return API{}, Model{}, modsError{
		reason: fmt.Sprintf(
			"Model %s is not in the settings file.",
			m.Styles.InlineCode.Render(cfg.Model),
		),
		err: newUserErrorf(
			"Please specify an API endpoint with %s or configure the model in the settings: %s",
			m.Styles.InlineCode.Render("--api"),
			m.Styles.InlineCode.Render("mods --settings"),
		),
	}
}
754 |
--------------------------------------------------------------------------------
/mods_errors.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "net/http"
7 |
8 | tea "github.com/charmbracelet/bubbletea"
9 | "github.com/openai/openai-go"
10 | )
11 |
// handleRequestError converts a request error into a tea.Msg: OpenAI API
// errors get status-specific handling (possibly with a retry); anything else
// becomes a generic modsError.
func (m *Mods) handleRequestError(err error, mod Model, content string) tea.Msg {
	ae := &openai.Error{}
	if errors.As(err, &ae) {
		return m.handleAPIError(ae, mod, content)
	}
	return modsError{err, fmt.Sprintf(
		"There was a problem with the %s API request.",
		mod.API,
	)}
}
22 |
// handleAPIError maps an API error's HTTP status to a user-facing modsError,
// retrying where that could plausibly succeed: fallback model on 404,
// trimmed prompt on context-length 400s, backoff on rate limits and unknown
// errors.
func (m *Mods) handleAPIError(err *openai.Error, mod Model, content string) tea.Msg {
	cfg := m.Config
	switch err.StatusCode {
	case http.StatusNotFound:
		if mod.Fallback != "" {
			// Retry with the configured fallback model.
			m.Config.Model = mod.Fallback
			return m.retry(content, modsError{
				err:    err,
				reason: fmt.Sprintf("%s API server error.", mod.API),
			})
		}
		return modsError{err: err, reason: fmt.Sprintf(
			"Missing model '%s' for API '%s'.",
			cfg.Model,
			cfg.API,
		)}
	case http.StatusBadRequest:
		if err.Code == "context_length_exceeded" {
			pe := modsError{err: err, reason: "Maximum prompt size exceeded."}
			if cfg.NoLimit {
				return pe
			}

			// Trim the prompt to fit the context window and retry.
			return m.retry(cutPrompt(err.Message, content), pe)
		}
		// bad request (do not retry)
		return modsError{err: err, reason: fmt.Sprintf("%s API request error.", mod.API)}
	case http.StatusUnauthorized:
		// invalid auth or key (do not retry)
		return modsError{err: err, reason: fmt.Sprintf("Invalid %s API key.", mod.API)}
	case http.StatusTooManyRequests:
		// rate limiting or engine overload (wait and retry)
		return m.retry(content, modsError{
			err: err, reason: fmt.Sprintf("You’ve hit your %s API rate limit.", mod.API),
		})
	case http.StatusInternalServerError:
		if mod.API == "openai" {
			return m.retry(content, modsError{err: err, reason: "OpenAI API server error."})
		}
		return modsError{err: err, reason: fmt.Sprintf(
			"Error loading model '%s' for API '%s'.",
			mod.Name,
			mod.API,
		)}
	default:
		return m.retry(content, modsError{err: err, reason: "Unknown API error."})
	}
}
71 |
--------------------------------------------------------------------------------
/mods_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "testing"
6 |
7 | "github.com/stretchr/testify/require"
8 | )
9 |
// TestFindCacheOpsDetails exercises the conversation read/write ID
// resolution: partial-ID and title lookups, --continue/--continue-last
// semantics, fresh-ID minting for new titles, and error reporting for
// unknown conversations.
func TestFindCacheOpsDetails(t *testing.T) {
	// newMods builds a Mods backed by a throwaway test database.
	newMods := func(t *testing.T) *Mods {
		db := testDB(t)
		return &Mods{
			db:     db,
			Config: &Config{},
		}
	}

	t.Run("all empty", func(t *testing.T) {
		msg := newMods(t).findCacheOpsDetails()()
		dets := msg.(cacheDetailsMsg)
		require.Empty(t, dets.ReadID)
		require.NotEmpty(t, dets.WriteID)
		require.Empty(t, dets.Title)
	})

	t.Run("show id", func(t *testing.T) {
		mods := newMods(t)
		id := newConversationID()
		require.NoError(t, mods.db.Save(id, "message", "openai", "gpt-4"))
		// A partial (8-char) ID must resolve to the full conversation ID.
		mods.Config.Show = id[:8]
		msg := mods.findCacheOpsDetails()()
		dets := msg.(cacheDetailsMsg)
		require.Equal(t, id, dets.ReadID)
	})

	t.Run("show title", func(t *testing.T) {
		mods := newMods(t)
		id := newConversationID()
		require.NoError(t, mods.db.Save(id, "message 1", "openai", "gpt-4"))
		mods.Config.Show = "message 1"
		msg := mods.findCacheOpsDetails()()
		dets := msg.(cacheDetailsMsg)
		require.Equal(t, id, dets.ReadID)
	})

	t.Run("continue id", func(t *testing.T) {
		mods := newMods(t)
		id := newConversationID()
		require.NoError(t, mods.db.Save(id, "message", "openai", "gpt-4"))
		mods.Config.Continue = id[:5]
		mods.Config.Prefix = "prompt"
		msg := mods.findCacheOpsDetails()()
		dets := msg.(cacheDetailsMsg)
		require.Equal(t, id, dets.ReadID)
		require.Equal(t, id, dets.WriteID)
	})

	t.Run("continue with no prompt", func(t *testing.T) {
		mods := newMods(t)
		id := newConversationID()
		require.NoError(t, mods.db.Save(id, "message 1", "openai", "gpt-4"))
		mods.Config.ContinueLast = true
		msg := mods.findCacheOpsDetails()()
		dets := msg.(cacheDetailsMsg)
		require.Equal(t, id, dets.ReadID)
		require.Equal(t, id, dets.WriteID)
		require.Empty(t, dets.Title)
	})

	t.Run("continue title", func(t *testing.T) {
		mods := newMods(t)
		id := newConversationID()
		require.NoError(t, mods.db.Save(id, "message 1", "openai", "gpt-4"))
		mods.Config.Continue = "message 1"
		mods.Config.Prefix = "prompt"
		msg := mods.findCacheOpsDetails()()
		dets := msg.(cacheDetailsMsg)
		require.Equal(t, id, dets.ReadID)
		require.Equal(t, id, dets.WriteID)
	})

	t.Run("continue last", func(t *testing.T) {
		mods := newMods(t)
		id := newConversationID()
		require.NoError(t, mods.db.Save(id, "message 1", "openai", "gpt-4"))
		mods.Config.ContinueLast = true
		mods.Config.Prefix = "prompt"
		msg := mods.findCacheOpsDetails()()
		dets := msg.(cacheDetailsMsg)
		require.Equal(t, id, dets.ReadID)
		require.Equal(t, id, dets.WriteID)
		require.Empty(t, dets.Title)
	})

	t.Run("continue last with name", func(t *testing.T) {
		mods := newMods(t)
		id := newConversationID()
		require.NoError(t, mods.db.Save(id, "message 1", "openai", "gpt-4"))
		// An unknown title falls back to continuing the latest conversation
		// while keeping the new title.
		mods.Config.Continue = "message 2"
		mods.Config.Prefix = "prompt"
		msg := mods.findCacheOpsDetails()()
		dets := msg.(cacheDetailsMsg)
		require.Equal(t, id, dets.ReadID)
		require.Equal(t, "message 2", dets.Title)
		require.NotEmpty(t, dets.WriteID)
		require.Equal(t, id, dets.WriteID)
	})

	t.Run("write", func(t *testing.T) {
		mods := newMods(t)
		mods.Config.Title = "some title"
		msg := mods.findCacheOpsDetails()()
		dets := msg.(cacheDetailsMsg)
		require.Empty(t, dets.ReadID)
		require.NotEmpty(t, dets.WriteID)
		// A fresh SHA ID is minted; the title is never used as the ID itself.
		require.NotEqual(t, "some title", dets.WriteID)
		require.Equal(t, "some title", dets.Title)
	})

	t.Run("continue id and write with title", func(t *testing.T) {
		mods := newMods(t)
		id := newConversationID()
		require.NoError(t, mods.db.Save(id, "message 1", "openai", "gpt-4"))
		mods.Config.Title = "some title"
		mods.Config.Continue = id[:10]
		msg := mods.findCacheOpsDetails()()
		dets := msg.(cacheDetailsMsg)
		require.Equal(t, id, dets.ReadID)
		require.NotEmpty(t, dets.WriteID)
		require.NotEqual(t, id, dets.WriteID)
		require.NotEqual(t, "some title", dets.WriteID)
		require.Equal(t, "some title", dets.Title)
	})

	t.Run("continue title and write with title", func(t *testing.T) {
		mods := newMods(t)
		id := newConversationID()
		require.NoError(t, mods.db.Save(id, "message 1", "openai", "gpt-4"))
		mods.Config.Title = "some title"
		mods.Config.Continue = "message 1"
		msg := mods.findCacheOpsDetails()()
		dets := msg.(cacheDetailsMsg)
		require.Equal(t, id, dets.ReadID)
		require.NotEmpty(t, dets.WriteID)
		require.NotEqual(t, id, dets.WriteID)
		require.NotEqual(t, "some title", dets.WriteID)
		require.Equal(t, "some title", dets.Title)
	})

	t.Run("show invalid", func(t *testing.T) {
		mods := newMods(t)
		mods.Config.Show = "aaa"
		msg := mods.findCacheOpsDetails()()
		err := msg.(modsError)
		require.Equal(t, "Could not find the conversation.", err.reason)
		require.EqualError(t, err, "no conversations found: aaa")
	})
}
160 |
// TestRemoveWhitespace verifies that whitespace-only input is emptied while
// input with any non-whitespace content is left untouched.
func TestRemoveWhitespace(t *testing.T) {
	t.Run("only whitespaces", func(t *testing.T) {
		require.Equal(t, "", removeWhitespace(" \n"))
	})

	t.Run("regular text", func(t *testing.T) {
		require.Equal(t, " regular\n ", removeWhitespace(" regular\n "))
	})
}
170 |
// cutPromptTests are table-driven cases for cutPrompt: non-matching error
// messages, impossible token counts, an actual cut, and the case where the
// computed reduction exceeds the prompt length.
var cutPromptTests = map[string]struct {
	msg      string
	prompt   string
	expected string
}{
	"bad error": {
		msg:      "nope",
		prompt:   "the prompt",
		expected: "the prompt",
	},
	"crazy error": {
		msg:      tokenErrMsg(10, 93),
		prompt:   "the prompt",
		expected: "the prompt",
	},
	"cut prompt": {
		msg:      tokenErrMsg(10, 3),
		prompt:   "this is a long prompt I have no idea if its really 10 tokens",
		expected: "this is a long prompt ",
	},
	"missmatch of token estimation vs api result": {
		msg:      tokenErrMsg(30000, 100),
		prompt:   "tell me a joke",
		expected: "tell me a joke",
	},
}

// tokenErrMsg formats a context-length error message; note the argument
// order: l is the token count of the request, ml the model's maximum.
func tokenErrMsg(l, ml int) string {
	return fmt.Sprintf(`This model's maximum context length is %d tokens. However, your messages resulted in %d tokens`, ml, l)
}

// TestCutPrompt runs the cutPromptTests table.
func TestCutPrompt(t *testing.T) {
	for name, tc := range cutPromptTests {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.expected, cutPrompt(tc.msg, tc.prompt))
		})
	}
}
209 |
--------------------------------------------------------------------------------
/sha.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "crypto/rand"
5 | "crypto/sha1" //nolint: gosec
6 | "fmt"
7 | "regexp"
8 | )
9 |
const (
	sha1short         = 7
	sha1minLen        = 4
	sha1ReadBlockSize = 4096
)

// sha1reg matches a full 40-character lowercase hex SHA-1 conversation ID.
var sha1reg = regexp.MustCompile(`\b[0-9a-f]{40}\b`)

// newConversationID mints a fresh conversation identifier: the SHA-1 digest
// of a block of random bytes, rendered as 40 lowercase hex characters.
func newConversationID() string {
	seed := make([]byte, sha1ReadBlockSize)
	_, _ = rand.Read(seed) // crypto/rand.Read never fails in practice
	sum := sha1.Sum(seed)  //nolint: gosec
	return fmt.Sprintf("%x", sum)
}
23 |
--------------------------------------------------------------------------------
/stream.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "strings"
6 |
7 | "github.com/charmbracelet/mods/internal/proto"
8 | )
9 |
10 | func (m *Mods) setupStreamContext(content string, mod Model) error {
11 | cfg := m.Config
12 | m.messages = []proto.Message{}
13 | if txt := cfg.FormatText[cfg.FormatAs]; cfg.Format && txt != "" {
14 | m.messages = append(m.messages, proto.Message{
15 | Role: proto.RoleSystem,
16 | Content: txt,
17 | })
18 | }
19 |
20 | if cfg.Role != "" {
21 | roleSetup, ok := cfg.Roles[cfg.Role]
22 | if !ok {
23 | return modsError{
24 | err: fmt.Errorf("role %q does not exist", cfg.Role),
25 | reason: "Could not use role",
26 | }
27 | }
28 | for _, msg := range roleSetup {
29 | content, err := loadMsg(msg)
30 | if err != nil {
31 | return modsError{
32 | err: err,
33 | reason: "Could not use role",
34 | }
35 | }
36 | m.messages = append(m.messages, proto.Message{
37 | Role: proto.RoleSystem,
38 | Content: content,
39 | })
40 | }
41 | }
42 |
43 | if prefix := cfg.Prefix; prefix != "" {
44 | content = strings.TrimSpace(prefix + "\n\n" + content)
45 | }
46 |
47 | if !cfg.NoLimit && int64(len(content)) > mod.MaxChars {
48 | content = content[:mod.MaxChars]
49 | }
50 |
51 | if !cfg.NoCache && cfg.cacheReadFromID != "" {
52 | if err := m.cache.Read(cfg.cacheReadFromID, &m.messages); err != nil {
53 | return modsError{
54 | err: err,
55 | reason: fmt.Sprintf(
56 | "There was a problem reading the cache. Use %s / %s to disable it.",
57 | m.Styles.InlineCode.Render("--no-cache"),
58 | m.Styles.InlineCode.Render("NO_CACHE"),
59 | ),
60 | }
61 | }
62 | }
63 |
64 | m.messages = append(m.messages, proto.Message{
65 | Role: proto.RoleUser,
66 | Content: content,
67 | })
68 |
69 | return nil
70 | }
71 |
--------------------------------------------------------------------------------
/styles.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "strings"
6 |
7 | "github.com/charmbracelet/lipgloss"
8 | )
9 |
10 | type styles struct {
11 | AppName,
12 | CliArgs,
13 | Comment,
14 | CyclingChars,
15 | ErrorHeader,
16 | ErrorDetails,
17 | ErrPadding,
18 | Flag,
19 | FlagComma,
20 | FlagDesc,
21 | InlineCode,
22 | Link,
23 | Pipe,
24 | Quote,
25 | ConversationList,
26 | SHA1,
27 | Timeago lipgloss.Style
28 | }
29 |
30 | func makeStyles(r *lipgloss.Renderer) (s styles) {
31 | const horizontalEdgePadding = 2
32 | s.AppName = r.NewStyle().Bold(true)
33 | s.CliArgs = r.NewStyle().Foreground(lipgloss.Color("#585858"))
34 | s.Comment = r.NewStyle().Foreground(lipgloss.Color("#757575"))
35 | s.CyclingChars = r.NewStyle().Foreground(lipgloss.Color("#FF87D7"))
36 | s.ErrorHeader = r.NewStyle().Foreground(lipgloss.Color("#F1F1F1")).Background(lipgloss.Color("#FF5F87")).Bold(true).Padding(0, 1).SetString("ERROR")
37 | s.ErrorDetails = s.Comment
38 | s.ErrPadding = r.NewStyle().Padding(0, horizontalEdgePadding)
39 | s.Flag = r.NewStyle().Foreground(lipgloss.AdaptiveColor{Light: "#00B594", Dark: "#3EEFCF"}).Bold(true)
40 | s.FlagComma = r.NewStyle().Foreground(lipgloss.AdaptiveColor{Light: "#5DD6C0", Dark: "#427C72"}).SetString(",")
41 | s.FlagDesc = s.Comment
42 | s.InlineCode = r.NewStyle().Foreground(lipgloss.Color("#FF5F87")).Background(lipgloss.Color("#3A3A3A")).Padding(0, 1)
43 | s.Link = r.NewStyle().Foreground(lipgloss.Color("#00AF87")).Underline(true)
44 | s.Quote = r.NewStyle().Foreground(lipgloss.AdaptiveColor{Light: "#FF71D0", Dark: "#FF78D2"})
45 | s.Pipe = r.NewStyle().Foreground(lipgloss.AdaptiveColor{Light: "#8470FF", Dark: "#745CFF"})
46 | s.ConversationList = r.NewStyle().Padding(0, 1)
47 | s.SHA1 = s.Flag
48 | s.Timeago = r.NewStyle().Foreground(lipgloss.AdaptiveColor{Light: "#999", Dark: "#555"})
49 | return s
50 | }
51 |
52 | // action messages
53 |
54 | const defaultAction = "WROTE"
55 |
56 | var outputHeader = lipgloss.NewStyle().Foreground(lipgloss.Color("#F1F1F1")).Background(lipgloss.Color("#6C50FF")).Bold(true).Padding(0, 1).MarginRight(1)
57 |
58 | func printConfirmation(action, content string) {
59 | if action == "" {
60 | action = defaultAction
61 | }
62 | outputHeader = outputHeader.SetString(strings.ToUpper(action))
63 | fmt.Println(lipgloss.JoinHorizontal(lipgloss.Center, outputHeader.String(), content))
64 | }
65 |
--------------------------------------------------------------------------------
/sync.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import "sync"
4 |
// Backport of sync.OnceValue from go1.21; delete in favor of the stdlib
// version once go 1.21 is broadly available in package managers.

// OnceValue returns a function that invokes f only once and returns the value
// returned by f. The returned function may be called concurrently.
//
// If f panics, the returned function will panic with the same value on every call.
func OnceValue[T any](f func() T) func() T {
	var (
		once     sync.Once
		done     bool // true only if f returned normally
		panicVal any  // value recovered from a panicking f
		value    T
	)
	compute := func() {
		defer func() {
			// On a normal return done is true and recover() is nil, so this
			// is a no-op. If f panicked, capture the value and re-panic so
			// the first caller still sees the panic; once.Do marks the call
			// done either way.
			panicVal = recover()
			if !done {
				panic(panicVal)
			}
		}()
		value = f()
		done = true
	}
	return func() T {
		once.Do(compute)
		// Subsequent callers replay the original panic instead of
		// returning a zero value.
		if !done {
			panic(panicVal)
		}
		return value
	}
}
34 |
--------------------------------------------------------------------------------
/term.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "os"
5 |
6 | "github.com/charmbracelet/lipgloss"
7 | "github.com/mattn/go-isatty"
8 | "github.com/muesli/termenv"
9 | )
10 |
// isInputTTY reports whether stdin is attached to a terminal (as opposed to
// a pipe or redirect). The check runs once and the result is cached.
var isInputTTY = OnceValue(func() bool {
	return isatty.IsTerminal(os.Stdin.Fd())
})

// isOutputTTY reports whether stdout is attached to a terminal. Cached after
// the first call.
var isOutputTTY = OnceValue(func() bool {
	return isatty.IsTerminal(os.Stdout.Fd())
})

// stdoutRenderer returns the lipgloss default renderer (bound to stdout),
// cached after the first call.
var stdoutRenderer = OnceValue(func() *lipgloss.Renderer {
	return lipgloss.DefaultRenderer()
})

// stdoutStyles returns the style set built for the stdout renderer.
var stdoutStyles = OnceValue(func() styles {
	return makeStyles(stdoutRenderer())
})

// stderrRenderer returns a lipgloss renderer bound to stderr, with termenv's
// color cache enabled.
var stderrRenderer = OnceValue(func() *lipgloss.Renderer {
	return lipgloss.NewRenderer(os.Stderr, termenv.WithColorCache(true))
})

// stderrStyles returns the style set built for the stderr renderer.
var stderrStyles = OnceValue(func() styles {
	return makeStyles(stderrRenderer())
})
34 |
--------------------------------------------------------------------------------