├── .github ├── CODEOWNERS ├── workflows │ ├── lint.yml │ ├── lint-sync.yml │ ├── dependabot-sync.yml │ ├── nightly.yml │ ├── goreleaser.yml │ └── build.yml └── dependabot.yml ├── .gitignore ├── internal ├── proto │ ├── testdata │ │ └── TestStringer.golden │ ├── proto_test.go │ └── proto.go ├── google │ ├── format.go │ ├── http.go │ └── google.go ├── cohere │ ├── format.go │ └── cohere.go ├── stream │ └── stream.go ├── ollama │ ├── format.go │ └── ollama.go ├── cache │ ├── convo.go │ ├── cache.go │ ├── expiring.go │ └── cache_test.go ├── openai │ ├── format.go │ └── openai.go └── anthropic │ ├── format.go │ └── anthropic.go ├── .goreleaser.yml ├── examples ├── conversations.tape ├── demo.tape └── v1.5.tape ├── sha.go ├── messages.go ├── error.go ├── config_test.go ├── load.go ├── term.go ├── .golangci.yml ├── load_test.go ├── LICENSE ├── examples.go ├── flag_test.go ├── messages_test.go ├── stream.go ├── flag.go ├── mods_errors.go ├── main_test.go ├── styles.go ├── features.md ├── examples.md ├── go.mod ├── db_test.go ├── mcp.go ├── db.go ├── anim.go ├── mods_test.go ├── README.md ├── config.go ├── config_template.yml ├── mods.go └── go.sum /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @caarlos0 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | mods 2 | .envrc 3 | completions/ 4 | manpages/ 5 | dist/ 6 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: lint 2 | on: 3 | push: 4 | pull_request: 5 | 6 | jobs: 7 | lint: 8 | uses: charmbracelet/meta/.github/workflows/lint.yml@main 9 | -------------------------------------------------------------------------------- /internal/proto/testdata/TestStringer.golden: 
-------------------------------------------------------------------------------- 1 | **System**: you are a medieval king 2 | 3 | **User**: first 4 natural numbers 4 | 5 | **Assistant**: 1, 2, 3, 4 6 | 7 | 8 | > Ran tool: `myfunc` 9 | 10 | **User**: as a json array 11 | 12 | **Assistant**: [ 1, 2, 3, 4 ] 13 | 14 | **Assistant**: something from an assistant 15 | 16 | -------------------------------------------------------------------------------- /.github/workflows/lint-sync.yml: -------------------------------------------------------------------------------- 1 | name: lint-sync 2 | on: 3 | schedule: 4 | # every Sunday at midnight 5 | - cron: "0 0 * * 0" 6 | workflow_dispatch: # allows manual triggering 7 | 8 | permissions: 9 | contents: write 10 | pull-requests: write 11 | 12 | jobs: 13 | lint: 14 | uses: charmbracelet/meta/.github/workflows/lint-sync.yml@main 15 | -------------------------------------------------------------------------------- /.goreleaser.yml: -------------------------------------------------------------------------------- 1 | # The lines beneath this are called `modelines`. See `:help modeline` 2 | # Feel free to remove those if you don't want/use them. 
const (
	sha1short         = 7
	sha1minLen        = 4
	sha1ReadBlockSize = 4096
)

// sha1reg matches a full 40-hex-digit SHA-1 string.
var sha1reg = regexp.MustCompile(`\b[0-9a-f]{40}\b`)

// newConversationID returns a fresh random conversation identifier,
// rendered as the 40-character lowercase hex SHA-1 digest of a block
// of random bytes.
func newConversationID() string {
	entropy := make([]byte, sha1ReadBlockSize)
	// crypto/rand.Read never fails on supported platforms; the ID only
	// needs uniqueness, not recoverability, so the error is ignored.
	_, _ = rand.Read(entropy)
	digest := sha1.Sum(entropy) //nolint: gosec
	return fmt.Sprintf("%x", digest)
}
16 | return result 17 | } 18 | -------------------------------------------------------------------------------- /.github/workflows/dependabot-sync.yml: -------------------------------------------------------------------------------- 1 | name: dependabot-sync 2 | on: 3 | schedule: 4 | - cron: "0 0 * * 0" # every Sunday at midnight 5 | workflow_dispatch: # allows manual triggering 6 | 7 | permissions: 8 | contents: write 9 | pull-requests: write 10 | 11 | jobs: 12 | dependabot-sync: 13 | uses: charmbracelet/meta/.github/workflows/dependabot-sync.yml@main 14 | with: 15 | repo_name: ${{ github.event.repository.name }} 16 | secrets: 17 | gh_token: ${{ secrets.PERSONAL_ACCESS_TOKEN }} 18 | -------------------------------------------------------------------------------- /messages.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "strings" 5 | 6 | "github.com/charmbracelet/mods/internal/proto" 7 | ) 8 | 9 | func lastPrompt(messages []proto.Message) string { 10 | var result string 11 | for _, msg := range messages { 12 | if msg.Role != proto.RoleUser { 13 | continue 14 | } 15 | if msg.Content == "" { 16 | continue 17 | } 18 | result = msg.Content 19 | } 20 | return result 21 | } 22 | 23 | func firstLine(s string) string { 24 | first, _, _ := strings.Cut(s, "\n") 25 | return first 26 | } 27 | -------------------------------------------------------------------------------- /examples/demo.tape: -------------------------------------------------------------------------------- 1 | Output mods.gif 2 | 3 | Set Height 900 4 | Set Width 1600 5 | Set Framerate 24 6 | 7 | Hide 8 | Type `export OPENAI_API_KEY=$(pass OPENAI_API_KEY)` 9 | Enter 10 | Ctrl+L 11 | Sleep 500ms 12 | Show 13 | 14 | Sleep 500ms 15 | 16 | Type@25ms `curl -s ` 17 | 18 | Sleep 500ms 19 | 20 | # Simulate pasting link. 
// newUserErrorf formats a user-facing error. It exists mainly so that
// linters don't complain about capitalized error strings at call sites.
func newUserErrorf(format string, a ...any) error {
	return fmt.Errorf(format, a...)
}

// modsError decorates an underlying error with a short human-readable
// reason that is shown to the user alongside the raw error text.
type modsError struct {
	err    error
	reason string
}

// Error returns the wrapped error's message.
func (m modsError) Error() string { return m.err.Error() }

// Reason returns the human-readable context for the error.
func (m modsError) Reason() string { return m.reason }
-------------------------------------------------------------------------------- 1 | name: nightly 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | jobs: 9 | nightly: 10 | uses: charmbracelet/meta/.github/workflows/nightly.yml@main 11 | secrets: 12 | docker_username: ${{ secrets.DOCKERHUB_USERNAME }} 13 | docker_token: ${{ secrets.DOCKERHUB_TOKEN }} 14 | goreleaser_key: ${{ secrets.GORELEASER_KEY }} 15 | macos_sign_p12: ${{ secrets.MACOS_SIGN_P12 }} 16 | macos_sign_password: ${{ secrets.MACOS_SIGN_PASSWORD }} 17 | macos_notary_issuer_id: ${{ secrets.MACOS_NOTARY_ISSUER_ID }} 18 | macos_notary_key_id: ${{ secrets.MACOS_NOTARY_KEY_ID }} 19 | macos_notary_key: ${{ secrets.MACOS_NOTARY_KEY }} 20 | -------------------------------------------------------------------------------- /config_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/require" 7 | "gopkg.in/yaml.v3" 8 | ) 9 | 10 | func TestConfig(t *testing.T) { 11 | t.Run("old format text", func(t *testing.T) { 12 | var cfg Config 13 | require.NoError(t, yaml.Unmarshal([]byte("format-text: as markdown"), &cfg)) 14 | require.Equal(t, FormatText(map[string]string{ 15 | "markdown": "as markdown", 16 | }), cfg.FormatText) 17 | }) 18 | t.Run("new format text", func(t *testing.T) { 19 | var cfg Config 20 | require.NoError(t, yaml.Unmarshal([]byte("format-text:\n markdown: as markdown\n json: as json"), &cfg)) 21 | require.Equal(t, FormatText(map[string]string{ 22 | "markdown": "as markdown", 23 | "json": "as json", 24 | }), cfg.FormatText) 25 | }) 26 | } 27 | -------------------------------------------------------------------------------- /load.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "io" 5 | "net/http" 6 | "os" 7 | "strings" 8 | ) 9 | 10 | func loadMsg(msg string) (string, error) { 11 | if strings.HasPrefix(msg, 
"https://") || strings.HasPrefix(msg, "http://") { 12 | resp, err := http.Get(msg) //nolint:gosec,noctx 13 | if err != nil { 14 | return "", err //nolint:wrapcheck 15 | } 16 | defer func() { _ = resp.Body.Close() }() 17 | bts, err := io.ReadAll(resp.Body) 18 | if err != nil { 19 | return "", err //nolint:wrapcheck 20 | } 21 | return string(bts), nil 22 | } 23 | 24 | if strings.HasPrefix(msg, "file://") { 25 | bts, err := os.ReadFile(strings.TrimPrefix(msg, "file://")) 26 | if err != nil { 27 | return "", err //nolint:wrapcheck 28 | } 29 | return string(bts), nil 30 | } 31 | 32 | return msg, nil 33 | } 34 | -------------------------------------------------------------------------------- /term.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | "sync" 6 | 7 | "github.com/charmbracelet/lipgloss" 8 | "github.com/mattn/go-isatty" 9 | "github.com/muesli/termenv" 10 | ) 11 | 12 | var isInputTTY = sync.OnceValue(func() bool { 13 | return isatty.IsTerminal(os.Stdin.Fd()) 14 | }) 15 | 16 | var isOutputTTY = sync.OnceValue(func() bool { 17 | return isatty.IsTerminal(os.Stdout.Fd()) 18 | }) 19 | 20 | var stdoutRenderer = sync.OnceValue(func() *lipgloss.Renderer { 21 | return lipgloss.DefaultRenderer() 22 | }) 23 | 24 | var stdoutStyles = sync.OnceValue(func() styles { 25 | return makeStyles(stdoutRenderer()) 26 | }) 27 | 28 | var stderrRenderer = sync.OnceValue(func() *lipgloss.Renderer { 29 | return lipgloss.NewRenderer(os.Stderr, termenv.WithColorCache(true)) 30 | }) 31 | 32 | var stderrStyles = sync.OnceValue(func() styles { 33 | return makeStyles(stderrRenderer()) 34 | }) 35 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | run: 3 | tests: false 4 | linters: 5 | enable: 6 | - bodyclose 7 | - exhaustive 8 | - goconst 9 | - godot 10 | - 
gomoddirectives 11 | - goprintffuncname 12 | - gosec 13 | - misspell 14 | - nakedret 15 | - nestif 16 | - nilerr 17 | - noctx 18 | - nolintlint 19 | - prealloc 20 | - revive 21 | - rowserrcheck 22 | - sqlclosecheck 23 | - tparallel 24 | - unconvert 25 | - unparam 26 | - whitespace 27 | - wrapcheck 28 | exclusions: 29 | rules: 30 | - text: '(slog|log)\.\w+' 31 | linters: 32 | - noctx 33 | generated: lax 34 | presets: 35 | - common-false-positives 36 | issues: 37 | max-issues-per-linter: 0 38 | max-same-issues: 0 39 | formatters: 40 | enable: 41 | - gofumpt 42 | - goimports 43 | exclusions: 44 | generated: lax 45 | -------------------------------------------------------------------------------- /internal/proto/proto_test.go: -------------------------------------------------------------------------------- 1 | package proto 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/charmbracelet/x/exp/golden" 7 | ) 8 | 9 | func TestStringer(t *testing.T) { 10 | messages := []Message{ 11 | { 12 | Role: RoleSystem, 13 | Content: "you are a medieval king", 14 | }, 15 | { 16 | Role: RoleUser, 17 | Content: "first 4 natural numbers", 18 | }, 19 | { 20 | Role: RoleAssistant, 21 | Content: "1, 2, 3, 4", 22 | }, 23 | { 24 | Role: RoleTool, 25 | Content: `{"the":"result"}`, 26 | ToolCalls: []ToolCall{ 27 | { 28 | ID: "aaa", 29 | Function: Function{ 30 | Name: "myfunc", 31 | Arguments: []byte(`{"a":"b"}`), 32 | }, 33 | }, 34 | }, 35 | }, 36 | { 37 | Role: RoleUser, 38 | Content: "as a json array", 39 | }, 40 | { 41 | Role: RoleAssistant, 42 | Content: "[ 1, 2, 3, 4 ]", 43 | }, 44 | { 45 | Role: RoleAssistant, 46 | Content: "something from an assistant", 47 | }, 48 | } 49 | 50 | golden.RequireEqual(t, []byte(Conversation(messages).String())) 51 | } 52 | -------------------------------------------------------------------------------- /load_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | 
"path/filepath" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func TestLoad(t *testing.T) { 12 | const content = "just text" 13 | t.Run("normal msg", func(t *testing.T) { 14 | msg, err := loadMsg(content) 15 | require.NoError(t, err) 16 | require.Equal(t, content, msg) 17 | }) 18 | 19 | t.Run("file", func(t *testing.T) { 20 | path := filepath.Join(t.TempDir(), "foo.txt") 21 | require.NoError(t, os.WriteFile(path, []byte(content), 0o644)) 22 | 23 | msg, err := loadMsg("file://" + path) 24 | require.NoError(t, err) 25 | require.Equal(t, content, msg) 26 | }) 27 | 28 | t.Run("http url", func(t *testing.T) { 29 | msg, err := loadMsg("http://raw.githubusercontent.com/charmbracelet/mods/main/LICENSE") 30 | require.NoError(t, err) 31 | require.Contains(t, msg, "MIT License") 32 | }) 33 | 34 | t.Run("https url", func(t *testing.T) { 35 | msg, err := loadMsg("https://raw.githubusercontent.com/charmbracelet/mods/main/LICENSE") 36 | require.NoError(t, err) 37 | require.Contains(t, msg, "MIT License") 38 | }) 39 | } 40 | -------------------------------------------------------------------------------- /.github/workflows/goreleaser.yml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json 2 | 3 | name: goreleaser 4 | 5 | on: 6 | push: 7 | tags: 8 | - v*.*.* 9 | 10 | concurrency: 11 | group: goreleaser 12 | cancel-in-progress: true 13 | 14 | jobs: 15 | goreleaser: 16 | uses: charmbracelet/meta/.github/workflows/goreleaser.yml@main 17 | secrets: 18 | docker_username: ${{ secrets.DOCKERHUB_USERNAME }} 19 | docker_token: ${{ secrets.DOCKERHUB_TOKEN }} 20 | gh_pat: ${{ secrets.PERSONAL_ACCESS_TOKEN }} 21 | goreleaser_key: ${{ secrets.GORELEASER_KEY }} 22 | aur_key: ${{ secrets.AUR_KEY }} 23 | fury_token: ${{ secrets.FURY_TOKEN }} 24 | nfpm_gpg_key: ${{ secrets.NFPM_GPG_KEY }} 25 | nfpm_passphrase: ${{ secrets.NFPM_PASSPHRASE }} 26 | 
macos_sign_p12: ${{ secrets.MACOS_SIGN_P12 }} 27 | macos_sign_password: ${{ secrets.MACOS_SIGN_PASSWORD }} 28 | macos_notary_issuer_id: ${{ secrets.MACOS_NOTARY_ISSUER_ID }} 29 | macos_notary_key_id: ${{ secrets.MACOS_NOTARY_KEY_ID }} 30 | macos_notary_key: ${{ secrets.MACOS_NOTARY_KEY }} 31 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Charmbracelet, Inc 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /examples.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "math/rand" 5 | "regexp" 6 | ) 7 | 8 | var examples = map[string]string{ 9 | "Write new sections for a readme": `cat README.md | mods "write a new section to this README documenting a pdf sharing feature"`, 10 | "Editorialize your video files": `ls ~/vids | mods -f "summarize each of these titles, group them by decade" | glow`, 11 | "Let GPT pick something to watch": `ls ~/vids | mods "Pick 5 action packed shows from the 80s from this list" | gum choose | xargs vlc`, 12 | } 13 | 14 | func randomExample() string { 15 | keys := make([]string, 0, len(examples)) 16 | for k := range examples { 17 | keys = append(keys, k) 18 | } 19 | desc := keys[rand.Intn(len(keys))] //nolint:gosec 20 | return desc 21 | } 22 | 23 | func cheapHighlighting(s styles, code string) string { 24 | code = regexp. 25 | MustCompile(`"([^"\\]|\\.)*"`). 26 | ReplaceAllStringFunc(code, func(x string) string { 27 | return s.Quote.Render(x) 28 | }) 29 | code = regexp. 30 | MustCompile(`\|`). 
31 | ReplaceAllStringFunc(code, func(x string) string { 32 | return s.Pipe.Render(x) 33 | }) 34 | return code 35 | } 36 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | updates: 4 | - package-ecosystem: "gomod" 5 | directory: "/" 6 | schedule: 7 | interval: "weekly" 8 | day: "monday" 9 | time: "05:00" 10 | timezone: "America/New_York" 11 | labels: 12 | - "dependencies" 13 | commit-message: 14 | prefix: "chore" 15 | include: "scope" 16 | groups: 17 | all: 18 | patterns: 19 | - "*" 20 | ignore: 21 | - dependency-name: github.com/charmbracelet/bubbletea/v2 22 | versions: 23 | - v2.0.0-beta1 24 | 25 | - package-ecosystem: "github-actions" 26 | directory: "/" 27 | schedule: 28 | interval: "weekly" 29 | day: "monday" 30 | time: "05:00" 31 | timezone: "America/New_York" 32 | labels: 33 | - "dependencies" 34 | commit-message: 35 | prefix: "chore" 36 | include: "scope" 37 | groups: 38 | all: 39 | patterns: 40 | - "*" 41 | 42 | - package-ecosystem: "docker" 43 | directory: "/" 44 | schedule: 45 | interval: "weekly" 46 | day: "monday" 47 | time: "05:00" 48 | timezone: "America/New_York" 49 | labels: 50 | - "dependencies" 51 | commit-message: 52 | prefix: "chore" 53 | include: "scope" 54 | groups: 55 | all: 56 | patterns: 57 | - "*" 58 | -------------------------------------------------------------------------------- /flag_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "errors" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | var flagParseErrorTests = []struct { 11 | in string 12 | flag string 13 | reason string 14 | }{ 15 | { 16 | "unknown flag: --nope", 17 | "--nope", 18 | "Flag %s is missing.", 19 | }, 20 | { 21 | "flag needs an argument: --delete", 22 | "--delete", 23 | "Flag %s needs an argument.", 24 | }, 
25 | { 26 | "flag needs an argument: 'd' in -d", 27 | "-d", 28 | "Flag %s needs an argument.", 29 | }, 30 | { 31 | `invalid argument "20dd" for "--delete-older-than" flag: time: unknown unit "dd" in duration "20dd"`, 32 | "--delete-older-than", 33 | "Flag %s have an invalid argument.", 34 | }, 35 | { 36 | `invalid argument "sdfjasdl" for "--max-tokens" flag: strconv.ParseInt: parsing "sdfjasdl": invalid syntax`, 37 | "--max-tokens", 38 | "Flag %s have an invalid argument.", 39 | }, 40 | { 41 | `invalid argument "nope" for "-r, --raw" flag: strconv.ParseBool: parsing "nope": invalid syntax`, 42 | "-r, --raw", 43 | "Flag %s have an invalid argument.", 44 | }, 45 | } 46 | 47 | func TestFlagParseError(t *testing.T) { 48 | for _, tf := range flagParseErrorTests { 49 | t.Run(tf.in, func(t *testing.T) { 50 | err := newFlagParseError(errors.New(tf.in)) 51 | require.Equal(t, tf.flag, err.Flag()) 52 | require.Equal(t, tf.reason, err.ReasonFormat()) 53 | require.Equal(t, tf.in, err.Error()) 54 | }) 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /messages_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/charmbracelet/mods/internal/proto" 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | func TestLastPrompt(t *testing.T) { 11 | t.Run("no prompt", func(t *testing.T) { 12 | require.Equal(t, "", lastPrompt(nil)) 13 | }) 14 | 15 | t.Run("single prompt", func(t *testing.T) { 16 | require.Equal(t, "single", lastPrompt([]proto.Message{ 17 | { 18 | Role: proto.RoleUser, 19 | Content: "single", 20 | }, 21 | })) 22 | }) 23 | 24 | t.Run("multiple prompts", func(t *testing.T) { 25 | require.Equal(t, "last", lastPrompt([]proto.Message{ 26 | { 27 | Role: proto.RoleUser, 28 | Content: "first", 29 | }, 30 | { 31 | Role: proto.RoleAssistant, 32 | Content: "hallo", 33 | }, 34 | { 35 | Role: proto.RoleUser, 36 | Content: "middle 
1", 37 | }, 38 | { 39 | Role: proto.RoleUser, 40 | Content: "middle 2", 41 | }, 42 | { 43 | Role: proto.RoleUser, 44 | Content: "last", 45 | }, 46 | })) 47 | }) 48 | } 49 | 50 | func TestFirstLine(t *testing.T) { 51 | t.Run("single line", func(t *testing.T) { 52 | require.Equal(t, "line", firstLine("line")) 53 | }) 54 | t.Run("single line ending with \n", func(t *testing.T) { 55 | require.Equal(t, "line", firstLine("line\n")) 56 | }) 57 | t.Run("multiple lines", func(t *testing.T) { 58 | require.Equal(t, "line", firstLine("line\nsomething else\nline3\nfoo\nends with a double \n\n")) 59 | }) 60 | } 61 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: build 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | build: 7 | strategy: 8 | matrix: 9 | os: [ubuntu-latest, macos-latest, windows-latest] 10 | runs-on: ${{ matrix.os }} 11 | env: 12 | GO111MODULE: "on" 13 | steps: 14 | - name: Install Go 15 | uses: actions/setup-go@v6 16 | with: 17 | go-version: stable 18 | check-latest: true 19 | 20 | - name: Checkout code 21 | uses: actions/checkout@v6 22 | 23 | - name: Download Go modules 24 | run: go mod download 25 | 26 | - name: Build 27 | run: go build -v ./... 28 | 29 | - name: Test 30 | run: go test -v -cover -timeout=30s ./... 
31 | 32 | snapshot: 33 | uses: charmbracelet/meta/.github/workflows/snapshot.yml@main 34 | secrets: 35 | goreleaser_key: ${{ secrets.GORELEASER_KEY }} 36 | 37 | dependabot: 38 | needs: [build] 39 | runs-on: ubuntu-latest 40 | permissions: 41 | pull-requests: write 42 | contents: write 43 | if: ${{ github.actor == 'dependabot[bot]' && github.event_name == 'pull_request'}} 44 | steps: 45 | - id: metadata 46 | uses: dependabot/fetch-metadata@v2 47 | with: 48 | github-token: "${{ secrets.GITHUB_TOKEN }}" 49 | - run: | 50 | gh pr review --approve "$PR_URL" 51 | gh pr merge --squash --auto "$PR_URL" 52 | env: 53 | PR_URL: ${{github.event.pull_request.html_url}} 54 | GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} 55 | -------------------------------------------------------------------------------- /internal/cohere/format.go: -------------------------------------------------------------------------------- 1 | package cohere 2 | 3 | import ( 4 | "github.com/charmbracelet/mods/internal/proto" 5 | cohere "github.com/cohere-ai/cohere-go/v2" 6 | ) 7 | 8 | func fromProtoMessages(input []proto.Message) (history []*cohere.Message, message string) { 9 | var messages []*cohere.Message //nolint:prealloc 10 | for _, msg := range input { 11 | messages = append(messages, &cohere.Message{ 12 | Role: fromProtoRole(msg.Role), 13 | Chatbot: &cohere.ChatMessage{ 14 | Message: msg.Content, 15 | }, 16 | }) 17 | } 18 | if len(messages) > 1 { 19 | history = messages[:len(messages)-1] 20 | } 21 | message = messages[len(messages)-1].User.Message 22 | return history, message 23 | } 24 | 25 | func toProtoMessages(input []*cohere.Message) []proto.Message { 26 | var messages []proto.Message 27 | for _, in := range input { 28 | switch in.Role { 29 | case "USER": 30 | messages = append(messages, proto.Message{ 31 | Role: proto.RoleUser, 32 | Content: in.User.Message, 33 | }) 34 | case "SYSTEM": 35 | messages = append(messages, proto.Message{ 36 | Role: proto.RoleSystem, 37 | Content: in.System.Message, 38 | }) 
39 | case "CHATBOT": 40 | messages = append(messages, proto.Message{ 41 | Role: proto.RoleAssistant, 42 | Content: in.Chatbot.Message, 43 | }) 44 | case "TOOL": 45 | // not supported yet 46 | } 47 | } 48 | return messages 49 | } 50 | 51 | func fromProtoRole(role string) string { 52 | switch role { 53 | case proto.RoleSystem: 54 | return "SYSTEM" 55 | case proto.RoleAssistant: 56 | return "CHATBOT" 57 | default: 58 | return "USER" 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /stream.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "github.com/charmbracelet/mods/internal/proto" 8 | ) 9 | 10 | func (m *Mods) setupStreamContext(content string, mod Model) error { 11 | cfg := m.Config 12 | m.messages = []proto.Message{} 13 | if txt := cfg.FormatText[cfg.FormatAs]; cfg.Format && txt != "" { 14 | m.messages = append(m.messages, proto.Message{ 15 | Role: proto.RoleSystem, 16 | Content: txt, 17 | }) 18 | } 19 | 20 | if cfg.Role != "" { 21 | roleSetup, ok := cfg.Roles[cfg.Role] 22 | if !ok { 23 | return modsError{ 24 | err: fmt.Errorf("role %q does not exist", cfg.Role), 25 | reason: "Could not use role", 26 | } 27 | } 28 | for _, msg := range roleSetup { 29 | content, err := loadMsg(msg) 30 | if err != nil { 31 | return modsError{ 32 | err: err, 33 | reason: "Could not use role", 34 | } 35 | } 36 | m.messages = append(m.messages, proto.Message{ 37 | Role: proto.RoleSystem, 38 | Content: content, 39 | }) 40 | } 41 | } 42 | 43 | if prefix := cfg.Prefix; prefix != "" { 44 | content = strings.TrimSpace(prefix + "\n\n" + content) 45 | } 46 | 47 | if !cfg.NoLimit && int64(len(content)) > mod.MaxChars { 48 | content = content[:mod.MaxChars] 49 | } 50 | 51 | if !cfg.NoCache && cfg.cacheReadFromID != "" { 52 | if err := m.cache.Read(cfg.cacheReadFromID, &m.messages); err != nil { 53 | return modsError{ 54 | err: err, 55 | reason: 
// ErrNoContent happens when the client is returning no content.
var ErrNoContent = errors.New("no content")

// Client is a streaming client.
type Client interface {
	// Request opens a new stream for the given request.
	Request(context.Context, proto.Request) Stream
}

// Stream is an ongoing stream.
type Stream interface {
	// Next reports whether another chunk is available. When it returns
	// false, the caller should run [Stream.CallTools] once, and then
	// check Next again (tool results may trigger more model output).
	Next() bool

	// Current returns the current chunk.
	// Implementations should accumulate chunks into a message, and keep
	// their internal conversation state.
	Current() (proto.Chunk, error)

	// Close closes the underlying stream.
	Close() error

	// Err returns the streaming error, if any.
	Err() error

	// Messages returns the whole conversation so far.
	Messages() []proto.Message

	// CallTools handles any pending tool calls.
	CallTools() []proto.ToolCallStatus
}

// CallTool calls a tool using the provided data and caller, and returns the
// resulting [proto.Message] and [proto.ToolCallStatus].
//
// If the caller fails and produced no output, the error text is used as the
// tool message content so the failure is visible in the conversation.
func CallTool(
	id, name string,
	data []byte,
	caller func(name string, data []byte) (string, error),
) (proto.Message, proto.ToolCallStatus) {
	content, err := caller(name, data)
	if content == "" && err != nil {
		content = err.Error()
	}
	return proto.Message{
			Role:    proto.RoleTool,
			Content: content,
			ToolCalls: []proto.ToolCall{
				{
					ID:      id,
					IsError: err != nil,
					Function: proto.Function{
						Name:      name,
						Arguments: data,
					},
				},
			},
		},
		proto.ToolCallStatus{
			Name: name,
			Err:  err,
		}
}
api.ToolCallFunctionArguments 47 | _ = json.Unmarshal(call.Function.Arguments, &args) 48 | idx, _ := strconv.Atoi(call.ID) 49 | m.ToolCalls = append(m.ToolCalls, api.ToolCall{ 50 | Function: api.ToolCallFunction{ 51 | Index: idx, 52 | Name: call.Function.Name, 53 | Arguments: args, 54 | }, 55 | }) 56 | } 57 | return m 58 | } 59 | 60 | func toProtoMessage(in api.Message) proto.Message { 61 | msg := proto.Message{ 62 | Role: in.Role, 63 | Content: in.Content, 64 | } 65 | for _, call := range in.ToolCalls { 66 | msg.ToolCalls = append(msg.ToolCalls, proto.ToolCall{ 67 | ID: strconv.Itoa(call.Function.Index), 68 | Function: proto.Function{ 69 | Arguments: []byte(call.Function.Arguments.String()), 70 | Name: call.Function.Name, 71 | }, 72 | }) 73 | } 74 | return msg 75 | } 76 | -------------------------------------------------------------------------------- /flag.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "regexp" 5 | "strings" 6 | "time" 7 | 8 | "github.com/caarlos0/duration" 9 | ) 10 | 11 | func newFlagParseError(err error) flagParseError { 12 | var reason, flag string 13 | s := err.Error() 14 | switch { 15 | case strings.HasPrefix(s, "flag needs an argument:"): 16 | reason = "Flag %s needs an argument." 17 | ps := strings.Split(s, "-") 18 | switch len(ps) { 19 | case 2: //nolint:mnd 20 | flag = "-" + ps[len(ps)-1] 21 | case 3: //nolint:mnd 22 | flag = "--" + ps[len(ps)-1] 23 | } 24 | case strings.HasPrefix(s, "unknown flag:"): 25 | reason = "Flag %s is missing." 26 | flag = strings.TrimPrefix(s, "unknown flag: ") 27 | case strings.HasPrefix(s, "unknown shorthand flag:"): 28 | reason = "Short flag %s is missing." 29 | re := regexp.MustCompile(`unknown shorthand flag: '.*' in (-\w)`) 30 | parts := re.FindStringSubmatch(s) 31 | if len(parts) > 1 { 32 | flag = parts[1] 33 | } 34 | case strings.HasPrefix(s, "invalid argument"): 35 | reason = "Flag %s have an invalid argument." 
36 | re := regexp.MustCompile(`invalid argument ".*" for "(.*)" flag: .*`) 37 | parts := re.FindStringSubmatch(s) 38 | if len(parts) > 1 { 39 | flag = parts[1] 40 | } 41 | default: 42 | reason = s 43 | } 44 | return flagParseError{ 45 | err: err, 46 | reason: reason, 47 | flag: flag, 48 | } 49 | } 50 | 51 | type flagParseError struct { 52 | err error 53 | reason string 54 | flag string 55 | } 56 | 57 | func (f flagParseError) Error() string { 58 | return f.err.Error() 59 | } 60 | 61 | func (f flagParseError) ReasonFormat() string { 62 | return f.reason 63 | } 64 | 65 | func (f flagParseError) Flag() string { 66 | return f.flag 67 | } 68 | 69 | func newDurationFlag(val time.Duration, p *time.Duration) *durationFlag { 70 | *p = val 71 | return (*durationFlag)(p) 72 | } 73 | 74 | type durationFlag time.Duration 75 | 76 | func (d *durationFlag) Set(s string) error { 77 | v, err := duration.Parse(s) 78 | *d = durationFlag(v) 79 | //nolint: wrapcheck 80 | return err 81 | } 82 | 83 | func (d *durationFlag) String() string { 84 | return time.Duration(*d).String() 85 | } 86 | 87 | func (*durationFlag) Type() string { 88 | return "duration" 89 | } 90 | -------------------------------------------------------------------------------- /mods_errors.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "net/http" 7 | 8 | tea "github.com/charmbracelet/bubbletea" 9 | "github.com/openai/openai-go" 10 | ) 11 | 12 | func (m *Mods) handleRequestError(err error, mod Model, content string) tea.Msg { 13 | ae := &openai.Error{} 14 | if errors.As(err, &ae) { 15 | return m.handleAPIError(ae, mod, content) 16 | } 17 | return modsError{err, fmt.Sprintf( 18 | "There was a problem with the %s API request.", 19 | mod.API, 20 | )} 21 | } 22 | 23 | func (m *Mods) handleAPIError(err *openai.Error, mod Model, content string) tea.Msg { 24 | cfg := m.Config 25 | switch err.StatusCode { 26 | case 
http.StatusNotFound: 27 | if mod.Fallback != "" { 28 | m.Config.Model = mod.Fallback 29 | return m.retry(content, modsError{ 30 | err: err, 31 | reason: fmt.Sprintf("%s API server error.", mod.API), 32 | }) 33 | } 34 | return modsError{err: err, reason: fmt.Sprintf( 35 | "Missing model '%s' for API '%s'.", 36 | cfg.Model, 37 | cfg.API, 38 | )} 39 | case http.StatusBadRequest: 40 | if err.Code == "context_length_exceeded" { 41 | pe := modsError{err: err, reason: "Maximum prompt size exceeded."} 42 | if cfg.NoLimit { 43 | return pe 44 | } 45 | 46 | return m.retry(cutPrompt(err.Message, content), pe) 47 | } 48 | // bad request (do not retry) 49 | return modsError{err: err, reason: fmt.Sprintf("%s API request error.", mod.API)} 50 | case http.StatusUnauthorized: 51 | // invalid auth or key (do not retry) 52 | return modsError{err: err, reason: fmt.Sprintf("Invalid %s API key.", mod.API)} 53 | case http.StatusTooManyRequests: 54 | // rate limiting or engine overload (wait and retry) 55 | return m.retry(content, modsError{ 56 | err: err, reason: fmt.Sprintf("You’ve hit your %s API rate limit.", mod.API), 57 | }) 58 | case http.StatusInternalServerError: 59 | if mod.API == "openai" { 60 | return m.retry(content, modsError{err: err, reason: "OpenAI API server error."}) 61 | } 62 | return modsError{err: err, reason: fmt.Sprintf( 63 | "Error loading model '%s' for API '%s'.", 64 | mod.Name, 65 | mod.API, 66 | )} 67 | default: 68 | return m.retry(content, modsError{err: err, reason: "Unknown API error."}) 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /main_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "strings" 5 | "testing" 6 | ) 7 | 8 | func TestIsCompletionCmd(t *testing.T) { 9 | for args, is := range map[string]bool{ 10 | "": false, 11 | "something": false, 12 | "something something": false, 13 | "completion for my bash script how to": 
false, 14 | "completion bash how to": false, 15 | "completion": false, 16 | "completion -h": true, 17 | "completion --help": true, 18 | "completion help": true, 19 | "completion bash": true, 20 | "completion fish": true, 21 | "completion zsh": true, 22 | "completion powershell": true, 23 | "completion bash -h": true, 24 | "completion fish -h": true, 25 | "completion zsh -h": true, 26 | "completion powershell -h": true, 27 | "completion bash --help": true, 28 | "completion fish --help": true, 29 | "completion zsh --help": true, 30 | "completion powershell --help": true, 31 | "__complete": true, 32 | "__complete blah blah blah": true, 33 | } { 34 | t.Run(args, func(t *testing.T) { 35 | vargs := append([]string{"mods"}, strings.Fields(args)...) 36 | if b := isCompletionCmd(vargs); b != is { 37 | t.Errorf("%v: expected %v, got %v", vargs, is, b) 38 | } 39 | }) 40 | } 41 | } 42 | 43 | func TestIsManCmd(t *testing.T) { 44 | for args, is := range map[string]bool{ 45 | "": false, 46 | "something": false, 47 | "something something": false, 48 | "man is no more": false, 49 | "mans": false, 50 | "man foo": false, 51 | "man": true, 52 | "man -h": true, 53 | "man --help": true, 54 | } { 55 | t.Run(args, func(t *testing.T) { 56 | vargs := append([]string{"mods"}, strings.Fields(args)...) 57 | if b := isManCmd(vargs); b != is { 58 | t.Errorf("%v: expected %v, got %v", vargs, is, b) 59 | } 60 | }) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /internal/cache/convo.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "bytes" 5 | "encoding/gob" 6 | "errors" 7 | "fmt" 8 | "io" 9 | 10 | "github.com/charmbracelet/mods/internal/proto" 11 | ) 12 | 13 | // Conversations is the conversation cache. 14 | type Conversations struct { 15 | cache *Cache[[]proto.Message] 16 | } 17 | 18 | // NewConversations creates a new conversation cache. 
19 | func NewConversations(dir string) (*Conversations, error) { 20 | cache, err := New[[]proto.Message](dir, ConversationCache) 21 | if err != nil { 22 | return nil, err 23 | } 24 | return &Conversations{ 25 | cache: cache, 26 | }, nil 27 | } 28 | 29 | func (c *Conversations) Read(id string, messages *[]proto.Message) error { 30 | return c.cache.Read(id, func(r io.Reader) error { 31 | return decode(r, messages) 32 | }) 33 | } 34 | 35 | func (c *Conversations) Write(id string, messages *[]proto.Message) error { 36 | return c.cache.Write(id, func(w io.Writer) error { 37 | return encode(w, messages) 38 | }) 39 | } 40 | 41 | // Delete a conversation. 42 | func (c *Conversations) Delete(id string) error { 43 | return c.cache.Delete(id) 44 | } 45 | 46 | func init() { 47 | gob.Register(errors.New("")) 48 | } 49 | 50 | func encode(w io.Writer, messages *[]proto.Message) error { 51 | if err := gob.NewEncoder(w).Encode(messages); err != nil { 52 | return fmt.Errorf("encode: %w", err) 53 | } 54 | return nil 55 | } 56 | 57 | // decode decodes the given reader using gob. 58 | // we use a teereader in case the user tries to read a message in the old 59 | // format (from before MCP), and if so convert between types to avoid encoding 60 | // errors. 61 | func decode(r io.Reader, messages *[]proto.Message) error { 62 | var tr bytes.Buffer 63 | if err1 := gob.NewDecoder(io.TeeReader(r, &tr)).Decode(messages); err1 != nil { 64 | var noCalls []noCallMessage 65 | if err2 := gob.NewDecoder(&tr).Decode(&noCalls); err2 != nil { 66 | return fmt.Errorf("decode: %w", err1) 67 | } 68 | for _, msg := range noCalls { 69 | *messages = append(*messages, proto.Message{ 70 | Role: msg.Role, 71 | Content: msg.Content, 72 | }) 73 | } 74 | } 75 | return nil 76 | } 77 | 78 | // noCallMessage compatibility with messages with no tool calls. 
79 | type noCallMessage struct { 80 | Content string 81 | Role string 82 | } 83 | -------------------------------------------------------------------------------- /styles.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "github.com/charmbracelet/lipgloss" 8 | ) 9 | 10 | type styles struct { 11 | AppName, 12 | CliArgs, 13 | Comment, 14 | CyclingChars, 15 | ErrorHeader, 16 | ErrorDetails, 17 | ErrPadding, 18 | Flag, 19 | FlagComma, 20 | FlagDesc, 21 | InlineCode, 22 | Link, 23 | Pipe, 24 | Quote, 25 | ConversationList, 26 | SHA1, 27 | Timeago lipgloss.Style 28 | } 29 | 30 | func makeStyles(r *lipgloss.Renderer) (s styles) { 31 | const horizontalEdgePadding = 2 32 | s.AppName = r.NewStyle().Bold(true) 33 | s.CliArgs = r.NewStyle().Foreground(lipgloss.Color("#585858")) 34 | s.Comment = r.NewStyle().Foreground(lipgloss.Color("#757575")) 35 | s.CyclingChars = r.NewStyle().Foreground(lipgloss.Color("#FF87D7")) 36 | s.ErrorHeader = r.NewStyle().Foreground(lipgloss.Color("#F1F1F1")).Background(lipgloss.Color("#FF5F87")).Bold(true).Padding(0, 1).SetString("ERROR") 37 | s.ErrorDetails = s.Comment 38 | s.ErrPadding = r.NewStyle().Padding(0, horizontalEdgePadding) 39 | s.Flag = r.NewStyle().Foreground(lipgloss.AdaptiveColor{Light: "#00B594", Dark: "#3EEFCF"}).Bold(true) 40 | s.FlagComma = r.NewStyle().Foreground(lipgloss.AdaptiveColor{Light: "#5DD6C0", Dark: "#427C72"}).SetString(",") 41 | s.FlagDesc = s.Comment 42 | s.InlineCode = r.NewStyle().Foreground(lipgloss.Color("#FF5F87")).Background(lipgloss.Color("#3A3A3A")).Padding(0, 1) 43 | s.Link = r.NewStyle().Foreground(lipgloss.Color("#00AF87")).Underline(true) 44 | s.Quote = r.NewStyle().Foreground(lipgloss.AdaptiveColor{Light: "#FF71D0", Dark: "#FF78D2"}) 45 | s.Pipe = r.NewStyle().Foreground(lipgloss.AdaptiveColor{Light: "#8470FF", Dark: "#745CFF"}) 46 | s.ConversationList = r.NewStyle().Padding(0, 1) 47 | s.SHA1 = s.Flag 
48 | s.Timeago = r.NewStyle().Foreground(lipgloss.AdaptiveColor{Light: "#999", Dark: "#555"}) 49 | return s 50 | } 51 | 52 | // action messages 53 | 54 | const defaultAction = "WROTE" 55 | 56 | var outputHeader = lipgloss.NewStyle().Foreground(lipgloss.Color("#F1F1F1")).Background(lipgloss.Color("#6C50FF")).Bold(true).Padding(0, 1).MarginRight(1) 57 | 58 | func printConfirmation(action, content string) { 59 | if action == "" { 60 | action = defaultAction 61 | } 62 | outputHeader = outputHeader.SetString(strings.ToUpper(action)) 63 | fmt.Println(lipgloss.JoinHorizontal(lipgloss.Center, outputHeader.String(), content)) 64 | } 65 | -------------------------------------------------------------------------------- /internal/cache/cache.go: -------------------------------------------------------------------------------- 1 | // Package cache provides a simple in-file cache implementation. 2 | package cache 3 | 4 | import ( 5 | "errors" 6 | "fmt" 7 | "io" 8 | "os" 9 | "path/filepath" 10 | ) 11 | 12 | // Type represents the type of cache being used. 13 | type Type string 14 | 15 | // Cache types for different purposes. 16 | const ( 17 | ConversationCache Type = "conversations" 18 | TemporaryCache Type = "temp" 19 | ) 20 | 21 | const cacheExt = ".gob" 22 | 23 | var errInvalidID = errors.New("invalid id") 24 | 25 | // Cache is a generic cache implementation that stores data in files. 26 | type Cache[T any] struct { 27 | baseDir string 28 | cType Type 29 | } 30 | 31 | // New creates a new cache instance with the specified base directory and cache type. 
32 | func New[T any](baseDir string, cacheType Type) (*Cache[T], error) { 33 | dir := filepath.Join(baseDir, string(cacheType)) 34 | if err := os.MkdirAll(dir, os.ModePerm); err != nil { //nolint:gosec 35 | return nil, fmt.Errorf("create cache directory: %w", err) 36 | } 37 | return &Cache[T]{ 38 | baseDir: baseDir, 39 | cType: cacheType, 40 | }, nil 41 | } 42 | 43 | func (c *Cache[T]) dir() string { 44 | return filepath.Join(c.baseDir, string(c.cType)) 45 | } 46 | 47 | func (c *Cache[T]) Read(id string, readFn func(io.Reader) error) error { 48 | if id == "" { 49 | return fmt.Errorf("read: %w", errInvalidID) 50 | } 51 | file, err := os.Open(filepath.Join(c.dir(), id+cacheExt)) 52 | if err != nil { 53 | return fmt.Errorf("read: %w", err) 54 | } 55 | defer file.Close() //nolint:errcheck 56 | 57 | if err := readFn(file); err != nil { 58 | return fmt.Errorf("read: %w", err) 59 | } 60 | return nil 61 | } 62 | 63 | func (c *Cache[T]) Write(id string, writeFn func(io.Writer) error) error { 64 | if id == "" { 65 | return fmt.Errorf("write: %w", errInvalidID) 66 | } 67 | 68 | file, err := os.Create(filepath.Join(c.dir(), id+cacheExt)) 69 | if err != nil { 70 | return fmt.Errorf("write: %w", err) 71 | } 72 | defer file.Close() //nolint:errcheck 73 | 74 | if err := writeFn(file); err != nil { 75 | return fmt.Errorf("write: %w", err) 76 | } 77 | 78 | return nil 79 | } 80 | 81 | // Delete removes a cached item by its ID. 82 | func (c *Cache[T]) Delete(id string) error { 83 | if id == "" { 84 | return fmt.Errorf("delete: %w", errInvalidID) 85 | } 86 | if err := os.Remove(filepath.Join(c.dir(), id+cacheExt)); err != nil { 87 | return fmt.Errorf("delete: %w", err) 88 | } 89 | return nil 90 | } 91 | -------------------------------------------------------------------------------- /internal/proto/proto.go: -------------------------------------------------------------------------------- 1 | // Package proto shared protocol. 
2 | package proto 3 | 4 | import ( 5 | "errors" 6 | "fmt" 7 | "strings" 8 | 9 | "github.com/mark3labs/mcp-go/mcp" 10 | ) 11 | 12 | // Roles. 13 | const ( 14 | RoleSystem = "system" 15 | RoleUser = "user" 16 | RoleAssistant = "assistant" 17 | RoleTool = "tool" 18 | ) 19 | 20 | // Chunk is a streaming chunk of text. 21 | type Chunk struct { 22 | Content string 23 | } 24 | 25 | // ToolCallStatus is the status of a tool call. 26 | type ToolCallStatus struct { 27 | Name string 28 | Err error 29 | } 30 | 31 | func (c ToolCallStatus) String() string { 32 | var sb strings.Builder 33 | sb.WriteString(fmt.Sprintf("\n> Ran tool: `%s`\n", c.Name)) 34 | if c.Err != nil { 35 | sb.WriteString(">\n> *Failed*:\n> ```\n") 36 | for line := range strings.SplitSeq(c.Err.Error(), "\n") { 37 | sb.WriteString("> " + line) 38 | } 39 | sb.WriteString("\n> ```\n") 40 | } 41 | sb.WriteByte('\n') 42 | return sb.String() 43 | } 44 | 45 | // Message is a message in the conversation. 46 | type Message struct { 47 | Role string 48 | Content string 49 | ToolCalls []ToolCall 50 | } 51 | 52 | // ToolCall is a tool call in a message. 53 | type ToolCall struct { 54 | ID string 55 | Function Function 56 | IsError bool 57 | } 58 | 59 | // Function is the function signature of a tool call. 60 | type Function struct { 61 | Name string 62 | Arguments []byte 63 | } 64 | 65 | // Request is a chat request. 66 | type Request struct { 67 | Messages []Message 68 | API string 69 | Model string 70 | User string 71 | Tools map[string][]mcp.Tool 72 | Temperature *float64 73 | TopP *float64 74 | TopK *int64 75 | Stop []string 76 | MaxTokens *int64 77 | ResponseFormat *string 78 | ToolCaller func(name string, data []byte) (string, error) 79 | } 80 | 81 | // Conversation is a conversation. 
82 | type Conversation []Message 83 | 84 | func (cc Conversation) String() string { 85 | var sb strings.Builder 86 | for _, msg := range cc { 87 | if msg.Content == "" { 88 | continue 89 | } 90 | switch msg.Role { 91 | case RoleSystem: 92 | sb.WriteString("**System**: ") 93 | case RoleUser: 94 | sb.WriteString("**User**: ") 95 | case RoleTool: 96 | for _, tool := range msg.ToolCalls { 97 | s := ToolCallStatus{ 98 | Name: tool.Function.Name, 99 | } 100 | if tool.IsError { 101 | s.Err = errors.New(msg.Content) 102 | } 103 | sb.WriteString(s.String()) 104 | } 105 | continue 106 | case RoleAssistant: 107 | sb.WriteString("**Assistant**: ") 108 | } 109 | sb.WriteString(msg.Content) 110 | sb.WriteString("\n\n") 111 | } 112 | return sb.String() 113 | } 114 | -------------------------------------------------------------------------------- /internal/google/http.go: -------------------------------------------------------------------------------- 1 | package google 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "errors" 8 | "fmt" 9 | "io" 10 | "net/http" 11 | ) 12 | 13 | type httpHeader http.Header 14 | 15 | // ErrTooManyEmptyStreamMessages represents an error when a stream has sent too many empty messages. 16 | var ErrTooManyEmptyStreamMessages = errors.New("stream has sent too many empty messages") 17 | 18 | // Marshaller is an interface for marshalling values to bytes. 19 | type Marshaller interface { 20 | Marshal(value any) ([]byte, error) 21 | } 22 | 23 | // JSONMarshaller is a marshaller that marshals values to JSON. 24 | type JSONMarshaller struct{} 25 | 26 | // Marshal marshals a value to JSON. 27 | func (jm *JSONMarshaller) Marshal(value any) ([]byte, error) { 28 | result, err := json.Marshal(value) 29 | if err != nil { 30 | return result, fmt.Errorf("JSONMarshaller.Marshal: %w", err) 31 | } 32 | return result, nil 33 | } 34 | 35 | // HTTPRequestBuilder is an implementation of OllamaRequestBuilder that builds HTTP requests. 
36 | type HTTPRequestBuilder struct { 37 | marshaller Marshaller 38 | } 39 | 40 | // Build builds an HTTP request. 41 | func (b *HTTPRequestBuilder) Build( 42 | ctx context.Context, 43 | method string, 44 | url string, 45 | body any, 46 | header http.Header, 47 | ) (req *http.Request, err error) { 48 | var bodyReader io.Reader 49 | if body != nil { 50 | if v, ok := body.(io.Reader); ok { 51 | bodyReader = v 52 | } else { 53 | var reqBytes []byte 54 | reqBytes, err = b.marshaller.Marshal(body) 55 | if err != nil { 56 | return 57 | } 58 | bodyReader = bytes.NewBuffer(reqBytes) 59 | } 60 | } 61 | req, err = http.NewRequestWithContext(ctx, method, url, bodyReader) 62 | if err != nil { 63 | return 64 | } 65 | if header != nil { 66 | req.Header = header 67 | } 68 | return 69 | } 70 | 71 | type requestOptions struct { 72 | body MessageCompletionRequest 73 | header http.Header 74 | } 75 | 76 | type requestOption func(*requestOptions) 77 | 78 | func withBody(body MessageCompletionRequest) requestOption { 79 | return func(args *requestOptions) { 80 | args.body = body 81 | } 82 | } 83 | 84 | // ErrorAccumulator is an interface for accumulating errors. 85 | type ErrorAccumulator interface { 86 | Write(p []byte) error 87 | Bytes() []byte 88 | } 89 | 90 | // Unmarshaler is an interface for unmarshalling bytes. 91 | type Unmarshaler interface { 92 | Unmarshal(data []byte, v any) error 93 | } 94 | 95 | func isFailureStatusCode(resp *http.Response) bool { 96 | return resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusBadRequest 97 | } 98 | 99 | // JSONUnmarshaler is an unmarshaler that unmarshals JSON data. 100 | type JSONUnmarshaler struct{} 101 | 102 | // Unmarshal unmarshals JSON data. 
103 | func (jm *JSONUnmarshaler) Unmarshal(data []byte, v any) error { 104 | err := json.Unmarshal(data, v) 105 | if err != nil { 106 | return fmt.Errorf("JSONUnmarshaler.Unmarshal: %w", err) 107 | } 108 | return nil 109 | } 110 | -------------------------------------------------------------------------------- /features.md: -------------------------------------------------------------------------------- 1 | # Mods Features 2 | 3 | ## Regular usage 4 | 5 | By default: 6 | 7 | - all messages go to `STDERR` 8 | - all prompts are saved with the first line of the prompt as the title 9 | - glamour is used by default if `STDOUT` is a TTY 10 | 11 | ### Basic 12 | 13 | The most basic usage is: 14 | 15 | ```bash 16 | mods 'first 2 primes' 17 | ``` 18 | 19 | ### Pipe from 20 | 21 | You can also pipe to it, in which case `STDIN` will not be a TTY: 22 | 23 | ```bash 24 | echo 'as json' | mods 'first 2 primes' 25 | ``` 26 | 27 | In this case, `mods` should read `STDIN` and append it to the prompt. 28 | 29 | ### Pipe to 30 | 31 | You may also pipe the output to another program, in which case `STDOUT` will not 32 | be a TTY: 33 | 34 | ```bash 35 | echo 'as json' | mods 'first 2 primes' | jq . 36 | ``` 37 | 38 | In this case, the "Generating" animation will go to `STDERR`, but the response 39 | will be streamed to `STDOUT`. 
40 | 41 | ### Custom title 42 | 43 | You can set a custom title: 44 | 45 | ```bash 46 | mods --title='title' 'first 2 primes' 47 | ``` 48 | 49 | ### Continue latest 50 | 51 | You can continue the latest conversation and save it with a new title using 52 | `--continue=title`: 53 | 54 | ```bash 55 | mods 'first 2 primes' 56 | mods --continue='primes as json' 'format as json' 57 | ``` 58 | 59 | ### Untitled continue latest 60 | 61 | ```bash 62 | mods 'first 2 primes' 63 | mods --continue-last 'format as json' 64 | ``` 65 | 66 | ### Continue from specific conversation, save with a new title 67 | 68 | ```bash 69 | mods --title='naturals' 'first 5 natural numbers' 70 | mods --continue='naturals' --title='naturals.json' 'format as json' 71 | ``` 72 | 73 | ### Conversation branching 74 | 75 | You can use the `--continue` and `--title` to branch out conversations, for 76 | instance: 77 | 78 | ```bash 79 | mods --title='naturals' 'first 5 natural numbers' 80 | mods --continue='naturals' --title='naturals.json' 'format as json' 81 | mods --continue='naturals' --title='naturals.yaml' 'format as yaml' 82 | ``` 83 | 84 | With this you'll end up with 3 conversations: `naturals`, `naturals.json`, and 85 | `naturals.yaml`. 86 | 87 | ## List conversations 88 | 89 | You can list your previous conversations with: 90 | 91 | ```bash 92 | mods --list 93 | # or 94 | mods -l 95 | ``` 96 | 97 | ## Show a previous conversation 98 | 99 | You can also show a previous conversation by ID or title, e.g.: 100 | 101 | ```bash 102 | mods --show='naturals' 103 | mods -s='a2e2' 104 | ``` 105 | 106 | For titles, the match should be exact. 107 | For IDs, only the first 4 chars are needed. If it matches multiple 108 | conversations, you can add more chars until it matches a single one again. 
109 | 110 | ## Delete a conversation 111 | 112 | You can also delete conversations by title or ID, same as `--show`, different 113 | flag: 114 | 115 | ```bash 116 | mods --delete='naturals' --delete='a2e2' 117 | ``` 118 | 119 | Keep in mind that these operations are not reversible. 120 | You can repeat the delete flag to delete multiple conversations at once. 121 | -------------------------------------------------------------------------------- /internal/cache/expiring.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "os" 7 | "path/filepath" 8 | "strconv" 9 | "strings" 10 | "time" 11 | ) 12 | 13 | // ExpiringCache is a cache implementation that supports expiration of cached items. 14 | type ExpiringCache[T any] struct { 15 | cache *Cache[T] 16 | } 17 | 18 | // NewExpiring creates a new cache instance that supports item expiration. 19 | func NewExpiring[T any](path string) (*ExpiringCache[T], error) { 20 | cache, err := New[T](path, TemporaryCache) 21 | if err != nil { 22 | return nil, fmt.Errorf("create expiring cache: %w", err) 23 | } 24 | return &ExpiringCache[T]{cache: cache}, nil 25 | } 26 | 27 | func (c *ExpiringCache[T]) getCacheFilename(id string, expiresAt int64) string { 28 | return fmt.Sprintf("%s.%d", id, expiresAt) 29 | } 30 | 31 | func (c *ExpiringCache[T]) Read(id string, readFn func(io.Reader) error) error { 32 | pattern := fmt.Sprintf("%s.*", id) 33 | matches, err := filepath.Glob(filepath.Join(c.cache.dir(), pattern)) 34 | if err != nil { 35 | return fmt.Errorf("failed to read read expiring cache: %w", err) 36 | } 37 | 38 | if len(matches) == 0 { 39 | return fmt.Errorf("item not found") 40 | } 41 | 42 | filename := filepath.Base(matches[0]) 43 | parts := strings.Split(filename, ".") 44 | expectedFilenameParts := 2 // name and expiration timestamp 45 | 46 | if len(parts) != expectedFilenameParts { 47 | return fmt.Errorf("invalid cache filename") 48 | } 49 | 
50 | expiresAt, err := strconv.ParseInt(parts[1], 10, 64) 51 | if err != nil { 52 | return fmt.Errorf("invalid expiration timestamp") 53 | } 54 | 55 | if expiresAt < time.Now().Unix() { 56 | if err := os.Remove(matches[0]); err != nil { 57 | return fmt.Errorf("failed to remove expired cache file: %w", err) 58 | } 59 | return os.ErrNotExist 60 | } 61 | 62 | file, err := os.Open(matches[0]) 63 | if err != nil { 64 | return fmt.Errorf("failed to open expiring cache file: %w", err) 65 | } 66 | defer func() { 67 | if cerr := file.Close(); cerr != nil { 68 | err = cerr 69 | } 70 | }() 71 | 72 | return readFn(file) 73 | } 74 | 75 | func (c *ExpiringCache[T]) Write(id string, expiresAt int64, writeFn func(io.Writer) error) error { 76 | pattern := fmt.Sprintf("%s.*", id) 77 | oldFiles, _ := filepath.Glob(filepath.Join(c.cache.dir(), pattern)) 78 | for _, file := range oldFiles { 79 | if err := os.Remove(file); err != nil { 80 | return fmt.Errorf("failed to remove old cache file: %w", err) 81 | } 82 | } 83 | 84 | filename := c.getCacheFilename(id, expiresAt) 85 | file, err := os.Create(filepath.Join(c.cache.dir(), filename)) 86 | if err != nil { 87 | return fmt.Errorf("failed to create expiring cache file: %w", err) 88 | } 89 | defer func() { 90 | if cerr := file.Close(); cerr != nil { 91 | err = cerr 92 | } 93 | }() 94 | 95 | return writeFn(file) 96 | } 97 | 98 | // Delete removes an expired cached item by its ID. 
99 | func (c *ExpiringCache[T]) Delete(id string) error { 100 | pattern := fmt.Sprintf("%s.*", id) 101 | matches, err := filepath.Glob(filepath.Join(c.cache.dir(), pattern)) 102 | if err != nil { 103 | return fmt.Errorf("failed to delete expiring cache: %w", err) 104 | } 105 | 106 | for _, match := range matches { 107 | if err := os.Remove(match); err != nil { 108 | return fmt.Errorf("failed to delete expiring cache file: %w", err) 109 | } 110 | } 111 | 112 | return nil 113 | } 114 | -------------------------------------------------------------------------------- /internal/openai/format.go: -------------------------------------------------------------------------------- 1 | package openai 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/charmbracelet/mods/internal/proto" 7 | "github.com/mark3labs/mcp-go/mcp" 8 | "github.com/openai/openai-go" 9 | "github.com/openai/openai-go/shared/constant" 10 | ) 11 | 12 | func fromMCPTools(mcps map[string][]mcp.Tool) []openai.ChatCompletionToolParam { 13 | var tools []openai.ChatCompletionToolParam 14 | for name, serverTools := range mcps { 15 | for _, tool := range serverTools { 16 | params := map[string]any{ 17 | "type": "object", 18 | "properties": tool.InputSchema.Properties, 19 | } 20 | if len(tool.InputSchema.Required) > 0 { 21 | params["required"] = tool.InputSchema.Required 22 | } 23 | 24 | tools = append(tools, openai.ChatCompletionToolParam{ 25 | Type: constant.Function("function"), 26 | Function: openai.FunctionDefinitionParam{ 27 | Name: fmt.Sprintf("%s_%s", name, tool.Name), 28 | Description: openai.String(tool.Description), 29 | Parameters: params, 30 | }, 31 | }) 32 | } 33 | } 34 | return tools 35 | } 36 | 37 | func fromProtoMessages(input []proto.Message) []openai.ChatCompletionMessageParamUnion { 38 | var messages []openai.ChatCompletionMessageParamUnion 39 | for _, msg := range input { 40 | switch msg.Role { 41 | case proto.RoleSystem: 42 | messages = append(messages, openai.SystemMessage(msg.Content)) 43 | case 
proto.RoleTool: 44 | for _, call := range msg.ToolCalls { 45 | messages = append(messages, openai.ToolMessage(msg.Content, call.ID)) 46 | break 47 | } 48 | case proto.RoleUser: 49 | messages = append(messages, openai.UserMessage(msg.Content)) 50 | case proto.RoleAssistant: 51 | m := openai.AssistantMessage(msg.Content) 52 | for _, tool := range msg.ToolCalls { 53 | m.OfAssistant.ToolCalls = append(m.OfAssistant.ToolCalls, openai.ChatCompletionMessageToolCallParam{ 54 | ID: tool.ID, 55 | Function: openai.ChatCompletionMessageToolCallFunctionParam{ 56 | Arguments: string(tool.Function.Arguments), 57 | Name: tool.Function.Name, 58 | }, 59 | }) 60 | } 61 | messages = append(messages, m) 62 | } 63 | } 64 | return messages 65 | } 66 | 67 | func toProtoMessage(in openai.ChatCompletionMessageParamUnion) proto.Message { 68 | msg := proto.Message{ 69 | Role: msgRole(in), 70 | } 71 | switch content := in.GetContent().AsAny().(type) { 72 | case *string: 73 | if content == nil || *content == "" { 74 | break 75 | } 76 | msg.Content = *content 77 | case *[]openai.ChatCompletionContentPartTextParam: 78 | if content == nil || len(*content) == 0 { 79 | break 80 | } 81 | for _, c := range *content { 82 | msg.Content += c.Text 83 | } 84 | } 85 | if msg.Role == proto.RoleAssistant { 86 | for _, call := range in.OfAssistant.ToolCalls { 87 | msg.ToolCalls = append(msg.ToolCalls, proto.ToolCall{ 88 | ID: call.ID, 89 | Function: proto.Function{ 90 | Name: call.Function.Name, 91 | Arguments: []byte(call.Function.Arguments), 92 | }, 93 | }) 94 | } 95 | } 96 | return msg 97 | } 98 | 99 | func msgRole(in openai.ChatCompletionMessageParamUnion) string { 100 | if in.OfSystem != nil { 101 | return proto.RoleSystem 102 | } 103 | if in.OfAssistant != nil { 104 | return proto.RoleAssistant 105 | } 106 | if in.OfUser != nil { 107 | return proto.RoleUser 108 | } 109 | if in.OfTool != nil { 110 | return proto.RoleTool 111 | } 112 | return "" 113 | } 114 | 
-------------------------------------------------------------------------------- /internal/anthropic/format.go: -------------------------------------------------------------------------------- 1 | package anthropic 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | 7 | "github.com/anthropics/anthropic-sdk-go" 8 | "github.com/charmbracelet/mods/internal/proto" 9 | "github.com/mark3labs/mcp-go/mcp" 10 | ) 11 | 12 | func fromMCPTools(mcps map[string][]mcp.Tool) []anthropic.ToolUnionParam { 13 | var tools []anthropic.ToolUnionParam 14 | for name, serverTools := range mcps { 15 | for _, tool := range serverTools { 16 | tools = append(tools, anthropic.ToolUnionParam{ 17 | OfTool: &anthropic.ToolParam{ 18 | InputSchema: anthropic.ToolInputSchemaParam{ 19 | Properties: tool.InputSchema.Properties, 20 | }, 21 | Name: fmt.Sprintf("%s_%s", name, tool.Name), 22 | Description: anthropic.String(tool.Description), 23 | }, 24 | }) 25 | } 26 | } 27 | return tools 28 | } 29 | 30 | func fromProtoMessages(input []proto.Message) (system []anthropic.TextBlockParam, messages []anthropic.MessageParam) { 31 | for _, msg := range input { 32 | switch msg.Role { 33 | case proto.RoleSystem: 34 | // system is not a role in anthropic, must set it as the system part of the request. 35 | system = append(system, *anthropic.NewTextBlock(msg.Content).OfText) 36 | case proto.RoleTool: 37 | for _, call := range msg.ToolCalls { 38 | block := newToolResultBlock(call.ID, msg.Content, call.IsError) 39 | // tool is not a role in anthropic, must be a user message. 
40 | messages = append(messages, anthropic.NewUserMessage(block)) 41 | break 42 | } 43 | case proto.RoleUser: 44 | block := anthropic.NewTextBlock(msg.Content) 45 | messages = append(messages, anthropic.NewUserMessage(block)) 46 | case proto.RoleAssistant: 47 | blocks := []anthropic.ContentBlockParamUnion{ 48 | anthropic.NewTextBlock(msg.Content), 49 | } 50 | for _, tool := range msg.ToolCalls { 51 | block := anthropic.ContentBlockParamUnion{ 52 | OfToolUse: &anthropic.ToolUseBlockParam{ 53 | ID: tool.ID, 54 | Name: tool.Function.Name, 55 | Input: json.RawMessage(tool.Function.Arguments), 56 | }, 57 | } 58 | blocks = append(blocks, block) 59 | } 60 | messages = append(messages, anthropic.NewAssistantMessage(blocks...)) 61 | } 62 | } 63 | return system, messages 64 | } 65 | 66 | func toProtoMessage(in anthropic.MessageParam) proto.Message { 67 | msg := proto.Message{ 68 | Role: string(in.Role), 69 | } 70 | 71 | for _, block := range in.Content { 72 | if txt := block.OfText; txt != nil { 73 | msg.Content += txt.Text 74 | } 75 | 76 | if call := block.OfToolResult; call != nil { 77 | msg.ToolCalls = append(msg.ToolCalls, proto.ToolCall{ 78 | ID: call.ToolUseID, 79 | IsError: call.IsError.Value, 80 | }) 81 | } 82 | 83 | if call := block.OfToolUse; call != nil { 84 | msg.ToolCalls = append(msg.ToolCalls, proto.ToolCall{ 85 | ID: call.ID, 86 | Function: proto.Function{ 87 | Name: call.Name, 88 | Arguments: call.Input.(json.RawMessage), 89 | }, 90 | }) 91 | } 92 | } 93 | 94 | return msg 95 | } 96 | 97 | // anthropic v1.5 removed this method, copied it back here so we don't need to 98 | // refactor too much. 
99 | func newToolResultBlock(toolUseID string, content string, isError bool) anthropic.ContentBlockParamUnion { 100 | toolBlock := anthropic.ToolResultBlockParam{ 101 | ToolUseID: toolUseID, 102 | Content: []anthropic.ToolResultBlockParamContentUnion{ 103 | {OfText: &anthropic.TextBlockParam{Text: content}}, 104 | }, 105 | IsError: anthropic.Bool(isError), 106 | } 107 | return anthropic.ContentBlockParamUnion{OfToolResult: &toolBlock} 108 | } 109 | -------------------------------------------------------------------------------- /examples.md: -------------------------------------------------------------------------------- 1 | # Mods Examples 2 | 3 | ### Improve Your Code 4 | 5 | Piping source code to Mods and giving it an instruction on what to do with it 6 | gives you a lot of options for refactoring, enhancing or debugging code. 7 | 8 | `mods -f "what are your thoughts on improving this code?" < main.go | glow` 9 | 10 |

136 |
137 |
267 |
268 |
269 | Charm热爱开源 • Charm loves open source
270 |
--------------------------------------------------------------------------------
/internal/google/google.go:
--------------------------------------------------------------------------------
1 | // Package google implements [stream.Stream] for Google.
2 | package google
3 |
4 | import (
5 | "bufio"
6 | "bytes"
7 | "context"
8 | "encoding/json"
9 | "errors"
10 | "fmt"
11 | "io"
12 | "net/http"
13 |
14 | "github.com/charmbracelet/mods/internal/proto"
15 | "github.com/charmbracelet/mods/internal/stream"
16 | "github.com/openai/openai-go"
17 | )
18 |
// Compile-time assertion that Client satisfies the stream.Client interface.
var _ stream.Client = &Client{}

// emptyMessagesLimit bounds how many consecutive non-data SSE lines are
// tolerated in Current before the stream is considered broken.
const emptyMessagesLimit uint = 300

var (
	googleHeaderData = []byte("data: ")       // SSE data-line prefix
	errorPrefix      = []byte(`event: error`) // SSE error-event marker
)
27 |
// Config represents the configuration for the Google API client.
type Config struct {
	BaseURL        string       // full streamGenerateContent URL, including model and API key
	HTTPClient     *http.Client // client used for all requests
	ThinkingBudget int          // token budget for "thinking"; 0 omits the thinkingConfig block
}
34 |
35 | // DefaultConfig returns the default configuration for the Google API client.
36 | func DefaultConfig(model, authToken string) Config {
37 | return Config{
38 | BaseURL: fmt.Sprintf("https://generativelanguage.googleapis.com/v1beta/models/%s:streamGenerateContent?alt=sse&key=%s", model, authToken),
39 | HTTPClient: &http.Client{},
40 | }
41 | }
42 |
// Part is a datatype containing media that is part of a multi-part Content message.
type Part struct {
	Text string `json:"text,omitempty"`
}

// Content is the base structured datatype containing multi-part content of a message.
type Content struct {
	Parts []Part `json:"parts,omitempty"`
	Role  string `json:"role,omitempty"` // author of this content; presumably mapped from proto roles in format.go — confirm
}

// ThinkingConfig - for more details see https://ai.google.dev/gemini-api/docs/thinking#rest .
type ThinkingConfig struct {
	ThinkingBudget int `json:"thinkingBudget,omitempty"`
}

// GenerationConfig are the options for model generation and outputs. Not all parameters are configurable for every model.
type GenerationConfig struct {
	StopSequences    []string        `json:"stopSequences,omitempty"`
	ResponseMimeType string          `json:"responseMimeType,omitempty"`
	CandidateCount   uint            `json:"candidateCount,omitempty"`
	MaxOutputTokens  uint            `json:"maxOutputTokens,omitempty"`
	Temperature      float64         `json:"temperature,omitempty"`
	TopP             float64         `json:"topP,omitempty"`
	TopK             int64           `json:"topK,omitempty"`
	ThinkingConfig   *ThinkingConfig `json:"thinkingConfig,omitempty"`
}

// MessageCompletionRequest represents the valid parameters and value options for the request.
type MessageCompletionRequest struct {
	Contents         []Content        `json:"contents,omitempty"`
	GenerationConfig GenerationConfig `json:"generationConfig,omitempty"`
}
76 |
// RequestBuilder is an interface for building HTTP requests for the Google
// API; presumably abstracted so request construction can be replaced in
// tests — confirm.
type RequestBuilder interface {
	Build(ctx context.Context, method, url string, body any, header http.Header) (*http.Request, error)
}
81 |
82 | // NewRequestBuilder creates a new HTTPRequestBuilder.
83 | func NewRequestBuilder() *HTTPRequestBuilder {
84 | return &HTTPRequestBuilder{
85 | marshaller: &JSONMarshaller{},
86 | }
87 | }
88 |
// Client is a client for the Google API.
type Client struct {
	config Config // endpoint, HTTP client, and thinking budget

	requestBuilder RequestBuilder // builds outgoing HTTP requests
}
95 |
// Request implements stream.Client. It translates the proto request into a
// Google streamGenerateContent request, forwarding sampling options only
// when the caller set them, and returns a Stream over the SSE response.
// Errors are never returned directly: they are recorded on the Stream and
// surfaced via its Err method.
func (c *Client) Request(ctx context.Context, request proto.Request) stream.Stream {
	stream := new(Stream)
	body := MessageCompletionRequest{
		Contents: fromProtoMessages(request.Messages),
		GenerationConfig: GenerationConfig{
			ResponseMimeType: "",
			CandidateCount:   1,
			StopSequences:    request.Stop,
			MaxOutputTokens:  4096, // default cap; overridden below when MaxTokens is set
		},
	}

	// Optional sampling parameters: nil means "not set", so the zero value
	// is never sent by accident.
	if request.Temperature != nil {
		body.GenerationConfig.Temperature = *request.Temperature
	}
	if request.TopP != nil {
		body.GenerationConfig.TopP = *request.TopP
	}
	if request.TopK != nil {
		body.GenerationConfig.TopK = *request.TopK
	}

	if request.MaxTokens != nil {
		body.GenerationConfig.MaxOutputTokens = uint(*request.MaxTokens) //nolint:gosec
	}

	// See https://ai.google.dev/gemini-api/docs/thinking#rest .
	if c.config.ThinkingBudget != 0 {
		body.GenerationConfig.ThinkingConfig = &ThinkingConfig{
			ThinkingBudget: c.config.ThinkingBudget,
		}
	}

	req, err := c.newRequest(ctx, http.MethodPost, c.config.BaseURL, withBody(body))
	if err != nil {
		stream.err = err
		return stream
	}

	// googleSendRequestStream always returns a non-nil *Stream, so recording
	// the error on it is safe.
	stream, err = googleSendRequestStream(c, req)
	if err != nil {
		stream.err = err
	}
	return stream
}
141 |
142 | // New creates a new Client with the given configuration.
143 | func New(config Config) *Client {
144 | return &Client{
145 | config: config,
146 | requestBuilder: NewRequestBuilder(),
147 | }
148 | }
149 |
150 | func (c *Client) newRequest(ctx context.Context, method, url string, setters ...requestOption) (*http.Request, error) {
151 | // Default Options
152 | args := &requestOptions{
153 | body: MessageCompletionRequest{},
154 | header: make(http.Header),
155 | }
156 | for _, setter := range setters {
157 | setter(args)
158 | }
159 | req, err := c.requestBuilder.Build(ctx, method, url, args.body, args.header)
160 | if err != nil {
161 | return new(http.Request), err
162 | }
163 | return req, nil
164 | }
165 |
166 | func (c *Client) handleErrorResp(resp *http.Response) error {
167 | // Print the response text
168 | var errRes openai.Error
169 | if err := json.NewDecoder(resp.Body).Decode(&errRes); err != nil {
170 | return &openai.Error{
171 | StatusCode: resp.StatusCode,
172 | Message: err.Error(),
173 | }
174 | }
175 | errRes.StatusCode = resp.StatusCode
176 | return &errRes
177 | }
178 |
// Candidate represents a response candidate generated from the model.
type Candidate struct {
	Content      Content `json:"content,omitempty"`
	FinishReason string  `json:"finishReason,omitempty"`
	TokenCount   uint    `json:"tokenCount,omitempty"`
	Index        uint    `json:"index,omitempty"`
}

// CompletionMessageResponse represents a response to a Google completion message.
type CompletionMessageResponse struct {
	Candidates []Candidate `json:"candidates,omitempty"`
}
191 |
// Stream struct represents a stream of messages from the Google API.
type Stream struct {
	isFinished bool // set once EOF is reached on the response body

	reader      *bufio.Reader  // buffered SSE reader over response.Body
	response    *http.Response // underlying HTTP response; body released via Close
	err         error          // sticky error reported by Err
	unmarshaler Unmarshaler    // decodes SSE data payloads

	httpHeader
}
203 |
204 | // CallTools implements stream.Stream.
205 | func (s *Stream) CallTools() []proto.ToolCallStatus {
206 | // No tool calls in Gemini/Google API yet.
207 | return nil
208 | }
209 |
210 | // Err implements stream.Stream.
211 | func (s *Stream) Err() error { return s.err }
212 |
213 | // Messages implements stream.Stream.
214 | func (s *Stream) Messages() []proto.Message {
215 | // Gemini does not support returning streamed messages after the fact.
216 | return nil
217 | }
218 |
219 | // Next implements stream.Stream.
220 | func (s *Stream) Next() bool {
221 | return !s.isFinished
222 | }
223 |
224 | // Close closes the stream.
225 | func (s *Stream) Close() error {
226 | return s.response.Body.Close() //nolint:wrapcheck
227 | }
228 |
// Current implements stream.Stream. It consumes SSE lines from the response
// until it can return a single content chunk; at EOF it marks the stream
// finished and returns stream.ErrNoContent as the end-of-stream signal.
//
//nolint:gocognit
func (s *Stream) Current() (proto.Chunk, error) {
	var (
		emptyMessagesCount uint // consecutive lines carrying no data payload
		hasError           bool // saw `event: error`; the next data line holds the message
	)

	for {
		rawLine, readErr := s.reader.ReadBytes('\n')
		if readErr != nil {
			if errors.Is(readErr, io.EOF) {
				s.isFinished = true
				return proto.Chunk{}, stream.ErrNoContent // signals end of stream, not a real error
			}
			return proto.Chunk{}, fmt.Errorf("googleStreamReader.processLines: %w", readErr)
		}

		noSpaceLine := bytes.TrimSpace(rawLine)

		if bytes.HasPrefix(noSpaceLine, errorPrefix) {
			hasError = true
			// NOTE: Continue to the next event to get the error data.
			continue
		}

		if !bytes.HasPrefix(noSpaceLine, googleHeaderData) || hasError {
			if hasError {
				// The data line following the error event carries the body.
				noSpaceLine = bytes.TrimPrefix(noSpaceLine, googleHeaderData)
				return proto.Chunk{}, fmt.Errorf("googleStreamReader.processLines: %s", noSpaceLine)
			}
			// Blank/heartbeat line: tolerate up to emptyMessagesLimit of them.
			emptyMessagesCount++
			if emptyMessagesCount > emptyMessagesLimit {
				return proto.Chunk{}, ErrTooManyEmptyStreamMessages
			}
			continue
		}

		noPrefixLine := bytes.TrimPrefix(noSpaceLine, googleHeaderData)

		var chunk CompletionMessageResponse
		unmarshalErr := s.unmarshaler.Unmarshal(noPrefixLine, &chunk)
		if unmarshalErr != nil {
			return proto.Chunk{}, fmt.Errorf("googleStreamReader.processLines: %w", unmarshalErr)
		}
		if len(chunk.Candidates) == 0 {
			return proto.Chunk{}, stream.ErrNoContent
		}
		parts := chunk.Candidates[0].Content.Parts
		if len(parts) == 0 {
			return proto.Chunk{}, stream.ErrNoContent
		}

		// Only the first candidate's first part is surfaced per chunk.
		return proto.Chunk{
			Content: chunk.Candidates[0].Content.Parts[0].Text,
		}, nil
	}
}
288 |
289 | func googleSendRequestStream(client *Client, req *http.Request) (*Stream, error) {
290 | req.Header.Set("content-type", "application/json")
291 |
292 | resp, err := client.config.HTTPClient.Do(req) //nolint:bodyclose // body is closed in stream.Close()
293 | if err != nil {
294 | return new(Stream), err
295 | }
296 | if isFailureStatusCode(resp) {
297 | return new(Stream), client.handleErrorResp(resp)
298 | }
299 | return &Stream{
300 | reader: bufio.NewReader(resp.Body),
301 | response: resp,
302 | unmarshaler: &JSONUnmarshaler{},
303 | httpHeader: httpHeader(resp.Header),
304 | }, nil
305 | }
306 |
--------------------------------------------------------------------------------
/config.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "os"
7 | "path/filepath"
8 | "text/template"
9 | "time"
10 |
11 | _ "embed"
12 |
13 | "github.com/adrg/xdg"
14 | "github.com/caarlos0/duration"
15 | "github.com/caarlos0/env/v9"
16 | "github.com/charmbracelet/x/exp/strings"
17 | "github.com/muesli/termenv"
18 | "github.com/spf13/cobra"
19 | flag "github.com/spf13/pflag"
20 | "gopkg.in/yaml.v3"
21 | )
22 |
// configTemplate holds the default settings template rendered on first run.
//go:embed config_template.yml
var configTemplate string
25 |
// Default instructions appended to the prompt when formatting is requested.
const (
	defaultMarkdownFormatText = "Format the response as markdown without enclosing backticks."
	defaultJSONFormatText     = "Format the response as json without enclosing backticks."
)
30 |
31 | var help = map[string]string{
32 | "api": "OpenAI compatible REST API (openai, localai, anthropic, ...)",
33 | "apis": "Aliases and endpoints for OpenAI compatible REST API",
34 | "http-proxy": "HTTP proxy to use for API requests",
35 | "model": "Default model (gpt-3.5-turbo, gpt-4, ggml-gpt4all-j...)",
36 | "ask-model": "Ask which model to use via interactive prompt",
37 | "max-input-chars": "Default character limit on input to model",
38 | "format": "Ask for the response to be formatted as markdown unless otherwise set",
39 | "format-text": "Text to append when using the -f flag",
40 | "role": "System role to use",
41 | "roles": "List of predefined system messages that can be used as roles",
42 | "list-roles": "List the roles defined in your configuration file",
43 | "prompt": "Include the prompt from the arguments and stdin, truncate stdin to specified number of lines",
44 | "prompt-args": "Include the prompt from the arguments in the response",
45 | "raw": "Render output as raw text when connected to a TTY",
46 | "quiet": "Quiet mode (hide the spinner while loading and stderr messages for success)",
47 | "help": "Show help and exit",
48 | "version": "Show version and exit",
49 | "max-retries": "Maximum number of times to retry API calls",
50 | "no-limit": "Turn off the client-side limit on the size of the input into the model",
51 | "word-wrap": "Wrap formatted output at specific width (default is 80)",
52 | "max-tokens": "Maximum number of tokens in response",
53 | "temp": "Temperature (randomness) of results, from 0.0 to 2.0, -1.0 to disable",
54 | "stop": "Up to 4 sequences where the API will stop generating further tokens",
55 | "topp": "TopP, an alternative to temperature that narrows response, from 0.0 to 1.0, -1.0 to disable",
56 | "topk": "TopK, only sample from the top K options for each subsequent token, -1 to disable",
57 | "fanciness": "Your desired level of fanciness",
58 | "status-text": "Text to show while generating",
59 | "settings": "Open settings in your $EDITOR",
60 | "dirs": "Print the directories in which mods store its data",
61 | "reset-settings": "Backup your old settings file and reset everything to the defaults",
62 | "continue": "Continue from the last response or a given save title",
63 | "continue-last": "Continue from the last response",
64 | "no-cache": "Disables caching of the prompt/response",
65 | "title": "Saves the current conversation with the given title",
66 | "list": "Lists saved conversations",
67 | "delete": "Deletes one or more saved conversations with the given titles or IDs",
68 | "delete-older-than": "Deletes all saved conversations older than the specified duration; valid values are " + strings.EnglishJoin(duration.ValidUnits(), true),
69 | "show": "Show a saved conversation with the given title or ID",
70 | "theme": "Theme to use in the forms; valid choices are charm, catppuccin, dracula, and base16",
71 | "show-last": "Show the last saved conversation",
72 | "editor": "Edit the prompt in your $EDITOR; only taken into account if no other args and if STDIN is a TTY",
73 | "mcp-servers": "MCP Servers configurations",
74 | "mcp-disable": "Disable specific MCP servers",
75 | "mcp-list": "List all available MCP servers",
76 | "mcp-list-tools": "List all available tools from enabled MCP servers",
77 | "mcp-timeout": "Timeout for MCP server calls, defaults to 15 seconds",
78 | }
79 |
// Model represents the LLM model used in the API call.
type Model struct {
	Name string // presumably set from the YAML map key, like API.Name — confirm against load.go
	API  string // name of the API this model belongs to
	MaxChars int64 `yaml:"max-input-chars"` // client-side character limit on input
	Aliases []string `yaml:"aliases"` // alternate names accepted for the model flag
	Fallback string `yaml:"fallback"` // model to fall back to
	ThinkingBudget int `yaml:"thinking-budget,omitempty"` // thinking token budget (Google API)
	// NOTE(review): config_template.yml sets per-model `max-completion-tokens`
	// values which have no corresponding field here and are silently ignored
	// by the YAML decoder — confirm whether a field should be added.
}
89 |
// API represents an API endpoint and its models.
type API struct {
	Name string // set from the YAML map key by APIs.UnmarshalYAML
	APIKey string `yaml:"api-key"` // literal API key
	APIKeyEnv string `yaml:"api-key-env"` // environment variable holding the key
	APIKeyCmd string `yaml:"api-key-cmd"` // command executed to obtain the key
	Version string `yaml:"version"` // XXX: not used anywhere
	BaseURL string `yaml:"base-url"` // endpoint base URL
	Models map[string]Model `yaml:"models"` // models available on this API, keyed by name
	User string `yaml:"user"`
}
101 |
102 | // APIs is a type alias to allow custom YAML decoding.
103 | type APIs []API
104 |
105 | // UnmarshalYAML implements sorted API YAML decoding.
106 | func (apis *APIs) UnmarshalYAML(node *yaml.Node) error {
107 | for i := 0; i < len(node.Content); i += 2 {
108 | var api API
109 | if err := node.Content[i+1].Decode(&api); err != nil {
110 | return fmt.Errorf("error decoding YAML file: %s", err)
111 | }
112 | api.Name = node.Content[i].Value
113 | *apis = append(*apis, api)
114 | }
115 | return nil
116 | }
117 |
// FormatText is a map[format]formatting_text.
type FormatText map[string]string

// UnmarshalYAML conforms with yaml.Unmarshaler. The setting accepts either a
// single string — the legacy form, applied to markdown output only — or a
// map of format name to formatting text.
func (ft *FormatText) UnmarshalYAML(unmarshal func(any) error) error {
	var single string
	if err := unmarshal(&single); err == nil {
		// Legacy scalar form.
		*ft = FormatText{"markdown": single}
		return nil
	}

	var byFormat map[string]string
	if err := unmarshal(&byFormat); err != nil {
		return err
	}
	*ft = FormatText(byFormat)
	return nil
}
138 |
// Config holds the main configuration and is mapped to the YAML settings file.
// Fields without yaml/env tags are populated from command-line flags only.
type Config struct {
	API string `yaml:"default-api" env:"API"`
	Model string `yaml:"default-model" env:"MODEL"`
	Format bool `yaml:"format" env:"FORMAT"`
	FormatText FormatText `yaml:"format-text"`
	FormatAs string `yaml:"format-as" env:"FORMAT_AS"`
	Raw bool `yaml:"raw" env:"RAW"`
	Quiet bool `yaml:"quiet" env:"QUIET"`
	MaxTokens int64 `yaml:"max-tokens" env:"MAX_TOKENS"`
	MaxCompletionTokens int64 `yaml:"max-completion-tokens" env:"MAX_COMPLETION_TOKENS"`
	MaxInputChars int64 `yaml:"max-input-chars" env:"MAX_INPUT_CHARS"`
	Temperature float64 `yaml:"temp" env:"TEMP"`
	Stop []string `yaml:"stop" env:"STOP"`
	TopP float64 `yaml:"topp" env:"TOPP"`
	TopK int64 `yaml:"topk" env:"TOPK"`
	NoLimit bool `yaml:"no-limit" env:"NO_LIMIT"`
	CachePath string `yaml:"cache-path" env:"CACHE_PATH"`
	NoCache bool `yaml:"no-cache" env:"NO_CACHE"`
	IncludePromptArgs bool `yaml:"include-prompt-args" env:"INCLUDE_PROMPT_ARGS"`
	IncludePrompt int `yaml:"include-prompt" env:"INCLUDE_PROMPT"`
	MaxRetries int `yaml:"max-retries" env:"MAX_RETRIES"`
	WordWrap int `yaml:"word-wrap" env:"WORD_WRAP"`
	Fanciness uint `yaml:"fanciness" env:"FANCINESS"`
	StatusText string `yaml:"status-text" env:"STATUS_TEXT"`
	HTTPProxy string `yaml:"http-proxy" env:"HTTP_PROXY"`
	APIs APIs `yaml:"apis"`
	System string `yaml:"system"`
	Role string `yaml:"role" env:"ROLE"`
	AskModel bool
	Roles map[string][]string
	ShowHelp bool
	ResetSettings bool
	Prefix string
	Version bool
	Settings bool
	Dirs bool
	Theme string
	SettingsPath string
	ContinueLast bool
	Continue string
	Title string
	ShowLast bool
	Show string
	List bool
	ListRoles bool
	Delete []string
	DeleteOlderThan time.Duration
	User string

	// MCP (Model Context Protocol) settings.
	MCPServers map[string]MCPServerConfig `yaml:"mcp-servers"`
	MCPList bool
	MCPListTools bool
	MCPDisable []string
	MCPTimeout time.Duration `yaml:"mcp-timeout" env:"MCP_TIMEOUT"`

	// Internal state, not exposed in the settings file.
	openEditor bool
	cacheReadFromID, cacheWriteToID, cacheWriteToTitle string
}
198 |
// MCPServerConfig holds configuration for an MCP server.
type MCPServerConfig struct {
	Type string `yaml:"type"` // transport type; semantics handled in mcp.go — confirm valid values there
	Command string `yaml:"command"` // executable to launch for command-based servers
	Env []string `yaml:"env"` // extra environment variables, KEY=VALUE form
	Args []string `yaml:"args"` // arguments passed to Command
	URL string `yaml:"url"` // endpoint for URL-based servers
}
207 |
208 | func ensureConfig() (Config, error) {
209 | var c Config
210 | sp, err := xdg.ConfigFile(filepath.Join("mods", "mods.yml"))
211 | if err != nil {
212 | return c, modsError{err, "Could not find settings path."}
213 | }
214 | c.SettingsPath = sp
215 |
216 | dir := filepath.Dir(sp)
217 | if dirErr := os.MkdirAll(dir, 0o700); dirErr != nil { //nolint:mnd
218 | return c, modsError{dirErr, "Could not create cache directory."}
219 | }
220 |
221 | if dirErr := writeConfigFile(sp); dirErr != nil {
222 | return c, dirErr
223 | }
224 | content, err := os.ReadFile(sp)
225 | if err != nil {
226 | return c, modsError{err, "Could not read settings file."}
227 | }
228 | if err := yaml.Unmarshal(content, &c); err != nil {
229 | return c, modsError{err, "Could not parse settings file."}
230 | }
231 |
232 | if err := env.ParseWithOptions(&c, env.Options{Prefix: "MODS_"}); err != nil {
233 | return c, modsError{err, "Could not parse environment into settings file."}
234 | }
235 |
236 | if c.CachePath == "" {
237 | c.CachePath = filepath.Join(xdg.DataHome, "mods")
238 | }
239 |
240 | if err := os.MkdirAll(
241 | filepath.Join(c.CachePath, "conversations"),
242 | 0o700,
243 | ); err != nil { //nolint:mnd
244 | return c, modsError{err, "Could not create cache directory."}
245 | }
246 |
247 | if c.WordWrap == 0 {
248 | c.WordWrap = 80
249 | }
250 |
251 | return c, nil
252 | }
253 |
254 | func writeConfigFile(path string) error {
255 | if _, err := os.Stat(path); errors.Is(err, os.ErrNotExist) {
256 | return createConfigFile(path)
257 | } else if err != nil {
258 | return modsError{err, "Could not stat path."}
259 | }
260 | return nil
261 | }
262 |
263 | func createConfigFile(path string) error {
264 | tmpl := template.Must(template.New("config").Parse(configTemplate))
265 |
266 | f, err := os.Create(path)
267 | if err != nil {
268 | return modsError{err, "Could not create configuration file."}
269 | }
270 | defer func() { _ = f.Close() }()
271 |
272 | m := struct {
273 | Config Config
274 | Help map[string]string
275 | }{
276 | Config: defaultConfig(),
277 | Help: help,
278 | }
279 | if err := tmpl.Execute(f, m); err != nil {
280 | return modsError{err, "Could not render template."}
281 | }
282 | return nil
283 | }
284 |
285 | func defaultConfig() Config {
286 | return Config{
287 | FormatAs: "markdown",
288 | FormatText: FormatText{
289 | "markdown": defaultMarkdownFormatText,
290 | "json": defaultJSONFormatText,
291 | },
292 | MCPTimeout: 15 * time.Second,
293 | }
294 | }
295 |
296 | func useLine() string {
297 | appName := filepath.Base(os.Args[0])
298 |
299 | if stdoutRenderer().ColorProfile() == termenv.TrueColor {
300 | appName = makeGradientText(stdoutStyles().AppName, appName)
301 | }
302 |
303 | return fmt.Sprintf(
304 | "%s %s",
305 | appName,
306 | stdoutStyles().CliArgs.Render("[OPTIONS] [PREFIX TERM]"),
307 | )
308 | }
309 |
310 | func usageFunc(cmd *cobra.Command) error {
311 | fmt.Printf(
312 | "Usage:\n %s\n\n",
313 | useLine(),
314 | )
315 | fmt.Println("Options:")
316 | cmd.Flags().VisitAll(func(f *flag.Flag) {
317 | if f.Hidden {
318 | return
319 | }
320 | if f.Shorthand == "" {
321 | fmt.Printf(
322 | " %-44s %s\n",
323 | stdoutStyles().Flag.Render("--"+f.Name),
324 | stdoutStyles().FlagDesc.Render(f.Usage),
325 | )
326 | } else {
327 | fmt.Printf(
328 | " %s%s %-40s %s\n",
329 | stdoutStyles().Flag.Render("-"+f.Shorthand),
330 | stdoutStyles().FlagComma,
331 | stdoutStyles().Flag.Render("--"+f.Name),
332 | stdoutStyles().FlagDesc.Render(f.Usage),
333 | )
334 | }
335 | })
336 | if cmd.HasExample() {
337 | fmt.Printf(
338 | "\nExample:\n %s\n %s\n",
339 | stdoutStyles().Comment.Render("# "+cmd.Example),
340 | cheapHighlighting(stdoutStyles(), examples[cmd.Example]),
341 | )
342 | }
343 |
344 | return nil
345 | }
346 |
--------------------------------------------------------------------------------
/config_template.yml:
--------------------------------------------------------------------------------
1 | # {{ index .Help "api" }}
2 | default-api: openai
3 | # {{ index .Help "model" }}
4 | default-model: gpt-4o
5 | # {{ index .Help "format-text" }}
6 | format-text:
7 | markdown: '{{ index .Config.FormatText "markdown" }}'
8 | json: '{{ index .Config.FormatText "json" }}'
9 | # {{ index .Help "mcp-servers" }}
10 | mcp-servers:
11 | # Example: GitHub MCP via Docker:
12 | # github:
13 | # command: docker
14 | # env:
15 | # - GITHUB_PERSONAL_ACCESS_TOKEN=xxxyyy
16 | # args:
17 | # - run
18 | # - "-i"
19 | # - "--rm"
20 | # - "-e"
21 | # - GITHUB_PERSONAL_ACCESS_TOKEN
22 | # - "ghcr.io/github/github-mcp-server"
23 | # {{ index .Help "mcp-timeout" }}
24 | mcp-timeout: 15s
25 | # {{ index .Help "roles" }}
26 | roles:
27 | "default": []
28 | # Example, a role called `shell`:
29 | # shell:
30 | # - you are a shell expert
31 | # - you do not explain anything
32 | # - you simply output one liners to solve the problems you're asked
33 | # - you do not provide any explanation whatsoever, ONLY the command
34 | # {{ index .Help "format" }}
35 | format: false
36 | # {{ index .Help "role" }}
37 | role: "default"
38 | # {{ index .Help "raw" }}
39 | raw: false
40 | # {{ index .Help "quiet" }}
41 | quiet: false
42 | # {{ index .Help "temp" }}
43 | temp: 1.0
44 | # {{ index .Help "topp" }}
45 | topp: 1.0
46 | # {{ index .Help "topk" }}
47 | topk: 50
48 | # {{ index .Help "no-limit" }}
49 | no-limit: false
50 | # {{ index .Help "word-wrap" }}
51 | word-wrap: 80
52 | # {{ index .Help "prompt-args" }}
53 | include-prompt-args: false
54 | # {{ index .Help "prompt" }}
55 | include-prompt: 0
56 | # {{ index .Help "max-retries" }}
57 | max-retries: 5
58 | # {{ index .Help "fanciness" }}
59 | fanciness: 10
60 | # {{ index .Help "status-text" }}
61 | status-text: Generating
62 | # {{ index .Help "theme" }}
63 | theme: charm
64 | # {{ index .Help "max-input-chars" }}
65 | max-input-chars: 12250
66 | # {{ index .Help "max-tokens" }}
67 | # max-tokens: 100
68 | # {{ index .Help "max-completion-tokens" }}
69 | max-completion-tokens: 100
70 | # {{ index .Help "apis" }}
71 | apis:
72 | openai:
73 | base-url: https://api.openai.com/v1
74 | api-key:
75 | api-key-env: OPENAI_API_KEY
76 | # api-key-cmd: rbw get -f OPENAI_API_KEY chat.openai.com
77 | models: # https://platform.openai.com/docs/models
78 | gpt-4o-mini:
79 | aliases: ["4o-mini"]
80 | max-input-chars: 392000
81 | fallback: gpt-4o
82 | # GPT-5 Series (Current Flagship)
83 | gpt-5:
84 | aliases: ["5", "gpt5", "gpt-5-thinking", "gpt5-thinking"]
85 | max-input-chars: 794000
86 | fallback: gpt-4o
87 | gpt-5-mini:
88 | aliases: ["5mini", "gpt5mini"]
89 | max-input-chars: 400000
90 | fallback: gpt-5
91 | gpt-5-nano:
92 | aliases: ["5nano", "gpt5nano"]
93 | max-input-chars: 200000
94 | fallback: gpt-5-mini
95 | gpt-5-codex:
96 | aliases: ["5codex", "gpt5codex", "codex"]
97 | max-input-chars: 400000
98 | fallback: gpt-5
99 | gpt-4o:
100 | aliases: ["4o"]
101 | max-input-chars: 392000
102 | fallback: gpt-4
103 | gpt-4:
104 | aliases: ["4"]
105 | max-input-chars: 24500
106 | fallback: gpt-3.5-turbo
107 | gpt-4-1106-preview:
108 | aliases: ["128k"]
109 | max-input-chars: 392000
110 | fallback: gpt-4
111 | gpt-4-32k:
112 | aliases: ["32k"]
113 | max-input-chars: 98000
114 | fallback: gpt-4
115 | gpt-3.5-turbo:
116 | aliases: ["35t"]
117 | max-input-chars: 12250
118 | fallback: gpt-3.5
119 | gpt-3.5-turbo-1106:
120 | aliases: ["35t-1106"]
121 | max-input-chars: 12250
122 | fallback: gpt-3.5-turbo
123 | gpt-3.5-turbo-16k:
124 | aliases: ["35t16k"]
125 | max-input-chars: 44500
126 | fallback: gpt-3.5
127 | gpt-3.5:
128 | aliases: ["35"]
129 | max-input-chars: 12250
130 | fallback:
131 | o1:
132 | aliases: ["o1"]
133 | max-input-chars: 200000
134 | o1-preview:
135 | aliases: ["o1-preview"]
136 | max-input-chars: 128000
137 | o1-mini:
138 | aliases: ["o1-mini"]
139 | max-input-chars: 128000
140 | # O3 Series (Advanced Reasoning Models)
141 | o3:
142 | aliases: ["o3"]
143 | max-input-chars: 794000
144 | fallback: o3-mini
145 | o3-pro:
146 | aliases: ["o3-pro", "o3pro"]
147 | max-input-chars: 794000
148 | fallback: o3
149 | o3-mini:
150 | aliases: ["o3m", "o3-mini"]
151 | max-input-chars: 200000
152 | fallback: o1-mini
153 | anthropic:
154 | base-url: https://api.anthropic.com/v1
155 | api-key:
156 | api-key-env: ANTHROPIC_API_KEY
157 | models: # https://docs.anthropic.com/en/docs/about-claude/models
158 | claude-sonnet-4-20250514:
159 | aliases: ["claude-sonnet-4", "sonnet-4"]
160 | max-input-chars: 680000
161 | claude-opus-4-1-20250805:
162 | aliases: ["claude-opus-4.1", "opus-4.1", "opus-4-1", "opus"]
163 | max-input-chars: 680000
164 | claude-3-7-sonnet-latest:
165 | aliases: ["claude3.7-sonnet", "claude-3-7-sonnet", "sonnet-3.7"]
166 | max-input-chars: 680000
167 | claude-3-7-sonnet-20250219:
168 | max-input-chars: 680000
169 | claude-3-5-haiku-20241022:
170 | aliases: ["claude3.5-haiku", "haiku"]
171 | max-input-chars: 680000
172 | cohere:
173 | base-url: https://api.cohere.com/v1
174 | models:
175 | command-r-plus:
176 | max-input-chars: 128000
177 | command-r:
178 | max-input-chars: 128000
179 | google:
180 | models: # https://ai.google.dev/gemini-api/docs/models/gemini
181 | gemini-1.5-pro-latest:
182 | aliases: ["gmp", "gemini", "gemini-1.5-pro"]
183 | max-input-chars: 392000
184 | gemini-1.5-flash-latest:
185 | aliases: ["gmf", "flash", "gemini-1.5-flash"]
186 | max-input-chars: 392000
187 | gemini-2.0-flash-001:
188 | aliases: ["gm2f", "flash-2", "gemini-2-flash"]
189 | max-input-chars: 4194304
190 | gemini-2.0-flash-lite:
191 | aliases: ["gm2fl", "flash-2-lite", "gemini-2-flash-lite"]
192 | max-input-chars: 4194304
193 |
194 | ollama:
195 | base-url: http://localhost:11434
196 | models: # https://ollama.com/library
197 | "llama3.2:3b":
198 | aliases: ["llama3.2"]
199 | max-input-chars: 650000
200 | "llama3.2:1b":
201 | aliases: ["llama3.2_1b"]
202 | max-input-chars: 650000
203 | "llama3:70b":
204 | aliases: ["llama3"]
205 | max-input-chars: 650000
206 | perplexity:
207 | base-url: https://api.perplexity.ai
208 | api-key:
209 | api-key-env: PERPLEXITY_API_KEY
210 | models: # https://docs.perplexity.ai/guides/model-cards
211 | llama-3.1-sonar-small-128k-online:
212 | aliases: ["llam31-small"]
213 | max-input-chars: 127072
214 | llama-3.1-sonar-large-128k-online:
215 | aliases: ["llam31-large"]
216 | max-input-chars: 127072
217 | llama-3.1-sonar-huge-128k-online:
218 | aliases: ["llam31-huge"]
219 | max-input-chars: 127072
220 |
221 | groq:
222 | base-url: https://api.groq.com/openai/v1
223 | api-key:
224 | api-key-env: GROQ_API_KEY
225 | models: # https://console.groq.com/docs/models
226 | # Production models
227 | gemma2-9b-it:
228 | aliases: ["gemma2", "gemma"]
229 | max-input-chars: 24500 # 8,192
230 | llama-3.3-70b-versatile:
231 | aliases: ["llama3.3", "llama3.3-70b", "llama3.3-versatile"]
232 | max-input-chars: 392000 # 128K
233 | max-completion-tokens: 98000 # 32,768
234 | llama-3.1-8b-instant:
235 | aliases: ["llama3.1-8b", "llama3.1-instant"]
236 | max-input-chars: 392000 # 128K
237 | max-completion-tokens: 24500 # 8,192
238 | llama-guard-3-8b:
239 | aliases: ["llama-guard"]
240 | max-input-chars: 24500 # 8,192
241 | llama3-70b-8192:
242 | aliases: ["llama3", "llama3-70b"]
243 | max-input-chars: 24500 # 8,192
244 | fallback: llama3-8b-8192
245 | llama3-8b-8192:
246 | aliases: ["llama3-8b"]
247 | max-input-chars: 24500 # 8,192
248 | mixtral-8x7b-32768:
249 | aliases: ["mixtral"]
250 | max-input-chars: 98000 # 32,768
251 | meta-llama/llama-4-scout-17b-16e-instruct:
252 | aliases: ["llama4-scout"]
253 | max-input-chars: 392000 # 128K
254 | meta-llama/llama-4-maverick-17b-128e-instruct:
255 | aliases: ["llama4", "llama4-maverick"]
256 | max-input-chars: 392000 # 128K
257 | # Preview models
258 | mistral-saba-24b:
259 | aliases: ["saba", "mistral-saba", "saba-24b"]
260 | max-input-chars: 98000 # 32K
261 | qwen-2.5-coder-32b:
262 | aliases: ["qwen-coder", "qwen2.5-coder", "qwen-2.5-coder"]
263 | max-input-chars: 392000 # 128K
264 | deepseek-r1-distill-qwen-32b:
265 | aliases: ["deepseek-r1", "r1-qwen", "deepseek-qwen"]
266 | max-input-chars: 392000 # 128K
267 | max-completion-tokens: 49152 # 16,384
268 | deepseek-r1-distill-llama-70b-specdec:
269 | aliases: ["deepseek-r1-specdec", "r1-llama-specdec"]
270 | max-input-chars: 392000 # 128K
271 | max-completion-tokens: 49152 # 16,384
272 | deepseek-r1-distill-llama-70b:
273 | aliases: ["deepseek-r1-llama", "r1-llama"]
274 | max-input-chars: 392000 # 128K
275 | llama-3.3-70b-specdec:
276 | aliases: ["llama3.3-specdec"]
277 | max-input-chars: 24500 # 8,192
278 | llama-3.2-1b-preview:
279 | aliases: ["llama3.2-1b"]
280 | max-input-chars: 392000 # 128K
281 | max-completion-tokens: 24500 # 8,192
282 | llama-3.2-3b-preview:
283 | aliases: ["llama3.2-3b"]
284 | max-input-chars: 392000 # 128K
285 | max-completion-tokens: 24500 # 8,192
286 | llama-3.2-11b-vision-preview:
287 | aliases: ["llama3.2-vision", "llama3.2-11b-vision"]
288 | max-input-chars: 392000 # 128K
289 | max-completion-tokens: 24500 # 8,192
290 | llama-3.2-90b-vision-preview:
291 | aliases: ["llama3.2-90b-vision"]
292 | max-input-chars: 392000 # 128K
293 | max-completion-tokens: 24500 # 8,192
294 |
295 | cerebras:
296 | base-url: https://api.cerebras.ai/v1
297 | api-key:
298 | api-key-env: CEREBRAS_API_KEY
299 | models: # https://inference-docs.cerebras.ai/introduction
300 | llama3.1-8b:
301 | aliases: ["llama3.1-8b-cerebras"]
302 | max-input-chars: 24500
303 | llama3.1-70b:
304 | aliases: ["llama3.1-cerebras", "llama3.1-70b-cerebras"]
305 | max-input-chars: 24500
306 |
307 | sambanova:
308 | base-url: https://api.sambanova.ai/v1
309 | api-key:
310 | api-key-env: SAMBANOVA_API_KEY
311 | models: # https://docs.sambanova.ai/cloud/docs/get-started/supported-models
312 | # Preview models
313 | DeepSeek-R1:
314 | aliases: ["deepseek-r1-sambanova", "deepseek-r1-preview"]
315 | max-input-chars: 24500 # 8k tokens
316 | # Production models
317 | DeepSeek-R1-Distill-Llama-70B:
318 | aliases: ["deepseek-r1-llama-sambanova", "deepseek-r1-distill"]
319 | max-input-chars: 98000 # 32k tokens
320 | Llama-3.1-Tulu-3-405B:
321 | aliases: ["llama3.1-tulu", "tulu-405b"]
322 | max-input-chars: 49000 # 16k tokens
323 | Meta-Llama-3.3-70B-Instruct:
324 | aliases: ["llama3.3-sambanova", "llama3.3-70b-sambanova"]
325 | max-input-chars: 392000 # 128k tokens
326 | Meta-Llama-3.2-3B-Instruct:
327 | aliases: ["llama3.2-3b-sambanova"]
328 | max-input-chars: 24500 # 8k tokens
329 | Meta-Llama-3.2-1B-Instruct:
330 | aliases: ["llama3.2-1b-sambanova"]
331 | max-input-chars: 49000 # 16k tokens
332 | Meta-Llama-3.1-405B-Instruct:
333 | aliases: ["llama3.1-405b-sambanova"]
334 | max-input-chars: 49000 # 16k tokens
335 | Meta-Llama-3.1-70B-Instruct:
336 | aliases: ["llama3.1-70b-sambanova"]
337 | max-input-chars: 392000 # 128k tokens
338 | Meta-Llama-3.1-8B-Instruct:
339 | aliases: ["llama3.1-8b-sambanova"]
340 | max-input-chars: 49000 # 16k tokens
341 | Meta-Llama-Guard-3-8B:
342 | aliases: ["llama-guard-sambanova"]
343 | max-input-chars: 24500 # 8k tokens
344 | Llama-3.2-90B-Vision-Instruct:
345 | aliases: ["llama3.2-vision-90b", "llama3.2-90b-vision-sambanova"]
346 | max-input-chars: 12250 # 4k tokens
347 | Llama-3.2-11B-Vision-Instruct:
348 | aliases: ["llama3.2-vision-11b", "llama3.2-11b-vision-sambanova"]
349 | max-input-chars: 12250 # 4k tokens
350 | Qwen2.5-72B-Instruct:
351 | aliases: ["qwen2.5-sambanova", "qwen2.5-72b"]
352 | max-input-chars: 49000 # 16k tokens
353 | Qwen2.5-Coder-32B-Instruct:
354 | aliases: ["qwen2.5-coder-sambanova", "qwen-coder-sambanova"]
355 | max-input-chars: 49000 # 16k tokens
356 | QwQ-32B-Preview:
357 | aliases: ["qwq-sambanova", "qwq-32b"]
358 | max-input-chars: 49000 # 16k tokens
359 |
360 | localai:
361 | # LocalAI setup instructions: https://github.com/go-skynet/LocalAI#example-use-gpt4all-j-model
362 | base-url: http://localhost:8080
363 | models:
364 | ggml-gpt4all-j:
365 | aliases: ["local", "4all"]
366 | max-input-chars: 12250
367 | fallback:
368 | azure:
369 | # Set to 'azure-ad' to use Active Directory
370 | # Azure OpenAI setup: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource
371 | base-url: https://YOUR_RESOURCE_NAME.openai.azure.com
372 | api-key:
373 | api-key-env: AZURE_OPENAI_KEY
374 | models:
375 | gpt-4:
376 | aliases: ["az4"]
377 | max-input-chars: 24500
378 | fallback: gpt-35-turbo
379 | gpt-35-turbo:
380 | aliases: ["az35t"]
381 | max-input-chars: 12250
382 | fallback: gpt-35
383 | gpt-35:
384 | aliases: ["az35"]
385 | max-input-chars: 12250
386 | fallback:
387 | o1-preview:
388 | aliases: ["o1-preview"]
389 | max-input-chars: 128000
390 | o1-mini:
391 | aliases: ["o1-mini"]
392 | max-input-chars: 128000
393 | runpod:
394 | # https://docs.runpod.io/serverless/workers/vllm/openai-compatibility
395 | base-url: https://api.runpod.ai/v2/${YOUR_ENDPOINT}/openai/v1
396 | api-key:
397 | api-key-env: RUNPOD_API_KEY
398 | models:
399 | openchat/openchat-3.5-1210:
400 | aliases: ["openchat"]
401 | max-input-chars: 8192
402 | mistral:
403 | base-url: https://api.mistral.ai/v1
404 | api-key:
405 | api-key-env: MISTRAL_API_KEY
406 | models: # https://docs.mistral.ai/getting-started/models/
407 | mistral-large-latest:
408 | aliases: ["mistral-large"]
409 | max-input-chars: 384000
410 | open-mistral-nemo:
411 | aliases: ["mistral-nemo"]
412 | max-input-chars: 384000
413 | # DeepSeek
414 | # https://api-docs.deepseek.com
415 | deepseek:
416 | base-url: https://api.deepseek.com/
417 | api-key:
418 | api-key-env: DEEPSEEK_API_KEY
419 | models:
420 | deepseek-chat:
421 | aliases: ["chat"]
422 | max-input-chars: 384000
423 | deepseek-reasoner:
424 | aliases: ["r1"]
425 | max-input-chars: 384000
426 | # GitHub Models
427 | # https://github.com/marketplace/models
428 | github-models:
429 | base-url: https://models.github.ai/inference
430 | api-key:
431 | api-key-env: GITHUB_PERSONAL_ACCESS_TOKEN
432 | models:
433 | openai/gpt-4.1:
434 | max-input-chars: 392000
435 | openai/o3-mini:
436 | max-input-chars: 392000
437 | openai/o4-mini:
438 | max-input-chars: 392000
439 | openai/text-embedding-3-large:
440 | max-input-chars: 392000
441 | openai/text-embedding-3-small:
442 | max-input-chars: 392000
443 | ai21-labs/AI21-Jamba-1.5-Large:
444 | max-input-chars: 392000
445 | ai21-labs/AI21-Jamba-1.5-Mini:
446 | max-input-chars: 392000
447 | cohere/cohere-command-a:
448 | max-input-chars: 392000
449 | cohere/Cohere-command-r:
450 | max-input-chars: 392000
451 | cohere/Cohere-command-r-08-2024:
452 | max-input-chars: 392000
453 | cohere/Cohere-command-r-plus:
454 | max-input-chars: 392000
455 | cohere/Cohere-command-r-plus-08-2024:
456 | max-input-chars: 392000
457 | cohere/Cohere-embed-v3-english:
458 | max-input-chars: 392000
459 | cohere/Cohere-embed-v3-multilingual:
460 | max-input-chars: 392000
461 | core42/jais-30b-chat:
462 | max-input-chars: 392000
463 | deepseek/DeepSeek-R1:
464 | max-input-chars: 392000
465 | deepseek/DeepSeek-V3-0324:
466 | max-input-chars: 392000
467 | meta/Llama-3.2-11B-Vision-Instruct:
468 | max-input-chars: 392000
469 | meta/Llama-3.2-90B-Vision-Instruct:
470 | max-input-chars: 392000
471 | meta/Llama-3.3-70B-Instruct:
472 | max-input-chars: 392000
473 | meta/Llama-4-Maverick-17B-128E-Instruct-FP8:
474 | max-input-chars: 392000
475 | meta/Llama-4-Scout-17B-16E-Instruct:
476 | max-input-chars: 392000
477 | meta/Meta-Llama-3.1-405B-Instruct:
478 | max-input-chars: 392000
479 | meta/Meta-Llama-3.1-70B-Instruct:
480 | max-input-chars: 392000
481 | meta/Meta-Llama-3.1-8B-Instruct:
482 | max-input-chars: 392000
483 | meta/Meta-Llama-3-70B-Instruct:
484 | max-input-chars: 392000
485 | meta/Meta-Llama-3-8B-Instruct:
486 | max-input-chars: 392000
487 | mistral-ai/Codestral-2501:
488 | max-input-chars: 392000
489 | mistral-ai/Ministral-3B:
490 | max-input-chars: 392000
491 | mistral-ai/Mistral-Large-2411:
492 | max-input-chars: 392000
493 | mistral-ai/mistral-medium-2505:
494 | max-input-chars: 392000
495 | mistral-ai/Mistral-Nemo:
496 | max-input-chars: 392000
497 | mistral-ai/mistral-small-2503:
498 | max-input-chars: 392000
499 | xai/grok-3:
500 | max-input-chars: 392000
501 | xai/grok-3-mini:
502 | max-input-chars: 392000
503 | microsoft/MAI-DS-R1:
504 | max-input-chars: 392000
505 | microsoft/Phi-3.5-mini-instruct:
506 | max-input-chars: 392000
507 | microsoft/Phi-3.5-MoE-instruct:
508 | max-input-chars: 392000
509 | microsoft/Phi-3.5-vision-instruct:
510 | max-input-chars: 392000
511 | microsoft/Phi-3-medium-128k-instruct:
512 | max-input-chars: 392000
513 | microsoft/Phi-3-medium-4k-instruct:
514 | max-input-chars: 392000
515 | microsoft/Phi-3-mini-128k-instruct:
516 | max-input-chars: 392000
517 | microsoft/Phi-3-mini-4k-instruct:
518 | max-input-chars: 392000
519 | microsoft/Phi-3-small-128k-instruct:
520 | max-input-chars: 392000
521 | microsoft/Phi-3-small-8k-instruct:
522 | max-input-chars: 392000
523 | microsoft/Phi-4:
524 | max-input-chars: 392000
525 | microsoft/Phi-4-mini-instruct:
526 | max-input-chars: 392000
527 | microsoft/Phi-4-mini-reasoning:
528 | max-input-chars: 392000
529 | microsoft/Phi-4-multimodal-instruct:
530 | max-input-chars: 392000
531 | microsoft/Phi-4-reasoning:
532 | max-input-chars: 392000
533 |
--------------------------------------------------------------------------------
/mods.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "bufio"
5 | "context"
6 | "errors"
7 | "fmt"
8 | "io"
9 | "maps"
10 | "math"
11 | "net/http"
12 | "net/url"
13 | "os"
14 | "os/exec"
15 | "regexp"
16 | "slices"
17 | "strconv"
18 | "strings"
19 | "sync"
20 | "time"
21 | "unicode"
22 |
23 | "github.com/caarlos0/go-shellwords"
24 | "github.com/charmbracelet/bubbles/viewport"
25 | tea "github.com/charmbracelet/bubbletea"
26 | "github.com/charmbracelet/glamour"
27 | "github.com/charmbracelet/lipgloss"
28 | "github.com/charmbracelet/mods/internal/anthropic"
29 | "github.com/charmbracelet/mods/internal/cache"
30 | "github.com/charmbracelet/mods/internal/cohere"
31 | "github.com/charmbracelet/mods/internal/google"
32 | "github.com/charmbracelet/mods/internal/ollama"
33 | "github.com/charmbracelet/mods/internal/openai"
34 | "github.com/charmbracelet/mods/internal/proto"
35 | "github.com/charmbracelet/mods/internal/stream"
36 | "github.com/charmbracelet/x/exp/ordered"
37 | )
38 |
// state tracks the lifecycle phase of the Mods program, from startup through
// configuration, the request/response cycle, and shutdown.
type state int

const (
	startState        state = iota // initial state, before cache details are resolved
	configLoadedState              // cache/config details resolved; reading stdin
	requestState                   // request in flight; waiting for the first chunk
	responseState                  // streaming response content to the output
	doneState                      // stream finished; shutting down cleanly
	errorState                     // fatal error; Mods.Error holds the details
)
49 |
// Mods is the Bubble Tea model that manages reading stdin and querying the
// OpenAI API.
type Mods struct {
	Output        string                // accumulated raw response text
	Input         string                // prompt content read from stdin
	Styles        styles                // lipgloss styles used by the views
	Error         *modsError            // set when state == errorState
	state         state                 // current lifecycle phase
	retries       int                   // request retries performed so far
	renderer      *lipgloss.Renderer    // output renderer (TTY vs. plain)
	glam          *glamour.TermRenderer // markdown renderer for pretty output
	glamViewport  viewport.Model        // scrolls output taller than the window
	glamOutput    string                // markdown-rendered version of Output
	glamHeight    int                   // height of glamOutput, in lines
	messages      []proto.Message       // conversation history of the request
	cancelRequest []context.CancelFunc  // cancel funcs for in-flight request/tool contexts
	anim          tea.Model             // loading animation shown while waiting
	width         int
	height        int

	db    *convoDB             // conversation metadata store
	cache *cache.Conversations // conversation message store
	Config *Config

	content      []string     // chunks pending direct printing (raw / non-TTY mode)
	contentMutex *sync.Mutex  // guards content

	ctx context.Context
}
79 |
80 | func newMods(
81 | ctx context.Context,
82 | r *lipgloss.Renderer,
83 | cfg *Config,
84 | db *convoDB,
85 | cache *cache.Conversations,
86 | ) *Mods {
87 | gr, _ := glamour.NewTermRenderer(
88 | glamour.WithEnvironmentConfig(),
89 | glamour.WithWordWrap(cfg.WordWrap),
90 | )
91 | vp := viewport.New(0, 0)
92 | vp.GotoBottom()
93 | return &Mods{
94 | Styles: makeStyles(r),
95 | glam: gr,
96 | state: startState,
97 | renderer: r,
98 | glamViewport: vp,
99 | contentMutex: &sync.Mutex{},
100 | db: db,
101 | cache: cache,
102 | Config: cfg,
103 | ctx: ctx,
104 | }
105 | }
106 |
// completionInput is a tea.Msg that wraps the content read from stdin.
type completionInput struct {
	content string // raw prompt text; empty when stdin provided nothing
}

// completionOutput is a tea.Msg that wraps the content returned from openai.
type completionOutput struct {
	content string              // latest chunk of response text
	stream  stream.Stream       // nil once the stream is fully consumed
	errh    func(error) tea.Msg // converts stream errors into tea messages
}
118 |
// Init implements tea.Model. The first command resolves which conversation
// to read from and write to before anything else runs.
func (m *Mods) Init() tea.Cmd {
	return m.findCacheOpsDetails()
}
123 |
// Update implements tea.Model. It drives the whole pipeline: cache details ->
// stdin -> request -> streamed response chunks -> done/error, while keeping
// the loading animation and the output viewport updated along the way.
func (m *Mods) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	var cmds []tea.Cmd
	switch msg := msg.(type) {
	case cacheDetailsMsg:
		// Conversation identifiers resolved: record them, start the loading
		// animation (unless quiet), then read the prompt from stdin.
		m.Config.cacheWriteToID = msg.WriteID
		m.Config.cacheWriteToTitle = msg.Title
		m.Config.cacheReadFromID = msg.ReadID
		m.Config.API = msg.API
		m.Config.Model = msg.Model

		if !m.Config.Quiet {
			m.anim = newAnim(m.Config.Fanciness, m.Config.StatusText, m.renderer, m.Styles)
			cmds = append(cmds, m.anim.Init())
		}
		m.state = configLoadedState
		cmds = append(cmds, m.readStdinCmd)

	case completionInput:
		// Prompt assembled. Bail out early for empty input or for flag
		// combinations that are handled entirely outside the TUI.
		if msg.content != "" {
			m.Input = removeWhitespace(msg.content)
		}
		if m.Input == "" && m.Config.Prefix == "" && m.Config.Show == "" && !m.Config.ShowLast {
			return m, m.quit
		}
		if m.Config.Dirs ||
			len(m.Config.Delete) > 0 ||
			m.Config.DeleteOlderThan != 0 ||
			m.Config.ShowHelp ||
			m.Config.List ||
			m.Config.ListRoles ||
			m.Config.Settings ||
			m.Config.ResetSettings {
			return m, m.quit
		}

		// Optionally echo the prompt args and/or the first N lines of the
		// piped input ahead of the response.
		if m.Config.IncludePromptArgs {
			m.appendToOutput(m.Config.Prefix + "\n\n")
		}

		if m.Config.IncludePrompt > 0 {
			parts := strings.Split(m.Input, "\n")
			if len(parts) > m.Config.IncludePrompt {
				parts = parts[0:m.Config.IncludePrompt]
			}
			m.appendToOutput(strings.Join(parts, "\n") + "\n")
		}
		m.state = requestState
		cmds = append(cmds, m.startCompletionCmd(msg.content))
	case completionOutput:
		// A nil stream marks the end of the conversation turn.
		if msg.stream == nil {
			m.state = doneState
			return m, m.quit
		}
		if msg.content != "" {
			m.appendToOutput(msg.content)
			m.state = responseState
		}
		// Re-queue to pull the next chunk from the same stream.
		cmds = append(cmds, m.receiveCompletionStreamCmd(completionOutput{
			stream: msg.stream,
			errh:   msg.errh,
		}))
	case modsError:
		m.Error = &msg
		m.state = errorState
		return m, m.quit
	case tea.WindowSizeMsg:
		m.width, m.height = msg.Width, msg.Height
		m.glamViewport.Width = m.width
		m.glamViewport.Height = m.height
		return m, nil
	case tea.KeyMsg:
		switch msg.String() {
		case "q", "ctrl+c":
			m.state = doneState
			return m, m.quit
		}
	}
	// Keep the loading animation ticking while waiting on the request.
	if !m.Config.Quiet && (m.state == configLoadedState || m.state == requestState) {
		var cmd tea.Cmd
		m.anim, cmd = m.anim.Update(msg)
		cmds = append(cmds, cmd)
	}
	if m.viewportNeeded() {
		// Only respond to keypresses when the viewport (i.e. the content) is
		// taller than the window.
		var cmd tea.Cmd
		m.glamViewport, cmd = m.glamViewport.Update(msg)
		cmds = append(cmds, cmd)
	}
	return m, tea.Batch(cmds...)
}
216 |
217 | func (m Mods) viewportNeeded() bool {
218 | return m.glamHeight > m.height
219 | }
220 |
221 | // View implements tea.Model.
222 | func (m *Mods) View() string {
223 | //nolint:exhaustive
224 | switch m.state {
225 | case errorState:
226 | return ""
227 | case requestState:
228 | if !m.Config.Quiet {
229 | return m.anim.View()
230 | }
231 | case responseState:
232 | if !m.Config.Raw && isOutputTTY() {
233 | if m.viewportNeeded() {
234 | return m.glamViewport.View()
235 | }
236 | // We don't need the viewport yet.
237 | return m.glamOutput
238 | }
239 |
240 | if isOutputTTY() && !m.Config.Raw {
241 | return m.Output
242 | }
243 |
244 | m.contentMutex.Lock()
245 | for _, c := range m.content {
246 | fmt.Print(c)
247 | }
248 | m.content = []string{}
249 | m.contentMutex.Unlock()
250 | case doneState:
251 | if !isOutputTTY() {
252 | fmt.Printf("\n")
253 | }
254 | return ""
255 | }
256 | return ""
257 | }
258 |
259 | func (m *Mods) quit() tea.Msg {
260 | for _, cancel := range m.cancelRequest {
261 | cancel()
262 | }
263 | return tea.Quit()
264 | }
265 |
266 | func (m *Mods) retry(content string, err modsError) tea.Msg {
267 | m.retries++
268 | if m.retries >= m.Config.MaxRetries {
269 | return err
270 | }
271 | wait := time.Millisecond * 100 * time.Duration(math.Pow(2, float64(m.retries))) //nolint:mnd
272 | time.Sleep(wait)
273 | return completionInput{content}
274 | }
275 |
276 | func (m *Mods) startCompletionCmd(content string) tea.Cmd {
277 | if m.Config.Show != "" || m.Config.ShowLast {
278 | return m.readFromCache()
279 | }
280 |
281 | return func() tea.Msg {
282 | var mod Model
283 | var api API
284 | var ccfg openai.Config
285 | var accfg anthropic.Config
286 | var cccfg cohere.Config
287 | var occfg ollama.Config
288 | var gccfg google.Config
289 |
290 | cfg := m.Config
291 | api, mod, err := m.resolveModel(cfg)
292 | cfg.API = mod.API
293 | if err != nil {
294 | return err
295 | }
296 | if api.Name == "" {
297 | eps := make([]string, 0)
298 | for _, a := range cfg.APIs {
299 | eps = append(eps, m.Styles.InlineCode.Render(a.Name))
300 | }
301 | return modsError{
302 | err: newUserErrorf(
303 | "Your configured API endpoints are: %s",
304 | eps,
305 | ),
306 | reason: fmt.Sprintf(
307 | "The API endpoint %s is not configured.",
308 | m.Styles.InlineCode.Render(cfg.API),
309 | ),
310 | }
311 | }
312 |
313 | switch mod.API {
314 | case "ollama":
315 | occfg = ollama.DefaultConfig()
316 | if api.BaseURL != "" {
317 | occfg.BaseURL = api.BaseURL
318 | }
319 | case "anthropic":
320 | key, err := m.ensureKey(api, "ANTHROPIC_API_KEY", "https://console.anthropic.com/settings/keys")
321 | if err != nil {
322 | return modsError{err, "Anthropic authentication failed"}
323 | }
324 | accfg = anthropic.DefaultConfig(key)
325 | if api.BaseURL != "" {
326 | accfg.BaseURL = api.BaseURL
327 | }
328 | case "google":
329 | key, err := m.ensureKey(api, "GOOGLE_API_KEY", "https://aistudio.google.com/app/apikey")
330 | if err != nil {
331 | return modsError{err, "Google authentication failed"}
332 | }
333 | gccfg = google.DefaultConfig(mod.Name, key)
334 | gccfg.ThinkingBudget = mod.ThinkingBudget
335 | case "cohere":
336 | key, err := m.ensureKey(api, "COHERE_API_KEY", "https://dashboard.cohere.com/api-keys")
337 | if err != nil {
338 | return modsError{err, "Cohere authentication failed"}
339 | }
340 | cccfg = cohere.DefaultConfig(key)
341 | if api.BaseURL != "" {
342 | ccfg.BaseURL = api.BaseURL
343 | }
344 | case "azure", "azure-ad": //nolint:goconst
345 | key, err := m.ensureKey(api, "AZURE_OPENAI_KEY", "https://aka.ms/oai/access")
346 | if err != nil {
347 | return modsError{err, "Azure authentication failed"}
348 | }
349 | ccfg = openai.Config{
350 | AuthToken: key,
351 | BaseURL: api.BaseURL,
352 | }
353 | if mod.API == "azure-ad" {
354 | ccfg.APIType = "azure-ad"
355 | }
356 | if api.User != "" {
357 | cfg.User = api.User
358 | }
359 | default:
360 | key, err := m.ensureKey(api, "OPENAI_API_KEY", "https://platform.openai.com/account/api-keys")
361 | if err != nil {
362 | return modsError{err, "OpenAI authentication failed"}
363 | }
364 | ccfg = openai.Config{
365 | AuthToken: key,
366 | BaseURL: api.BaseURL,
367 | }
368 | }
369 |
370 | if cfg.HTTPProxy != "" {
371 | proxyURL, err := url.Parse(cfg.HTTPProxy)
372 | if err != nil {
373 | return modsError{err, "There was an error parsing your proxy URL."}
374 | }
375 | httpClient := &http.Client{Transport: &http.Transport{Proxy: http.ProxyURL(proxyURL)}}
376 | ccfg.HTTPClient = httpClient
377 | accfg.HTTPClient = httpClient
378 | cccfg.HTTPClient = httpClient
379 | occfg.HTTPClient = httpClient
380 | }
381 |
382 | if mod.MaxChars == 0 {
383 | mod.MaxChars = cfg.MaxInputChars
384 | }
385 |
386 | // Check if the model is an o1 model and unset the max_tokens parameter
387 | // accordingly, as it's unsupported by o1.
388 | // We do set max_completion_tokens instead, which is supported.
389 | // Release won't have a prefix with a dash, so just putting o1 for match.
390 | if strings.HasPrefix(mod.Name, "o1") {
391 | cfg.MaxTokens = 0
392 | }
393 |
394 | ctx, cancel := context.WithTimeout(m.ctx, config.MCPTimeout)
395 | m.cancelRequest = append(m.cancelRequest, cancel)
396 |
397 | tools, err := mcpTools(ctx)
398 | if err != nil {
399 | return err
400 | }
401 |
402 | if err := m.setupStreamContext(content, mod); err != nil {
403 | return err
404 | }
405 |
406 | request := proto.Request{
407 | Messages: m.messages,
408 | API: mod.API,
409 | Model: mod.Name,
410 | User: cfg.User,
411 | Temperature: ptrOrNil(cfg.Temperature),
412 | TopP: ptrOrNil(cfg.TopP),
413 | TopK: ptrOrNil(cfg.TopK),
414 | Stop: cfg.Stop,
415 | Tools: tools,
416 | ToolCaller: func(name string, data []byte) (string, error) {
417 | ctx, cancel := context.WithTimeout(m.ctx, config.MCPTimeout)
418 | m.cancelRequest = append(m.cancelRequest, cancel)
419 | return toolCall(ctx, name, data)
420 | },
421 | }
422 | if cfg.MaxTokens > 0 {
423 | request.MaxTokens = &cfg.MaxTokens
424 | }
425 |
426 | var client stream.Client
427 | switch mod.API {
428 | case "anthropic":
429 | client = anthropic.New(accfg)
430 | case "google":
431 | client = google.New(gccfg)
432 | case "cohere":
433 | client = cohere.New(cccfg)
434 | case "ollama":
435 | client, err = ollama.New(occfg)
436 | default:
437 | client = openai.New(ccfg)
438 | if cfg.Format && config.FormatAs == "json" {
439 | request.ResponseFormat = &config.FormatAs
440 | }
441 | }
442 | if err != nil {
443 | return modsError{err, "Could not setup client"}
444 | }
445 |
446 | stream := client.Request(m.ctx, request)
447 | return m.receiveCompletionStreamCmd(completionOutput{
448 | stream: stream,
449 | errh: func(err error) tea.Msg {
450 | return m.handleRequestError(err, mod, m.Input)
451 | },
452 | })()
453 | }
454 | }
455 |
456 | func (m Mods) ensureKey(api API, defaultEnv, docsURL string) (string, error) {
457 | key := api.APIKey
458 | if key == "" && api.APIKeyEnv != "" && api.APIKeyCmd == "" {
459 | key = os.Getenv(api.APIKeyEnv)
460 | }
461 | if key == "" && api.APIKeyCmd != "" {
462 | args, err := shellwords.Parse(api.APIKeyCmd)
463 | if err != nil {
464 | return "", modsError{err, "Failed to parse api-key-cmd"}
465 | }
466 | out, err := exec.Command(args[0], args[1:]...).CombinedOutput() //nolint:gosec
467 | if err != nil {
468 | return "", modsError{err, "Cannot exec api-key-cmd"}
469 | }
470 | key = strings.TrimSpace(string(out))
471 | }
472 | if key == "" {
473 | key = os.Getenv(defaultEnv)
474 | }
475 | if key != "" {
476 | return key, nil
477 | }
478 | return "", modsError{
479 | reason: fmt.Sprintf(
480 | "%[1]s required; set the environment variable %[1]s or update %[2]s through %[3]s.",
481 | m.Styles.InlineCode.Render(defaultEnv),
482 | m.Styles.InlineCode.Render("mods.yaml"),
483 | m.Styles.InlineCode.Render("mods --settings"),
484 | ),
485 | err: newUserErrorf(
486 | "You can grab one at %s",
487 | m.Styles.Link.Render(docsURL),
488 | ),
489 | }
490 | }
491 |
// receiveCompletionStreamCmd pulls the next chunk from the response stream
// and hands it back to Update, which re-queues this command until the stream
// is exhausted. Once the stream ends, pending tool calls are executed and
// their results emitted as content so the loop continues; a completionOutput
// with a nil stream signals that the turn is finished.
func (m *Mods) receiveCompletionStreamCmd(msg completionOutput) tea.Cmd {
	return func() tea.Msg {
		if msg.stream.Next() {
			chunk, err := msg.stream.Current()
			// ErrNoContent chunks are passed through (empty content) rather
			// than treated as failures.
			if err != nil && !errors.Is(err, stream.ErrNoContent) {
				_ = msg.stream.Close()
				return msg.errh(err)
			}
			return completionOutput{
				content: chunk.Content,
				stream:  msg.stream,
				errh:    msg.errh,
			}
		}

		// stream is done, check for errors
		if err := msg.stream.Err(); err != nil {
			return msg.errh(err)
		}

		results := msg.stream.CallTools()
		toolMsg := completionOutput{
			stream: msg.stream,
			errh:   msg.errh,
		}
		for _, call := range results {
			toolMsg.content += call.String()
		}
		if len(results) == 0 {
			// No tool calls: the turn is complete. Capture the final message
			// history and emit a stream-less output to trigger shutdown.
			m.messages = msg.stream.Messages()
			return completionOutput{
				errh: msg.errh,
			}
		}
		return toolMsg
	}
}
529 |
// cacheDetailsMsg carries the resolved conversation identifiers: the ID and
// title to write the conversation under, the ID to read history from, and
// the API/model pair the conversation uses.
type cacheDetailsMsg struct {
	WriteID, Title, ReadID, API, Model string
}
533 |
// findCacheOpsDetails resolves which conversation to read from and which to
// write to, based on the --continue/--continue-last/--title/--show flags.
// When continuing a conversation it also restores the model/API pair that
// conversation was created with.
func (m *Mods) findCacheOpsDetails() tea.Cmd {
	return func() tea.Msg {
		// --continue with no explicit title behaves like --continue-last.
		continueLast := m.Config.ContinueLast || (m.Config.Continue != "" && m.Config.Title == "")
		readID := ordered.First(m.Config.Continue, m.Config.Show)
		writeID := ordered.First(m.Config.Title, m.Config.Continue)
		title := writeID
		model := m.Config.Model
		api := m.Config.API

		if readID != "" || continueLast || m.Config.ShowLast {
			found, err := m.findReadID(readID)
			if err != nil {
				return modsError{
					err:    err,
					reason: "Could not find the conversation.",
				}
			}
			if found != nil {
				readID = found.ID
				// Reuse the conversation's original model/API when recorded.
				if found.Model != nil && found.API != nil {
					model = *found.Model
					api = *found.API
				}
			}
		}

		// if we are continuing last, update the existing conversation
		if continueLast {
			writeID = readID
		}

		if writeID == "" {
			writeID = newConversationID()
		}

		// A non-SHA1 writeID is a human-readable title: reuse the matching
		// conversation's ID if one exists, otherwise mint a fresh ID.
		if !sha1reg.MatchString(writeID) {
			convo, err := m.db.Find(writeID)
			if err != nil {
				// its a new conversation with a title
				writeID = newConversationID()
			} else {
				writeID = convo.ID
			}
		}

		return cacheDetailsMsg{
			WriteID: writeID,
			Title:   title,
			ReadID:  readID,
			API:     api,
			Model:   model,
		}
	}
}
588 |
589 | func (m *Mods) findReadID(in string) (*Conversation, error) {
590 | convo, err := m.db.Find(in)
591 | if err == nil {
592 | return convo, nil
593 | }
594 | if errors.Is(err, errNoMatches) && m.Config.Show == "" {
595 | convo, err := m.db.FindHEAD()
596 | if err != nil {
597 | return nil, err
598 | }
599 | return convo, nil
600 | }
601 | return nil, err
602 | }
603 |
604 | func (m *Mods) readStdinCmd() tea.Msg {
605 | if !isInputTTY() {
606 | reader := bufio.NewReader(os.Stdin)
607 | stdinBytes, err := io.ReadAll(reader)
608 | if err != nil {
609 | return modsError{err, "Unable to read stdin."}
610 | }
611 |
612 | return completionInput{increaseIndent(string(stdinBytes))}
613 | }
614 | return completionInput{""}
615 | }
616 |
617 | func (m *Mods) readFromCache() tea.Cmd {
618 | return func() tea.Msg {
619 | var messages []proto.Message
620 | if err := m.cache.Read(m.Config.cacheReadFromID, &messages); err != nil {
621 | return modsError{err, "There was an error loading the conversation."}
622 | }
623 |
624 | m.appendToOutput(proto.Conversation(messages).String())
625 | return completionOutput{
626 | errh: func(err error) tea.Msg {
627 | return modsError{err: err}
628 | },
629 | }
630 | }
631 | }
632 |
// tabWidth is the number of spaces a tab is expanded to in rendered output.
const tabWidth = 4

// appendToOutput accumulates s into the output. In raw or non-TTY mode the
// chunk is buffered (under contentMutex) for direct printing by View.
// Otherwise the whole output is re-rendered as markdown, loaded into the
// viewport, and auto-scrolled when the user was already at the bottom.
func (m *Mods) appendToOutput(s string) {
	m.Output += s
	if !isOutputTTY() || m.Config.Raw {
		m.contentMutex.Lock()
		m.content = append(m.content, s)
		m.contentMutex.Unlock()
		return
	}

	// Remember whether we were following the tail before re-rendering.
	wasAtBottom := m.glamViewport.ScrollPercent() == 1.0
	oldHeight := m.glamHeight
	m.glamOutput, _ = m.glam.Render(m.Output)
	// Trim trailing whitespace and expand tabs to spaces before measuring
	// the rendered height, then keep a single trailing newline.
	m.glamOutput = strings.TrimRightFunc(m.glamOutput, unicode.IsSpace)
	m.glamOutput = strings.ReplaceAll(m.glamOutput, "\t", strings.Repeat(" ", tabWidth))
	m.glamHeight = lipgloss.Height(m.glamOutput)
	m.glamOutput += "\n"
	truncatedGlamOutput := m.renderer.NewStyle().
		MaxWidth(m.width).
		Render(m.glamOutput)
	m.glamViewport.SetContent(truncatedGlamOutput)
	if oldHeight < m.glamHeight && wasAtBottom {
		// If the viewport's at the bottom and we've received a new
		// line of content, follow the output by auto scrolling to
		// the bottom.
		m.glamViewport.GotoBottom()
	}
}
662 |
// removeWhitespace returns s unchanged unless it contains nothing but
// whitespace, in which case it returns the empty string.
func removeWhitespace(s string) string {
	if len(strings.TrimSpace(s)) > 0 {
		return s
	}
	return ""
}
670 |
// tokenErrRe matches the "maximum context length exceeded" error message,
// capturing the model's token limit and the token count of the request.
var tokenErrRe = regexp.MustCompile(`This model's maximum context length is (\d+) tokens. However, your messages resulted in (\d+) tokens`)

// cutPrompt shortens prompt when msg is a context-length error, trimming
// roughly enough characters to bring the request back under the limit.
// If msg doesn't match, the limit wasn't actually exceeded, or the prompt
// is too short to trim, the prompt is returned untouched.
func cutPrompt(msg, prompt string) string {
	groups := tokenErrRe.FindStringSubmatch(msg)
	if len(groups) != 3 { //nolint:mnd
		return prompt
	}

	limit, _ := strconv.Atoi(groups[1])
	used, _ := strconv.Atoi(groups[2])
	if limit > used {
		return prompt
	}

	// 1 token =~ 4 chars
	// cut 10 extra chars 'just in case'
	excess := 10 + (used-limit)*4 //nolint:mnd
	if len(prompt) <= excess {
		return prompt
	}
	return prompt[:len(prompt)-excess]
}
695 |
// increaseIndent prefixes every line of s with a single tab character.
func increaseIndent(s string) string {
	var b strings.Builder
	for i, line := range strings.Split(s, "\n") {
		if i > 0 {
			b.WriteByte('\n')
		}
		b.WriteByte('\t')
		b.WriteString(line)
	}
	return b.String()
}
703 |
// resolveModel finds the API/Model pair matching cfg.API and cfg.Model,
// resolving model aliases to canonical names along the way (cfg.Model is
// rewritten to the canonical name). When cfg.API is empty, every configured
// API is searched in order; otherwise only the named API is considered and
// a missing model there is an error listing that API's available models.
func (m *Mods) resolveModel(cfg *Config) (API, Model, error) {
	for _, api := range cfg.APIs {
		if api.Name != cfg.API && cfg.API != "" {
			continue
		}
		// Map an alias (or an exact name) to the canonical model name.
		for name, mod := range api.Models {
			if name == cfg.Model || slices.Contains(mod.Aliases, cfg.Model) {
				cfg.Model = name
				break
			}
		}
		mod, ok := api.Models[cfg.Model]
		if ok {
			mod.Name = cfg.Model
			mod.API = api.Name
			return api, mod, nil
		}
		if cfg.API != "" {
			// The user named this API explicitly, so not finding the model
			// here is fatal rather than a reason to try other APIs.
			return API{}, Model{}, modsError{
				err: newUserErrorf(
					"Available models are: %s",
					strings.Join(slices.Collect(maps.Keys(api.Models)), ", "),
				),
				reason: fmt.Sprintf(
					"The API endpoint %s does not contain the model %s",
					m.Styles.InlineCode.Render(cfg.API),
					m.Styles.InlineCode.Render(cfg.Model),
				),
			}
		}
	}

	return API{}, Model{}, modsError{
		reason: fmt.Sprintf(
			"Model %s is not in the settings file.",
			m.Styles.InlineCode.Render(cfg.Model),
		),
		err: newUserErrorf(
			"Please specify an API endpoint with %s or configure the model in the settings: %s",
			m.Styles.InlineCode.Render("--api"),
			m.Styles.InlineCode.Render("mods --settings"),
		),
	}
}
748 |
// number constrains ptrOrNil to the numeric option types used in requests.
type number interface{ int64 | float64 }

// ptrOrNil converts t into an optional value: negative values mean "unset"
// and yield nil, anything else yields a pointer to the value.
func ptrOrNil[T number](t T) *T {
	if t >= 0 {
		return &t
	}
	return nil
}
757 |
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
2 | filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
3 | github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ=
4 | github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ=
5 | github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc=
6 | github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
7 | github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
8 | github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
9 | github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
10 | github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
11 | github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
12 | github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
13 | github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
14 | github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
15 | github.com/alecthomas/assert/v2 v2.7.0 h1:QtqSACNS3tF7oasA8CU6A6sXZSBDqnm7RfpLl9bZqbE=
16 | github.com/alecthomas/assert/v2 v2.7.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
17 | github.com/alecthomas/chroma/v2 v2.14.0 h1:R3+wzpnUArGcQz7fCETQBzO5n9IMNi13iIs46aU4V9E=
18 | github.com/alecthomas/chroma/v2 v2.14.0/go.mod h1:QolEbTfmUHIMVpBqxeDnNBj2uoeI4EbYP4i6n68SG4I=
19 | github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc=
20 | github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
21 | github.com/anthropics/anthropic-sdk-go v1.19.0 h1:mO6E+ffSzLRvR/YUH9KJC0uGw0uV8GjISIuzem//3KE=
22 | github.com/anthropics/anthropic-sdk-go v1.19.0/go.mod h1:WTz31rIUHUHqai2UslPpw5CwXrQP3geYBioRV4WOLvE=
23 | github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
24 | github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
25 | github.com/aws/aws-sdk-go-v2 v1.30.3 h1:jUeBtG0Ih+ZIFH0F4UkmL9w3cSpaMv9tYYDbzILP8dY=
26 | github.com/aws/aws-sdk-go-v2 v1.30.3/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc=
27 | github.com/aws/smithy-go v1.20.3 h1:ryHwveWzPV5BIof6fyDvor6V3iUL7nTfiTKXHiW05nE=
28 | github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
29 | github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
30 | github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
31 | github.com/aymanbagabas/go-udiff v0.3.1 h1:LV+qyBQ2pqe0u42ZsUEtPiCaUoqgA9gYRDs3vj1nolY=
32 | github.com/aymanbagabas/go-udiff v0.3.1/go.mod h1:G0fsKmG+P6ylD0r6N/KgQD/nWzgfnl8ZBcNLgcbrw8E=
33 | github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
34 | github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
35 | github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
36 | github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
37 | github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
38 | github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
39 | github.com/caarlos0/duration v0.0.0-20240108180406-5d492514f3c7 h1:kJP/C2eL9DCKrCOlX6lPVmAUAb6U4u9xllgws1kP9ds=
40 | github.com/caarlos0/duration v0.0.0-20240108180406-5d492514f3c7/go.mod h1:mSkwb/eZEwOJJJ4tqAKiuhLIPe0e9+FKhlU0oMCpbf8=
41 | github.com/caarlos0/env/v9 v9.0.0 h1:SI6JNsOA+y5gj9njpgybykATIylrRMklbs5ch6wO6pc=
42 | github.com/caarlos0/env/v9 v9.0.0/go.mod h1:ye5mlCVMYh6tZ+vCgrs/B95sj88cg5Tlnc0XIzgZ020=
43 | github.com/caarlos0/go-shellwords v1.0.12 h1:HWrUnu6lGbWfrDcFiHcZiwOLzHWjjrPVehULaTFgPp8=
44 | github.com/caarlos0/go-shellwords v1.0.12/go.mod h1:bYeeX1GrTLPl5cAMYEzdm272qdsQAZiaHgeF0KTk1Gw=
45 | github.com/caarlos0/timea.go v1.2.0 h1:JkjyWSUheN4nGO/OmYVGKbEv4ozHP/zuTZWD5Ih3Gog=
46 | github.com/caarlos0/timea.go v1.2.0/go.mod h1:p4uopjR7K+y0Oxh7j0vLh3vSo58jjzOgXHKcyKwQjuY=
47 | github.com/catppuccin/go v0.3.0 h1:d+0/YicIq+hSTo5oPuRi5kOpqkVA5tAsU6dNhvRu+aY=
48 | github.com/catppuccin/go v0.3.0/go.mod h1:8IHJuMGaUUjQM82qBrGNBv7LFq6JI3NnQCF6MOlZjpc=
49 | github.com/charmbracelet/bubbles v0.21.1-0.20250623103423-23b8fd6302d7 h1:JFgG/xnwFfbezlUnFMJy0nusZvytYysV4SCS2cYbvws=
50 | github.com/charmbracelet/bubbles v0.21.1-0.20250623103423-23b8fd6302d7/go.mod h1:ISC1gtLcVilLOf23wvTfoQuYbW2q0JevFxPfUzZ9Ybw=
51 | github.com/charmbracelet/bubbletea v1.3.10 h1:otUDHWMMzQSB0Pkc87rm691KZ3SWa4KUlvF9nRvCICw=
52 | github.com/charmbracelet/bubbletea v1.3.10/go.mod h1:ORQfo0fk8U+po9VaNvnV95UPWA1BitP1E0N6xJPlHr4=
53 | github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs=
54 | github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk=
55 | github.com/charmbracelet/glamour v0.10.0 h1:MtZvfwsYCx8jEPFJm3rIBFIMZUfUJ765oX8V6kXldcY=
56 | github.com/charmbracelet/glamour v0.10.0/go.mod h1:f+uf+I/ChNmqo087elLnVdCiVgjSKWuXa/l6NU2ndYk=
57 | github.com/charmbracelet/huh v0.8.0 h1:Xz/Pm2h64cXQZn/Jvele4J3r7DDiqFCNIVteYukxDvY=
58 | github.com/charmbracelet/huh v0.8.0/go.mod h1:5YVc+SlZ1IhQALxRPpkGwwEKftN/+OlJlnJYlDRFqN4=
59 | github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834 h1:ZR7e0ro+SZZiIZD7msJyA+NjkCNNavuiPBLgerbOziE=
60 | github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834/go.mod h1:aKC/t2arECF6rNOnaKaVU6y4t4ZeHQzqfxedE/VkVhA=
61 | github.com/charmbracelet/x/ansi v0.10.1 h1:rL3Koar5XvX0pHGfovN03f5cxLbCF2YvLeyz7D2jVDQ=
62 | github.com/charmbracelet/x/ansi v0.10.1/go.mod h1:3RQDQ6lDnROptfpWuUVIUG64bD2g2BgntdxH0Ya5TeE=
63 | github.com/charmbracelet/x/cellbuf v0.0.13 h1:/KBBKHuVRbq1lYx5BzEHBAFBP8VcQzJejZ/IA3iR28k=
64 | github.com/charmbracelet/x/cellbuf v0.0.13/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs=
65 | github.com/charmbracelet/x/conpty v0.1.0 h1:4zc8KaIcbiL4mghEON8D72agYtSeIgq8FSThSPQIb+U=
66 | github.com/charmbracelet/x/conpty v0.1.0/go.mod h1:rMFsDJoDwVmiYM10aD4bH2XiRgwI7NYJtQgl5yskjEQ=
67 | github.com/charmbracelet/x/editor v0.1.0 h1:p69/dpvlwRTs9uYiPeAWruwsHqTFzHhTvQOd/WVSX98=
68 | github.com/charmbracelet/x/editor v0.1.0/go.mod h1:oivrEbcP/AYt/Hpvk5pwDXXrQ933gQS6UzL6fxqAGSA=
69 | github.com/charmbracelet/x/errors v0.0.0-20240508181413-e8d8b6e2de86 h1:JSt3B+U9iqk37QUU2Rvb6DSBYRLtWqFqfxf8l5hOZUA=
70 | github.com/charmbracelet/x/errors v0.0.0-20240508181413-e8d8b6e2de86/go.mod h1:2P0UgXMEa6TsToMSuFqKFQR+fZTO9CNGUNokkPatT/0=
71 | github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 h1:payRxjMjKgx2PaCWLZ4p3ro9y97+TVLZNaRZgJwSVDQ=
72 | github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U=
73 | github.com/charmbracelet/x/exp/ordered v0.1.0 h1:55/qLwjIh0gL0Vni+QAWk7T/qRVP6sBf+2agPBgnOFE=
74 | github.com/charmbracelet/x/exp/ordered v0.1.0/go.mod h1:5UHwmG+is5THxMyCJHNPCn2/ecI07aKNrW+LcResjJ8=
75 | github.com/charmbracelet/x/exp/slice v0.0.0-20250327172914-2fdc97757edf h1:rLG0Yb6MQSDKdB52aGX55JT1oi0P0Kuaj7wi1bLUpnI=
76 | github.com/charmbracelet/x/exp/slice v0.0.0-20250327172914-2fdc97757edf/go.mod h1:B3UgsnsBZS/eX42BlaNiJkD1pPOUa+oF1IYC6Yd2CEU=
77 | github.com/charmbracelet/x/exp/strings v0.0.0-20240722160745-212f7b056ed0 h1:qko3AQ4gK1MTS/de7F5hPGx6/k1u0w4TeYmBFwzYVP4=
78 | github.com/charmbracelet/x/exp/strings v0.0.0-20240722160745-212f7b056ed0/go.mod h1:pBhA0ybfXv6hDjQUZ7hk1lVxBiUbupdw5R31yPUViVQ=
79 | github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
80 | github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
81 | github.com/charmbracelet/x/termios v0.1.1 h1:o3Q2bT8eqzGnGPOYheoYS8eEleT5ZVNYNy8JawjaNZY=
82 | github.com/charmbracelet/x/termios v0.1.1/go.mod h1:rB7fnv1TgOPOyyKRJ9o+AsTU/vK5WHJ2ivHeut/Pcwo=
83 | github.com/charmbracelet/x/xpty v0.1.2 h1:Pqmu4TEJ8KeA9uSkISKMU3f+C1F6OGBn8ABuGlqCbtI=
84 | github.com/charmbracelet/x/xpty v0.1.2/go.mod h1:XK2Z0id5rtLWcpeNiMYBccNNBrP2IJnzHI0Lq13Xzq4=
85 | github.com/cohere-ai/cohere-go/v2 v2.16.1 h1:4yAPDJPKKgkkLpXseE9mujvezbs0WKQ01Y4sZVX9gRw=
86 | github.com/cohere-ai/cohere-go/v2 v2.16.1/go.mod h1:MuiJkCxlR18BDV2qQPbz2Yb/OCVphT1y6nD2zYaKeR0=
87 | github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
88 | github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=
89 | github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
90 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
91 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
92 | github.com/dlclark/regexp2 v1.11.4 h1:rPYF9/LECdNymJufQKmri9gV604RvvABwgOA8un7yAo=
93 | github.com/dlclark/regexp2 v1.11.4/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
94 | github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
95 | github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
96 | github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
97 | github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
98 | github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
99 | github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
100 | github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
101 | github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
102 | github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
103 | github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
104 | github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
105 | github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
106 | github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs=
107 | github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
108 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
109 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
110 | github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
111 | github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
112 | github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
113 | github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
114 | github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
115 | github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
116 | github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E=
117 | github.com/invopop/jsonschema v0.13.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0=
118 | github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
119 | github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=
120 | github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
121 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
122 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
123 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
124 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
125 | github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
126 | github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
127 | github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
128 | github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
129 | github.com/lucasb-eyer/go-colorful v1.3.0 h1:2/yBRLdWBZKrf7gB40FoiKfAWYQ0lqNcbuQwVHXptag=
130 | github.com/lucasb-eyer/go-colorful v1.3.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
131 | github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
132 | github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
133 | github.com/mark3labs/mcp-go v0.43.2 h1:21PUSlWWiSbUPQwXIJ5WKlETixpFpq+WBpbMGDSVy/I=
134 | github.com/mark3labs/mcp-go v0.43.2/go.mod h1:YnJfOL382MIWDx1kMY+2zsRHU/q78dBg9aFb8W6Thdw=
135 | github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
136 | github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
137 | github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
138 | github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
139 | github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
140 | github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
141 | github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
142 | github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
143 | github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM=
144 | github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
145 | github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk=
146 | github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA=
147 | github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
148 | github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE=
149 | github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
150 | github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
151 | github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
152 | github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
153 | github.com/muesli/mango v0.2.0 h1:iNNc0c5VLQ6fsMgAqGQofByNUBH2Q2nEbD6TaI+5yyQ=
154 | github.com/muesli/mango v0.2.0/go.mod h1:5XFpbC8jY5UUv89YQciiXNlbi+iJgt29VDC5xbzrLL4=
155 | github.com/muesli/mango-cobra v1.3.0 h1:vQy5GvPg3ndOSpduxutqFoINhWk3vD5K2dXo5E8pqec=
156 | github.com/muesli/mango-cobra v1.3.0/go.mod h1:Cj1ZrBu3806Qw7UjxnAUgE+7tllUBj1NCLQDwwGx19E=
157 | github.com/muesli/mango-pflag v0.1.0 h1:UADqbYgpUyRoBja3g6LUL+3LErjpsOwaC9ywvBWe7Sg=
158 | github.com/muesli/mango-pflag v0.1.0/go.mod h1:YEQomTxaCUp8PrbhFh10UfbhbQrM/xJ4i2PB8VTLLW0=
159 | github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s=
160 | github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8=
161 | github.com/muesli/roff v0.1.0 h1:YD0lalCotmYuF5HhZliKWlIx7IEhiXeSfq7hNjFqGF8=
162 | github.com/muesli/roff v0.1.0/go.mod h1:pjAHQM9hdUUwm/krAfrLGgJkXJ+YuhtsfZ42kieB2Ig=
163 | github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc=
164 | github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk=
165 | github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
166 | github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
167 | github.com/ollama/ollama v0.13.5 h1:ulttnWgeQrXc9jVsGReIP/9MCA+pF1XYTsdwiNMeZfk=
168 | github.com/ollama/ollama v0.13.5/go.mod h1:2VxohsKICsmUCrBjowf+luTXYiXn2Q70Cnvv5Urbzkw=
169 | github.com/openai/openai-go v1.12.0 h1:NBQCnXzqOTv5wsgNC36PrFEiskGfO5wccfCWDo9S1U0=
170 | github.com/openai/openai-go v1.12.0/go.mod h1:g461MYGXEXBVdV5SaR/5tNzNbSfwTBBefwc+LlDCK0Y=
171 | github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
172 | github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
173 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
174 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
175 | github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
176 | github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
177 | github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
178 | github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
179 | github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
180 | github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
181 | github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
182 | github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
183 | github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
184 | github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
185 | github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
186 | github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
187 | github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
188 | github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
189 | github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
190 | github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
191 | github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
192 | github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
193 | github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
194 | github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
195 | github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
196 | github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
197 | github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
198 | github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
199 | github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
200 | github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
201 | github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
202 | github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
203 | github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc=
204 | github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw=
205 | github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
206 | github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
207 | github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4=
208 | github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4=
209 | github.com/yuin/goldmark v1.7.1/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E=
210 | github.com/yuin/goldmark v1.7.8 h1:iERMLn0/QJeHFhxSt3p6PeN9mGnvIKSpG9YYorDMnic=
211 | github.com/yuin/goldmark v1.7.8/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E=
212 | github.com/yuin/goldmark-emoji v1.0.5 h1:EMVWyCGPlXJfUXBXpuMu+ii3TIaxbVBnEX9uaDC4cIk=
213 | github.com/yuin/goldmark-emoji v1.0.5/go.mod h1:tTkZEbwu5wkPmgTcitqddVxY9osFZiavD+r4AzQrh1U=
214 | go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
215 | golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
216 | golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
217 | golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o=
218 | golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
219 | golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
220 | golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
221 | golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
222 | golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
223 | golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
224 | golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
225 | golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
226 | golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
227 | golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
228 | golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
229 | golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
230 | golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
231 | golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
232 | golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
233 | golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
234 | golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
235 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
236 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
237 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
238 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
239 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
240 | modernc.org/cc/v4 v4.26.5 h1:xM3bX7Mve6G8K8b+T11ReenJOT+BmVqQj0FY5T4+5Y4=
241 | modernc.org/cc/v4 v4.26.5/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
242 | modernc.org/ccgo/v4 v4.28.1 h1:wPKYn5EC/mYTqBO373jKjvX2n+3+aK7+sICCv4Fjy1A=
243 | modernc.org/ccgo/v4 v4.28.1/go.mod h1:uD+4RnfrVgE6ec9NGguUNdhqzNIeeomeXf6CL0GTE5Q=
244 | modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA=
245 | modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
246 | modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
247 | modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
248 | modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
249 | modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
250 | modernc.org/libc v1.66.10 h1:yZkb3YeLx4oynyR+iUsXsybsX4Ubx7MQlSYEw4yj59A=
251 | modernc.org/libc v1.66.10/go.mod h1:8vGSEwvoUoltr4dlywvHqjtAqHBaw0j1jI7iFBTAr2I=
252 | modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
253 | modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
254 | modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
255 | modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
256 | modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
257 | modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
258 | modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
259 | modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
260 | modernc.org/sqlite v1.41.0 h1:bJXddp4ZpsqMsNN1vS0jWo4IJTZzb8nWpcgvyCFG9Ck=
261 | modernc.org/sqlite v1.41.0/go.mod h1:9fjQZ0mB1LLP0GYrp39oOJXx/I2sxEnZtzCmEQIKvGE=
262 | modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
263 | modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
264 | modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
265 | modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
266 |
--------------------------------------------------------------------------------