├── LICENSE ├── example │   ├── config.json │   └── example.go ├── go.mod ├── go.sum ├── mcp │   ├── config.go │   ├── types.go │   ├── engine.go │   └── openai_provider.go └── README.md /LICENSE: -------------------------------------------------------------------------------- 1 | Placeholder for License text. Choose an appropriate open-source license (e.g., MIT, Apache 2.0). -------------------------------------------------------------------------------- /example/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "mcpServers": { 3 | "filesystem": { 4 | "command": "npx", 5 | "args": ["-y", "@modelcontextprotocol/server-filesystem", "./"], 6 | "transport": "stdio" 7 | }, 8 | "kubernetes": { 9 | "command": "npx", 10 | "args": [ 11 | "-y", 12 | "kubernetes-mcp-server@latest" 13 | ] 14 | } 15 | } 16 | } -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/Chen-speculation/MCPKit 2 | 3 | go 1.23.7 4 | 5 | require ( 6 | github.com/mark3labs/mcp-go v0.23.1 7 | github.com/sashabaranov/go-openai v1.38.2 8 | github.com/sirupsen/logrus v1.9.3 9 | ) 10 | 11 | require ( 12 | github.com/google/uuid v1.6.0 // indirect 13 | github.com/spf13/cast v1.7.1 // indirect 14 | github.com/yosida95/uritemplate/v3 v3.0.2 // indirect 15 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect 16 | ) 17 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 2 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 3 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 4 | github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= 5 | github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= 6 | github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= 7 | github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 8 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 9 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 10 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 11 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 12 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 13 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 14 | github.com/mark3labs/mcp-go v0.23.1 h1:RzTzZ5kJ+HxwnutKA4rll8N/pKV6Wh5dhCmiJUu5S9I= 15 | github.com/mark3labs/mcp-go v0.23.1/go.mod h1:rXqOudj/djTORU/ThxYx8fqEVj/5pvTuuebQ2RC7uk4= 16 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 17 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 18 | github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= 19 | github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= 20 | github.com/sashabaranov/go-openai v1.38.2 h1:akrssjj+6DY3lWuDwHv6cBvJ8Z+FZDM9XEaaYFt0Auo= 21 | github.com/sashabaranov/go-openai v1.38.2/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg= 22 | 
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= 23 | github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= 24 | github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= 25 | github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= 26 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 27 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 28 | github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 29 | github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 30 | github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= 31 | github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= 32 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ= 33 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 34 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 35 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 36 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 37 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 38 | -------------------------------------------------------------------------------- /mcp/config.go: -------------------------------------------------------------------------------- 1 | package mcp 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "os" 7 | 8 | log "github.com/sirupsen/logrus" 9 | ) 10 | 11 | // rawServerConfig is used for initial JSON unmarshalling. 12 | // We use this intermediate step to determine the type before creating 13 | // the concrete ServerConfig implementation (ProcessServerConfig or SSEServerConfig). 14 | type rawServerConfig struct { 15 | // Common fields used for type determination or present in all types 16 | Command string `json:"command,omitempty"` 17 | URL string `json:"url,omitempty"` 18 | Transport string `json:"transport,omitempty"` 19 | 20 | // Capture the rest of the fields to unmarshal into the specific type later 21 | raw json.RawMessage 22 | } 23 | 24 | // UnmarshalJSON custom unmarshaler for rawServerConfig to capture the raw JSON. 25 | func (r *rawServerConfig) UnmarshalJSON(data []byte) error { 26 | // First, unmarshal into a temporary struct to get known fields 27 | type KnownFields struct { 28 | Command string `json:"command,omitempty"` 29 | URL string `json:"url,omitempty"` 30 | Transport string `json:"transport,omitempty"` 31 | } 32 | var known KnownFields 33 | if err := json.Unmarshal(data, &known); err != nil { 34 | return fmt.Errorf("failed to unmarshal known fields: %w", err) 35 | } 36 | r.Command = known.Command 37 | r.URL = known.URL 38 | r.Transport = known.Transport 39 | 40 | // Store the original raw JSON 41 | r.raw = data 42 | return nil 43 | } 44 | 45 | // configFile structure mirrors the expected JSON input (file or string). 46 | type configFile struct { 47 | // Using map[string]json.RawMessage allows flexible parsing based on type 48 | MCPServers map[string]json.RawMessage `json:"mcpServers"` 49 | } 50 | 51 | // ParseMCPServersJSON parses MCP server configurations from a JSON string. 
52 | // It determines the server type (process/stdio or sse) and creates the appropriate 53 | // ServerConfig struct (ProcessServerConfig or SSEServerConfig). 54 | // Returns a map of server names to their ServerConfig interface implementations. 55 | func ParseMCPServersJSON(configJSON string) (map[string]ServerConfig, error) { 56 | var cfgFile configFile 57 | if err := json.Unmarshal([]byte(configJSON), &cfgFile); err != nil { 58 | return nil, fmt.Errorf("failed to unmarshal MCP servers JSON config: %w", err) 59 | } 60 | 61 | if cfgFile.MCPServers == nil { 62 | log.Warn("No 'mcpServers' key found or it is null in the provided configuration JSON.") 63 | return make(map[string]ServerConfig), nil // Return empty map if none defined 64 | } 65 | 66 | engineServers := make(map[string]ServerConfig) 67 | for name, rawCfg := range cfgFile.MCPServers { 68 | // Determine the transport type explicitly or implicitly 69 | var knownFields struct { 70 | Command string `json:"command,omitempty"` 71 | URL string `json:"url,omitempty"` 72 | Transport string `json:"transport,omitempty"` 73 | } 74 | if err := json.Unmarshal(rawCfg, &knownFields); err != nil { 75 | log.Warnf("Skipping MCP server '%s': Failed to unmarshal basic fields: %v", name, err) 76 | continue 77 | } 78 | 79 | transportType := knownFields.Transport 80 | if transportType == "" { 81 | if knownFields.Command != "" { 82 | transportType = "process" // Default to process if command exists 83 | log.Debugf("MCP server '%s' has no explicit transport, defaulting to 'process' due to 'command' field.", name) 84 | } else if knownFields.URL != "" { 85 | transportType = "sse" // Default to sse if url exists and command doesn't 86 | log.Debugf("MCP server '%s' has no explicit transport, defaulting to 'sse' due to 'url' field.", name) 87 | } else { 88 | log.Warnf("Skipping MCP server '%s': Cannot determine transport type (missing 'transport', 'command', or 'url').", name) 89 | continue 90 | } 91 | } 92 | 93 | // Unmarshal into the specific config type based on transport 94 | var serverConf ServerConfig 95 | var unmarshalErr error 96 | switch transportType { 97 | case "stdio", "process": 98 | var procConf ProcessServerConfig 99 | unmarshalErr = json.Unmarshal(rawCfg, &procConf) 100 | if unmarshalErr == nil { 101 | if procConf.Command == "" { 102 | log.Warnf("Skipping MCP server '%s': 'command' is required for stdio/process transport but is empty.", name) 103 | continue 104 | } 105 | procConf.Name = name // Inject the logical name 106 | procConf.Type = "process" // Normalize type 107 | serverConf = &procConf 108 | log.Debugf("Parsed stdio/process server config for '%s'", name) 109 | } 110 | 111 | case "sse": 112 | var sseConf SSEServerConfig 113 | unmarshalErr = json.Unmarshal(rawCfg, &sseConf) 114 | if unmarshalErr == nil { 115 | if sseConf.URL == "" { 116 | log.Warnf("Skipping MCP server '%s': 'url' is required for sse transport but is empty.", name) 117 | continue 118 | } 119 | sseConf.Name = name // Inject the logical name 120 | sseConf.Type = "sse" // Normalize type 121 | serverConf = &sseConf 122 | log.Debugf("Parsed sse server config for '%s'", name) 123 | } 124 | 125 | default: 126 | log.Warnf("Skipping MCP server '%s': Unsupported transport type '%s'.", name, transportType) 127 | continue // Skip unsupported types 128 | } 129 | 130 | // Check for unmarshalling errors after the switch 131 | if unmarshalErr != nil { 132 | log.Warnf("Skipping MCP server '%s': Failed to unmarshal config for transport '%s': %v", name, transportType, unmarshalErr) 133 | 
continue 134 | } 135 | 136 | // Add successfully parsed config to the map 137 | if serverConf != nil { 138 | engineServers[name] = serverConf 139 | } 140 | } 141 | 142 | return engineServers, nil 143 | } 144 | 145 | // LoadConfigFromFile reads a JSON file from the given path and parses it. 146 | func LoadConfigFromFile(filePath string) (map[string]ServerConfig, error) { 147 | data, err := os.ReadFile(filePath) 148 | if err != nil { 149 | return nil, fmt.Errorf("failed to read MCP config file '%s': %w", filePath, err) 150 | } 151 | 152 | if len(data) == 0 { 153 | return nil, fmt.Errorf("MCP config file '%s' is empty", filePath) 154 | } 155 | 156 | return ParseMCPServersJSON(string(data)) 157 | } 158 | -------------------------------------------------------------------------------- /example/example.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "flag" 7 | "fmt" 8 | "os" 9 | "os/signal" 10 | "syscall" 11 | 12 | "github.com/google/uuid" 13 | log "github.com/sirupsen/logrus" 14 | 15 | "github.com/Chen-speculation/MCPKit/mcp" // Import the MCP engine library by its module path 16 | ) 17 | 18 | func main() { 19 | // --- Configuration Flags --- 20 | openaiAPIKey := flag.String("openai-key", os.Getenv("OPENAI_API_KEY"), "OpenAI API Key (or env var OPENAI_API_KEY)") 21 | openaiBaseURL := flag.String("openai-url", os.Getenv("OPENAI_BASE_URL"), "Optional: OpenAI compatible Base URL (or env var OPENAI_BASE_URL)") 22 | modelName := flag.String("model", "gpt-4o", "LLM model name") 23 | mcpConfigPath := flag.String("mcp-config", "./config.json", "Path to MCP servers JSON configuration file") 24 | userPrompt := flag.String("prompt", "You are an assistant that can call tools. Tell me what is in this folder.", "User prompt for the LLM") 25 | logLevel := flag.String("log-level", "info", "Log level (debug, info, warn, error)") 26 | 27 | flag.Parse() 28 | 29 | // --- Logging Setup --- 30 | level, err := log.ParseLevel(*logLevel) 31 | if err != nil { 32 | log.SetLevel(log.InfoLevel) 33 | log.Warnf("Invalid log level '%s', defaulting to info. Error: %v", *logLevel, err) 34 | } else { 35 | log.SetLevel(level) 36 | } 37 | log.SetFormatter(&log.TextFormatter{FullTimestamp: true}) 38 | log.Info("Starting MCPKit MCP Example...") 39 | 40 | // --- Input Validation --- 41 | if *openaiAPIKey == "" { 42 | log.Fatal("OpenAI API Key is required. Set via -openai-key flag or OPENAI_API_KEY env var.") 43 | return 44 | } 45 | if *userPrompt == "" { 46 | log.Fatal("User prompt is required. 
Set via -prompt flag.") 47 | return 48 | } 49 | 50 | // --- Load MCP Server Configuration --- 51 | log.Infof("Loading MCP server configuration from: %s", *mcpConfigPath) 52 | mcpServers, err := mcp.LoadConfigFromFile(*mcpConfigPath) 53 | if err != nil { 54 | log.Fatalf("Failed to load MCP configuration: %v", err) 55 | return 56 | } 57 | if len(mcpServers) == 0 { 58 | log.Warn("No MCP servers loaded from the configuration file.") 59 | } 60 | 61 | // --- Setup Context --- 62 | ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) 63 | defer stop() // Ensure stop is called eventually 64 | 65 | // --- Initialize MCP Engine --- 66 | log.Info("Initializing MCP Engine...") 67 | engineConfig := mcp.EngineConfig{ 68 | DefaultModel: *modelName, 69 | OpenAIAPIKey: *openaiAPIKey, 70 | OpenAIBaseURL: *openaiBaseURL, 71 | MCPServers: mcpServers, 72 | } 73 | 74 | engine, err := mcp.NewEngine(engineConfig) 75 | if err != nil { 76 | log.Fatalf("Failed to initialize MCP Engine: %v", err) 77 | return 78 | } 79 | // Ensure engine resources (like MCP clients) are closed on exit 80 | defer func() { 81 | log.Info("Shutting down MCP Engine...") 82 | if err := engine.Close(); err != nil { 83 | log.Errorf("Error shutting down MCP Engine: %v", err) 84 | } 85 | log.Info("MCP Engine shut down complete.") 86 | }() 87 | 88 | // --- Prepare Chat --- 89 | sessionID := uuid.NewString() 90 | log.Infof("Starting chat stream with Session ID: %s", sessionID) 91 | // For this example, history is empty. A real app would manage history. 92 | history := []mcp.Message{} 93 | outputChan := make(chan mcp.ChatEvent, 10) // Buffered channel 94 | 95 | // --- Start Chat Stream (Goroutine) --- 96 | go func() { 97 | // Ensure channel is closed when goroutine finishes or errors 98 | defer close(outputChan) 99 | err := engine.ChatStream(ctx, sessionID, *userPrompt, history, outputChan) 100 | if err != nil { 101 | // Don't log fatal from goroutine, send error event instead 102 | log.Errorf("Chat stream error: %v", err) 103 | // Try sending error event if channel is still open and context not done 104 | select { 105 | case outputChan <- mcp.ChatEvent{Type: "error", Error: err.Error()}: 106 | case <-ctx.Done(): // Check context cancellation 107 | log.Warn("Context cancelled while trying to send chat stream error event.") 108 | default: 109 | // Channel likely closed already 110 | } 111 | } 112 | }() 113 | 114 | // --- Process Chat Events --- 115 | log.Info("Waiting for chat events... 
Press Ctrl+C to exit.") 116 | keepRunning := true 117 | for keepRunning { 118 | select { 119 | case event, ok := <-outputChan: 120 | if !ok { 121 | log.Info("Chat stream channel closed.") 122 | keepRunning = false 123 | break // Exit select when channel closes 124 | } 125 | 126 | // Process the received event 127 | switch event.Type { 128 | case "session_id": 129 | log.Infof("[🆔] Session ID: %s", event.SessionID) 130 | case "text": 131 | fmt.Print(event.Content) // Print text chunks directly for streaming effect 132 | case "tool_call": 133 | fmt.Println() // Newline before tool call info 134 | argsJSON, _ := json.Marshal(event.ToolCall.Arguments) 135 | log.Infof("[⚙️] Tool Call Requested: ID=%s, Name=%s, Args=%s", 136 | event.ToolCall.ID, event.ToolCall.Name, string(argsJSON)) 137 | case "tool_result": 138 | status := "OK" 139 | if event.ToolResult.IsError { 140 | status = "ERROR" 141 | } 142 | // Truncate long results for cleaner logging 143 | maxLen := 200 144 | content := event.ToolResult.Content 145 | if len(content) > maxLen { 146 | content = content[:maxLen] + "..." 147 | } 148 | log.Infof("[🧰] Tool Result Received: ID=%s, Name=%s, Status=%s, Content=%s", 149 | event.ToolResult.CallID, event.ToolResult.Name, status, content) 150 | case "error": 151 | fmt.Println() // Ensure newline before error 152 | log.Errorf("[‼️] Error Event: %s", event.Error) 153 | // Potentially stop processing on critical errors? For example, keepRunning = false 154 | case "finish": 155 | fmt.Println() // Ensure newline after last text chunk 156 | log.Info("[✅] Chat stream finished.") 157 | // keepRunning = false // Often we want to exit after finish 158 | default: 159 | log.Warnf("Received unknown event type: %s", event.Type) 160 | } 161 | 162 | case <-ctx.Done(): 163 | log.Info("Shutdown signal received, exiting event loop.") 164 | keepRunning = false 165 | } 166 | } 167 | 168 | // Final check for context error after loop 169 | if ctx.Err() != nil { 170 | log.Warnf("Context error after event loop: %v", ctx.Err()) 171 | } 172 | 173 | log.Info("AwesomeProj MCP Example finished.") 174 | } 175 | -------------------------------------------------------------------------------- /mcp/types.go: -------------------------------------------------------------------------------- 1 | package mcp 2 | 3 | import ( 4 | "context" 5 | ) 6 | 7 | // Engine defines the interface for interacting with a language model 8 | // and managing MCP tool interactions. 9 | type Engine interface { 10 | // ChatStream handles streaming chat, sending prompt and history to the LLM, 11 | // managing tool discovery via MCP, handling LLM tool calls, executing them via MCP, 12 | // and streaming back text, tool calls, tool results, and errors. 13 | ChatStream(ctx context.Context, sessionID string, prompt string, history []Message, outputChan chan<- ChatEvent) error 14 | 15 | // ListTools returns the combined list of tool definitions available from all configured MCP servers. 16 | ListTools() ([]ToolDefinition, error) 17 | 18 | // Name returns the identifier for the specific engine implementation (e.g., "openai"). 19 | Name() string 20 | 21 | // Close gracefully shuts down the engine and its associated MCP clients. 22 | Close() error 23 | } 24 | 25 | // Message represents a single message in a chat conversation. 
26 | type Message struct { 27 | Role string `json:"role"` // Sender role: "user", "assistant", "system", "tool" 28 | Content string `json:"content"` // Text content of the message 29 | ToolCalls []ToolCall `json:"tool_calls,omitempty"` // Tool calls requested by the assistant 30 | ToolCallID string `json:"tool_call_id,omitempty"` // ID of the tool call this message is a result for (for role="tool") 31 | } 32 | 33 | // ToolCall represents a request by the LLM to call a specific tool. 34 | // This structure mirrors the essential parts needed from the LLM's perspective. 35 | type ToolCall struct { 36 | ID string `json:"id"` // Unique ID for this specific tool call instance 37 | Name string `json:"name"` // Name of the tool to be called (potentially prefixed with server name, e.g., "server__tool") 38 | Arguments map[string]interface{} `json:"arguments"` // Arguments for the tool, structured as a map 39 | } 40 | 41 | // ToolDefinition represents the structure defining a tool, understandable by the LLM. 42 | // Based on OpenAI's function definition structure. 43 | type ToolDefinition struct { 44 | Name string `json:"name"` // Tool name (potentially prefixed, e.g., "server__tool") 45 | Description string `json:"description"` // Description of what the tool does 46 | Schema Schema `json:"schema"` // Input parameter schema for the tool 47 | } 48 | 49 | // Schema defines the input parameters for a tool, following JSON Schema conventions. 50 | type Schema struct { 51 | Type string `json:"type"` // Typically "object" 52 | Properties map[string]interface{} `json:"properties,omitempty"` // Map of parameter names to their definitions 53 | Required []string `json:"required,omitempty"` // List of required parameter names 54 | } 55 | 56 | // ChatEvent represents an event occurring during a chat stream. 57 | type ChatEvent struct { 58 | Type string `json:"type"` // Type: "text", "tool_call", "tool_result", "session_id", "error", "finish" 59 | Content string `json:"content,omitempty"` // Text chunk (for type="text") 60 | ToolCall *ToolCall `json:"tool_call,omitempty"` // Details of the tool call requested by LLM (for type="tool_call") 61 | ToolResult *ToolResult `json:"tool_result,omitempty"` // Details of the result from an executed tool (for type="tool_result") 62 | SessionID string `json:"session_id,omitempty"` // Session ID for the chat (for type="session_id") 63 | Error string `json:"error,omitempty"` // Error message (for type="error") 64 | } 65 | 66 | // ToolResult represents the outcome of an executed tool call. 67 | type ToolResult struct { 68 | CallID string `json:"callId"` // ID of the corresponding ToolCall 69 | Name string `json:"name"` // Name of the tool that was called (e.g., "server__tool") 70 | Content string `json:"content"` // Result content (string representation) 71 | IsError bool `json:"isError"` // Flag indicating if the content represents an error message 72 | } 73 | 74 | // --- Configuration Types --- 75 | 76 | // EngineConfig holds the configuration for the MCP Engine. 77 | type EngineConfig struct { 78 | DefaultModel string // Default LLM model name (e.g., "gpt-4o") 79 | OpenAIAPIKey string // API Key for OpenAI 80 | OpenAIBaseURL string // Optional: Base URL for OpenAI compatible API 81 | MCPServers map[string]ServerConfig // Map of MCP server names to their configurations 82 | } 83 | 84 | // ServerConfig is an interface for different MCP server connection types. 
85 | type ServerConfig interface { 86 | GetType() string 87 | GetName() string // Get the logical name assigned to this server config 88 | } 89 | 90 | // BaseServerConfig provides common fields for server configurations. 91 | type BaseServerConfig struct { 92 | Name string `json:"-"` // Logical name, injected during parsing 93 | Type string `json:"type,omitempty"` // Server type (e.g., "process", "sse") 94 | } 95 | 96 | func (c *BaseServerConfig) GetName() string { 97 | return c.Name 98 | } 99 | 100 | // ProcessServerConfig defines configuration for an MCP server connected via stdio. 101 | type ProcessServerConfig struct { 102 | BaseServerConfig 103 | Command string `json:"command"` // Command to execute 104 | Args []string `json:"args,omitempty"` // Arguments for the command 105 | WorkingDir string `json:"workingDir,omitempty"` // Working directory for the command 106 | Env []string `json:"env,omitempty"` // Additional environment variables 107 | } 108 | 109 | // GetType returns the server type ("process"). 110 | func (c *ProcessServerConfig) GetType() string { 111 | if c.Type == "" { 112 | return "process" 113 | } 114 | return c.Type 115 | } 116 | 117 | // SSEServerConfig defines configuration for an MCP server connected via Server-Sent Events (SSE). 118 | type SSEServerConfig struct { 119 | BaseServerConfig 120 | URL string `json:"url"` // URL of the SSE endpoint 121 | Headers map[string]string `json:"headers,omitempty"` // Optional headers (e.g., Authorization) 122 | } 123 | 124 | // GetType returns the server type ("sse"). 125 | func (c *SSEServerConfig) GetType() string { 126 | if c.Type == "" { 127 | return "sse" 128 | } 129 | return c.Type 130 | } 131 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Inspired by https://github.com/mark3labs/mcphost. 2 | 3 | **English** 4 | 5 | * **Project Goal**: Implement an "MCP Engine" library in Go. This library allows Go applications to interface with Large Language Models (LLMs) and dynamically utilize tools exposed by external MCP Servers (processes or services adhering to the Model Context Protocol). 6 | * **MCP Introduction**: The Model Context Protocol (MCP) aims to standardize how applications provide context (like tools and data sources) to Large Language Models (LLMs). It acts like a common interface, enabling LLMs to discover and utilize capabilities provided by different external systems. Read more at [https://github.com/modelcontextprotocol/modelcontextprotocol](https://github.com/modelcontextprotocol/modelcontextprotocol). 7 | * **Configuration**: MCP Servers are configured via a JSON file (e.g., `mcp_servers.json`). The file specifies a map of server names to their configuration details. The library supports servers communicating via `stdio` (`process`) or `sse`. 8 | * `command`: The executable to run (for `process` type). 9 | * `args`: Arguments for the command (for `process` type). 10 | * `url`: The endpoint URL (for `sse` type). 11 | * `transport`: Communication method (`"stdio"`/`"process"` or `"sse"`). Inferred from `command` or `url` when omitted. 12 | * `workingDir`: (Optional) Working directory for the server process (`process` type). 13 | * `headers`: (Optional) Headers for the connection (`sse` type). 
14 | 15 | See `example/mcp_servers.json` for a sample (create this file based on your needs): 16 | ```json 17 | { 18 | "mcpServers": { 19 | "filesystem": { 20 | "command": "npx", 21 | "args": [ 22 | "-y", 23 | "@modelcontextprotocol/server-filesystem", 24 | "./fs_data" 25 | ], 26 | "transport": "process", 27 | "workingDir": "." 28 | }, 29 | "kubernetes": { 30 | "command": "npx", 31 | "args": [ 32 | "-y", 33 | "kubernetes-mcp-server@latest" 34 | ], 35 | "transport": "process" 36 | } 37 | // Example SSE server config (if you have one running) 38 | // "sse_example": { 39 | // "url": "http://localhost:8080/sse", 40 | // "transport": "sse", 41 | // "headers": { 42 | // "Authorization": "Bearer your_token" 43 | // } 44 | // } 45 | } 46 | } 47 | ``` 48 | * **Usage (LLM Interaction with MCP Tools)**: 49 | 1. Import the library: `import "github.com/Chen-speculation/MCPKit/mcp"` (using the correct module path from your `go.mod`). 50 | 2. Create an MCP server configuration file (e.g., `mcp_servers.json`). 51 | 3. Load the MCP configuration: `mcpServers, err := mcp.LoadConfigFromFile("mcp_servers.json")`. 52 | 4. Prepare the engine configuration, including LLM details (API key, model) and the loaded MCP servers: `engineConfig := mcp.EngineConfig{...}`. 53 | 5. Initialize the engine: `engine, err := mcp.NewEngine(engineConfig)`. This starts MCP clients. 54 | 6. Ensure clean shutdown: `defer engine.Close()`. 55 | 7. Start a chat stream: `go engine.ChatStream(ctx, sessionID, prompt, history, outputChan)`. 56 | 8. Process events (`mcp.ChatEvent`) from `outputChan`: Handle text chunks, tool call requests from the LLM, tool results, session ID, and errors. 57 | 58 | **Note:** The current implementation is focused on **OpenAI-compatible LLMs** for the chat and tool-calling logic. 59 | 60 | See `example/example.go` for a runnable demonstration. Run it with flags, e.g.: 61 | ```bash 62 | go run example/example.go \ 63 | -openai-key="YOUR_OPENAI_API_KEY" \ 64 | -mcp-config="./example/mcp_servers.json" \ 65 | -prompt="List the files in the root directory using the filesystem tool, then tell me about kubernetes pods." 66 | ``` 67 | * **How it Works**: The `mcp.Engine` reads the MCP server configuration and creates corresponding MCP clients (using `github.com/mark3labs/mcp-go/client`). When `ChatStream` is called, the engine (currently the `openaiProvider`): 68 | 1. Fetches available tools from all connected MCP clients using `ListTools`. 69 | 2. Formats the user prompt, history, and available tools for the configured LLM (OpenAI API). 70 | 3. Sends the request to the LLM. 71 | 4. Processes the LLM's response stream: 72 | * Yields text chunks via the output channel. 73 | * If the LLM requests a tool call (using OpenAI's function calling format), the engine identifies the target MCP client based on the tool name prefix (e.g., `filesystem__list_files`). 74 | * Sends a `tool_call` event. 75 | * Executes the tool call via the corresponding MCP client using `CallTool`. 76 | * Sends a `tool_result` event. 77 | * Sends the tool result back to the LLM to continue the conversation. 78 | 5. Repeats the process if further tool calls are needed, until the LLM finishes. 
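For a single self-contained view of the flow above, the sketch below condenses `example/example.go` (error handling, flags, and signal setup trimmed). It is an illustration, not part of the library: the fixed `"demo-session"` ID stands in for the generated UUID the example uses, and the prompt is hard-coded.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/Chen-speculation/MCPKit/mcp"
)

func main() {
	// Steps 2-3: load the MCP server definitions from JSON.
	servers, err := mcp.LoadConfigFromFile("example/config.json")
	if err != nil {
		log.Fatal(err)
	}

	// Steps 4-6: build the engine and make sure its MCP clients are torn down.
	engine, err := mcp.NewEngine(mcp.EngineConfig{
		DefaultModel: "gpt-4o",
		OpenAIAPIKey: os.Getenv("OPENAI_API_KEY"),
		MCPServers:   servers,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer engine.Close()

	// Steps 7-8: stream one prompt and drain the event channel. ChatStream does
	// not close the channel itself, so the producer goroutine closes it.
	events := make(chan mcp.ChatEvent, 10)
	go func() {
		defer close(events)
		if err := engine.ChatStream(context.Background(), "demo-session",
			"List the files in the current directory.", nil, events); err != nil {
			log.Printf("chat stream: %v", err)
		}
	}()

	for ev := range events {
		switch ev.Type {
		case "text":
			fmt.Print(ev.Content)
		case "tool_call":
			fmt.Printf("\n[tool call] %s\n", ev.ToolCall.Name)
		case "tool_result":
			fmt.Printf("[tool result] %s (error=%v)\n", ev.ToolResult.Name, ev.ToolResult.IsError)
		case "error":
			fmt.Printf("\n[error] %s\n", ev.Error)
		}
	}
}
```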
79 | 80 | --- 81 | 82 | **Chinese** 83 | 84 | * **Project Goal**: Implement an "MCP Engine" library in Go. This library allows Go applications to interface with Large Language Models (LLMs) and dynamically utilize tools exposed by external MCP servers (processes or services adhering to the Model Context Protocol). 85 | * **MCP Introduction**: The Model Context Protocol (MCP) aims to standardize how applications provide context (such as tools and data sources) to Large Language Models (LLMs). It acts like a common interface, enabling LLMs to discover and utilize capabilities provided by different external systems. Read more at [https://github.com/modelcontextprotocol/modelcontextprotocol](https://github.com/modelcontextprotocol/modelcontextprotocol). 86 | * **Configuration**: MCP servers are configured via a JSON file (e.g., `mcp_servers.json`). The file specifies a map of server names to their configuration details. The library supports servers communicating via `stdio` (`process`) or `sse`. 87 | * `command`: The executable to run (for the `process` type). 88 | * `args`: Arguments passed to the command (for the `process` type). 89 | * `url`: The endpoint URL (for the `sse` type). 90 | * `transport`: Communication method (`"stdio"`/`"process"` or `"sse"`). Inferred from `command` or `url` when omitted. 91 | * `workingDir`: (Optional) Working directory for the server process (`process` type). 92 | * `headers`: (Optional) Headers for the connection (`sse` type). 93 | 94 | See the sample in `example/mcp_servers.json` (create this file based on your needs): 95 | ```json 96 | { 97 | "mcpServers": { 98 | "filesystem": { 99 | "command": "npx", 100 | "args": [ 101 | "-y", 102 | "@modelcontextprotocol/server-filesystem", 103 | "./fs_data" 104 | ], 105 | "transport": "process", 106 | "workingDir": "." 107 | }, 108 | "kubernetes": { 109 | "command": "npx", 110 | "args": [ 111 | "-y", 112 | "kubernetes-mcp-server@latest" 113 | ], 114 | "transport": "process" 115 | } 116 | // Example SSE server config (if you have one running) 117 | // "sse_example": { 118 | // "url": "http://localhost:8080/sse", 119 | // "transport": "sse", 120 | // "headers": { 121 | // "Authorization": "Bearer your_token" 122 | // } 123 | // } 124 | } 125 | } 126 | ``` 127 | * **Usage (LLM Interaction with MCP Tools)**: 128 | 1. Import the library: `import "github.com/Chen-speculation/MCPKit/mcp"` (using the correct module path from your `go.mod`). 129 | 2. Create an MCP server configuration file (e.g., `mcp_servers.json`). 130 | 3. Load the MCP configuration: `mcpServers, err := mcp.LoadConfigFromFile("mcp_servers.json")`. 131 | 4. Prepare the engine configuration, including LLM details (API key, model) and the loaded MCP servers: `engineConfig := mcp.EngineConfig{...}`. 132 | 5. Initialize the engine: `engine, err := mcp.NewEngine(engineConfig)`. This starts the MCP clients. 133 | 6. Ensure a clean shutdown: `defer engine.Close()`. 134 | 7. Start a chat stream: `go engine.ChatStream(ctx, sessionID, prompt, history, outputChan)`. 135 | 8. Process events (`mcp.ChatEvent`) from `outputChan`: handle text chunks, tool call requests from the LLM, tool results, the session ID, and errors. 136 | 137 | **Note:** The current implementation is focused on **OpenAI-compatible LLMs** for the chat and tool-calling logic. 138 | 139 | See `example/example.go` for a runnable demonstration. Run it with flags, e.g.: 140 | ```bash 141 | go run example/example.go \ 142 | -openai-key="YOUR_OPENAI_API_KEY" \ 143 | -mcp-config="./example/mcp_servers.json" \ 144 | -prompt="List the files in the root directory using the filesystem tool, then tell me about kubernetes pods." 145 | ``` 146 | * **How it Works**: The `mcp.Engine` reads the MCP server configuration and creates corresponding MCP clients (using `github.com/mark3labs/mcp-go/client`). When `ChatStream` is called, the engine (currently the `openaiProvider`): 147 | 1. Fetches available tools from all connected MCP clients using `ListTools`. 148 | 2. Formats the user prompt, history, and available tools for the configured LLM (OpenAI API). 149 | 3. Sends the request to the LLM. 150 | 4. Processes the LLM's response stream: 151 | * Yields text chunks via the output channel. 152 | * If the LLM requests a tool call (using OpenAI's function calling format), the engine identifies the target MCP client based on the tool name prefix (e.g., `filesystem__list_files`). 153 | * Sends a `tool_call` event. 154 | * Executes the tool call via the corresponding MCP client using `CallTool`. 155 | * Sends a `tool_result` event. 156 | * Sends the tool result back to the LLM to continue the conversation. 157 | 5. Repeats the process if further tool calls are needed, until the LLM finishes.
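Both README sections describe exposing tools to the LLM as `server__tool` (e.g., `filesystem__list_files`) and routing calls back by that prefix. The snippet below is not code from this repository; it is a minimal sketch of that naming convention, with `routeToolName` as a hypothetical helper name:

```go
package main

import (
	"fmt"
	"strings"
)

// routeToolName splits a prefixed tool name such as "filesystem__list_files"
// into the MCP server name ("filesystem") and the bare tool name ("list_files").
// Hypothetical helper: the engine applies the same convention internally when
// dispatching a tool call to the matching MCP client.
func routeToolName(prefixed string) (server, tool string, err error) {
	parts := strings.SplitN(prefixed, "__", 2)
	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
		return "", "", fmt.Errorf("tool name %q is not in server__tool form", prefixed)
	}
	return parts[0], parts[1], nil
}

func main() {
	server, tool, err := routeToolName("filesystem__list_files")
	if err != nil {
		panic(err)
	}
	fmt.Println(server, tool) // filesystem list_files
}
```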
158 | -------------------------------------------------------------------------------- /mcp/engine.go: -------------------------------------------------------------------------------- 1 | package mcp 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strings" 7 | "sync" 8 | "time" 9 | 10 | mcpclient "github.com/mark3labs/mcp-go/client" 11 | mcptransport "github.com/mark3labs/mcp-go/client/transport" 12 | mcplib "github.com/mark3labs/mcp-go/mcp" 13 | log "github.com/sirupsen/logrus" 14 | ) 15 | 16 | // mcpEngine is the concrete implementation holding the configured provider. 17 | type mcpEngine struct { 18 | provider Engine // Currently, this will be *openaiProvider 19 | } 20 | 21 | // NewEngine creates and initializes a new MCP engine based on the provided configuration. 22 | // It sets up MCP clients for the configured servers and instantiates the LLM provider (currently OpenAI). 23 | func NewEngine(config EngineConfig) (Engine, error) { 24 | // Initialize MCP Clients based on config.MCPServers 25 | mcpClients := make(map[string]mcpclient.MCPClient) 26 | var initWg sync.WaitGroup 27 | var initMutex sync.Mutex 28 | initErrs := make(map[string]error) 29 | 30 | log.Infof("Initializing %d MCP clients...", len(config.MCPServers)) 31 | initCtx, cancelInit := context.WithTimeout(context.Background(), 90*time.Second) // Timeout for initializing all clients 32 | defer cancelInit() 33 | 34 | for name, serverConf := range config.MCPServers { 35 | initWg.Add(1) 36 | go func(sName string, sConf ServerConfig) { 37 | defer initWg.Done() 38 | var mcpClient mcpclient.MCPClient 39 | var clientErr error 40 | 41 | clientCtx, clientCancel := context.WithCancel(initCtx) // Context per client init 42 | defer clientCancel() 43 | 44 | switch conf := sConf.(type) { 45 | case *ProcessServerConfig: 46 | log.Infof("Initializing MCP process client: %s (Cmd: %s)", sName, conf.Command) 47 | // Extra environment variables are forwarded through the env argument of 48 | // NewStdioMCPClient below. A working directory, however, cannot be set 49 | // through the current client library: the spawned server inherits this 50 | // process's working directory. If that matters, wrap the command in a 51 | // script that does the chdir (or manage the process yourself via os/exec). 52 | if conf.WorkingDir != "" { 53 | log.Warnf("MCP server '%s': 'workingDir' is not supported by the stdio transport and will be ignored.", sName) 54 | } 55 | 56 | mcpClient, clientErr = mcpclient.NewStdioMCPClient(conf.Command, conf.Env, conf.Args...) 57 | if clientErr != nil { 58 | clientErr = fmt.Errorf("failed to create stdio client for '%s': %w", sName, clientErr) 59 | } 60 | 61 | case *SSEServerConfig: 62 | log.Infof("Initializing MCP SSE client: %s (URL: %s)", sName, conf.URL) 63 | transportOpts := []mcptransport.ClientOption{} 64 | if len(conf.Headers) > 0 { 65 | transportOpts = append(transportOpts, mcptransport.WithHeaders(conf.Headers)) 66 | log.Debugf("Added %d headers for SSE client %s", len(conf.Headers), sName) 67 | } 68 | mcpClient, clientErr = mcpclient.NewSSEMCPClient(conf.URL, transportOpts...) 69 | if clientErr != nil { 70 | clientErr = fmt.Errorf("failed to create sse client for '%s': %w", sName, clientErr) 71 | } else { 72 | // The SSE client must start its background event loop before it can be used. 73 | // The MCPClient interface does not expose Start, so probe for it with a type assertion. 
74 | if starter, ok := mcpClient.(interface{ Start(context.Context) error }); ok { 75 | startErr := starter.Start(clientCtx) // Start requires context 76 | if startErr != nil { 77 | clientErr = fmt.Errorf("failed to start sse client '%s': %w", sName, startErr) 78 | } 79 | } else { 80 | log.Debugf("Client type %T for %s does not implement Start(context.Context)", mcpClient, sName) 81 | // Assume non-SSE clients don't need explicit Start here 82 | } 83 | } 84 | 85 | default: 86 | clientErr = fmt.Errorf("unsupported server config type '%T' for server '%s'", sConf, sName) 87 | } 88 | 89 | // If client creation/start failed, record error and return 90 | if clientErr != nil { 91 | log.Warnf("Failed to create/start MCP client for '%s': %v", sName, clientErr) 92 | initMutex.Lock() 93 | initErrs[sName] = clientErr 94 | initMutex.Unlock() 95 | if mcpClient != nil { // Attempt cleanup if client was partially created (e.g., SSE) 96 | _ = mcpClient.Close() 97 | } 98 | return 99 | } 100 | 101 | // --- Initialize MCP Session --- 102 | log.Debugf("Sending MCP Initialize request to server: %s", sName) 103 | initReq := mcplib.InitializeRequest{ 104 | Params: struct { 105 | ProtocolVersion string `json:"protocolVersion"` 106 | Capabilities mcplib.ClientCapabilities `json:"capabilities"` 107 | ClientInfo mcplib.Implementation `json:"clientInfo"` 108 | }{ 109 | ProtocolVersion: mcplib.LATEST_PROTOCOL_VERSION, 110 | ClientInfo: mcplib.Implementation{ 111 | Name: "mcpkit-mcp-client", // Identify our client 112 | Version: "0.1.0", // TODO: Versioning? 113 | }, 114 | Capabilities: mcplib.ClientCapabilities{ /* Define capabilities if needed */ }, 115 | }, 116 | } 117 | _, initSessionErr := mcpClient.Initialize(clientCtx, initReq) 118 | if initSessionErr != nil { 119 | initErr := fmt.Errorf("failed to initialize MCP session with server '%s': %w", sName, initSessionErr) 120 | log.Warn(initErr.Error()) 121 | initMutex.Lock() 122 | initErrs[sName] = initErr 123 | initMutex.Unlock() 124 | _ = mcpClient.Close() // Cleanup failed client 125 | return 126 | } 127 | 128 | // Store successfully initialized client 129 | initMutex.Lock() 130 | mcpClients[sName] = mcpClient 131 | initMutex.Unlock() 132 | log.Infof("Successfully initialized MCP client for server: %s", sName) 133 | 134 | }(name, serverConf) 135 | } 136 | 137 | initWg.Wait() // Wait for all initializations to complete or timeout 138 | 139 | // Check for overall timeout 140 | if initCtx.Err() == context.DeadlineExceeded { 141 | log.Errorf("MCP client initialization timed out after %v.", 90*time.Second) 142 | // Close any clients that might have been initialized before timeout 143 | closeAllClients(mcpClients) // Use helper to attempt cleanup 144 | return nil, fmt.Errorf("mcp client initialization timed out") 145 | } 146 | 147 | // Check if any clients failed to initialize 148 | if len(initErrs) > 0 { 149 | var errorMsgs []string 150 | for name, err := range initErrs { 151 | errorMsgs = append(errorMsgs, fmt.Sprintf(" '%s': %v", name, err)) 152 | } 153 | log.Errorf("Failed to initialize %d MCP client(s):%s", len(initErrs), strings.Join(errorMsgs, ";")) 154 | // Close successfully initialized clients before returning error 155 | closeAllClients(mcpClients) 156 | return nil, fmt.Errorf("failed to initialize %d mcp client(s):%s", len(initErrs), strings.Join(errorMsgs, ";")) 157 | } 158 | 159 | // If no servers were configured, return an error or a limited engine? 160 | // For now, let's allow it but log a warning. 
161 | if len(config.MCPServers) == 0 { 162 | log.Warn("No MCP servers configured in EngineConfig.MCPServers. Tool listing and execution will be unavailable.") 163 | } else if len(mcpClients) == 0 { 164 | // This case should be covered by initErrs check, but as a safeguard: 165 | log.Error("MCP servers were configured, but no clients were successfully initialized.") 166 | return nil, fmt.Errorf("no mcp clients initialized despite configuration") 167 | } 168 | 169 | log.Infof("Successfully initialized %d MCP clients.", len(mcpClients)) 170 | 171 | // --- Create the LLM Provider --- 172 | // Currently hardcoded to OpenAI provider 173 | provider, err := newOpenaiProvider(config, mcpClients) 174 | if err != nil { 175 | log.Errorf("Failed to create OpenAI provider: %v", err) 176 | closeAllClients(mcpClients) // Cleanup clients if provider fails 177 | return nil, fmt.Errorf("failed to create engine provider: %w", err) 178 | } 179 | 180 | engine := &mcpEngine{ 181 | provider: provider, 182 | } 183 | 184 | log.Info("MCP Engine initialized successfully.") 185 | return engine, nil 186 | } 187 | 188 | // ChatStream delegates to the configured provider. 189 | func (e *mcpEngine) ChatStream(ctx context.Context, sessionID string, prompt string, history []Message, outputChan chan<- ChatEvent) error { 190 | if e.provider == nil { 191 | return fmt.Errorf("engine provider is not initialized") 192 | } 193 | return e.provider.ChatStream(ctx, sessionID, prompt, history, outputChan) 194 | } 195 | 196 | // ListTools delegates to the configured provider. 197 | func (e *mcpEngine) ListTools() ([]ToolDefinition, error) { 198 | if e.provider == nil { 199 | return nil, fmt.Errorf("engine provider is not initialized") 200 | } 201 | return e.provider.ListTools() 202 | } 203 | 204 | // Name delegates to the configured provider. 205 | func (e *mcpEngine) Name() string { 206 | if e.provider == nil { 207 | return "uninitialized" 208 | } 209 | return e.provider.Name() 210 | } 211 | 212 | // Close delegates to the configured provider to close itself and its clients. 213 | func (e *mcpEngine) Close() error { 214 | log.Info("Closing MCP Engine...") 215 | if e.provider == nil { 216 | log.Warn("Engine provider already nil during Close.") 217 | return nil 218 | } 219 | err := e.provider.Close() 220 | e.provider = nil // Prevent further use 221 | log.Info("MCP Engine closed.") 222 | return err 223 | } 224 | 225 | // closeAllClients is a helper to attempt closing multiple MCP clients. 
226 | func closeAllClients(clients map[string]mcpclient.MCPClient) { 227 | log.Warnf("Closing %d MCP clients due to initialization failure or engine shutdown...", len(clients)) 228 | var wg sync.WaitGroup 229 | for name, client := range clients { 230 | wg.Add(1) 231 | go func(n string, c mcpclient.MCPClient) { 232 | defer wg.Done() 233 | if err := c.Close(); err != nil { 234 | log.Warnf("Error closing MCP client '%s' during cleanup: %v", n, err) 235 | } 236 | }(name, client) 237 | } 238 | wg.Wait() 239 | } 240 | -------------------------------------------------------------------------------- /mcp/openai_provider.go: -------------------------------------------------------------------------------- 1 | package mcp 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "io" 9 | "strings" 10 | "sync" 11 | "time" 12 | 13 | mcpclient "github.com/mark3labs/mcp-go/client" 14 | mcplib "github.com/mark3labs/mcp-go/mcp" // Aliased to avoid collision with package name 15 | openai "github.com/sashabaranov/go-openai" 16 | log "github.com/sirupsen/logrus" 17 | ) 18 | 19 | // openaiProvider implements the Engine interface using OpenAI's API. 20 | type openaiProvider struct { 21 | client *openai.Client 22 | model string 23 | apiKey string 24 | baseURL string 25 | mcpClients map[string]mcpclient.MCPClient // Map of server name to MCP client instance 26 | closeOnce sync.Once 27 | } 28 | 29 | // newOpenaiProvider creates a new instance of the OpenAI provider. 30 | // It initializes the OpenAI client but does not start MCP clients. 31 | func newOpenaiProvider(config EngineConfig, mcpClients map[string]mcpclient.MCPClient) (*openaiProvider, error) { 32 | apiKey := config.OpenAIAPIKey 33 | baseURL := config.OpenAIBaseURL 34 | model := config.DefaultModel 35 | 36 | if apiKey == "" { 37 | return nil, fmt.Errorf("OpenAI API key (EngineConfig.OpenAIAPIKey) is required") 38 | } 39 | if model == "" { 40 | // You might want to default the model here or return an error 41 | log.Warnf("OpenAI model (EngineConfig.DefaultModel) not specified, defaulting might occur in OpenAI library or API.") 42 | // model = "gpt-3.5-turbo" // Example default 43 | } 44 | 45 | openaiConfig := openai.DefaultConfig(apiKey) 46 | if baseURL != "" { 47 | openaiConfig.BaseURL = baseURL 48 | log.Infof("Using custom OpenAI base URL: %s", baseURL) 49 | } 50 | client := openai.NewClientWithConfig(openaiConfig) 51 | 52 | return &openaiProvider{ 53 | client: client, 54 | model: model, 55 | apiKey: apiKey, 56 | baseURL: baseURL, 57 | mcpClients: mcpClients, // Store the map of pre-initialized MCP clients 58 | }, nil 59 | } 60 | 61 | // Name returns the name of the engine. 62 | func (p *openaiProvider) Name() string { 63 | return "openai" 64 | } 65 | 66 | // Close shuts down the provider, primarily closing MCP clients. 
67 | func (p *openaiProvider) Close() error { 68 | log.Info("Closing OpenAI provider and associated MCP clients...") 69 | p.closeOnce.Do(func() { 70 | var closeErrors []string // appended from the goroutines below; guarded by closeMu 71 | var closeMu sync.Mutex; var wg sync.WaitGroup 72 | for name, client := range p.mcpClients { 73 | wg.Add(1) 74 | go func(n string, c mcpclient.MCPClient) { 75 | defer wg.Done() 76 | log.Debugf("Closing MCP client: %s", n) 77 | if err := c.Close(); err != nil { 78 | log.Warnf("Error closing MCP client '%s': %v", n, err) 79 | // Collect the error under lock; Close runs one goroutine per client. 80 | closeMu.Lock(); closeErrors = append(closeErrors, fmt.Sprintf("%s: %v", n, err)); closeMu.Unlock() 81 | } 82 | }(name, client) 83 | } 84 | wg.Wait() 85 | // Log a combined error; returning one is another option: 86 | if len(closeErrors) > 0 { 87 | log.Errorf("Errors encountered while closing MCP clients: %s", strings.Join(closeErrors, "; ")) 88 | // If the caller should see the failure, return it instead: 89 | // return fmt.Errorf("errors closing clients: %s", strings.Join(closeErrors, "; ")) 90 | } 91 | }) 92 | log.Info("OpenAI provider closed.") 93 | return nil 94 | } 95 | 96 | // ChatStream implements streaming chat using OpenAI's API. 97 | // It handles text streaming and iterative tool calls via MCP clients. 98 | func (p *openaiProvider) ChatStream(ctx context.Context, sessionID string, prompt string, history []Message, outputChan chan<- ChatEvent) error { 99 | currentMessages := convertToOpenAIMessages(history) 100 | 101 | // Add the new user prompt if provided 102 | if prompt != "" { 103 | currentMessages = append(currentMessages, openai.ChatCompletionMessage{ 104 | Role: openai.ChatMessageRoleUser, 105 | Content: prompt, 106 | }) 107 | } 108 | 109 | // List and convert available tools from MCP clients 110 | tools, err := p.ListTools() // Returns []ToolDefinition (our type) 111 | if err != nil { 112 | // Log warning but proceed without tools if listing fails 113 | log.Warnf("Failed to list tools from MCP servers: %v. Proceeding without tools.", err) 114 | tools = []ToolDefinition{} 115 | } 116 | openaiTools := convertToOpenAITools(tools) 117 | log.Infof("Prepared %d tools for OpenAI API call.", len(openaiTools)) 118 | 119 | // Send session ID event 120 | sendEvent(ctx, outputChan, ChatEvent{Type: "session_id", SessionID: sessionID}) 121 | 122 | // --- Main Loop for Tool Call Iterations --- 123 | const maxToolIterations = 5 // Safety limit for tool call loops 124 | iteration := 0 125 | completedToolCalls := make(map[string]bool) // Track completed tool calls within this ChatStream call 126 | 127 | for iteration < maxToolIterations { 128 | iteration++ 129 | log.Infof("Starting LLM interaction cycle %d", iteration) 130 | 131 | // --- Create OpenAI Stream Request --- 132 | req := openai.ChatCompletionRequest{ 133 | Model: p.model, 134 | Messages: currentMessages, 135 | MaxTokens: 4096, // Consider making configurable 136 | Temperature: 0.7, // Consider making configurable 137 | Stream: true, 138 | } 139 | if len(openaiTools) > 0 { 140 | req.Tools = openaiTools 141 | req.ToolChoice = "auto" // Let OpenAI decide 142 | } 143 | 144 | stream, err := p.client.CreateChatCompletionStream(ctx, req) 145 | if err != nil { 146 | err = fmt.Errorf("ChatCompletionStream request failed: %w", err) 147 | log.Error(err) 148 | sendEvent(ctx, outputChan, ChatEvent{Type: "error", Error: err.Error()}) 149 | return err // Abort on stream creation failure 150 | } 151 | // The stream for this turn is closed via a defer inside the processing closure 152 | // below, so it is released before the next iteration begins. 
153 | 154 | var fullAssistantMessageContent strings.Builder 155 | accumulatedToolCalls := make(map[int]openai.ToolCall) // Use map to handle potential index gaps 156 | var currentFinishReason openai.FinishReason 157 | var assistantResponseMessage openai.ChatCompletionMessage // To store the final message from this iteration 158 | 159 | // --- Process Stream for One LLM Turn --- 160 | streamProcessingError := func() error { 161 | defer stream.Close() // Ensure stream is closed after processing 162 | for { 163 | response, streamErr := stream.Recv() 164 | if errors.Is(streamErr, io.EOF) { 165 | log.Info("Stream finished (EOF).") 166 | break // End of this stream 167 | } 168 | if streamErr != nil { 169 | err := fmt.Errorf("stream processing error: %w", streamErr) 170 | log.Error(err) 171 | sendEvent(ctx, outputChan, ChatEvent{Type: "error", Error: err.Error()}) 172 | return err // Return error to stop outer loop 173 | } 174 | 175 | if len(response.Choices) == 0 { 176 | continue // Skip empty choices 177 | } 178 | 179 | delta := response.Choices[0].Delta 180 | currentFinishReason = response.Choices[0].FinishReason 181 | 182 | // Append text content 183 | if delta.Content != "" { 184 | sendEvent(ctx, outputChan, ChatEvent{Type: "text", Content: delta.Content}) 185 | fullAssistantMessageContent.WriteString(delta.Content) 186 | } 187 | 188 | // Accumulate tool calls 189 | if len(delta.ToolCalls) > 0 { 190 | for _, tcDelta := range delta.ToolCalls { 191 | if tcDelta.Index == nil { 192 | log.Warnf("Tool call delta missing index, cannot process reliably.") 193 | continue // Skip if index is missing 194 | } 195 | idx := *tcDelta.Index 196 | // Initialize map entry if needed 197 | if _, exists := accumulatedToolCalls[idx]; !exists { 198 | accumulatedToolCalls[idx] = openai.ToolCall{Function: openai.FunctionCall{}} 199 | } 200 | tc := accumulatedToolCalls[idx] // Get a copy to modify 201 | if tcDelta.ID != "" { 202 | tc.ID = tcDelta.ID 203 | } 204 | if tcDelta.Type != "" { 205 | tc.Type = tcDelta.Type 206 | } 207 | if tcDelta.Function.Name != "" { 208 | tc.Function.Name = tcDelta.Function.Name 209 | } 210 | if tcDelta.Function.Arguments != "" { 211 | tc.Function.Arguments += tcDelta.Function.Arguments 212 | } 213 | accumulatedToolCalls[idx] = tc // Put modified copy back 214 | } 215 | } 216 | 217 | // If a finish reason is received, stop processing this stream 218 | if currentFinishReason != "" { 219 | log.Infof("Stream processing finished with reason: %s", currentFinishReason) 220 | break 221 | } 222 | } 223 | return nil // Successful stream processing 224 | }() 225 | 226 | if streamProcessingError != nil { 227 | return streamProcessingError // Exit if stream processing failed 228 | } 229 | 230 | // --- Construct Assistant Message from Stream Output --- 231 | assistantResponseMessage = openai.ChatCompletionMessage{ 232 | Role: openai.ChatMessageRoleAssistant, 233 | } 234 | finalContent := fullAssistantMessageContent.String() 235 | finalToolCalls := make([]openai.ToolCall, 0, len(accumulatedToolCalls)) 236 | for _, tc := range accumulatedToolCalls { 237 | // Basic validation: Ensure ID, Type, Name, and Arguments are present 238 | if tc.ID != "" && tc.Type == openai.ToolTypeFunction && tc.Function.Name != "" && tc.Function.Arguments != "" { 239 | finalToolCalls = append(finalToolCalls, tc) 240 | } else { 241 | log.Warnf("Discarding incomplete tool call accumulated: ID=%s, Type=%s, Name=%s, ArgsLen=%d", tc.ID, tc.Type, tc.Function.Name, len(tc.Function.Arguments)) 242 | } 243 | } 244 | 245 | if 
len(finalToolCalls) > 0 { 246 | assistantResponseMessage.ToolCalls = finalToolCalls 247 | // assistantResponseMessage.Content should remain empty or nil for tool calls 248 | log.Infof("Assistant message contains %d tool calls.", len(finalToolCalls)) 249 | } else if finalContent != "" { 250 | assistantResponseMessage.Content = finalContent 251 | log.Infof("Assistant message contains text content (length: %d).", len(finalContent)) 252 | } else { 253 | // Handle cases where the assistant response is empty (neither text nor tools) 254 | log.Warnf("Assistant response was empty (no text or valid tool calls). Finish reason: %s", currentFinishReason) 255 | // If finish reason was stop, this might be okay. If tool_calls, it's weird. 256 | // If we stop here, the history might be incomplete. Let's add an empty message 257 | // but this might need adjustment based on observed API behavior. 258 | assistantResponseMessage.Content = "" // Explicitly empty 259 | } 260 | 261 | // Append assistant's message to history for next potential iteration or final history 262 | currentMessages = append(currentMessages, assistantResponseMessage) 263 | 264 | // --- Handle Finish Reason --- 265 | if currentFinishReason == openai.FinishReasonToolCalls { 266 | log.Infof("Finish Reason: Tool Calls. Processing %d calls.", len(assistantResponseMessage.ToolCalls)) 267 | toolResults := make([]openai.ChatCompletionMessage, 0, len(assistantResponseMessage.ToolCalls)) 268 | var toolWg sync.WaitGroup 269 | var toolMutex sync.Mutex // Protects toolResults slice 270 | 271 | for _, toolCall := range assistantResponseMessage.ToolCalls { 272 | if completedToolCalls[toolCall.ID] { 273 | log.Warnf("Tool call ID '%s' (%s) was already completed in a previous iteration. Skipping.", toolCall.ID, toolCall.Function.Name) 274 | continue 275 | } 276 | 277 | toolWg.Add(1) 278 | go func(tc openai.ToolCall) { 279 | defer toolWg.Done() 280 | 281 | var args map[string]interface{} 282 | if err := json.Unmarshal([]byte(tc.Function.Arguments), &args); err != nil { 283 | errMsg := fmt.Sprintf("Failed to parse JSON arguments for tool %s (ID: %s): %v", tc.Function.Name, tc.ID, err) 284 | log.Warnf(errMsg) 285 | sendEvent(ctx, outputChan, ChatEvent{Type: "error", Error: errMsg}) 286 | // Add error result message for LLM 287 | toolMutex.Lock() 288 | toolResults = append(toolResults, openai.ChatCompletionMessage{ 289 | Role: openai.ChatMessageRoleTool, 290 | ToolCallID: tc.ID, 291 | Name: tc.Function.Name, // Include name for context 292 | Content: fmt.Sprintf("Error parsing arguments: %v", err), 293 | }) 294 | completedToolCalls[tc.ID] = true // Mark as completed (due to error) 295 | toolMutex.Unlock() 296 | return 297 | } 298 | 299 | log.Infof("Requesting tool execution: ID=%s, Name=%s, Args=%v", tc.ID, tc.Function.Name, args) 300 | 301 | // Send tool_call event (using our ToolCall type) 302 | sendEvent(ctx, outputChan, ChatEvent{ 303 | Type: "tool_call", 304 | ToolCall: &ToolCall{ 305 | ID: tc.ID, 306 | Name: tc.Function.Name, 307 | Arguments: args, 308 | }, 309 | }) 310 | 311 | // Execute the tool via MCP 312 | // Use a derived context with timeout for the tool call itself 313 | toolCtx, toolCancel := context.WithTimeout(ctx, 60*time.Second) // Configurable timeout? 
60s default 314 | toolResultContent, toolErr := executeMcpTool(toolCtx, p.mcpClients, tc.Function.Name, args) 315 | toolCancel() // Cancel context regardless of outcome 316 | 317 | // Send tool_result event (using our ToolResult type) 318 | sendEvent(ctx, outputChan, ChatEvent{ 319 | Type: "tool_result", 320 | ToolResult: &ToolResult{ 321 | CallID: tc.ID, 322 | Name: tc.Function.Name, 323 | Content: toolResultContent, 324 | IsError: toolErr != nil, 325 | }, 326 | }) 327 | 328 | // Add result message for the next LLM call 329 | toolMutex.Lock() 330 | toolResults = append(toolResults, openai.ChatCompletionMessage{ 331 | Role: openai.ChatMessageRoleTool, 332 | ToolCallID: tc.ID, 333 | Name: tc.Function.Name, // Name might be useful for context 334 | Content: toolResultContent, // Contains result or error message 335 | }) 336 | completedToolCalls[tc.ID] = true // Mark as completed 337 | toolMutex.Unlock() 338 | }(toolCall) 339 | } 340 | toolWg.Wait() // Wait for all tool calls in this iteration to finish 341 | 342 | // Add all tool results to the history for the next LLM call 343 | currentMessages = append(currentMessages, toolResults...) 344 | 345 | // Continue the loop to call the LLM again with the tool results 346 | continue 347 | 348 | } else if currentFinishReason == openai.FinishReasonStop { 349 | log.Info("Finish Reason: Stop. Chat completed normally.") 350 | break // Exit the main loop, conversation is complete 351 | } else { 352 | // Handle other finish reasons (length, content_filter, null) 353 | errMsg := fmt.Sprintf("Chat finished with unexpected reason: %s", currentFinishReason) 354 | log.Warnf(errMsg) 355 | // Send an error event? Or just finish? 356 | // sendEvent(ctx, outputChan, ChatEvent{Type: "error", Error: errMsg}) 357 | break // Exit loop on other/unexpected reasons 358 | } 359 | } // End of main for loop (tool call iterations) 360 | 361 | if iteration >= maxToolIterations { 362 | log.Warnf("Reached maximum tool iteration limit (%d). Finishing chat.", maxToolIterations) 363 | sendEvent(ctx, outputChan, ChatEvent{Type: "error", Error: fmt.Sprintf("reached maximum tool iterations (%d)", maxToolIterations)}) 364 | } 365 | 366 | log.Info("ChatStream processing complete.") 367 | sendEvent(ctx, outputChan, ChatEvent{Type: "finish"}) // Signal completion 368 | return nil 369 | } 370 | 371 | // --- Helper Functions --- 372 | 373 | // convertToOpenAIMessages converts internal []Message history to []openai.ChatCompletionMessage. 374 | func convertToOpenAIMessages(history []Message) []openai.ChatCompletionMessage { 375 | apiMessages := make([]openai.ChatCompletionMessage, 0, len(history)) 376 | for _, msg := range history { 377 | apiMsg := openai.ChatCompletionMessage{ 378 | Role: msg.Role, 379 | } 380 | 381 | switch msg.Role { 382 | case openai.ChatMessageRoleUser, openai.ChatMessageRoleSystem: 383 | apiMsg.Content = msg.Content 384 | case openai.ChatMessageRoleAssistant: 385 | if len(msg.ToolCalls) > 0 { 386 | apiMsg.ToolCalls = make([]openai.ToolCall, len(msg.ToolCalls)) 387 | for i, engCall := range msg.ToolCalls { 388 | argsJSON, err := json.Marshal(engCall.Arguments) 389 | if err != nil { 390 | log.Errorf("Error marshalling history tool call arguments for call ID %s: %v. 
391 |                         argsJSON = []byte("{}")
392 |                     }
393 |                     apiMsg.ToolCalls[i] = openai.ToolCall{
394 |                         ID:   engCall.ID,
395 |                         Type: openai.ToolTypeFunction,
396 |                         Function: openai.FunctionCall{
397 |                             Name:      engCall.Name,
398 |                             Arguments: string(argsJSON),
399 |                         },
400 |                     }
401 |                 }
402 |                 // Content stays at its zero value ("") when ToolCalls are present;
403 |                 // the API accepts an empty content field alongside tool_calls.
404 |             } else {
405 |                 // Regular assistant message content.
406 |                 apiMsg.Content = msg.Content
407 |             }
408 |         case openai.ChatMessageRoleTool:
409 |             apiMsg.Content = msg.Content // Content holds the result.
410 |             apiMsg.ToolCallID = msg.ToolCallID
411 |             if msg.ToolCallID == "" {
412 |                 log.Warnf("History conversion: Tool message found with empty ToolCallID. Content: %s", msg.Content)
413 |             }
414 |         default:
415 |             log.Warnf("History conversion: Unknown role '%s' encountered.", msg.Role)
416 |             continue // Skip unknown roles.
417 |         }
418 |         apiMessages = append(apiMessages, apiMsg)
419 |     }
420 |     return apiMessages
421 | }
422 | 
423 | // convertToOpenAITools converts internal []ToolDefinition to []openai.Tool.
424 | func convertToOpenAITools(tools []ToolDefinition) []openai.Tool {
425 |     openaiTools := make([]openai.Tool, len(tools))
426 |     for i, tool := range tools {
427 |         // Marshal our Schema struct into the JSON structure OpenAI expects;
428 |         // the raw bytes are passed through unmodified as the parameter schema.
429 |         schemaBytes, err := json.Marshal(tool.Schema)
430 |         if err != nil {
431 |             log.Errorf("Failed to marshal internal schema for tool '%s': %v. Using empty parameters.", tool.Name, err)
432 |             schemaBytes = []byte("{}") // Fall back to an empty object.
433 |         }
434 | 
435 |         // json.RawMessage is compatible with openai.FunctionDefinition.Parameters.
436 |         rawSchema := json.RawMessage(schemaBytes)
437 | 
438 |         openaiTools[i] = openai.Tool{
439 |             Type: openai.ToolTypeFunction,
440 |             Function: &openai.FunctionDefinition{
441 |                 Name:        tool.Name, // In the server__toolname format produced by ListTools.
442 |                 Description: tool.Description,
443 |                 Parameters:  rawSchema,
444 |             },
445 |         }
446 |     }
447 |     return openaiTools
448 | }
449 | 
450 | // ListTools fetches tool definitions from all connected MCP servers.
451 | // It prepends the server name to the tool name (e.g., "filesystem__list_files").
452 | func (p *openaiProvider) ListTools() ([]ToolDefinition, error) {
453 |     allTools := make([]ToolDefinition, 0)
454 |     var mu sync.Mutex // Protects the allTools slice.
455 |     var wg sync.WaitGroup
456 | 
457 |     listToolsCtx, cancel := context.WithTimeout(context.Background(), 15*time.Second) // Timeout for listing across all clients.
458 |     defer cancel()
459 | 
460 |     log.Infof("Listing tools from %d MCP clients...", len(p.mcpClients))
461 |     for serverName, client := range p.mcpClients {
462 |         wg.Add(1)
463 |         go func(sName string, c mcpclient.MCPClient) {
464 |             defer wg.Done()
465 |             log.Debugf("Requesting tools from MCP server: %s", sName)
466 |             req := mcplib.ListToolsRequest{} // Empty request body.
467 |             resp, err := c.ListTools(listToolsCtx, req)
468 |             if err != nil {
469 |                 if errors.Is(err, context.DeadlineExceeded) {
470 |                     log.Warnf("Timeout listing tools from server '%s'.", sName)
471 |                 } else {
472 |                     log.Warnf("Failed to list tools from server '%s': %v. Skipping server.", sName, err)
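                    // Listing is best-effort by design: an unreachable server only
                    // shrinks the advertised tool set rather than failing ListTools.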
473 |                 }
474 |                 return // Skip this server on error.
475 |             }
476 | 
477 |             if resp == nil || len(resp.Tools) == 0 {
478 |                 log.Debugf("No tools reported by server '%s'", sName)
479 |                 return
480 |             }
481 | 
482 |             log.Debugf("Received %d tools from server '%s'", len(resp.Tools), sName)
483 |             serverTools := make([]ToolDefinition, 0, len(resp.Tools))
484 |             for _, mcpTool := range resp.Tools {
485 |                 // Convert the mcplib tool definition to our internal ToolDefinition.
486 |                 engineSchema := Schema{
487 |                     Type:       "object", // Default type.
488 |                     Properties: make(map[string]interface{}),
489 |                     Required:   []string{},
490 |                 }
491 | 
492 |                 // A non-empty Type or Properties indicates the server supplied a usable schema.
493 |                 if mcpTool.InputSchema.Type != "" || len(mcpTool.InputSchema.Properties) > 0 {
494 |                     inputSchema := mcpTool.InputSchema // mcplib.ToolInputSchema
495 | 
496 |                     if inputSchema.Properties != nil {
497 |                         // Convert Properties to map[string]interface{} via a JSON round-trip
498 |                         // so we don't depend on mcplib's concrete schema types.
499 |                         tempProps := make(map[string]interface{})
500 |                         propBytes, err := json.Marshal(inputSchema.Properties)
501 |                         if err == nil {
502 |                             err = json.Unmarshal(propBytes, &tempProps)
503 |                         }
504 |                         if err != nil {
505 |                             log.Warnf("Failed to marshal/unmarshal properties for tool '%s' from server '%s': %v", mcpTool.Name, sName, err)
506 |                         } else {
507 |                             engineSchema.Properties = tempProps
508 |                         }
509 |                     } else {
510 |                         log.Debugf("Tool '%s' from server '%s' has nil InputSchema.Properties", mcpTool.Name, sName)
511 |                     }
512 | 
513 |                     if inputSchema.Required != nil {
514 |                         engineSchema.Required = inputSchema.Required
515 |                     }
516 | 
517 |                     if inputSchema.Type != "" {
518 |                         engineSchema.Type = inputSchema.Type
519 |                     }
520 |                 } else {
521 |                     log.Debugf("Tool '%s' from server '%s' has an empty/default InputSchema definition.", mcpTool.Name, sName)
522 |                 }
523 | 
524 |                 serverTools = append(serverTools, ToolDefinition{
525 |                     Name:        fmt.Sprintf("%s__%s", sName, mcpTool.Name), // Prepend the server name.
526 |                     Description: mcpTool.Description,
527 |                     Schema:      engineSchema,
528 |                 })
529 |             }
530 |             // Append this server's tools to the main list safely.
531 |             mu.Lock()
532 |             allTools = append(allTools, serverTools...)
533 |             mu.Unlock()
534 |         }(serverName, client)
535 |     }
536 | 
537 |     wg.Wait() // Wait for all listing goroutines to complete.
538 | 
539 |     log.Infof("Total tools listed from all MCP servers: %d", len(allTools))
540 |     return allTools, nil // Per-server failures are logged and skipped, so the error is currently always nil.
541 | }
542 | 
543 | // executeMcpTool performs the actual MCP tool call via the appropriate client.
544 | // Returns the result content (string) and any execution error.
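//
// For example, a tool surfaced by ListTools as "filesystem__list_files"
// (a hypothetical name; real names depend on the connected servers) is
// routed to the "filesystem" client and invoked as "list_files":
//
//     content, err := executeMcpTool(ctx, p.mcpClients,
//         "filesystem__list_files", map[string]interface{}{"path": "./"})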
545 | func executeMcpTool(ctx context.Context, mcpClients map[string]mcpclient.MCPClient, fullToolName string, args map[string]interface{}) (resultContent string, err error) {
546 |     parts := strings.SplitN(fullToolName, "__", 2) // Split on the first "__" only; the tool name itself may contain underscores.
547 |     if len(parts) != 2 {
548 |         errMsg := fmt.Sprintf("invalid tool name format: expected 'server__toolname', got '%s'", fullToolName)
549 |         log.Warn(errMsg)
550 |         return fmt.Sprintf("Error: %s", errMsg), errors.New(errMsg)
551 |     }
552 | 
553 |     serverName, toolName := parts[0], parts[1]
554 |     mcpClient, ok := mcpClients[serverName]
555 |     if !ok {
556 |         errMsg := fmt.Sprintf("MCP server '%s' not found for tool '%s'", serverName, fullToolName)
557 |         log.Warn(errMsg)
558 |         return fmt.Sprintf("Error: %s", errMsg), errors.New(errMsg)
559 |     }
560 | 
561 |     // Build the request using mcplib types and the structure mcplib expects for Params.
562 |     toolRequest := mcplib.CallToolRequest{
563 |         Params: struct {
564 |             Name      string                 `json:"name"`
565 |             Arguments map[string]interface{} `json:"arguments,omitempty"`
566 |             Meta      *struct {
567 |                 ProgressToken mcplib.ProgressToken `json:"progressToken,omitempty"`
568 |             } `json:"_meta,omitempty"`
569 |         }{
570 |             Name:      toolName,
571 |             Arguments: args,
572 |         },
573 |     }
574 | 
575 |     // Execute the tool call (the caller's context is expected to carry a timeout).
576 |     log.Debugf("Calling MCP tool '%s' on server '%s' with args: %v", toolName, serverName, args)
577 |     toolResult, callErr := mcpClient.CallTool(ctx, toolRequest)
578 | 
579 |     if callErr != nil {
580 |         // Check specifically for context deadline exceeded.
581 |         if errors.Is(callErr, context.DeadlineExceeded) {
582 |             errMsg := fmt.Sprintf("timeout calling tool %s on server %s", fullToolName, serverName)
583 |             log.Warn(errMsg)
584 |             return fmt.Sprintf("Error: %s", errMsg), callErr // Return the specific error.
585 |         } else {
586 |             errMsg := fmt.Sprintf("error calling tool %s on server %s: %v", fullToolName, serverName, callErr)
587 |             log.Warn(errMsg)
588 |             return fmt.Sprintf("Error executing tool %s: %v", fullToolName, callErr), callErr
589 |         }
590 |     }
591 | 
592 |     // Process the successful result.
593 |     if toolResult == nil {
594 |         log.Infof("Tool %s executed successfully on server %s but returned a nil result.", fullToolName, serverName)
595 |         return fmt.Sprintf("Tool %s executed successfully (no content).", fullToolName), nil
596 |     }
597 | 
598 |     // Extract the textual content from toolResult.Content.
599 |     if len(toolResult.Content) > 0 {
600 |         var resultTextBuilder strings.Builder
601 |         for i, item := range toolResult.Content {
602 |             // Attempt to handle known content types (like TextContent).
603 |             if textContent, ok := item.(mcplib.TextContent); ok {
604 |                 resultTextBuilder.WriteString(textContent.Text)
605 |             } else {
606 |                 // Fallback: marshal unknown content types to a JSON string.
607 |                 log.Debugf("Marshalling unknown tool result content type (%T) to JSON for tool %s", item, fullToolName)
608 |                 unknownJSON, jsonErr := json.Marshal(item)
609 |                 if jsonErr != nil {
610 |                     log.Warnf("Failed to marshal unknown tool result item #%d (%T) to JSON for tool %s: %v", i, item, fullToolName, jsonErr)
611 |                     resultTextBuilder.WriteString(fmt.Sprintf("[Unmarshallable Content Type: %T]", item))
612 |                 } else {
613 |                     resultTextBuilder.WriteString(string(unknownJSON))
614 |                 }
615 |             }
616 |             // Add a space between items for readability; trimmed below.
617 |             if i < len(toolResult.Content)-1 {
618 |                 resultTextBuilder.WriteString(" ")
619 |             }
620 |         }
621 |         resultContent = strings.TrimSpace(resultTextBuilder.String())
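        // Multi-part results are flattened into a single space-joined string
        // because the result travels back to the model as one Content field.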
622 |         log.Debugf("Tool %s result processed: %s", fullToolName, resultContent)
623 |         return resultContent, nil // Success with content.
624 |     } else {
625 |         // No content items in the result.
626 |         log.Infof("Tool %s executed successfully on server %s but returned no content items.", fullToolName, serverName)
627 |         return fmt.Sprintf("Tool %s executed successfully (empty content).", fullToolName), nil // Success, no content.
628 |     }
629 | }
630 | 
631 | // sendEvent safely sends a ChatEvent to the output channel, respecting context cancellation.
632 | func sendEvent(ctx context.Context, outputChan chan<- ChatEvent, event ChatEvent) {
633 |     select {
634 |     case outputChan <- event:
635 |         // Event sent successfully.
636 |     case <-ctx.Done():
637 |         // Context cancelled, cannot send event.
638 |         log.Warnf("Context cancelled before sending event type '%s'.", event.Type)
639 |     }
640 | }
641 | 
--------------------------------------------------------------------------------