cmd/main.go: 54 changes (33 additions, 21 deletions)
@@ -14,6 +14,8 @@ import (
"syscall"
"time"

"github.com/tmc/langchaingo/llms"

"github.com/tuannvm/slack-mcp-client/internal/common"
customErrors "github.com/tuannvm/slack-mcp-client/internal/common/errors"
"github.com/tuannvm/slack-mcp-client/internal/common/logging"
@@ -51,10 +53,10 @@ func main() {
cfg := loadAndPrepareConfig(logger)

// Initialize MCP clients and discover tools
mcpClients, discoveredTools := initializeMCPClients(logger, cfg)
mcpClients, discoveredTools, llmsTools := initializeMCPClients(logger, cfg)

// Initialize and run Slack client
startSlackClient(logger, mcpClients, discoveredTools, cfg)
startSlackClient(logger, mcpClients, discoveredTools, llmsTools, cfg)
}

// setupLogging initializes the logging system
@@ -123,10 +125,11 @@ func loadAndPrepareConfig(logger *logging.Logger) *config.Config {

// initializeMCPClients initializes all MCP clients and discovers available tools
// Use mcp.Client from the internal mcp package
func initializeMCPClients(logger *logging.Logger, cfg *config.Config) (map[string]*mcp.Client, map[string]common.ToolInfo) {
func initializeMCPClients(logger *logging.Logger, cfg *config.Config) (map[string]*mcp.Client, map[string]common.ToolInfo, []llms.Tool) {
// Initialize MCP Clients and Discover Tools Sequentially
mcpClients := make(map[string]*mcp.Client)
allDiscoveredTools := make(map[string]common.ToolInfo) // Map: toolName -> common.ToolInfo
llmsTools := []llms.Tool{} // Slice of discovered tool definitions in LangChainGo format
failedServers := []string{}
initializedClientCount := 0

@@ -138,6 +141,7 @@ func initializeMCPClients(logger *logging.Logger, cfg *config.Config) (map[strin
serverConf,
mcpClients,
allDiscoveredTools,
&llmsTools,
&failedServers,
&initializedClientCount,
)
@@ -158,7 +162,7 @@ func initializeMCPClients(logger *logging.Logger, cfg *config.Config) (map[strin
logger.Warn("No MCP clients could be successfully initialized. Application will run with LLM capabilities only.")
}

return mcpClients, allDiscoveredTools
return mcpClients, allDiscoveredTools, llmsTools
}

// processSingleMCPServer processes a single MCP server configuration
@@ -168,6 +172,7 @@ func processSingleMCPServer(
serverConf config.ServerConfig,
mcpClients map[string]*mcp.Client, // Use mcp.Client
discoveredTools map[string]common.ToolInfo,
llmsTools *[]llms.Tool, // Use llms.Tool
failedServers *[]string,
initializedClientCount *int,
) {
@@ -247,31 +252,37 @@ func processSingleMCPServer(
for _, toolDef := range listResult.Tools {
toolName := toolDef.Name
if _, exists := discoveredTools[toolName]; !exists {
var inputSchemaMap map[string]interface{}
// Marshal the ToolInputSchema struct to JSON bytes
schemaBytes, err := json.Marshal(toolDef.InputSchema)
if err != nil {
serverLogger.Error(" Failed to marshal input schema struct for tool '%s': %v", toolName, err)
inputSchemaMap = make(map[string]interface{}) // Use empty map on error
} else {
// Unmarshal the JSON bytes into the map
if err := json.Unmarshal(schemaBytes, &inputSchemaMap); err != nil {
serverLogger.Error(" Failed to unmarshal input schema JSON for tool '%s': %v", toolName, err)
inputSchemaMap = make(map[string]interface{}) // Use empty map on error
llmTool := llms.Tool{
Type: "function",
Function: &llms.FunctionDefinition{
Name: toolDef.Name,
Description: toolDef.Description,
},
}
if toolDef.InputSchema.Type != "" {
parameters := map[string]any{
"type": toolDef.InputSchema.Type,
}
if toolDef.InputSchema.Properties != nil {
parameters["properties"] = toolDef.InputSchema.Properties
}
if toolDef.InputSchema.Required != nil {
parameters["required"] = toolDef.InputSchema.Required
}
llmTool.Function.Parameters = parameters
}
*llmsTools = append(*llmsTools, llmTool)

// Use common.ToolInfo
discoveredTools[toolName] = common.ToolInfo{
ServerName: serverName,
Description: toolDef.Description,
InputSchema: inputSchemaMap,
ServerName: serverName,
Tool: &llmTool,
}

if *mcpDebug {
serverLogger.Debug("Stored tool: '%s' (Desc: %s)", toolName, toolDef.Description)
if *debug {
// Only log the full schema if debug mode is enabled
schemaJSON, _ := json.MarshalIndent(inputSchemaMap, "", " ")
schemaJSON, _ := json.MarshalIndent(toolDef.InputSchema, "", " ")
serverLogger.Debug("Tool schema: %s", string(schemaJSON))
}
}
@@ -435,7 +446,7 @@ func logLLMSettings(logger *logging.Logger, cfg *config.Config) {

// startSlackClient starts the Slack client and handles shutdown
// Use mcp.Client from the internal mcp package
func startSlackClient(logger *logging.Logger, mcpClients map[string]*mcp.Client, discoveredTools map[string]common.ToolInfo, cfg *config.Config) {
func startSlackClient(logger *logging.Logger, mcpClients map[string]*mcp.Client, discoveredTools map[string]common.ToolInfo, llmsTools []llms.Tool, cfg *config.Config) {
logger.Info("Starting Slack client...")
var err error

@@ -459,6 +470,7 @@ func startSlackClient(logger *logging.Logger, mcpClients map[string]*mcp.Client,
logger, // Pass the structured logger
mcpClients, // Pass the map of initialized clients
discoveredTools, // Pass the map of tool information
llmsTools, // Pass the list of LLM tools
cfg, // Pass the whole config object
)
if err != nil {
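A note on the tool-conversion hunk above: rather than marshalling the MCP input schema to JSON and back into a map, the loop now builds a LangChainGo llms.Tool directly from the schema's fields. Below is a minimal standalone sketch of that mapping; the inputSchema struct is a stand-in for the MCP SDK's real schema type, and the get_weather tool is a made-up example.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/tmc/langchaingo/llms"
)

// inputSchema mirrors the shape of an MCP tool input schema; the real type
// comes from the MCP SDK, this stand-in exists only for illustration.
type inputSchema struct {
	Type       string
	Properties map[string]any
	Required   []string
}

// toLLMTool converts an MCP-style tool definition into the llms.Tool shape
// that LangChainGo passes to function-calling models.
func toLLMTool(name, description string, schema inputSchema) llms.Tool {
	tool := llms.Tool{
		Type: "function",
		Function: &llms.FunctionDefinition{
			Name:        name,
			Description: description,
		},
	}
	if schema.Type != "" {
		params := map[string]any{"type": schema.Type}
		if schema.Properties != nil {
			params["properties"] = schema.Properties
		}
		if schema.Required != nil {
			params["required"] = schema.Required
		}
		tool.Function.Parameters = params
	}
	return tool
}

func main() {
	tool := toLLMTool("get_weather", "Look up current weather for a city", inputSchema{
		Type:       "object",
		Properties: map[string]any{"city": map[string]any{"type": "string"}},
		Required:   []string{"city"},
	})
	out, _ := json.MarshalIndent(tool, "", "  ")
	fmt.Println(string(out)) // prints the function definition the model will see
}
```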
internal/common/types.go: 7 changes (4 additions, 3 deletions)
@@ -1,9 +1,10 @@
// Package common provides shared types and utilities used across the application.
package common

import "github.com/tmc/langchaingo/llms"

// ToolInfo holds detailed information about a discovered tool
type ToolInfo struct {
ServerName string `json:"server_name"` // Added json tags for potential future use
Description string `json:"description"`
InputSchema map[string]interface{} `json:"input_schema"`
ServerName string
Tool *llms.Tool
}
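With this change, ToolInfo no longer carries its own description and raw schema map; callers reach both through the embedded *llms.Tool pointer. A small sketch of the new access pattern; the describeTool helper and the example values are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/tmc/langchaingo/llms"

	"github.com/tuannvm/slack-mcp-client/internal/common"
)

// describeTool shows how callers now reach the description via
// info.Tool.Function instead of a dedicated struct field.
func describeTool(name string, info common.ToolInfo) string {
	if info.Tool == nil || info.Tool.Function == nil {
		return fmt.Sprintf("%s (from %s, no definition)", name, info.ServerName)
	}
	return fmt.Sprintf("%s (from %s): %s", name, info.ServerName, info.Tool.Function.Description)
}

func main() {
	tool := llms.Tool{
		Type:     "function",
		Function: &llms.FunctionDefinition{Name: "get_weather", Description: "Look up the weather"},
	}
	info := common.ToolInfo{ServerName: "weather-server", Tool: &tool}
	fmt.Println(describeTool("get_weather", info))
}
```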
internal/handlers/llm_mcp_bridge.go: 55 changes (27 additions, 28 deletions)
@@ -120,36 +120,35 @@ func NewLLMMCPBridgeFromClientsWithLogLevel(mcpClients interface{}, stdLogger *l

// ProcessLLMResponse executes a tool call that the LLM has already returned as a structured name and argument map.
// It no longer parses tool calls out of the raw response text.
func (b *LLMMCPBridge) ProcessLLMResponse(ctx context.Context, llmResponse, _ string) (string, error) {
func (b *LLMMCPBridge) ProcessLLMResponse(ctx context.Context, name string, args map[string]interface{}) (string, error) {
// Check for a tool call in JSON format
if toolCall := b.detectSpecificJSONToolCall(llmResponse); toolCall != nil {
// Execute the tool call
result, err := b.executeToolCall(ctx, toolCall)
if err != nil {
// Check if it's already a domain error
var errorMessage string
if customErrors.IsDomainError(err) {
// Extract structured information from the domain error
code, _ := customErrors.GetErrorCode(err)
b.logger.ErrorKV("Failed to execute tool call",
"error", err.Error(),
"error_code", code,
"tool", toolCall.Tool)
errorMessage = fmt.Sprintf("Error executing tool call: %v (code: %s)", err, code)
} else {
b.logger.ErrorKV("Failed to execute tool call",
"error", err.Error(),
"tool", toolCall.Tool)
errorMessage = fmt.Sprintf("Error executing tool call: %v", err)
}

return errorMessage, nil
}
return result, nil
// Execute the tool call
toolCall := &ToolCall{
Tool: name,
Args: args,
}
result, err := b.executeToolCall(ctx, toolCall)
if err != nil {
// Check if it's already a domain error
var errorMessage string
if customErrors.IsDomainError(err) {
// Extract structured information from the domain error
code, _ := customErrors.GetErrorCode(err)
b.logger.ErrorKV("Failed to execute tool call",
"error", err.Error(),
"error_code", code,
"tool", toolCall.Tool)
errorMessage = fmt.Sprintf("Error executing tool call: %v (code: %s)", err, code)
} else {
b.logger.ErrorKV("Failed to execute tool call",
"error", err.Error(),
"tool", toolCall.Tool)
errorMessage = fmt.Sprintf("Error executing tool call: %v", err)
}

// Just return the LLM response as-is if no tool call was detected
return llmResponse, nil
return errorMessage, nil
}
return result, nil
}

// ToolCall represents the expected JSON structure for a tool call from the LLM
@@ -334,7 +333,7 @@ func (b *LLMMCPBridge) executeToolCall(ctx context.Context, toolCall *ToolCall)
return "", domainErr
}

b.logger.InfoKV("Successfully executed MCP tool", "tool", toolCall.Tool)
b.logger.InfoKV("Successfully executed MCP tool", "tool", toolCall.Tool, "result", len(result))

// The result is already a string with the updated interface
if result == "" {
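Because ProcessLLMResponse now receives a pre-parsed tool name and argument map, decoding the model's tool call becomes the caller's job. A sketch of that glue, assuming the tool call arrives as a LangChainGo llms.ContentChoice; the handleToolCalls helper is hypothetical, and note that llms.FunctionCall.Arguments is a JSON string:

```go
package slackglue

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/tmc/langchaingo/llms"

	"github.com/tuannvm/slack-mcp-client/internal/handlers"
)

// handleToolCalls decodes each function call returned by the model and
// hands it to the MCP bridge one at a time.
func handleToolCalls(ctx context.Context, bridge *handlers.LLMMCPBridge, choice *llms.ContentChoice) ([]string, error) {
	var results []string
	for _, tc := range choice.ToolCalls {
		if tc.FunctionCall == nil {
			continue
		}
		// LangChainGo delivers arguments as a JSON string; decode it into
		// the map[string]interface{} shape the bridge now expects.
		var args map[string]interface{}
		if err := json.Unmarshal([]byte(tc.FunctionCall.Arguments), &args); err != nil {
			return nil, fmt.Errorf("decoding arguments for %q: %w", tc.FunctionCall.Name, err)
		}
		result, err := bridge.ProcessLLMResponse(ctx, tc.FunctionCall.Name, args)
		if err != nil {
			return nil, err
		}
		results = append(results, result)
	}
	return results, nil
}
```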
internal/llm/langchain.go: 44 changes (23 additions, 21 deletions)
@@ -4,7 +4,6 @@ package llm
import (
"context"
"fmt"
"strings"

"github.com/tmc/langchaingo/llms"

@@ -16,6 +15,8 @@ const (
langchainProviderName = "langchain"
)

type Message llms.MessageContent

// LangChainProvider implements the LLMProvider interface using LangChainGo
// It acts as a gateway, configured to use various LLM providers underneath.
type LangChainProvider struct {
@@ -109,46 +110,40 @@ func NewLangChainProviderFactory(config map[string]interface{}, logger *logging.
}

// GenerateCompletion generates a completion using LangChainGo
func (p *LangChainProvider) GenerateCompletion(ctx context.Context, prompt string, options ProviderOptions) (string, error) {
func (p *LangChainProvider) GenerateCompletion(ctx context.Context, messages []llms.MessageContent, options ProviderOptions) (*llms.ContentChoice, error) {
if p.llm == nil {
return "", errors.NewLLMError("client_not_initialized", "LangChainGo client not initialized")
return nil, errors.NewLLMError("client_not_initialized", "LangChainGo client not initialized")
}

p.logger.DebugKV("Calling LangChainGo GenerateCompletion", "prompt_length", len(prompt))
callOptions := p.buildOptions(options)

completion, err := llms.GenerateFromSinglePrompt(ctx, p.llm, prompt, callOptions...)
resp, err := p.llm.GenerateContent(ctx, messages, callOptions...)
if err != nil {
p.logger.ErrorKV("LangChainGo GenerateCompletion request failed", "error", err)
return "", errors.WrapLLMError(err, "request_failed", "Failed to generate completion from LangChainGo")
return nil, errors.WrapLLMError(err, "request_failed", "Failed to generate completion from LangChainGo")
}
if len(resp.Choices) == 0 {
p.logger.ErrorKV("LangChainGo GenerateCompletion returned no choices")
return nil, errors.NewLLMError("no_choices", "No choices returned from LangChainGo completion request")
}
choice := resp.Choices[0]

p.logger.DebugKV("Received GenerateCompletion response", "length", len(completion))
return completion, nil
p.logger.DebugKV("Received GenerateCompletion response", "length", len(choice.Content))
return choice, nil
}

// GenerateChatCompletion generates a chat completion using LangChainGo
// Note: LangChainGo's basic llms.Model interface doesn't directly support chat messages.
// We simulate it by formatting messages into a single prompt.
func (p *LangChainProvider) GenerateChatCompletion(ctx context.Context, messages []RequestMessage, options ProviderOptions) (string, error) {
func (p *LangChainProvider) GenerateChatCompletion(ctx context.Context, messages []llms.MessageContent, options ProviderOptions) (*llms.ContentChoice, error) {
if p.llm == nil {
return "", errors.NewLLMError("client_not_initialized", "LangChainGo client not initialized")
return nil, errors.NewLLMError("client_not_initialized", "LangChainGo client not initialized")
}

p.logger.DebugKV("Calling LangChainGo GenerateChatCompletion", "num_messages", len(messages))

// Convert our message format to a single prompt string
var promptBuilder strings.Builder
for _, msg := range messages {
promptBuilder.WriteString(fmt.Sprintf("%s: %s\n", strings.ToUpper(msg.Role), msg.Content))
}
prompt := promptBuilder.String()
// Add one final assistant prefix to indicate where the response should go
// This might need adjustment depending on the specific model's fine-tuning
prompt += "ASSISTANT: "

// Call the underlying GenerateCompletion method with the formatted prompt
return p.GenerateCompletion(ctx, prompt, options)
return p.GenerateCompletion(ctx, messages, options)
}

// GetInfo returns information about the provider.
@@ -206,6 +201,13 @@ func (p *LangChainProvider) buildOptions(options ProviderOptions) []llms.CallOpt
p.logger.DebugKV("Adding MaxTokens option", "value", options.MaxTokens)
}

if len(options.Tools) > 0 {
// Convert tools to LangChainGo format if needed
// For now, we assume tools are compatible or handled by the underlying LLM
p.logger.DebugKV("Adding tools to call options", "count", len(options.Tools))
callOptions = append(callOptions, llms.WithTools(options.Tools))
}

// Note: options.TargetProvider is handled during factory creation, not here.

return callOptions
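Taken together, the provider now accepts structured chat messages and can surface tool calls instead of a flat string. A usage sketch, assuming provider implements the updated interface and llmsTools is the slice assembled in cmd/main.go; the chatOnce helper, model name, and prompts are examples only:

```go
package llmexample

import (
	"context"
	"fmt"

	"github.com/tmc/langchaingo/llms"

	"github.com/tuannvm/slack-mcp-client/internal/llm"
)

// chatOnce runs a single chat turn and distinguishes a tool-call response
// from a plain text answer.
func chatOnce(ctx context.Context, provider llm.LLMProvider, llmsTools []llms.Tool) error {
	messages := []llms.MessageContent{
		llms.TextParts(llms.ChatMessageTypeSystem, "You are a helpful Slack assistant."),
		llms.TextParts(llms.ChatMessageTypeHuman, "What's the weather in Hanoi?"),
	}
	choice, err := provider.GenerateChatCompletion(ctx, messages, llm.ProviderOptions{
		Model:     "gpt-4o",
		MaxTokens: 512,
		Tools:     llmsTools, // forwarded to the model via llms.WithTools in buildOptions
	})
	if err != nil {
		return err
	}
	if len(choice.ToolCalls) > 0 && choice.ToolCalls[0].FunctionCall != nil {
		// The model asked for a tool; route it to the MCP bridge instead of replying.
		fmt.Printf("model requested tool: %s\n", choice.ToolCalls[0].FunctionCall.Name)
		return nil
	}
	fmt.Println(choice.Content)
	return nil
}
```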
internal/llm/provider.go: 18 changes (10 additions, 8 deletions)
@@ -7,6 +7,8 @@ import (
"sync"

"github.com/mark3labs/mcp-go/mcp"
"github.com/tmc/langchaingo/llms"

"github.com/tuannvm/slack-mcp-client/internal/common/logging"
)

@@ -77,26 +79,26 @@ type ProviderInfo struct {

// RequestMessage represents a single message in a chat request
type RequestMessage struct {
Role string `json:"role"`
Content string `json:"content"`
llms.MessageContent
}

// ProviderOptions contains options for LLM requests
type ProviderOptions struct {
Model string // Model to use (specific model name, e.g., gpt-4o)
Temperature float64 // Temperature for response generation (0-1)
MaxTokens int // Maximum number of tokens to generate
TargetProvider string // For gateway providers: specifies the underlying provider (e.g., "openai", "ollama")
Model string // Model to use (specific model name, e.g., gpt-4o)
Temperature float64 // Temperature for response generation (0-1)
MaxTokens int // Maximum number of tokens to generate
TargetProvider string // For gateway providers: specifies the underlying provider (e.g., "openai", "ollama")
Tools []llms.Tool // Optional tools for the request (e.g., function calling)
}

// LLMProvider defines the interface for language model providers
type LLMProvider interface {
// GenerateCompletion generates a text completion (less common now, prefer chat)
// Deprecated: Prefer GenerateChatCompletion
GenerateCompletion(ctx context.Context, prompt string, options ProviderOptions) (string, error)
GenerateCompletion(ctx context.Context, messages []llms.MessageContent, options ProviderOptions) (*llms.ContentChoice, error)

// GenerateChatCompletion generates a chat completion using a message history
GenerateChatCompletion(ctx context.Context, messages []RequestMessage, options ProviderOptions) (string, error)
GenerateChatCompletion(ctx context.Context, messages []llms.MessageContent, options ProviderOptions) (*llms.ContentChoice, error)

// GetInfo returns information about the provider
GetInfo() ProviderInfo
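Any implementation of the widened interface now returns *llms.ContentChoice from both generation methods. A hypothetical test double follows; if the full LLMProvider interface carries methods beyond the three visible in this hunk (for example an availability check), the fake would need those as well:

```go
package llmtest

import (
	"context"

	"github.com/tmc/langchaingo/llms"

	"github.com/tuannvm/slack-mcp-client/internal/llm"
)

// fakeProvider is a test double that echoes canned choices instead of
// calling a real model.
type fakeProvider struct{}

func (f *fakeProvider) GenerateCompletion(_ context.Context, _ []llms.MessageContent, _ llm.ProviderOptions) (*llms.ContentChoice, error) {
	return &llms.ContentChoice{Content: "stub completion"}, nil
}

func (f *fakeProvider) GenerateChatCompletion(_ context.Context, _ []llms.MessageContent, _ llm.ProviderOptions) (*llms.ContentChoice, error) {
	return &llms.ContentChoice{Content: "stub chat completion"}, nil
}

func (f *fakeProvider) GetInfo() llm.ProviderInfo {
	return llm.ProviderInfo{} // zero value; real fields omitted in this sketch
}
```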
internal/llm/registry.go: 12 changes (7 additions, 5 deletions)
@@ -6,6 +6,8 @@ import (
"fmt"
"sync"

"github.com/tmc/langchaingo/llms"

"github.com/tuannvm/slack-mcp-client/internal/common/logging"
"github.com/tuannvm/slack-mcp-client/internal/config" // Import config
)
@@ -185,24 +187,24 @@ func (r *ProviderRegistry) ListProviders() []ProviderInfo {

// GenerateCompletion generates a completion using the specified provider (or primary if empty).
// It checks for provider availability before making the call.
func (r *ProviderRegistry) GenerateCompletion(ctx context.Context, providerName string, prompt string, options ProviderOptions) (string, error) {
func (r *ProviderRegistry) GenerateCompletion(ctx context.Context, providerName string, messages []llms.MessageContent, options ProviderOptions) (*llms.ContentChoice, error) {
provider, err := r.GetProviderWithAvailabilityCheck(providerName) // Use the availability check method
if err != nil {
return "", err
return nil, err
}

info := provider.GetInfo()
r.logger.DebugKV("Using provider for completion", "name", info.Name)
// Note: GenerateCompletion is deprecated in the interface, but we keep the registry method for now.
return provider.GenerateCompletion(ctx, prompt, options)
return provider.GenerateCompletion(ctx, messages, options)
}

// GenerateChatCompletion generates a chat completion using the specified provider (or primary if empty).
// It checks for provider availability before making the call.
func (r *ProviderRegistry) GenerateChatCompletion(ctx context.Context, providerName string, messages []RequestMessage, options ProviderOptions) (string, error) {
func (r *ProviderRegistry) GenerateChatCompletion(ctx context.Context, providerName string, messages []llms.MessageContent, options ProviderOptions) (*llms.ContentChoice, error) {
provider, err := r.GetProviderWithAvailabilityCheck(providerName) // Use the availability check method
if err != nil {
return "", err
return nil, err
}

info := provider.GetInfo()
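At the registry level the call shape is the same: callers pass llms.MessageContent values and get back a *llms.ContentChoice. A short hypothetical call site; per the doc comment above, an empty provider name selects the primary provider:

```go
package llmexample

import (
	"context"

	"github.com/tmc/langchaingo/llms"

	"github.com/tuannvm/slack-mcp-client/internal/llm"
)

// askPrimary sends one user message through whichever provider the
// registry treats as primary and returns the text of the first choice.
func askPrimary(ctx context.Context, registry *llm.ProviderRegistry, prompt string) (string, error) {
	messages := []llms.MessageContent{
		llms.TextParts(llms.ChatMessageTypeHuman, prompt),
	}
	choice, err := registry.GenerateChatCompletion(ctx, "", messages, llm.ProviderOptions{Model: "gpt-4o"})
	if err != nil {
		return "", err
	}
	return choice.Content, nil
}
```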