@@ -725,6 +725,9 @@ The user is waiting for you to explain what the tools discovered. Provide a dire
725725 // Build user context from current conversation
726726 const userContext = this . buildUserContext ( ) ;
727727
728+ // Store original arguments for comparison
729+ const originalArguments = { ...processedArguments } ;
730+
728731 // Use AI to intelligently prepare arguments
729732 processedArguments = await this . enhanceArgumentsWithAI (
730733 toolName ,
@@ -737,6 +740,13 @@ The user is waiting for you to explain what the tools discovered. Provide a dire
737740 original : JSON . parse ( tc . function . arguments ) ,
738741 enhanced : processedArguments ,
739742 } ) ;
743+
744+ // Mark which fields were enhanced by LLM for UI display
745+ processedArguments . _llmEnhanced = {
746+ enhanced : true ,
747+ originalArgs : originalArguments ,
748+ enhancedFields : this . identifyEnhancedFields ( originalArguments , processedArguments ) ,
749+ } ;
740750 }
741751 } catch ( error ) {
742752 console . warn ( `Failed to enhance arguments for ${ toolName } :` , error ) ;
@@ -1568,7 +1578,7 @@ Format your response to make the errors prominent and actionable.`,
15681578 * Enhance arguments using AI-like intelligence
15691579 */
15701580 private async enhanceArgumentsWithAI (
1571- _toolName : string ,
1581+ toolName : string ,
15721582 toolSchema : any ,
15731583 userContext : UserContext ,
15741584 originalArgs : Record < string , any >
@@ -1579,24 +1589,203 @@ Format your response to make the errors prominent and actionable.`,
15791589 return enhanced ;
15801590 }
15811591
1582- const properties = toolSchema . inputSchema . properties ;
1583- const required = toolSchema . inputSchema . required || [ ] ;
1592+ try {
1593+ // Use LLM to intelligently prepare arguments based on user context and tool schema
1594+ const llmEnhancedArgs = await this . prepareLLMArguments (
1595+ toolName ,
1596+ toolSchema ,
1597+ userContext ,
1598+ originalArgs
1599+ ) ;
1600+
1601+ // Merge LLM suggestions with original arguments, preferring LLM suggestions
1602+ Object . assign ( enhanced , llmEnhancedArgs ) ;
1603+ } catch ( error ) {
1604+ console . warn ( `Failed to get LLM enhancement for ${ toolName } :` , error ) ;
1605+ // Fall back to basic enhancement
1606+ const properties = toolSchema . inputSchema . properties ;
1607+ const required = toolSchema . inputSchema . required || [ ] ;
1608+
1609+ // Fill in required fields that are missing or empty
1610+ for ( const [ fieldName , fieldSchema ] of Object . entries ( properties ) ) {
1611+ const isRequired = required . includes ( fieldName ) ;
1612+ const currentValue = enhanced [ fieldName ] ;
1613+
1614+ if (
1615+ isRequired &&
1616+ ( currentValue === undefined || currentValue === null || currentValue === '' )
1617+ ) {
1618+ // Provide intelligent defaults based on field type and context
1619+ enhanced [ fieldName ] = this . getIntelligentDefault ( fieldName , fieldSchema , userContext ) ;
1620+ }
1621+ }
1622+ }
1623+
1624+ return enhanced ;
1625+ }
1626+
1627+ /**
1628+ * Use LLM to prepare intelligent arguments based on user request and tool schema
1629+ */
1630+ private async prepareLLMArguments (
1631+ toolName : string ,
1632+ toolSchema : any ,
1633+ userContext : UserContext ,
1634+ originalArgs : Record < string , any >
1635+ ) : Promise < Record < string , any > > {
1636+ // Build prompt for argument preparation
1637+ const argumentPreparationPrompt = this . createArgumentPreparationPrompt (
1638+ toolName ,
1639+ toolSchema ,
1640+ userContext ,
1641+ originalArgs
1642+ ) ;
1643+
1644+ try {
1645+ // Use the existing model instance but without tools to avoid recursive tool calls
1646+ const response = await this . model . invoke ( [
1647+ { role : 'system' , content : argumentPreparationPrompt . system } ,
1648+ { role : 'user' , content : argumentPreparationPrompt . user } ,
1649+ ] ) ;
1650+
1651+ // Parse the LLM response to extract arguments
1652+ const responseText = this . extractTextContent ( response . content ) ;
1653+ const parsedArgs = this . parseArgumentsFromLLMResponse ( responseText ) ;
1654+
1655+ return parsedArgs ;
1656+ } catch ( error ) {
1657+ console . warn ( 'Failed to prepare arguments with LLM:' , error ) ;
1658+ return { } ;
1659+ }
1660+ }
1661+
/**
 * Build the system/user prompt pair that asks the LLM to map the user's
 * natural-language request onto the given tool's argument schema.
 *
 * The system prompt describes every schema field (including one level of
 * nested properties) and instructs the model to answer with JSON only; the
 * user prompt supplies the request, the last few conversation turns, and the
 * current arguments.
 */
private createArgumentPreparationPrompt(
  toolName: string,
  toolSchema: any,
  userContext: UserContext,
  originalArgs: Record<string, any>
): { system: string; user: string } {
  const properties = toolSchema.inputSchema?.properties || {};
  const required = toolSchema.inputSchema?.required || [];

  // Render one descriptive line per schema field, plus nested properties
  // for complex object parameters.
  const fieldLines: string[] = [];
  for (const [fieldName, fieldSchema] of Object.entries(properties) as [string, any][]) {
    const isReq = required.includes(fieldName) ? ' (REQUIRED)' : ' (optional)';
    const type = fieldSchema.type || 'any';
    const desc = fieldSchema.description || 'No description';

    let nestedProps = '';
    if (fieldSchema.properties) {
      const nestedLines: string[] = [];
      for (const [nestedName, nestedSchema] of Object.entries(fieldSchema.properties) as [string, any][]) {
        nestedLines.push(
          `  - ${nestedName} (${nestedSchema.type || 'any'}): ${nestedSchema.description || 'No description'}`
        );
      }
      nestedProps = '\n  Nested properties:\n' + nestedLines.join('\n');
    }

    fieldLines.push(`- ${fieldName}${isReq} (${type}): ${desc}${nestedProps}`);
  }
  const schemaDescription = fieldLines.join('\n');

  const system = `You are an expert at preparing tool arguments based on user requests. Your task is to analyze the user's request and generate appropriate arguments for the "${toolName}" tool.

TOOL SCHEMA:
${schemaDescription}

INSTRUCTIONS:
1. Analyze the user's request to understand their intent
2. Map their natural language request to the appropriate tool arguments
3. For complex objects (like params), fill in the nested properties based on the user's requirements
4. Use the conversation context to infer missing details
5. Return ONLY a valid JSON object with the tool arguments
6. If a required field cannot be determined from the user's request, provide a sensible default

RESPONSE FORMAT:
Return only valid JSON with the tool arguments. No explanations, no markdown, just the JSON.`;

  // Include at most the five most recent conversation turns for context.
  const recentTurns = userContext.conversationHistory?.slice(-5);
  const conversationContext =
    recentTurns?.map(msg => `${msg.role}: ${msg.content}`).join('\n') || '';

  const user = `USER REQUEST: "${userContext.userMessage}"

CONVERSATION CONTEXT:
${conversationContext}

CURRENT ARGUMENTS: ${JSON.stringify(originalArgs, null, 2)}

Based on the user's request and the tool schema above, generate the appropriate arguments for the "${toolName}" tool. Focus on mapping the user's intent to the correct parameter values.

For example, if the user says "get me info only from gadget namespace", the params object should include:
{"operator.KubeManager.namespace": "gadget"}

Return the complete arguments object:`;

  return { system, user };
}
1738+
1739+ /**
1740+ * Parse arguments from LLM response
1741+ */
1742+ private parseArgumentsFromLLMResponse ( response : string ) : Record < string , any > {
1743+ try {
1744+ // Try to extract JSON from the response
1745+ const jsonMatch = response . match ( / \{ [ \s \S ] * \} / ) ;
1746+ if ( jsonMatch ) {
1747+ return JSON . parse ( jsonMatch [ 0 ] ) ;
1748+ }
1749+
1750+ // If no JSON found, try to parse the entire response
1751+ return JSON . parse ( response . trim ( ) ) ;
1752+ } catch ( error ) {
1753+ console . warn ( 'Failed to parse LLM response for arguments:' , error , response ) ;
1754+ return { } ;
1755+ }
1756+ }
1757+
1758+ /**
1759+ * Identify which fields were enhanced by comparing original and enhanced arguments
1760+ */
1761+ private identifyEnhancedFields (
1762+ original : Record < string , any > ,
1763+ enhanced : Record < string , any >
1764+ ) : string [ ] {
1765+ const enhancedFields : string [ ] = [ ] ;
1766+
1767+ // Compare each field to see what was added or modified
1768+ for ( const [ key , enhancedValue ] of Object . entries ( enhanced ) ) {
1769+ if ( key === '_llmEnhanced' ) continue ; // Skip metadata
1770+
1771+ const originalValue = original [ key ] ;
1772+
1773+ // Field is enhanced if:
1774+ // 1. It didn't exist in original
1775+ // 2. It was null/undefined/empty in original but has value now
1776+ // 3. The value is different
15901777 if (
1591- isRequired &&
1592- ( currentValue === undefined || currentValue === null || currentValue === '' )
1778+ ! ( key in original ) ||
1779+ originalValue === null ||
1780+ originalValue === undefined ||
1781+ originalValue === '' ||
1782+ JSON . stringify ( originalValue ) !== JSON . stringify ( enhancedValue )
15931783 ) {
1594- // Provide intelligent defaults based on field type and context
1595- enhanced [ fieldName ] = this . getIntelligentDefault ( fieldName , fieldSchema , userContext ) ;
1784+ enhancedFields . push ( key ) ;
15961785 }
15971786 }
15981787
1599- return enhanced ;
1788+ return enhancedFields ;
16001789 }
16011790
16021791 /**
0 commit comments