-
Notifications
You must be signed in to change notification settings - Fork 165
Fix chat name generation #1719
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Fix chat name generation #1719
Changes from 5 commits
3d96feb
1e49029
b1c56e1
f8a8fc5
255c757
7485ab3
692c33f
c85698c
2955cf3
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,9 +1,13 @@ | ||
| You are an AI assistant on the OpenOps platform, where users interact about FinOps, cloud providers (AWS, Azure, GCP), OpenOps features, and workflow automation. | ||
|
|
||
| Your task: | ||
| Given the following conversation, suggest a short, descriptive name (max five words) that best summarizes the main topic, question, or action discussed in this chat. | ||
| Task: | ||
| Analyze the provided conversation and attempt to produce a concise chat name describing the main topic, question, or action. | ||
|
|
||
| Guidelines: | ||
| - The name should be specific (not generic like "Chat" or "Conversation"), and reflect the user's intent (e.g., "AWS Cost Optimization", "Create Budget Workflow", "OpenOps Integration Help"). | ||
| - Limit the name to five words or less. | ||
| - Respond with only the chat name. | ||
| Rules: | ||
| - If you can confidently produce a specific, helpful name (not generic like "Chat" or "Conversation"), set `isGenerated` to true and provide `name`. | ||
| - The `name` must be five words or fewer | ||
| - If there is insufficient information, the content is unclear, or you cannot determine a good name, set `isGenerated` to false. | ||
|
|
||
| Notes: | ||
| - Keep the name short and specific. | ||
| - Avoid quotes, punctuation-heavy outputs, or trailing spaces in the name. |
| Original file line number | Diff line number | Diff line change | ||
|---|---|---|---|---|
|
|
@@ -2,6 +2,7 @@ import { | |||
| AiAuth, | ||||
| getAiModelFromConnection, | ||||
| getAiProviderLanguageModel, | ||||
| isLLMTelemetryEnabled, | ||||
| } from '@openops/common'; | ||||
| import { | ||||
| AppSystemProp, | ||||
|
|
@@ -14,14 +15,19 @@ import { | |||
| ApplicationError, | ||||
| CustomAuthConnectionValue, | ||||
| ErrorCode, | ||||
| GeneratedChatName, | ||||
| removeConnectionBrackets, | ||||
| } from '@openops/shared'; | ||||
| import { LanguageModel, ModelMessage, UIMessage, generateText } from 'ai'; | ||||
| import { generateObject, LanguageModel, ModelMessage, UIMessage } from 'ai'; | ||||
| import { z } from 'zod'; | ||||
| import { appConnectionService } from '../../app-connection/app-connection-service/app-connection-service'; | ||||
| import { aiConfigService } from '../config/ai-config.service'; | ||||
| import { loadPrompt } from './prompts.service'; | ||||
| import { Conversation } from './types'; | ||||
| import { mergeToolResultsIntoMessages } from './utils'; | ||||
| import { | ||||
| mergeToolResultsIntoMessages, | ||||
| sanitizeMessagesForChatName, | ||||
| } from './utils'; | ||||
|
|
||||
| const chatContextKey = ( | ||||
| chatId: string, | ||||
|
|
@@ -84,28 +90,39 @@ export const generateChatIdForMCP = (params: { | |||
| }); | ||||
| }; | ||||
|
|
||||
| const generatedChatNameSchema = z.object({ | ||||
| name: z | ||||
| .string() | ||||
| .max(100) | ||||
| .nullable() | ||||
| .describe('Conversation name or null if it was not generated'), | ||||
| isGenerated: z.boolean().describe('Whether the name was generated or not'), | ||||
| }); | ||||
|
|
||||
| export async function generateChatName( | ||||
| messages: ModelMessage[], | ||||
| projectId: string, | ||||
| ): Promise<string> { | ||||
| const { languageModel } = await getLLMConfig(projectId); | ||||
| ): Promise<GeneratedChatName> { | ||||
| const { languageModel, aiConfig } = await getLLMConfig(projectId); | ||||
| const systemPrompt = await loadPrompt('chat-name.txt'); | ||||
| if (!systemPrompt.trim()) { | ||||
| throw new Error('Failed to load prompt to generate the chat name.'); | ||||
| } | ||||
| const prompt: ModelMessage[] = [ | ||||
| { | ||||
| role: 'system', | ||||
| content: systemPrompt, | ||||
| } as const, | ||||
| ...messages, | ||||
| ]; | ||||
| const response = await generateText({ | ||||
|
|
||||
| const sanitizedMessages: ModelMessage[] = | ||||
| sanitizeMessagesForChatName(messages); | ||||
|
|
||||
| const result = await generateObject({ | ||||
|
||||
| experimental_repairText: async ({ text }) => repairText(text), |
Outdated
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Also, you could wrap in try catch and return {isGenerated: false} in case there is an error and log the error
coderabbitai[bot] marked this conversation as resolved.
Outdated
Show resolved
Hide resolved
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -22,7 +22,7 @@ import { | |
| UpdateChatModelRequest, | ||
| UpdateChatModelResponse, | ||
| } from '@openops/shared'; | ||
| import { ModelMessage, UserModelMessage } from 'ai'; | ||
| import { ModelMessage } from 'ai'; | ||
| import { FastifyReply } from 'fastify'; | ||
| import { StatusCodes } from 'http-status-codes'; | ||
| import { | ||
|
|
@@ -247,27 +247,24 @@ export const aiMCPChatController: FastifyPluginAsyncTypebox = async (app) => { | |
| const { chatHistory } = await getConversation(chatId, userId, projectId); | ||
|
|
||
| if (chatHistory.length === 0) { | ||
| return await reply.code(200).send({ chatName: DEFAULT_CHAT_NAME }); | ||
| return await reply | ||
| .code(200) | ||
| .send({ name: DEFAULT_CHAT_NAME, isGenerated: false }); | ||
| } | ||
|
|
||
| const userMessages = chatHistory.filter( | ||
| (msg): msg is UserModelMessage => | ||
| msg && | ||
| typeof msg === 'object' && | ||
| 'role' in msg && | ||
| msg.role === 'user', | ||
| ); | ||
| const generated = await generateChatName(chatHistory, projectId); | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. The filtering is removed; having only user messages for generation is not enough. |
||
|
|
||
| if (userMessages.length === 0) { | ||
| return await reply.code(200).send({ chatName: DEFAULT_CHAT_NAME }); | ||
| if (!generated.isGenerated) { | ||
| return await reply | ||
| .code(200) | ||
| .send({ name: DEFAULT_CHAT_NAME, isGenerated: false }); | ||
| } | ||
|
|
||
| const rawChatName = await generateChatName(userMessages, projectId); | ||
| const chatName = rawChatName.trim() || DEFAULT_CHAT_NAME; | ||
|
|
||
| await updateChatName(chatId, userId, projectId, chatName); | ||
| if (generated.isGenerated && generated.name) { | ||
| await updateChatName(chatId, userId, projectId, generated.name); | ||
| } | ||
|
|
||
| return await reply.code(200).send({ chatName }); | ||
| return await reply.code(200).send(generated); | ||
| } catch (error) { | ||
| return handleError(error, reply, 'generate chat name'); | ||
| } | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
@rSnapkoOpenOps why do you need isGenerated?
you can deduce that from checking if (name). The less logic the LLM needs to do, the better.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Yes, I've tried that, but the AI still adds dummy names.