diff --git a/genai-function-calling/vercel-ai/README.md b/genai-function-calling/vercel-ai/README.md
index 0f180f6..1e8be07 100644
--- a/genai-function-calling/vercel-ai/README.md
+++ b/genai-function-calling/vercel-ai/README.md
@@ -48,7 +48,7 @@ npm run start -- --mcp
 ## Notes
 
 The LLM should generate something like "The latest stable version of
-Elasticsearch is 8.18.0," unless it hallucinates. Run it again, if you see
+Elasticsearch is 8.19.3," unless it hallucinates. Run it again, if you see
 something else.
 
 Vercel AI's OpenTelemetry instrumentation only produces traces (not logs or
diff --git a/genai-function-calling/vercel-ai/index.js b/genai-function-calling/vercel-ai/index.js
index 6a9a317..13e4d5e 100644
--- a/genai-function-calling/vercel-ai/index.js
+++ b/genai-function-calling/vercel-ai/index.js
@@ -17,7 +17,7 @@ const model = process.env.CHAT_MODEL || 'gpt-4o-mini';
 
 const getLatestElasticsearchVersion = tool({
   description: 'Get the latest version of Elasticsearch',
-  parameters: z.object({
+  inputSchema: z.object({
     majorVersion: z.number().optional().describe('Major version to filter by (e.g. 7, 8). Defaults to latest'),
   }),
   execute: async ({majorVersion}) => {
@@ -55,7 +55,7 @@ async function runAgent(tools) {
     // If using reasoning models, remove the format rewards from output. Non-reasoning models will not have
     // them making it effectively a no-op.
     model: wrapLanguageModel({
-      model: openai(model),
+      model: openai.chat(model),
       middleware: [extractReasoningMiddleware({ tagName: 'think' })],
     }),
     messages: [{role: 'user', content: "What is the latest version of Elasticsearch 8?"}],
diff --git a/genai-function-calling/vercel-ai/mcp.js b/genai-function-calling/vercel-ai/mcp.js
index e09f8d0..3654c10 100644
--- a/genai-function-calling/vercel-ai/mcp.js
+++ b/genai-function-calling/vercel-ai/mcp.js
@@ -34,7 +34,7 @@ async function mcpServerMain(tools) {
   server.tool(
     toolName,
     tool.description,
-    tool.parameters.shape,
+    tool.inputSchema.shape,
     async (params) => {
       try {
         const result = await tool.execute(params);
diff --git a/genai-function-calling/vercel-ai/package.json b/genai-function-calling/vercel-ai/package.json
index 468d616..1d9f60d 100644
--- a/genai-function-calling/vercel-ai/package.json
+++ b/genai-function-calling/vercel-ai/package.json
@@ -10,11 +10,11 @@
     "start": "node --env-file .env --import @elastic/opentelemetry-node --import ./telemetry.js index.js"
   },
   "dependencies": {
-    "ai": "^4.3.11",
-    "@ai-sdk/azure": "^1.3.21",
-    "@ai-sdk/openai": "^1.3.20",
-    "@modelcontextprotocol/sdk": "^1.10.2",
-    "@elastic/opentelemetry-node": "^1",
-    "@arizeai/openinference-instrumentation-mcp": "^0.2.0"
+    "ai": "^5.0.35",
+    "@ai-sdk/azure": "^2.0.25",
+    "@ai-sdk/openai": "^2.0.25",
+    "@modelcontextprotocol/sdk": "^1.17.5",
+    "@elastic/opentelemetry-node": "^1.2.0",
+    "@arizeai/openinference-instrumentation-mcp": "^0.2.4"
   }
 }
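
For reviewers unfamiliar with the AI SDK v5 surface, here is a minimal sketch (not part of the patch) of the shape index.js migrates to. The model name, prompt, and `execute` body are illustrative stand-ins, and `stopWhen: stepCountIs(...)` is v5's replacement for v4's `maxSteps` rather than something this diff touches:

```js
import { generateText, tool, wrapLanguageModel, extractReasoningMiddleware, stepCountIs } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

// v5 renames the tool's `parameters` field to `inputSchema`.
const getLatestElasticsearchVersion = tool({
  description: 'Get the latest version of Elasticsearch',
  inputSchema: z.object({
    majorVersion: z.number().optional().describe('Major version to filter by (e.g. 7, 8). Defaults to latest'),
  }),
  // Stub: index.js looks the real version up; this just echoes the input.
  execute: async ({ majorVersion }) => `stubbed latest version for major ${majorVersion ?? 'any'}`,
});

const { text } = await generateText({
  // In v5, bare `openai(model)` targets the Responses API, so the patch uses
  // `openai.chat(model)` to keep the Chat Completions API it was already on.
  model: wrapLanguageModel({
    model: openai.chat('gpt-4o-mini'),
    middleware: [extractReasoningMiddleware({ tagName: 'think' })],
  }),
  tools: { getLatestElasticsearchVersion },
  // Allow follow-up steps so the model can answer after the tool result.
  stopWhen: stepCountIs(5),
  messages: [{ role: 'user', content: 'What is the latest version of Elasticsearch 8?' }],
});
console.log(text);
```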