diff --git a/genai-function-calling/openai-agents/env.example b/genai-function-calling/openai-agents/env.example
index b2f272d..7284428 100644
--- a/genai-function-calling/openai-agents/env.example
+++ b/genai-function-calling/openai-agents/env.example
@@ -4,16 +4,12 @@ OPENAI_API_KEY=
 # Uncomment to use Ollama instead of OpenAI
 # OPENAI_BASE_URL=http://localhost:11434/v1
 # OPENAI_API_KEY=unused
-# # This works when you supply a major_version parameter in your prompt. If you
-# # leave it out, you need to update this to qwen2.5:3b to proceed the tool call.
-# CHAT_MODEL=qwen2.5:0.5b
+# CHAT_MODEL=qwen3:0.6b
 
 # Uncomment to use RamaLama instead of OpenAI
 # OPENAI_BASE_URL=http://localhost:8080/v1
 # OPENAI_API_KEY=unused
-# # This works when you supply a major_version parameter in your prompt. If you
-# # leave it out, you need to update this to qwen2.5:3b to proceed the tool call.
-# CHAT_MODEL=qwen2.5:0.5b
+# CHAT_MODEL=qwen3:0.6b
 
 # Uncomment and complete if you want to use Azure OpenAI Service
 ## "Azure OpenAI Endpoint" in https://oai.azure.com/resource/overview
diff --git a/genai-function-calling/semantic-kernel-dotnet/env.example b/genai-function-calling/semantic-kernel-dotnet/env.example
index 0c6fae8..a1c4f48 100644
--- a/genai-function-calling/semantic-kernel-dotnet/env.example
+++ b/genai-function-calling/semantic-kernel-dotnet/env.example
@@ -4,16 +4,12 @@ OPENAI_API_KEY=
 # Uncomment to use Ollama instead of OpenAI
 # OPENAI_BASE_URL=http://localhost:11434/v1
 # OPENAI_API_KEY=unused
-# # This works when you supply a major_version parameter in your prompt. If you
-# # leave it out, you need to update this to qwen2.5:3b to proceed the tool call.
-# CHAT_MODEL=qwen2.5:0.5b
+# CHAT_MODEL=qwen3:0.6b
 
 # Uncomment to use RamaLama instead of OpenAI
 # OPENAI_BASE_URL=http://localhost:8080/v1
 # OPENAI_API_KEY=unused
-# # This works when you supply a major_version parameter in your prompt. If you
-# # leave it out, you need to update this to qwen2.5:3b to proceed the tool call.
-# CHAT_MODEL=qwen2.5:0.5b
+# CHAT_MODEL=qwen3:0.6b
 
 # Uncomment and complete if you want to use Azure OpenAI Service
 ## "Azure OpenAI Endpoint" in https://oai.azure.com/resource/overview
diff --git a/genai-function-calling/spring-ai/env.example b/genai-function-calling/spring-ai/env.example
index f0ce5dc..3eab7cd 100644
--- a/genai-function-calling/spring-ai/env.example
+++ b/genai-function-calling/spring-ai/env.example
@@ -5,15 +5,15 @@ OPENAI_API_KEY=
 # OPENAI_BASE_URL=http://localhost:11434/v1
 # OPENAI_API_KEY=unused
 # # This works when you supply a major_version parameter in your prompt. If you
-# # leave it out, you need to update this to qwen2.5:3b to proceed the tool call.
-# CHAT_MODEL=qwen2.5:0.5b
+# # leave it out, you need to update this to qwen3:1.7b to proceed the tool call.
+# CHAT_MODEL=qwen3:0.6b
 
 # Uncomment to use RamaLama instead of OpenAI
 # OPENAI_BASE_URL=http://localhost:8080/v1
 # OPENAI_API_KEY=unused
 # # This works when you supply a major_version parameter in your prompt. If you
-# # leave it out, you need to update this to qwen2.5:3b to proceed the tool call.
-# CHAT_MODEL=qwen2.5:0.5b
+# # leave it out, you need to update this to qwen3:1.7b to proceed the tool call.
+# CHAT_MODEL=qwen3:0.6b
 
 # Uncomment and complete if you want to use Azure OpenAI Service
 ## "Azure OpenAI Endpoint" in https://oai.azure.com/resource/overview
diff --git a/genai-function-calling/vercel-ai/Dockerfile b/genai-function-calling/vercel-ai/Dockerfile
index 4d0c74e..d6de894 100644
--- a/genai-function-calling/vercel-ai/Dockerfile
+++ b/genai-function-calling/vercel-ai/Dockerfile
@@ -1,7 +1,11 @@
 FROM node:22-alpine
 
 WORKDIR /app
-COPY package.json *.js /app/
+
+COPY package.json /app/
+
 RUN touch .env && npm install
+COPY *.js /app/
+
 ENTRYPOINT ["npm", "start"]
diff --git a/genai-function-calling/vercel-ai/env.example b/genai-function-calling/vercel-ai/env.example
index 3ea4e36..95b029b 100644
--- a/genai-function-calling/vercel-ai/env.example
+++ b/genai-function-calling/vercel-ai/env.example
@@ -4,14 +4,12 @@ OPENAI_API_KEY=
 # Uncomment to use Ollama instead of OpenAI
 # OPENAI_BASE_URL=http://localhost:11434/v1
 # OPENAI_API_KEY=unused
-# # This needs qwen2.5:3b, as qwen2.5:0.5b doesn't process the tool call
-# CHAT_MODEL=qwen2.5:3b
+# CHAT_MODEL=qwen3:0.6b
 
 # Uncomment to use RamaLama instead of OpenAI
 # OPENAI_BASE_URL=http://localhost:8080/v1
 # OPENAI_API_KEY=unused
-# # This needs qwen2.5:3b, as qwen2.5:0.5b doesn't process the tool call
-# CHAT_MODEL=qwen2.5:3b
+# CHAT_MODEL=qwen3:0.6b
 
 # Uncomment and complete if you want to use Azure OpenAI Service
 ## "Azure OpenAI Endpoint" in https://oai.azure.com/resource/overview
diff --git a/genai-function-calling/vercel-ai/package.json b/genai-function-calling/vercel-ai/package.json
index 04ee921..468d616 100644
--- a/genai-function-calling/vercel-ai/package.json
+++ b/genai-function-calling/vercel-ai/package.json
@@ -10,7 +10,7 @@
     "start": "node --env-file .env --import @elastic/opentelemetry-node --import ./telemetry.js index.js"
   },
   "dependencies": {
-    "ai": "^4.3.10",
+    "ai": "^4.3.11",
    "@ai-sdk/azure": "^1.3.21",
    "@ai-sdk/openai": "^1.3.20",
    "@modelcontextprotocol/sdk": "^1.10.2",