Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 2 additions & 6 deletions genai-function-calling/openai-agents/env.example
Original file line number Diff line number Diff line change
Expand Up @@ -4,16 +4,12 @@ OPENAI_API_KEY=
# Uncomment to use Ollama instead of OpenAI
# OPENAI_BASE_URL=http://localhost:11434/v1
# OPENAI_API_KEY=unused
# # This works when you supply a major_version parameter in your prompt. If you
# # leave it out, you need to update this to qwen2.5:3b to proceed the tool call.
# CHAT_MODEL=qwen2.5:0.5b
# CHAT_MODEL=qwen3:0.6b

# Uncomment to use RamaLama instead of OpenAI
# OPENAI_BASE_URL=http://localhost:8080/v1
# OPENAI_API_KEY=unused
# # This works when you supply a major_version parameter in your prompt. If you
# # leave it out, you need to update this to qwen2.5:3b to proceed the tool call.
# CHAT_MODEL=qwen2.5:0.5b
# CHAT_MODEL=qwen3:0.6b

# Uncomment and complete if you want to use Azure OpenAI Service
## "Azure OpenAI Endpoint" in https://oai.azure.com/resource/overview
Expand Down
8 changes: 2 additions & 6 deletions genai-function-calling/semantic-kernel-dotnet/env.example
Original file line number Diff line number Diff line change
Expand Up @@ -4,16 +4,12 @@ OPENAI_API_KEY=
# Uncomment to use Ollama instead of OpenAI
# OPENAI_BASE_URL=http://localhost:11434/v1
# OPENAI_API_KEY=unused
# # This works when you supply a major_version parameter in your prompt. If you
# # leave it out, you need to update this to qwen2.5:3b to proceed the tool call.
# CHAT_MODEL=qwen2.5:0.5b
# CHAT_MODEL=qwen3:0.6b

# Uncomment to use RamaLama instead of OpenAI
# OPENAI_BASE_URL=http://localhost:8080/v1
# OPENAI_API_KEY=unused
# # This works when you supply a major_version parameter in your prompt. If you
# # leave it out, you need to update this to qwen2.5:3b to proceed the tool call.
# CHAT_MODEL=qwen2.5:0.5b
# CHAT_MODEL=qwen3:0.6b

# Uncomment and complete if you want to use Azure OpenAI Service
## "Azure OpenAI Endpoint" in https://oai.azure.com/resource/overview
Expand Down
8 changes: 4 additions & 4 deletions genai-function-calling/spring-ai/env.example
Original file line number Diff line number Diff line change
Expand Up @@ -5,15 +5,15 @@ OPENAI_API_KEY=
# OPENAI_BASE_URL=http://localhost:11434/v1
# OPENAI_API_KEY=unused
# # This works when you supply a major_version parameter in your prompt. If you
# # leave it out, you need to update this to qwen2.5:3b to proceed the tool call.
# CHAT_MODEL=qwen2.5:0.5b
# # leave it out, you need to update this to qwen3:1.7b to process the tool call.
# CHAT_MODEL=qwen3:0.6b

# Uncomment to use RamaLama instead of OpenAI
# OPENAI_BASE_URL=http://localhost:8080/v1
# OPENAI_API_KEY=unused
# # This works when you supply a major_version parameter in your prompt. If you
# # leave it out, you need to update this to qwen2.5:3b to proceed the tool call.
# CHAT_MODEL=qwen2.5:0.5b
# # leave it out, you need to update this to qwen3:1.7b to process the tool call.
# CHAT_MODEL=qwen3:0.6b

# Uncomment and complete if you want to use Azure OpenAI Service
## "Azure OpenAI Endpoint" in https://oai.azure.com/resource/overview
Expand Down
6 changes: 5 additions & 1 deletion genai-function-calling/vercel-ai/Dockerfile
Original file line number Diff line number Diff line change
@@ -1,7 +1,11 @@
FROM node:22-alpine

WORKDIR /app
COPY package.json *.js /app/

COPY package.json /app/

RUN touch .env && npm install

COPY *.js /app/

ENTRYPOINT ["npm", "start"]
6 changes: 2 additions & 4 deletions genai-function-calling/vercel-ai/env.example
Original file line number Diff line number Diff line change
Expand Up @@ -4,14 +4,12 @@ OPENAI_API_KEY=
# Uncomment to use Ollama instead of OpenAI
# OPENAI_BASE_URL=http://localhost:11434/v1
# OPENAI_API_KEY=unused
# # This needs qwen2.5:3b, as qwen2.5:0.5b doesn't process the tool call
# CHAT_MODEL=qwen2.5:3b
# CHAT_MODEL=qwen3:0.6b

# Uncomment to use RamaLama instead of OpenAI
# OPENAI_BASE_URL=http://localhost:8080/v1
# OPENAI_API_KEY=unused
# # This needs qwen2.5:3b, as qwen2.5:0.5b doesn't process the tool call
# CHAT_MODEL=qwen2.5:3b
# CHAT_MODEL=qwen3:0.6b

# Uncomment and complete if you want to use Azure OpenAI Service
## "Azure OpenAI Endpoint" in https://oai.azure.com/resource/overview
Expand Down
2 changes: 1 addition & 1 deletion genai-function-calling/vercel-ai/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
"start": "node --env-file .env --import @elastic/opentelemetry-node --import ./telemetry.js index.js"
},
"dependencies": {
"ai": "^4.3.10",
"ai": "^4.3.11",
"@ai-sdk/azure": "^1.3.21",
"@ai-sdk/openai": "^1.3.20",
"@modelcontextprotocol/sdk": "^1.10.2",
Expand Down