3 changes: 2 additions & 1 deletion genai-function-calling/openai-agents/Dockerfile
@@ -8,5 +8,6 @@ RUN --mount=type=cache,target=/root/.cache/pip pip install -r /tmp/requirements.
RUN --mount=type=cache,target=/root/.cache/pip edot-bootstrap --action=install

COPY main.py /
COPY mcp_server.py /

CMD [ "opentelemetry-instrument", "python", "main.py" ]
ENTRYPOINT [ "opentelemetry-instrument", "python", "main.py" ]
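Note: the switch from `CMD` to `ENTRYPOINT` is what lets arguments appended to `docker compose run` (such as `--mcp` in the README section added below) be passed through to `main.py`, rather than replacing the container command as they would with `CMD`.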
17 changes: 17 additions & 0 deletions genai-function-calling/openai-agents/README.md
@@ -52,6 +52,22 @@ Finally, run `main.py` (notice the prefix of `opentelemetry-instrument`):
dotenv run --no-override -- opentelemetry-instrument python main.py
```

## Run with Model Context Protocol (MCP)

[mcp_server](mcp_server.py) includes the code needed to decouple tool discovery and invocation
via the [Model Context Protocol (MCP) flow][flow-mcp]. To run using MCP, append the
`--mcp` flag to the `dotenv run` or `docker compose run` command.

For example, to run with Docker:
```bash
docker compose run --build --rm genai-function-calling --mcp
```

Or to run with Python:
```bash
dotenv run --no-override -- opentelemetry-instrument python main.py --mcp
```

## Tests

Tests use [pytest-vcr][pytest-vcr] to capture HTTP traffic for offline unit
@@ -88,3 +104,4 @@ OpenAI Agents SDK's OpenTelemetry instrumentation is via
[pytest-vcr]: https://pytest-vcr.readthedocs.io/
[test_main.yaml]: cassettes/test_main.yaml
[openinference]: https://github.com/Arize-ai/openinference/tree/main/python/instrumentation/openinference-instrumentation-openai-agents
[flow-mcp]: ../README.md#model-context-protocol-flow
31 changes: 28 additions & 3 deletions genai-function-calling/openai-agents/main.py
@@ -1,3 +1,4 @@
import argparse
import asyncio
import os

@@ -19,7 +20,6 @@
GLOBAL_TRACE_PROVIDER.shutdown()


@function_tool(strict_mode=False)
async def get_latest_elasticsearch_version(major_version: int = 0) -> str:
"""Returns the latest GA version of Elasticsearch in "X.Y.Z" format.

@@ -49,15 +49,15 @@ async def get_latest_elasticsearch_version(major_version: int = 0) -> str:
    return max(versions, key=lambda v: tuple(map(int, v.split("."))))


async def main():
async def run_agent(**agent_kwargs):
    model_name = os.getenv("CHAT_MODEL", "gpt-4o-mini")
    openai_client = AsyncAzureOpenAI() if os.getenv("AZURE_OPENAI_API_KEY") else None
    model = OpenAIProvider(openai_client=openai_client, use_responses=False).get_model(model_name)
    agent = Agent(
        name="version_assistant",
        tools=[get_latest_elasticsearch_version],
        model=model,
        model_settings=ModelSettings(temperature=0),
        **agent_kwargs,
    )

    result = await Runner.run(
@@ -68,5 +68,30 @@ async def main():
    print(result.final_output)


async def main():
    parser = argparse.ArgumentParser(
        prog="genai-function-calling",
        description="Fetches the latest version of Elasticsearch 8",
    )
    parser.add_argument(
        "--mcp",
        action="store_true",
        help="Run tools via an MCP server instead of directly",
    )
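    # parse_known_args ignores any flags this parser doesn't define, returning
    # them separately (discarded here) instead of raising an error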
    args, _ = parser.parse_known_args()

    if args.mcp:
        from agents.mcp import MCPServerSse
        from mcp_server import mcp_server

        async with (
            mcp_server([get_latest_elasticsearch_version]),
            MCPServerSse(params={"url": "http://localhost:8000/sse"}) as mcp_client,
        ):
            await run_agent(mcp_servers=[mcp_client])
    else:
        await run_agent(tools=[function_tool(strict_mode=False)(get_latest_elasticsearch_version)])


if __name__ == "__main__":
    asyncio.run(main())
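To make the decoupling concrete: in the `--mcp` branch above, the agent no longer receives the Python function directly; it asks each entry in `mcp_servers` for its tool list at run time. Below is a minimal sketch of that discovery step, assuming the server from `mcp_server.py` is already running on port 8000 and that `MCPServerSse.list_tools()` behaves as in current openai-agents releases; the `list_mcp_tools` helper is hypothetical, not part of this change:

```python
import asyncio

from agents.mcp import MCPServerSse


async def list_mcp_tools():
    # Connect to the SSE endpoint exposed by mcp_server.py and ask the
    # server which tools it offers; the agent performs the same discovery
    # when it runs.
    async with MCPServerSse(params={"url": "http://localhost:8000/sse"}) as server:
        for tool in await server.list_tools():
            print(f"{tool.name}: {tool.description}")


asyncio.run(list_mcp_tools())
```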
35 changes: 35 additions & 0 deletions genai-function-calling/openai-agents/mcp_server.py
@@ -0,0 +1,35 @@
import asyncio
import contextlib
import httpx
from mcp.server.fastmcp import FastMCP
import uvicorn


@contextlib.asynccontextmanager
async def mcp_server(tools):
    mcp_server = FastMCP(log_level="WARNING")
    for tool in tools:
        mcp_server.add_tool(tool)
    # Manually set up uvicorn to allow shutting it down
    config = uvicorn.Config(
        mcp_server.sse_app(),
        host="localhost",
        port=8000,
        log_level="critical",  # To suppress an SSE background task cancellation ERROR
        timeout_graceful_shutdown=1,
    )
    server = uvicorn.Server(config)
    server_task = asyncio.create_task(server.serve())
    # Wait for the server to start accepting connections
    async with httpx.AsyncClient() as client:
        while True:
            try:
                await client.get("http://localhost:8000/")
                break
            except httpx.ConnectError:
                await asyncio.sleep(0.05)  # back off briefly instead of spinning
    try:
        yield
    finally:
        server.should_exit = True
        await server_task
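As a rough usage sketch of this context manager on its own, with a hypothetical `add` tool and `demo` entry point (neither is part of this change):

```python
import asyncio

from mcp_server import mcp_server


def add(a: int, b: int) -> int:
    """Adds two integers."""
    return a + b


async def demo():
    # Serve the hypothetical `add` tool over SSE for ten seconds, then exit
    # the context manager, which shuts uvicorn down gracefully.
    async with mcp_server([add]):
        print("MCP server listening at http://localhost:8000/sse")
        await asyncio.sleep(10)


asyncio.run(demo())
```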
3 changes: 2 additions & 1 deletion genai-function-calling/openai-agents/requirements.txt
@@ -1,5 +1,6 @@
openai-agents~=0.0.8
openai-agents~=0.0.9
httpx~=0.28.1
mcp~=1.6.0

elastic-opentelemetry~=1.0.0
# Use openai-agents instrumentation from OpenInference