@@ -17,6 +17,7 @@ class FastMCPInstrumentor:

def __init__(self):
self._tracer = None
self._server_name = None

def instrument(self, tracer: Tracer):
"""Apply FastMCP-specific instrumentation."""
@@ -30,16 +31,39 @@ def instrument(self, tracer: Tracer):
"fastmcp.tools.tool_manager",
)

# Instrument FastMCP __init__ to capture server name
register_post_import_hook(
lambda _: wrap_function_wrapper(
"fastmcp", "FastMCP.__init__", self._fastmcp_init_wrapper()
),
"fastmcp",
)

def uninstrument(self):
"""Remove FastMCP-specific instrumentation."""
# Note: wrapt doesn't provide a clean way to unwrap post-import hooks
# This is a limitation we'll need to document
pass

def _fastmcp_init_wrapper(self):
"""Create wrapper for FastMCP initialization to capture server name."""
@dont_throw
def traced_method(wrapped, _instance, args, kwargs):
# Call the original __init__ first
result = wrapped(*args, **kwargs)

if args and len(args) > 0:
self._server_name = f"{args[0]}.mcp"
elif 'name' in kwargs:
self._server_name = f"{kwargs['name']}.mcp"

return result
return traced_method

def _fastmcp_tool_wrapper(self):
"""Create wrapper for FastMCP tool execution."""
@dont_throw
async def traced_method(wrapped, instance, args, kwargs):
async def traced_method(wrapped, _instance, args, kwargs):
if not self._tracer:
return await wrapped(*args, **kwargs)

@@ -62,12 +86,16 @@ async def traced_method(wrapped, instance, args, kwargs):
with self._tracer.start_as_current_span("mcp.server") as mcp_span:
mcp_span.set_attribute(SpanAttributes.TRACELOOP_SPAN_KIND, "server")
mcp_span.set_attribute(SpanAttributes.TRACELOOP_ENTITY_NAME, "mcp.server")
if self._server_name:
mcp_span.set_attribute(SpanAttributes.TRACELOOP_WORKFLOW_NAME, self._server_name)

# Create nested tool span
span_name = f"{entity_name}.tool"
with self._tracer.start_as_current_span(span_name) as tool_span:
tool_span.set_attribute(SpanAttributes.TRACELOOP_SPAN_KIND, TraceloopSpanKindValues.TOOL.value)
tool_span.set_attribute(SpanAttributes.TRACELOOP_ENTITY_NAME, entity_name)
if self._server_name:
tool_span.set_attribute(SpanAttributes.TRACELOOP_WORKFLOW_NAME, self._server_name)

if self._should_send_prompts():
try:
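For context on the wrapt machinery this hunk relies on, here is a minimal, self-contained sketch of the same post-import-hook pattern; the module name example_pkg and class Server are placeholders for illustration, not the real FastMCP names.

# Standalone sketch of the wrapt pattern used above (placeholder names, not FastMCP):
# wrap a class' __init__ only once its module is actually imported, and capture a
# constructor argument for later use by the instrumentation.
from wrapt import register_post_import_hook, wrap_function_wrapper

captured = {}

def _init_wrapper(wrapped, instance, args, kwargs):
    result = wrapped(*args, **kwargs)  # always run the original __init__ first
    name = args[0] if args else kwargs.get("name")
    if name:
        captured["server_name"] = f"{name}.mcp"  # same "<name>.mcp" convention as this PR
    return result

register_post_import_hook(
    lambda _module: wrap_function_wrapper("example_pkg", "Server.__init__", _init_wrapper),
    "example_pkg",
)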
26 changes: 26 additions & 0 deletions packages/opentelemetry-instrumentation-mcp/tests/test_fastmcp.py
@@ -121,3 +121,29 @@ def get_greeting() -> str:
assert len(request_writer_spans) == 0, (
f"RequestStreamWriter spans should be removed, found {len(request_writer_spans)}"
)

# Verify TRACELOOP_WORKFLOW_NAME is set correctly on server spans
mcp_server_spans = [span for span in spans if span.name == 'mcp.server']
assert len(mcp_server_spans) >= 1, (
f"Expected at least 1 mcp.server span, found {len(mcp_server_spans)}"
)

for server_span in mcp_server_spans:
workflow_name = server_span.attributes.get('traceloop.workflow.name')
assert workflow_name == 'test-server.mcp', (
f"Expected workflow name 'test-server.mcp', got '{workflow_name}'"
)

# Verify TRACELOOP_WORKFLOW_NAME is also set on tool spans
server_tool_spans = [span for span in spans if span.name == 'add_numbers.tool'
and span.attributes.get('traceloop.span.kind') == 'tool'
and 'traceloop.workflow.name' in span.attributes]
assert len(server_tool_spans) >= 1, (
f"Expected at least 1 server-side tool span with workflow name, found {len(server_tool_spans)}"
)

for tool_span in server_tool_spans:
workflow_name = tool_span.attributes.get('traceloop.workflow.name')
assert workflow_name == 'test-server.mcp', (
f"Expected workflow name 'test-server.mcp' on tool span, got '{workflow_name}'"
)
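The assertions above expect a FastMCP server named "test-server" exposing an add_numbers tool (both names come from the assertions themselves). A hedged sketch of such a fixture, presumably defined earlier in test_fastmcp.py and not part of this hunk, would look roughly like this:

# Hedged sketch of the fixture shape the assertions above expect; the actual
# fixture is defined earlier in test_fastmcp.py.
from fastmcp import FastMCP

mcp = FastMCP("test-server")  # the instrumented __init__ should record "test-server.mcp"

@mcp.tool()
def add_numbers(a: int, b: int) -> int:
    """Calling this tool should yield an 'add_numbers.tool' span nested under 'mcp.server'."""
    return a + b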
@@ -62,6 +62,7 @@ def get_test_config() -> dict:
# Test 2: Verify traceloop attributes
assert tool_span.attributes.get("traceloop.span.kind") == "tool"
assert tool_span.attributes.get("traceloop.entity.name") == "process_data"
assert tool_span.attributes.get("traceloop.workflow.name") == "attribute-test-server.mcp"

# Test 3: Verify span status
assert tool_span.status.status_code.name == "OK"
@@ -166,4 +167,7 @@ async def failing_tool(should_fail: bool = True) -> str:
assert error_span.attributes.get("traceloop.span.kind") == "tool"
assert error_span.attributes.get("traceloop.entity.name") == "failing_tool"

# Verify workflow name is set correctly even on error spans
assert error_span.attributes.get("traceloop.workflow.name") == "error-test-server.mcp"

print("✅ Error handling validated")
@@ -7,9 +7,10 @@
from opentelemetry.trace import Tracer, Status, StatusCode, SpanKind, get_current_span, set_span_in_context
from opentelemetry import context
from opentelemetry.semconv_ai import SpanAttributes, TraceloopSpanKindValues
from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GEN_AI_COMPLETION
from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GEN_AI_COMPLETION, GEN_AI_AGENT_NAME
from agents.tracing.processors import TracingProcessor
from .utils import dont_throw
from traceloop.sdk.tracing import set_agent_name


class OpenTelemetryTracingProcessor(TracingProcessor):
@@ -27,6 +28,7 @@ def __init__(self, tracer: Tracer):
self._span_contexts: Dict[str, Any] = {} # agents span -> context token
self._last_model_settings: Dict[str, Any] = {}
self._reverse_handoffs_dict: OrderedDict[str, str] = OrderedDict()
self._agent_name: str = None # Track current active agent name

@dont_throw
def on_trace_start(self, trace):
@@ -74,6 +76,13 @@ def on_span_start(self, span):
if isinstance(span_data, AgentSpanData):
agent_name = getattr(span_data, 'name', None) or "unknown_agent"

self._agent_name = agent_name

# Set agent name in OpenTelemetry context for propagation to child spans
print(f"Setting agent name in OpenTelemetry context: {agent_name}")
set_agent_name(agent_name)
print(f"Agent name in OpenTelemetry context: {agent_name}")

handoff_parent = None
trace_id = getattr(span, 'trace_id', None)
if trace_id:
@@ -83,7 +92,7 @@

attributes = {
SpanAttributes.TRACELOOP_SPAN_KIND: TraceloopSpanKindValues.AGENT.value,
"gen_ai.agent.name": agent_name,
GEN_AI_AGENT_NAME: agent_name,
"gen_ai.system": "openai_agents"
}

@@ -132,6 +141,7 @@ def on_span_start(self, span):

if from_agent and from_agent != 'unknown':
handoff_attributes["gen_ai.handoff.from_agent"] = from_agent
handoff_attributes[GEN_AI_AGENT_NAME] = from_agent
if to_agent and to_agent != 'unknown':
handoff_attributes["gen_ai.handoff.to_agent"] = to_agent

@@ -158,6 +168,8 @@ def on_span_start(self, span):
f"{GEN_AI_COMPLETION}.tool.strict_json_schema": True
}

self._set_agent_name_attribute(tool_attributes, current_agent_span)

if hasattr(span_data, 'description') and span_data.description:
# Only use description if it's not a generic class description
desc = span_data.description
@@ -182,6 +194,8 @@
"gen_ai.operation.name": "response"
}

self._set_agent_name_attribute(response_attributes, current_agent_span)

otel_span = self.tracer.start_span(
"openai.response",
kind=SpanKind.CLIENT,
@@ -201,6 +215,8 @@
"gen_ai.operation.name": "chat"
}

self._set_agent_name_attribute(response_attributes, current_agent_span)

otel_span = self.tracer.start_span(
"openai.response",
kind=SpanKind.CLIENT,
@@ -493,6 +509,13 @@ def on_span_end(self, span):
# Note: prompt_attributes, completion_attributes, and usage tokens are now
# on response spans only

# Only clear agent context if this agent matches current context
# This prevents premature clearing in complex handoff scenarios
if span_data and hasattr(span_data, 'name'):
agent_name = getattr(span_data, 'name', None)
if self._agent_name == agent_name:
self._agent_name = None

if hasattr(span, 'error') and span.error:
otel_span.set_status(Status(StatusCode.ERROR, str(span.error)))
else:
@@ -524,6 +547,11 @@ def _find_current_agent_span(self):
pass
return None

def _set_agent_name_attribute(self, attributes: Dict[str, Any], current_agent_span=None):
"""Set the agent name attribute using current agent name."""
if self._agent_name:
attributes[GEN_AI_AGENT_NAME] = self._agent_name

def force_flush(self):
"""Force flush any pending spans."""
pass
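set_agent_name is imported from traceloop.sdk.tracing and is not shown in this diff. Assuming it stashes the name in the active OpenTelemetry context so child spans can read it back, the underlying pattern would look like the sketch below; this is an illustration of that pattern, not the actual helper.

# Illustration of agent-name propagation via OpenTelemetry context. This is NOT
# the traceloop.sdk.tracing.set_agent_name implementation, only the generic
# context pattern such a helper would typically rely on.
from typing import Optional
from opentelemetry import context

_AGENT_NAME_KEY = context.create_key("example.agent_name")  # hypothetical key name

def example_set_agent_name(name: str) -> object:
    """Attach the agent name to the current context; keep the token to detach later."""
    return context.attach(context.set_value(_AGENT_NAME_KEY, name))

def example_get_agent_name() -> Optional[str]:
    """Read the agent name back inside any work running under that context."""
    return context.get_value(_AGENT_NAME_KEY)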
@@ -0,0 +1,179 @@
interactions:
- request:
body: '{"data":[{"object":"trace.span","id":"span_1682072c4a874ee68e5ab580","trace_id":"trace_2dc4a148df4c45ed8b309c32cc5c11a9","parent_id":"span_c0ea12fef2fa41949a7e3aa4","started_at":"2025-08-15T17:56:02.384852+00:00","ended_at":"2025-08-15T17:56:03.612731+00:00","span_data":{"type":"response","response_id":"resp_689f74b2f1e0819088b30c8105dc8b290a33650d0eca9a46"},"error":null},{"object":"trace.span","id":"span_f69f0eed2a614f6ca2486935","trace_id":"trace_2dc4a148df4c45ed8b309c32cc5c11a9","parent_id":"span_c0ea12fef2fa41949a7e3aa4","started_at":"2025-08-15T17:56:03.613427+00:00","ended_at":"2025-08-15T17:56:03.613997+00:00","span_data":{"type":"function","name":"generate_report","input":"{\"processed_data\":\"Processed
results: Analyzed data patterns for: Sales data from last quarter\"}","output":"Generated
report: Processed results: Analyzed data patterns for: Sales data from last
quarter","mcp_data":null},"error":null},{"object":"trace.span","id":"span_40705905ad4149d79d4419ad","trace_id":"trace_2dc4a148df4c45ed8b309c32cc5c11a9","parent_id":"span_c0ea12fef2fa41949a7e3aa4","started_at":"2025-08-15T17:56:03.614647+00:00","ended_at":"2025-08-15T17:56:08.170805+00:00","span_data":{"type":"response","response_id":"resp_689f74b439dc81909f83fedf09751dfe0a33650d0eca9a46"},"error":null},{"object":"trace.span","id":"span_c0ea12fef2fa41949a7e3aa4","trace_id":"trace_2dc4a148df4c45ed8b309c32cc5c11a9","parent_id":null,"started_at":"2025-08-15T17:55:59.725264+00:00","ended_at":"2025-08-15T17:56:08.171797+00:00","span_data":{"type":"agent","name":"Analytics
Agent","handoffs":[],"tools":["analyze_data","process_results","generate_report"],"output_type":"str"},"error":null},{"object":"trace","id":"trace_6a430ad653c745b78c89622b8e61fccc","workflow_name":"Agent
workflow","group_id":null,"metadata":null}]}'
headers:
accept:
- '*/*'
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1811'
content-type:
- application/json
cookie:
- __cf_bm=UhrfEFws9O_ZBKuSryCKFovrTxciXL8p2WJuM1K2dN8-1755280562-1.0.1.1-dIIsnsWKGJtA9W6u0MbXjq7UUseSGAthIGNSZMriLzkecTBUlPjjJFr6r0QnteF8Ul.liPTWhJI6mlCKQBREwPTAAOYdCC2ZirAu9ZrwIWA;
_cfuvid=zDtlMy4g5CGjInt8L2ecM4HeWcHtz0bFgxVbfE5vSqk-1755280562683-0.0.1.1-604800000
host:
- api.openai.com
openai-beta:
- traces=v1
user-agent:
- python-httpx/0.28.1
method: POST
uri: https://api.openai.com/v1/traces/ingest
response:
body:
string: ''
headers:
CF-RAY:
- 96fa91223a586901-FRA
Connection:
- keep-alive
Date:
- Fri, 15 Aug 2025 17:56:09 GMT
Server:
- cloudflare
X-Content-Type-Options:
- nosniff
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- traceloop
openai-processing-ms:
- '242'
openai-project:
- proj_tzz1TbPPOXaf6j9tEkVUBIAa
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-envoy-upstream-service-time:
- '253'
x-request-id:
- req_874f73c01f3025bae3501d414db98cdf
status:
code: 204
message: No Content
- request:
body: '{"include":[],"input":[{"content":"What is AI?","role":"user"}],"instructions":"You
are a helpful assistant that answers all questions","max_output_tokens":1024,"model":"gpt-4.1","stream":false,"temperature":0.3,"tools":[],"top_p":0.2}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '235'
content-type:
- application/json
cookie:
- __cf_bm=WwDHl7j6.dqwOcLIJAXqGOLTR6ZUq3JCq47vW3LBIBs-1755280559-1.0.1.1-na9dmQo.4u4zv1vUQ7SN457JVcBR1ifes3cOUutsLuVtLSfo_sZ1I8fRayi6NDR2VKiwUFBhrUYM85dJ8BB7Ior2pM9Ng5MfNJwvGRd3lgE;
_cfuvid=PWHn6CD5_OXbE3jv9HT7E4FDlSvoTN5AciqTl4Chslg-1755280559217-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- Agents/Python 0.2.7
x-stainless-arch:
- arm64
x-stainless-async:
- async:asyncio
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.99.9
x-stainless-read-timeout:
- '600'
x-stainless-retry-count:
- '1'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.10.13
method: POST
uri: https://api.openai.com/v1/responses
response:
body:
string: !!binary |
H4sIAAAAAAAAA3RV227jNhB9z1cM9LQ14kB2nET2m1EURYCiKLrdAsWmEEbUSGLNi5YcOtEu8u8F
KVmx2+yLYc3lcOYczvDbFUAm62wHmSPfl/fFtnnYVNuioqJYbQnrYlutxHYl1sVqW61zLKpNIYrb
bZFvmhVl1xHAVv+Q4BOINX6yC0fIVJcYfauHu7t1kd/db5PPM3LwMUdY3StiqsekCsWhdTaYWFWD
ylMyk3PWZTswQalkkOaUWNbEKJW/9Hp2QbC0Jh3ylw2AjgChI9U3QQF6Lz2jYeAOGdD4Z3IeUCn4
EsiPmQlL40tpA/eBS7YHSoCrfL2ZnWytKgWqyxK0rUnFs9uel5ub1XKdr++W+Wa52ky0JcxsB5+v
AAC+pd9ZD+3bkxyY00pEOaoiL27Xm/z+oagbcbt9V46EwUNPCYW8x/bM8T3ek1NYw2TeSjov6wL2
RAe98JydAtAYy3ii/fPfF05l297Z6h1PAtpBtljsHxcLiLrUHhrrYLHYO5aNFBIVPBompWRLRtBi
cQOPDI6aKBtb4I6gpiMp22syDLaB2GJgcuAHz6Q9WAfeNvwcr0KSXaCBnlxjnQZGf/DAQy+jlgM4
+hKkk6aFLmg0IM9Ov4E/OvI05UgjVKhp92SezBI+mZpcaiHmGuTgUIFC0wZsCT4oeSD4ybRK+i5W
ZLmLJfbxbs1h/ocI9TsJ2xr5NQJJHc2phZ5IdNH/C6Ez0dk4q4FeenIylgcfNIpOGgI1RSS4j1Yd
Y3RUQUU+0NSg8RBtNQnpo24x8DeFQzS2qMcjaydTpkDnY5f7x0RdRdDEUQVpQKMZgI7khhoHYBKd
scq2kvw1+CA6QD8R9Kd0HPBsBP1Eykfp5DXsFb3gdTz1Z2tbRbA/xc2caE2mTtdslnZE4M56Amvg
V+JGyZeIstf41ZqRAFLN8rKXJfzYIVeWRzZE8Gx11IPcUQqCONyp5UcDPmiNbriG/SNID1jZwCf+
JsI9PGVeo+OnDHy6lENiKq4dCB6eJXcwjl4qzlFPLFkep7t0DbZhMlANoKWW4vB2/7iTJn3GOk+6
3mTzHL1O/+bRypxVaVxnosfgGJiCsh4dKkXqcoWxC+PS7R0dpQ2+PO31Mu2mecX1zuqeS4Gio/JA
w7nPEXobK8x20w7JqGms47OguI9GSifjFcDr+DxgQzyUsiYTp58uVv8kTcmjPaupwaDGTZR5to7O
m2DSPbk4hdGc39xO1rRxpsri/OPb99mmS3Eja1PFR3KV9ZKHcb/WMuhsrnvksbNSjMQHttnseFt8
Gdu+PFuH+WzsU43r8dsFI9ItT11Kj5U6PZMhrfW5AWkunqfxcfqP/eydnNtM0tVviflFq/979e7u
3/O8BzzL/z1stozqDLrIZxKDv9RbE2ONjBH/9er1XwAAAP//AwB79dv3tQgAAA==
headers:
CF-RAY:
- 96fa9123bba209c9-HFA
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Fri, 15 Aug 2025 17:56:12 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- traceloop
openai-processing-ms:
- '2970'
openai-project:
- proj_tzz1TbPPOXaf6j9tEkVUBIAa
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-envoy-upstream-service-time:
- '2974'
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999957'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_75dd7627c6cae3f69948923a1d3a4850
status:
code: 200
message: OK
version: 1