diff --git a/pydantic_ai_slim/pydantic_ai/_otel_messages.py b/pydantic_ai_slim/pydantic_ai/_otel_messages.py
index 18c780098b..d1f324d3c8 100644
--- a/pydantic_ai_slim/pydantic_ai/_otel_messages.py
+++ b/pydantic_ai_slim/pydantic_ai/_otel_messages.py
@@ -1,6 +1,8 @@
 """Type definitions of OpenTelemetry GenAI spec message parts.
 
-Based on https://github.com/lmolkova/semantic-conventions/blob/eccd1f806e426a32c98271c3ce77585492d26de2/docs/gen-ai/non-normative/models.ipynb
+Based on the OpenTelemetry semantic conventions for GenAI:
+https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/gen-ai-input-messages.json
+https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/gen-ai-output-messages.json
 """
 
 from __future__ import annotations
@@ -12,11 +14,15 @@
 
 
 class TextPart(TypedDict):
+    """A text part in a GenAI message."""
+
     type: Literal['text']
     content: NotRequired[str]
 
 
 class ToolCallPart(TypedDict):
+    """A tool call part in a GenAI message."""
+
     type: Literal['tool_call']
     id: str
     name: str
@@ -25,36 +31,57 @@
 
 
 class ToolCallResponsePart(TypedDict):
+    """A tool call response part in a GenAI message."""
+
     type: Literal['tool_call_response']
    id: str
     name: str
+    # TODO: This should be `response` not `result`
     result: NotRequired[JsonValue]
     builtin: NotRequired[bool]  # Not (currently?) part of the spec, used by Logfire
 
 
-class MediaUrlPart(TypedDict):
-    type: Literal['image-url', 'audio-url', 'video-url', 'document-url']
-    url: NotRequired[str]
+class UriPart(TypedDict):
+    """A URI part in a GenAI message (for images, audio, video, documents).
+
+    Per the semantic conventions, uses 'uri' type with modality field.
+    """
 
 
-class BinaryDataPart(TypedDict):
-    type: Literal['binary']
-    media_type: str
-    content: NotRequired[str]
+    type: Literal['uri']
+    uri: NotRequired[str]
+    modality: NotRequired[str]
+
+
+class BlobPart(TypedDict):
+    """A blob (binary data) part in a GenAI message.
+
+    Per the semantic conventions, uses 'blob' type with modality field.
+    """
 
 
-class ThinkingPart(TypedDict):
-    type: Literal['thinking']
+    type: Literal['blob']
+    blob: NotRequired[str]
+    modality: NotRequired[str]
+
+
+class ReasoningPart(TypedDict):
+    """A reasoning/thinking part in a GenAI message.
+
+    Per the semantic conventions, uses 'reasoning' type.
+ """ + + type: Literal['reasoning'] content: NotRequired[str] -MessagePart: TypeAlias = 'TextPart | ToolCallPart | ToolCallResponsePart | MediaUrlPart | BinaryDataPart | ThinkingPart' +MessagePart: TypeAlias = 'TextPart | ToolCallPart | ToolCallResponsePart | UriPart | BlobPart | ReasoningPart' Role = Literal['system', 'user', 'assistant'] class ChatMessage(TypedDict): + """A chat message in the GenAI format.""" + role: Role parts: list[MessagePart] @@ -63,6 +90,8 @@ class ChatMessage(TypedDict): class OutputMessage(ChatMessage): + """An output message with optional finish reason.""" + finish_reason: NotRequired[str] diff --git a/pydantic_ai_slim/pydantic_ai/_output.py b/pydantic_ai_slim/pydantic_ai/_output.py index 58ae8c2a26..0fb422653f 100644 --- a/pydantic_ai_slim/pydantic_ai/_output.py +++ b/pydantic_ai_slim/pydantic_ai/_output.py @@ -102,8 +102,10 @@ async def execute_traced_output_function( instrumentation_names = InstrumentationNames.for_version(run_context.instrumentation_version) # Set up span attributes tool_name = run_context.tool_name or getattr(function_schema.function, '__name__', 'output_function') - attributes = { + attributes: dict[str, Any] = { 'gen_ai.tool.name': tool_name, + 'gen_ai.operation.name': 'execute_tool', + 'gen_ai.tool.type': 'function', 'logfire.msg': f'running output function: {tool_name}', } if run_context.tool_call_id: diff --git a/pydantic_ai_slim/pydantic_ai/_tool_manager.py b/pydantic_ai_slim/pydantic_ai/_tool_manager.py index a2730bd852..c109bed996 100644 --- a/pydantic_ai_slim/pydantic_ai/_tool_manager.py +++ b/pydantic_ai_slim/pydantic_ai/_tool_manager.py @@ -219,10 +219,12 @@ async def _call_function_tool( """See .""" instrumentation_names = InstrumentationNames.for_version(instrumentation_version) - span_attributes = { + span_attributes: dict[str, Any] = { 'gen_ai.tool.name': call.tool_name, # NOTE: this means `gen_ai.tool.call.id` will be included even if it was generated by pydantic-ai 'gen_ai.tool.call.id': call.tool_call_id, + 'gen_ai.operation.name': 'execute_tool', + 'gen_ai.tool.type': 'function', **({instrumentation_names.tool_arguments_attr: call.args_as_json_str()} if include_content else {}), 'logfire.msg': f'running tool: {call.tool_name}', # add the JSON schema so these attributes are formatted nicely in Logfire diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index a17e44fc1f..a14fb4c37a 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -665,6 +665,7 @@ async def get_instructions(run_context: RunContext[AgentDepsT]) -> str | None: 'model_name': model_used.model_name if model_used else 'no-model', 'agent_name': agent_name, 'gen_ai.agent.name': agent_name, + 'gen_ai.operation.name': 'invoke_agent', 'logfire.msg': f'{agent_name} run', }, ) diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index 1f7fe71e82..b3deb51f6d 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -767,17 +767,33 @@ def otel_message_parts(self, settings: InstrumentationSettings) -> list[_otel_me _otel_messages.TextPart(type='text', **({'content': part} if settings.include_content else {})) ) elif isinstance(part, ImageUrl | AudioUrl | DocumentUrl | VideoUrl): - parts.append( - _otel_messages.MediaUrlPart( - type=part.kind, - **{'url': part.url} if settings.include_content else {}, - ) - ) + # Map file URL kinds to modality values + 
+            modality_map = {
+                'image-url': 'image',
+                'audio-url': 'audio',
+                'document-url': 'document',
+                'video-url': 'video',
+            }
+            uri_part = _otel_messages.UriPart(type='uri', modality=modality_map.get(part.kind, 'unknown'))
+            if settings.include_content:
+                uri_part['uri'] = part.url
+            parts.append(uri_part)
         elif isinstance(part, BinaryContent):
-            converted_part = _otel_messages.BinaryDataPart(type='binary', media_type=part.media_type)
+            # Map media type prefix to modality
+            if part.is_image:
+                modality = 'image'
+            elif part.is_audio:
+                modality = 'audio'
+            elif part.is_video:
+                modality = 'video'
+            elif part.is_document:
+                modality = 'document'
+            else:
+                modality = part.media_type
+            blob_part = _otel_messages.BlobPart(type='blob', modality=modality)
             if settings.include_content and settings.include_binary_content:
-                converted_part['content'] = base64.b64encode(part.data).decode()
-            parts.append(converted_part)
+                blob_part['blob'] = base64.b64encode(part.data).decode()
+            parts.append(blob_part)
         elif isinstance(part, CachePoint):
             # CachePoint is a marker, not actual content - skip it for otel
             pass
@@ -1421,43 +1437,19 @@ def otel_message_parts(self, settings: InstrumentationSettings) -> list[_otel_me
                 )
             )
         elif isinstance(part, ThinkingPart):
+            # Per semantic conventions, thinking/reasoning uses 'reasoning' type
             parts.append(
-                _otel_messages.ThinkingPart(
-                    type='thinking',
+                _otel_messages.ReasoningPart(
+                    type='reasoning',
                     **({'content': part.content} if settings.include_content else {}),
                 )
             )
         elif isinstance(part, FilePart):
-            converted_part = _otel_messages.BinaryDataPart(type='binary', media_type=part.content.media_type)
-            if settings.include_content and settings.include_binary_content:
-                converted_part['content'] = base64.b64encode(part.content.data).decode()
-            parts.append(converted_part)
+            parts.append(_file_part_to_otel(part.content, settings))
         elif isinstance(part, BaseToolCallPart):
-            call_part = _otel_messages.ToolCallPart(type='tool_call', id=part.tool_call_id, name=part.tool_name)
-            if isinstance(part, BuiltinToolCallPart):
-                call_part['builtin'] = True
-            if settings.include_content and part.args is not None:
-                from .models.instrumented import InstrumentedModel
-
-                if isinstance(part.args, str):
-                    call_part['arguments'] = part.args
-                else:
-                    call_part['arguments'] = {k: InstrumentedModel.serialize_any(v) for k, v in part.args.items()}
-
-            parts.append(call_part)
+            parts.append(_tool_call_to_otel(part, settings))
         elif isinstance(part, BuiltinToolReturnPart):
-            return_part = _otel_messages.ToolCallResponsePart(
-                type='tool_call_response',
-                id=part.tool_call_id,
-                name=part.tool_name,
-                builtin=True,
-            )
-            if settings.include_content and part.content is not None:  # pragma: no branch
-                from .models.instrumented import InstrumentedModel
-
-                return_part['result'] = InstrumentedModel.serialize_any(part.content)
-
-            parts.append(return_part)
+            parts.append(_builtin_tool_return_to_otel(part, settings))
         return parts
 
     @property
@@ -1478,6 +1470,57 @@ def provider_request_id(self) -> str | None:
 
     __repr__ = _utils.dataclasses_no_defaults_repr
 
 
+def _file_part_to_otel(bc: BinaryContent, settings: InstrumentationSettings) -> _otel_messages.BlobPart:
+    """Convert a FilePart's BinaryContent to an otel BlobPart."""
+    if bc.is_image:
+        modality = 'image'
+    elif bc.is_audio:
+        modality = 'audio'
+    elif bc.is_video:
+        modality = 'video'
+    elif bc.is_document:
+        modality = 'document'
+    else:
+        modality = bc.media_type
+    blob_part = _otel_messages.BlobPart(type='blob', modality=modality)
+    if settings.include_content and settings.include_binary_content:
+        blob_part['blob'] = base64.b64encode(bc.data).decode()
+    return blob_part
+
+
+def _tool_call_to_otel(part: BaseToolCallPart, settings: InstrumentationSettings) -> _otel_messages.ToolCallPart:
+    """Convert a BaseToolCallPart to an otel ToolCallPart."""
+    call_part = _otel_messages.ToolCallPart(type='tool_call', id=part.tool_call_id, name=part.tool_name)
+    if settings.include_content and part.args is not None:
+        from .models.instrumented import InstrumentedModel
+
+        if isinstance(part.args, str):
+            call_part['arguments'] = part.args
+        else:
+            call_part['arguments'] = {k: InstrumentedModel.serialize_any(v) for k, v in part.args.items()}
+
+    if isinstance(part, BuiltinToolCallPart):
+        call_part['builtin'] = True
+    return call_part
+
+
+def _builtin_tool_return_to_otel(
+    part: BuiltinToolReturnPart, settings: InstrumentationSettings
+) -> _otel_messages.ToolCallResponsePart:
+    """Convert a BuiltinToolReturnPart to an otel ToolCallResponsePart."""
+    return_part = _otel_messages.ToolCallResponsePart(
+        type='tool_call_response',
+        id=part.tool_call_id,
+        name=part.tool_name,
+        builtin=True,
+    )
+    if settings.include_content and part.content is not None:  # pragma: no branch
+        from .models.instrumented import InstrumentedModel
+
+        return_part['result'] = InstrumentedModel.serialize_any(part.content)
+    return return_part
+
+
 ModelMessage = Annotated[ModelRequest | ModelResponse, pydantic.Discriminator('kind')]
 """Any message sent to or returned by a model."""
diff --git a/tests/test_logfire.py b/tests/test_logfire.py
index b33e8702e0..fc9a19b520 100644
--- a/tests/test_logfire.py
+++ b/tests/test_logfire.py
@@ -279,6 +279,7 @@ async def my_ret(x: int) -> str:
             'model_name': 'test',
             'agent_name': 'my_agent',
             'gen_ai.agent.name': 'my_agent',
+            'gen_ai.operation.name': 'invoke_agent',
             'logfire.msg': 'my_agent run',
             'logfire.span_type': 'span',
             'final_result': '{"my_ret":"1"}',
@@ -333,6 +334,7 @@ async def my_ret(x: int) -> str:
             'model_name': 'test',
             'agent_name': 'my_agent',
             'gen_ai.agent.name': 'my_agent',
+            'gen_ai.operation.name': 'invoke_agent',
             'logfire.msg': 'my_agent run',
             'logfire.span_type': 'span',
             'gen_ai.usage.input_tokens': 103,
@@ -595,6 +597,7 @@ class MyOutput:
             'model_name': 'test',
             'agent_name': 'my_agent',
             'gen_ai.agent.name': 'my_agent',
+            'gen_ai.operation.name': 'invoke_agent',
             'logfire.msg': 'my_agent run',
             'logfire.span_type': 'span',
             'gen_ai.usage.input_tokens': 51,
@@ -714,6 +717,7 @@ class MyOutput:
             'model_name': 'test',
             'agent_name': 'my_agent',
             'gen_ai.agent.name': 'my_agent',
+            'gen_ai.operation.name': 'invoke_agent',
             'logfire.msg': 'my_agent run',
             'logfire.span_type': 'span',
             'final_result': '{"content": "a"}',
@@ -805,6 +809,7 @@ class MyOutput:
             'model_name': 'test',
             'agent_name': 'my_agent',
             'gen_ai.agent.name': 'my_agent',
+            'gen_ai.operation.name': 'invoke_agent',
             'logfire.msg': 'my_agent run',
             'logfire.span_type': 'span',
             'gen_ai.usage.input_tokens': 51,
@@ -923,6 +928,7 @@ class MyOutput:
             'model_name': 'test',
             'agent_name': 'my_agent',
             'gen_ai.agent.name': 'my_agent',
+            'gen_ai.operation.name': 'invoke_agent',
             'logfire.msg': 'my_agent run',
             'logfire.span_type': 'span',
             'gen_ai.usage.input_tokens': 51,
@@ -1150,6 +1156,7 @@ async def test_feedback(capfire: CaptureLogfire) -> None:
             'model_name': 'test',
             'agent_name': 'agent',
             'gen_ai.agent.name': 'agent',
+            'gen_ai.operation.name': 'invoke_agent',
             'logfire.msg': 'agent run',
             'logfire.span_type': 'span',
             'gen_ai.usage.input_tokens': 51,
@@ -1241,6 +1248,8 @@ async def add_numbers(x: int, y: int) -> int:
         {
             'gen_ai.tool.name': 'add_numbers',
             'gen_ai.tool.call.id': IsStr(),
+            'gen_ai.operation.name': 'execute_tool',
+            'gen_ai.tool.type': 'function',
             'tool_arguments': '{"x":42,"y":42}',
             'logfire.msg': 'running tool: add_numbers',
             'logfire.json_schema': IsJson(
@@ -1270,6 +1279,8 @@ async def add_numbers(x: int, y: int) -> int:
         {
             'gen_ai.tool.name': 'add_numbers',
             'gen_ai.tool.call.id': IsStr(),
+            'gen_ai.operation.name': 'execute_tool',
+            'gen_ai.tool.type': 'function',
             'tool_arguments': '{"x":42,"y":42}',
             'tool_response': '84',
             'logfire.msg': 'running tool: add_numbers',
@@ -1295,6 +1306,8 @@ async def add_numbers(x: int, y: int) -> int:
         {
             'gen_ai.tool.name': 'add_numbers',
             'gen_ai.tool.call.id': IsStr(),
+            'gen_ai.operation.name': 'execute_tool',
+            'gen_ai.tool.type': 'function',
             'logfire.msg': 'running tool: add_numbers',
             'logfire.json_schema': IsJson(
                 snapshot(
@@ -1316,6 +1329,8 @@ async def add_numbers(x: int, y: int) -> int:
         {
             'gen_ai.tool.name': 'add_numbers',
             'gen_ai.tool.call.id': IsStr(),
+            'gen_ai.operation.name': 'execute_tool',
+            'gen_ai.tool.type': 'function',
             'logfire.msg': 'running tool: add_numbers',
             'logfire.json_schema': IsJson(
                 snapshot(
@@ -1393,6 +1408,8 @@ def call_tool(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
     assert output_function_attributes == snapshot(
         {
             'gen_ai.tool.name': 'final_result',
+            'gen_ai.operation.name': 'execute_tool',
+            'gen_ai.tool.type': 'function',
             'logfire.msg': 'running output function: final_result',
             'gen_ai.tool.call.id': IsStr(),
             'tool_arguments': '{"city":"Mexico City"}',
@@ -1443,6 +1460,8 @@ def call_tool(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
             'logfire.msg': 'running output function: final_result',
             'gen_ai.tool.call.id': IsStr(),
             'gen_ai.tool.call.arguments': '{"city":"Mexico City"}',
+            'gen_ai.operation.name': 'execute_tool',
+            'gen_ai.tool.type': 'function',
             'logfire.json_schema': IsJson(
                 snapshot(
                     {
@@ -1493,6 +1512,8 @@ def call_tool(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
     assert output_function_attributes == snapshot(
         {
             'gen_ai.tool.name': 'final_result',
+            'gen_ai.operation.name': 'execute_tool',
+            'gen_ai.tool.type': 'function',
             'gen_ai.tool.call.id': IsStr(),
             'tool_arguments': '{"city":"Mexico City"}',
             'logfire.msg': 'running output function: final_result',
@@ -1517,6 +1538,8 @@ def call_tool(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
     assert output_function_attributes == snapshot(
         {
             'gen_ai.tool.name': 'final_result',
+            'gen_ai.operation.name': 'execute_tool',
+            'gen_ai.tool.type': 'function',
             'gen_ai.tool.call.id': IsStr(),
             'logfire.msg': 'running output function: final_result',
             'logfire.json_schema': '{"type": "object", "properties": {"gen_ai.tool.name": {}, "gen_ai.tool.call.id": {}}}',
@@ -1557,6 +1580,8 @@ def call_tool(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
     assert output_function_attributes == snapshot(
         {
             'gen_ai.tool.name': 'final_result',
+            'gen_ai.operation.name': 'execute_tool',
+            'gen_ai.tool.type': 'function',
             'gen_ai.tool.call.id': IsStr(),
             'tool_arguments': '{"city":"Mexico City"}',
             'logfire.msg': 'running output function: final_result',
@@ -1581,6 +1606,8 @@ def call_tool(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
     assert output_function_attributes == snapshot(
         {
             'gen_ai.tool.name': 'final_result',
+            'gen_ai.operation.name': 'execute_tool',
+            'gen_ai.tool.type': 'function',
             'gen_ai.tool.call.id': IsStr(),
             'logfire.msg': 'running output function: final_result',
'logfire.json_schema': '{"type": "object", "properties": {"gen_ai.tool.name": {}, "gen_ai.tool.call.id": {}}}', @@ -1629,6 +1656,8 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: [ { 'gen_ai.tool.name': 'final_result', + 'gen_ai.operation.name': 'execute_tool', + 'gen_ai.tool.type': 'function', 'logfire.msg': 'running output function: final_result', 'gen_ai.tool.call.id': IsStr(), 'tool_arguments': '{"city":"New York City"}', @@ -1650,6 +1679,8 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: }, { 'gen_ai.tool.name': 'final_result', + 'gen_ai.operation.name': 'execute_tool', + 'gen_ai.tool.type': 'function', 'logfire.msg': 'running output function: final_result', 'gen_ai.tool.call.id': IsStr(), 'tool_arguments': '{"city":"Mexico City"}', @@ -1676,6 +1707,8 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: [ { 'gen_ai.tool.name': 'final_result', + 'gen_ai.operation.name': 'execute_tool', + 'gen_ai.tool.type': 'function', 'logfire.msg': 'running output function: final_result', 'gen_ai.tool.call.id': IsStr(), 'logfire.json_schema': IsJson( @@ -1686,6 +1719,8 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: }, { 'gen_ai.tool.name': 'final_result', + 'gen_ai.operation.name': 'execute_tool', + 'gen_ai.tool.type': 'function', 'logfire.msg': 'running output function: final_result', 'gen_ai.tool.call.id': IsStr(), 'logfire.json_schema': IsJson( @@ -1727,6 +1762,8 @@ def call_tool(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: assert output_function_attributes == snapshot( { 'gen_ai.tool.name': 'get_weather', + 'gen_ai.operation.name': 'execute_tool', + 'gen_ai.tool.type': 'function', 'gen_ai.tool.call.id': IsStr(), 'tool_arguments': '{"city":"Mexico City"}', 'logfire.msg': 'running output function: get_weather', @@ -1751,6 +1788,8 @@ def call_tool(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: assert output_function_attributes == snapshot( { 'gen_ai.tool.name': 'get_weather', + 'gen_ai.operation.name': 'execute_tool', + 'gen_ai.tool.type': 'function', 'gen_ai.tool.call.id': IsStr(), 'logfire.msg': 'running output function: get_weather', 'logfire.json_schema': IsJson( @@ -1798,6 +1837,8 @@ def call_tool(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: assert output_function_attributes == snapshot( { 'gen_ai.tool.name': 'final_result', + 'gen_ai.operation.name': 'execute_tool', + 'gen_ai.tool.type': 'function', 'gen_ai.tool.call.id': IsStr(), 'tool_arguments': '{"city":"Mexico City"}', 'logfire.msg': 'running output function: final_result', @@ -1822,6 +1863,8 @@ def call_tool(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: assert output_function_attributes == snapshot( { 'gen_ai.tool.name': 'final_result', + 'gen_ai.operation.name': 'execute_tool', + 'gen_ai.tool.type': 'function', 'gen_ai.tool.call.id': IsStr(), 'logfire.msg': 'running output function: final_result', 'logfire.json_schema': IsJson( @@ -1870,6 +1913,8 @@ def call_tool(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: assert output_function_attributes == snapshot( { 'gen_ai.tool.name': 'final_result', + 'gen_ai.operation.name': 'execute_tool', + 'gen_ai.tool.type': 'function', 'gen_ai.tool.call.id': IsStr(), 'tool_arguments': '{"city":"Mexico City"}', 'logfire.msg': 'running output function: final_result', @@ -1894,6 +1939,8 @@ def call_tool(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: assert output_function_attributes == snapshot( { 
             'gen_ai.tool.name': 'final_result',
+            'gen_ai.operation.name': 'execute_tool',
+            'gen_ai.tool.type': 'function',
             'gen_ai.tool.call.id': IsStr(),
             'logfire.msg': 'running output function: final_result',
             'logfire.json_schema': IsJson(
@@ -1937,6 +1984,8 @@ def call_tool(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
     assert output_function_attributes == snapshot(
         {
             'gen_ai.tool.name': 'final_result',
+            'gen_ai.operation.name': 'execute_tool',
+            'gen_ai.tool.type': 'function',
             'gen_ai.tool.call.id': IsStr(),
             'tool_arguments': '{"city":"Mexico City"}',
             'logfire.msg': 'running output function: final_result',
@@ -1961,6 +2010,8 @@ def call_tool(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
     assert output_function_attributes == snapshot(
         {
             'gen_ai.tool.name': 'final_result',
+            'gen_ai.operation.name': 'execute_tool',
+            'gen_ai.tool.type': 'function',
             'gen_ai.tool.call.id': IsStr(),
             'logfire.msg': 'running output function: final_result',
             'logfire.json_schema': IsJson(
@@ -2009,6 +2060,8 @@ def call_text_response(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
     assert text_function_attributes == snapshot(
         {
             'gen_ai.tool.name': 'upcase_text',
+            'gen_ai.operation.name': 'execute_tool',
+            'gen_ai.tool.type': 'function',
             'tool_arguments': '{"text":"hello world"}',
             'logfire.msg': 'running output function: upcase_text',
             'logfire.json_schema': IsJson(
@@ -2031,6 +2084,8 @@ def call_text_response(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
     assert text_function_attributes == snapshot(
         {
             'gen_ai.tool.name': 'upcase_text',
+            'gen_ai.operation.name': 'execute_tool',
+            'gen_ai.tool.type': 'function',
             'logfire.msg': 'running output function: upcase_text',
             'logfire.json_schema': IsJson(snapshot({'type': 'object', 'properties': {'gen_ai.tool.name': {}}})),
             'logfire.span_type': 'span',
@@ -2081,6 +2136,8 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
     assert output_function_attributes == snapshot(
         {
             'gen_ai.tool.name': 'upcase_text',
+            'gen_ai.operation.name': 'execute_tool',
+            'gen_ai.tool.type': 'function',
             'tool_arguments': '{"text":"hello world"}',
             'logfire.msg': 'running output function: upcase_text',
             'logfire.json_schema': IsJson(
@@ -2103,6 +2160,8 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
     assert output_function_attributes == snapshot(
         {
             'gen_ai.tool.name': 'upcase_text',
+            'gen_ai.operation.name': 'execute_tool',
+            'gen_ai.tool.type': 'function',
             'logfire.msg': 'running output function: upcase_text',
             'logfire.json_schema': IsJson(snapshot({'type': 'object', 'properties': {'gen_ai.tool.name': {}}})),
             'logfire.span_type': 'span',
@@ -2153,6 +2212,8 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
         [
             {
                 'gen_ai.tool.name': 'get_weather_with_retry',
+                'gen_ai.operation.name': 'execute_tool',
+                'gen_ai.tool.type': 'function',
                 'tool_arguments': '{"city":"New York City"}',
                 'logfire.msg': 'running output function: get_weather_with_retry',
                 'logfire.json_schema': IsJson(
@@ -2172,6 +2233,8 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
             },
             {
                 'gen_ai.tool.name': 'get_weather_with_retry',
+                'gen_ai.operation.name': 'execute_tool',
+                'gen_ai.tool.type': 'function',
                 'tool_arguments': '{"city":"Mexico City"}',
                 'logfire.msg': 'running output function: get_weather_with_retry',
                 'logfire.json_schema': IsJson(
@@ -2196,6 +2259,8 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
         [
             {
                 'gen_ai.tool.name': 'get_weather_with_retry',
+                'gen_ai.operation.name': 'execute_tool',
+                'gen_ai.tool.type': 'function',
                 'logfire.msg': 'running output function: get_weather_with_retry',
                 'logfire.json_schema': IsJson(snapshot({'type': 'object', 'properties': {'gen_ai.tool.name': {}}})),
                 'logfire.span_type': 'span',
@@ -2203,6 +2268,8 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
             },
             {
                 'gen_ai.tool.name': 'get_weather_with_retry',
+                'gen_ai.operation.name': 'execute_tool',
+                'gen_ai.tool.type': 'function',
                 'logfire.msg': 'running output function: get_weather_with_retry',
                 'logfire.json_schema': IsJson(snapshot({'type': 'object', 'properties': {'gen_ai.tool.name': {}}})),
                 'logfire.span_type': 'span',
@@ -2240,6 +2307,7 @@ def instructions():
             'model_name': 'test',
             'agent_name': 'my_agent',
             'gen_ai.agent.name': 'my_agent',
+            'gen_ai.operation.name': 'invoke_agent',
             'logfire.msg': 'my_agent run',
             'logfire.span_type': 'span',
             'gen_ai.usage.input_tokens': 51,
@@ -2359,6 +2427,7 @@ def instructions():
             'model_name': 'test',
             'agent_name': 'my_agent',
             'gen_ai.agent.name': 'my_agent',
+            'gen_ai.operation.name': 'invoke_agent',
             'logfire.msg': 'my_agent run',
             'logfire.span_type': 'span',
             'final_result': '{"content": "a"}',
@@ -2464,6 +2533,7 @@ def my_tool() -> str:
             'model_name': 'test',
             'agent_name': 'my_agent',
             'gen_ai.agent.name': 'my_agent',
+            'gen_ai.operation.name': 'invoke_agent',
             'logfire.msg': 'my_agent run',
             'logfire.span_type': 'span',
             'gen_ai.usage.input_tokens': 107,
@@ -2623,6 +2693,7 @@ def my_tool() -> str:
             'model_name': 'test',
             'agent_name': 'my_agent',
             'gen_ai.agent.name': 'my_agent',
+            'gen_ai.operation.name': 'invoke_agent',
             'logfire.msg': 'my_agent run',
             'logfire.span_type': 'span',
             'final_result': '{"content": "a"}',
@@ -2755,6 +2826,7 @@ def instructions(ctx: RunContext[None]):
             'model_name': 'test',
             'agent_name': 'my_agent',
             'gen_ai.agent.name': 'my_agent',
+            'gen_ai.operation.name': 'invoke_agent',
             'logfire.msg': 'my_agent run',
             'logfire.span_type': 'span',
             'gen_ai.usage.input_tokens': 52,
@@ -2900,6 +2972,7 @@ def instructions(ctx: RunContext[None]):
             'model_name': 'test',
             'agent_name': 'my_agent',
             'gen_ai.agent.name': 'my_agent',
+            'gen_ai.operation.name': 'invoke_agent',
             'logfire.msg': 'my_agent run',
             'logfire.span_type': 'span',
             'final_result': '{"content": "a"}',
@@ -3008,6 +3081,7 @@ def instructions(ctx: RunContext[None]):
             'model_name': 'test',
             'agent_name': 'my_agent',
             'gen_ai.agent.name': 'my_agent',
+            'gen_ai.operation.name': 'invoke_agent',
             'logfire.msg': 'my_agent run',
             'logfire.span_type': 'span',
             'final_result': 'success (no tool calls)',
@@ -3104,6 +3178,7 @@ def instructions(ctx: RunContext[None]):
             'model_name': 'test',
             'agent_name': 'my_agent',
             'gen_ai.agent.name': 'my_agent',
+            'gen_ai.operation.name': 'invoke_agent',
             'logfire.msg': 'my_agent run',
             'logfire.span_type': 'span',
             'final_result': 'success (no tool calls)',
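
Reviewer notes (editorial commentary, not part of the patch):

The schema change replaces `MediaUrlPart`/`BinaryDataPart`/`ThinkingPart` with the spec's `uri`/`blob`/`reasoning` part shapes. A minimal sketch of what the new parts look like on the wire, written as plain dicts matching the TypedDicts in `_otel_messages.py` (all values are made-up examples):

```python
from pydantic_ai import _otel_messages

# A URL-based image now serializes as a 'uri' part with a 'modality',
# instead of the old {'type': 'image-url', 'url': ...} shape.
uri_part: _otel_messages.UriPart = {
    'type': 'uri',
    'uri': 'https://example.com/cat.png',  # only emitted when include_content is enabled
    'modality': 'image',
}

# Inline binary data becomes a 'blob' part; the payload is base64-encoded and is
# only emitted when both include_content and include_binary_content are enabled.
blob_part: _otel_messages.BlobPart = {
    'type': 'blob',
    'blob': 'iVBORw0KGgo=',  # made-up base64 payload
    'modality': 'image',
}

# Thinking parts are emitted as 'reasoning' rather than 'thinking'.
reasoning_part: _otel_messages.ReasoningPart = {
    'type': 'reasoning',
    'content': 'Comparing the two options...',
}
```

For `BinaryContent`/`FilePart`, the modality is derived from the media-type category and falls back to the raw media type string when no known category matches: `image/png` maps to `'image'`, while something like `text/csv` (a hypothetical example) would pass through as `'text/csv'`.

The instrumentation changes also stamp the spec's `gen_ai.operation.name` and `gen_ai.tool.type` attributes onto the relevant spans. A sketch of the resulting attribute sets, mirroring the test snapshots above (ids and names illustrative):

```python
# Agent run spans now carry the 'invoke_agent' operation name.
agent_span_attributes = {
    'agent_name': 'my_agent',
    'gen_ai.agent.name': 'my_agent',
    'gen_ai.operation.name': 'invoke_agent',  # new in this change
}

# Tool and output-function execution spans carry 'execute_tool' plus a tool type.
tool_span_attributes = {
    'gen_ai.tool.name': 'add_numbers',
    'gen_ai.tool.call.id': 'call_abc123',  # made-up id
    'gen_ai.operation.name': 'execute_tool',  # new in this change
    'gen_ai.tool.type': 'function',  # new in this change
}
```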