diff --git a/logfire/_internal/integrations/llm_providers/anthropic.py b/logfire/_internal/integrations/llm_providers/anthropic.py index 35c03df7e..0c356421b 100644 --- a/logfire/_internal/integrations/llm_providers/anthropic.py +++ b/logfire/_internal/integrations/llm_providers/anthropic.py @@ -1,12 +1,25 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING, Any, cast import anthropic -from anthropic.types import Message, TextBlock, TextDelta +from anthropic.types import Message, TextBlock, TextDelta, ToolUseBlock from logfire._internal.utils import handle_internal_errors +from .semconv import ( + INPUT_MESSAGES, + INPUT_TOKENS, + OPERATION_NAME, + OUTPUT_MESSAGES, + OUTPUT_TOKENS, + PROVIDER_NAME, + REQUEST_MODEL, + RESPONSE_FINISH_REASONS, + RESPONSE_ID, + RESPONSE_MODEL, + SYSTEM_INSTRUCTIONS, +) from .types import EndpointConfig, StreamState if TYPE_CHECKING: @@ -29,19 +42,170 @@ def get_endpoint_config(options: FinalRequestOptions) -> EndpointConfig: if not isinstance(json_data, dict): # pragma: no cover # Ensure that `{request_data[model]!r}` doesn't raise an error, just a warning about `model` missing. json_data = {} + json_data = cast('dict[str, Any]', json_data) if url == '/v1/messages': + span_data: dict[str, Any] = { + 'request_data': json_data, + PROVIDER_NAME: 'anthropic', + OPERATION_NAME: 'chat', + REQUEST_MODEL: json_data.get('model'), + } + + # Convert messages to semantic convention format + messages: list[dict[str, Any]] = json_data.get('messages', []) + system: str | list[dict[str, Any]] | None = json_data.get('system') + if messages or system: + input_messages, system_instructions = convert_anthropic_messages_to_semconv(messages, system) + span_data[INPUT_MESSAGES] = input_messages + if system_instructions: + span_data[SYSTEM_INSTRUCTIONS] = system_instructions + return EndpointConfig( message_template='Message with {request_data[model]!r}', - span_data={'request_data': json_data}, + span_data=span_data, stream_state_cls=AnthropicMessageStreamState, ) else: + span_data = { + 'request_data': json_data, + 'url': url, + PROVIDER_NAME: 'anthropic', + } + if 'model' in json_data: + span_data[REQUEST_MODEL] = json_data['model'] return EndpointConfig( message_template='Anthropic API call to {url!r}', - span_data={'request_data': json_data, 'url': url}, + span_data=span_data, + ) + + +def convert_anthropic_messages_to_semconv( + messages: list[dict[str, Any]], + system: str | list[dict[str, Any]] | None = None, +) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]: + """Convert Anthropic messages format to OTel Gen AI Semantic Convention format. + + Returns a tuple of (input_messages, system_instructions). 
+ """ + input_messages: list[dict[str, Any]] = [] + system_instructions: list[dict[str, Any]] = [] + + # Handle system parameter (Anthropic uses a separate 'system' parameter) + if system: + if isinstance(system, str): + system_instructions.append({'type': 'text', 'content': system}) + else: + for part in system: + if part.get('type') == 'text': + system_instructions.append({'type': 'text', 'content': part.get('text', '')}) + else: + system_instructions.append(part) + + for msg in messages: + role = msg.get('role', 'unknown') + content = msg.get('content') + + parts: list[dict[str, Any]] = [] + + if content is not None: + if isinstance(content, str): + parts.append({'type': 'text', 'content': content}) + elif isinstance(content, list): + for part in cast('list[dict[str, Any] | str]', content): + parts.append(_convert_anthropic_content_part(part)) + + input_messages.append( + { + 'role': role, + 'parts': parts, + } ) + return input_messages, system_instructions + + +def _convert_anthropic_content_part(part: dict[str, Any] | str) -> dict[str, Any]: + """Convert a single Anthropic content part to semconv format.""" + if isinstance(part, str): + return {'type': 'text', 'content': part} + + part_type = part.get('type', 'text') + if part_type == 'text': + return {'type': 'text', 'content': part.get('text', '')} + elif part_type == 'image': + source = part.get('source', {}) + if source.get('type') == 'base64': + return { + 'type': 'blob', + 'modality': 'image', + 'content': source.get('data', ''), + 'media_type': source.get('media_type'), + } + elif source.get('type') == 'url': + return {'type': 'uri', 'modality': 'image', 'uri': source.get('url', '')} + else: + return {'type': 'image', **part} + elif part_type == 'tool_use': + return { + 'type': 'tool_call', + 'id': part.get('id'), + 'name': part.get('name'), + 'arguments': part.get('input'), + } + elif part_type == 'tool_result': + result_content = part.get('content') + if isinstance(result_content, list): + # Extract text from tool result content + text_parts: list[str] = [] + for p in cast('list[dict[str, Any] | str]', result_content): + if isinstance(p, dict) and p.get('type') == 'text': + text_parts.append(str(p.get('text', ''))) + elif isinstance(p, str): + text_parts.append(p) + result_text = ' '.join(text_parts) + else: + result_text = str(result_content) if result_content else '' + return { + 'type': 'tool_call_response', + 'id': part.get('tool_use_id'), + 'response': result_text, + } + else: + # Return as generic part + return {'type': part_type, **{k: v for k, v in part.items() if k != 'type'}} + + +def convert_anthropic_response_to_semconv(message: Message) -> dict[str, Any]: + """Convert an Anthropic response message to OTel Gen AI Semantic Convention format.""" + parts: list[dict[str, Any]] = [] + + for block in message.content: + if isinstance(block, TextBlock): + parts.append({'type': 'text', 'content': block.text}) + elif isinstance(block, ToolUseBlock): + parts.append( + { + 'type': 'tool_call', + 'id': block.id, + 'name': block.name, + 'arguments': block.input, + } + ) + elif hasattr(block, 'type'): + # Handle other block types generically + block_dict = block.model_dump() if hasattr(block, 'model_dump') else dict(block) + parts.append(_convert_anthropic_content_part(block_dict)) + + result: dict[str, Any] = { + 'role': message.role, + 'parts': parts, + } + if message.stop_reason: + result['finish_reason'] = message.stop_reason + + return result + def content_from_messages(chunk: anthropic.types.MessageStreamEvent) -> str | 
None: if hasattr(chunk, 'content_block'): @@ -68,6 +232,7 @@ def get_response_data(self) -> Any: def on_response(response: ResponseT, span: LogfireSpan) -> ResponseT: """Updates the span based on the type of response.""" if isinstance(response, Message): # pragma: no branch + # Keep response_data for backward compatibility message: dict[str, Any] = {'role': 'assistant'} for block in response.content: if block.type == 'text': @@ -82,6 +247,24 @@ def on_response(response: ResponseT, span: LogfireSpan) -> ResponseT: } ) span.set_attribute('response_data', {'message': message, 'usage': response.usage}) + + # Add semantic convention attributes + span.set_attribute(RESPONSE_MODEL, response.model) + span.set_attribute(RESPONSE_ID, response.id) + + # Add token usage + if response.usage: + span.set_attribute(INPUT_TOKENS, response.usage.input_tokens) + span.set_attribute(OUTPUT_TOKENS, response.usage.output_tokens) + + # Add finish reason + if response.stop_reason: + span.set_attribute(RESPONSE_FINISH_REASONS, [response.stop_reason]) + + # Add semantic convention output messages + output_message = convert_anthropic_response_to_semconv(response) + span.set_attribute(OUTPUT_MESSAGES, [output_message]) + return response diff --git a/logfire/_internal/integrations/llm_providers/openai.py b/logfire/_internal/integrations/llm_providers/openai.py index 40c53d8bd..d3a9d0b36 100644 --- a/logfire/_internal/integrations/llm_providers/openai.py +++ b/logfire/_internal/integrations/llm_providers/openai.py @@ -18,6 +18,19 @@ from logfire import LogfireSpan from ...utils import handle_internal_errors, log_internal_error +from .semconv import ( + INPUT_MESSAGES, + INPUT_TOKENS, + OPERATION_NAME, + OUTPUT_MESSAGES, + OUTPUT_TOKENS, + PROVIDER_NAME, + REQUEST_MODEL, + RESPONSE_FINISH_REASONS, + RESPONSE_ID, + RESPONSE_MODEL, + SYSTEM_INSTRUCTIONS, +) from .types import EndpointConfig, StreamState if TYPE_CHECKING: @@ -41,61 +54,227 @@ def get_endpoint_config(options: FinalRequestOptions) -> EndpointConfig: if not isinstance(json_data, dict): # pragma: no cover # Ensure that `{request_data[model]!r}` doesn't raise an error, just a warning about `model` missing. 
json_data = {} + json_data = cast('dict[str, Any]', json_data) if url == '/chat/completions': if is_current_agent_span('Chat completion with {gen_ai.request.model!r}'): return EndpointConfig(message_template='', span_data={}) + span_data: dict[str, Any] = { + 'request_data': json_data, + PROVIDER_NAME: 'openai', + OPERATION_NAME: 'chat', + REQUEST_MODEL: json_data.get('model'), + } + # Convert messages to semantic convention format + messages: list[dict[str, Any]] = json_data.get('messages', []) + if messages: + input_messages, system_instructions = convert_openai_messages_to_semconv(messages) + span_data[INPUT_MESSAGES] = input_messages + if system_instructions: + span_data[SYSTEM_INSTRUCTIONS] = system_instructions + return EndpointConfig( message_template='Chat Completion with {request_data[model]!r}', - span_data={'request_data': json_data, 'gen_ai.request.model': json_data['model']}, + span_data=span_data, stream_state_cls=OpenaiChatCompletionStreamState, ) elif url == '/responses': if is_current_agent_span('Responses API', 'Responses API with {gen_ai.request.model!r}'): return EndpointConfig(message_template='', span_data={}) - stream = json_data.get('stream', False) # type: ignore - span_data: dict[str, Any] = { - 'gen_ai.request.model': json_data['model'], - 'request_data': {'model': json_data['model'], 'stream': stream}, + stream = json_data.get('stream', False) + span_data = { + PROVIDER_NAME: 'openai', + OPERATION_NAME: 'chat', + REQUEST_MODEL: json_data.get('model'), + 'request_data': {'model': json_data.get('model'), 'stream': stream}, 'events': inputs_to_events( - json_data['input'], # type: ignore - json_data.get('instructions'), # type: ignore + json_data.get('input'), + json_data.get('instructions'), ), } return EndpointConfig( - message_template='Responses API with {gen_ai.request.model!r}', + message_template='Responses API with {request_data[model]!r}', span_data=span_data, stream_state_cls=OpenaiResponsesStreamState, ) elif url == '/completions': + span_data = { + 'request_data': json_data, + PROVIDER_NAME: 'openai', + OPERATION_NAME: 'text_completion', + REQUEST_MODEL: json_data.get('model'), + } return EndpointConfig( message_template='Completion with {request_data[model]!r}', - span_data={'request_data': json_data, 'gen_ai.request.model': json_data['model']}, + span_data=span_data, stream_state_cls=OpenaiCompletionStreamState, ) elif url == '/embeddings': + span_data = { + 'request_data': json_data, + PROVIDER_NAME: 'openai', + OPERATION_NAME: 'embeddings', + REQUEST_MODEL: json_data.get('model'), + } return EndpointConfig( message_template='Embedding Creation with {request_data[model]!r}', - span_data={'request_data': json_data, 'gen_ai.request.model': json_data['model']}, + span_data=span_data, ) elif url == '/images/generations': + span_data = { + 'request_data': json_data, + PROVIDER_NAME: 'openai', + OPERATION_NAME: 'generate_content', + REQUEST_MODEL: json_data.get('model'), + } return EndpointConfig( message_template='Image Generation with {request_data[model]!r}', - span_data={'request_data': json_data, 'gen_ai.request.model': json_data['model']}, + span_data=span_data, ) else: - span_data = {'request_data': json_data, 'url': url} + span_data = {'request_data': json_data, 'url': url, PROVIDER_NAME: 'openai'} if 'model' in json_data: - span_data['gen_ai.request.model'] = json_data['model'] + span_data[REQUEST_MODEL] = json_data['model'] return EndpointConfig( message_template='OpenAI API call to {url!r}', span_data=span_data, ) +def 
convert_openai_messages_to_semconv( + messages: list[dict[str, Any]], +) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]: + """Convert OpenAI messages format to OTel Gen AI Semantic Convention format. + + Returns a tuple of (input_messages, system_instructions). + """ + input_messages: list[dict[str, Any]] = [] + system_instructions: list[dict[str, Any]] = [] + + for msg in messages: + role = msg.get('role', 'unknown') + content = msg.get('content') + + if role == 'system': + # System messages go to system_instructions + if isinstance(content, str): + system_instructions.append({'type': 'text', 'content': content}) + elif isinstance(content, list): + for part in cast('list[dict[str, Any] | str]', content): + system_instructions.append(_convert_content_part(part)) + continue + + # Build the message with parts + parts: list[dict[str, Any]] = [] + + if content is not None: + if isinstance(content, str): + parts.append({'type': 'text', 'content': content}) + elif isinstance(content, list): + for part in cast('list[dict[str, Any] | str]', content): + parts.append(_convert_content_part(part)) + + # Handle tool calls from assistant messages + tool_calls = msg.get('tool_calls') + if tool_calls: + for tc in tool_calls: + function = tc.get('function', {}) + arguments = function.get('arguments') + if isinstance(arguments, str): + with contextlib.suppress(json.JSONDecodeError): + arguments = json.loads(arguments) + parts.append( + { + 'type': 'tool_call', + 'id': tc.get('id'), + 'name': function.get('name'), + 'arguments': arguments, + } + ) + + # Handle tool message (tool response) + tool_call_id = msg.get('tool_call_id') + if role == 'tool' and tool_call_id: + # For tool messages, the content is the response, not text content + # Clear text parts and add tool_call_response instead + parts = [p for p in parts if p.get('type') != 'text'] + parts.append( + { + 'type': 'tool_call_response', + 'id': tool_call_id, + 'response': content, + } + ) + + input_messages.append( + { + 'role': role, + 'parts': parts, + **({'name': msg.get('name')} if msg.get('name') else {}), + } + ) + + return input_messages, system_instructions + + +def _convert_content_part(part: dict[str, Any] | str) -> dict[str, Any]: + """Convert a single content part to semconv format.""" + if isinstance(part, str): + return {'type': 'text', 'content': part} + + part_type = part.get('type', 'text') + if part_type == 'text': + return {'type': 'text', 'content': part.get('text', '')} + elif part_type == 'image_url': + url = part.get('image_url', {}).get('url', '') + return {'type': 'uri', 'modality': 'image', 'uri': url} + elif part_type in ('input_audio', 'audio'): + return {'type': 'blob', 'modality': 'audio', 'content': part.get('data', '')} + else: + # Return as generic part + return {'type': part_type, **{k: v for k, v in part.items() if k != 'type'}} + + +def convert_openai_response_to_semconv( + message: Any, + finish_reason: str | None = None, +) -> dict[str, Any]: + """Convert an OpenAI response message to OTel Gen AI Semantic Convention format.""" + parts: list[dict[str, Any]] = [] + + if hasattr(message, 'content') and message.content: + parts.append({'type': 'text', 'content': message.content}) + + if hasattr(message, 'tool_calls') and message.tool_calls: + for tc in message.tool_calls: + function = tc.function if hasattr(tc, 'function') else tc.get('function', {}) + func_name = function.name if hasattr(function, 'name') else function.get('name') + func_args = function.arguments if hasattr(function, 'arguments') else 
function.get('arguments') + if isinstance(func_args, str): + with contextlib.suppress(json.JSONDecodeError): + func_args = json.loads(func_args) + parts.append( + { + 'type': 'tool_call', + 'id': tc.id if hasattr(tc, 'id') else tc.get('id'), + 'name': func_name, + 'arguments': func_args, + } + ) + + result: dict[str, Any] = { + 'role': message.role if hasattr(message, 'role') else message.get('role', 'assistant'), + 'parts': parts, + } + if finish_reason: + result['finish_reason'] = finish_reason + + return result + + def is_current_agent_span(*span_names: str): current_span = get_current_span() return ( @@ -183,10 +362,11 @@ def on_response(response: ResponseT, span: LogfireSpan) -> ResponseT: on_response(response.parse(), span) # type: ignore return cast('ResponseT', response) + # Keep gen_ai.system for backward compatibility span.set_attribute('gen_ai.system', 'openai') if isinstance(response_model := getattr(response, 'model', None), str): - span.set_attribute('gen_ai.response.model', response_model) + span.set_attribute(RESPONSE_MODEL, response_model) try: from genai_prices import calc_price, extract_usage @@ -204,25 +384,59 @@ def on_response(response: ResponseT, span: LogfireSpan) -> ResponseT: except Exception: pass + # Set response ID + response_id = getattr(response, 'id', None) + if isinstance(response_id, str): + span.set_attribute(RESPONSE_ID, response_id) + usage = getattr(response, 'usage', None) input_tokens = getattr(usage, 'prompt_tokens', getattr(usage, 'input_tokens', None)) output_tokens = getattr(usage, 'completion_tokens', getattr(usage, 'output_tokens', None)) if isinstance(input_tokens, int): - span.set_attribute('gen_ai.usage.input_tokens', input_tokens) + span.set_attribute(INPUT_TOKENS, input_tokens) if isinstance(output_tokens, int): - span.set_attribute('gen_ai.usage.output_tokens', output_tokens) + span.set_attribute(OUTPUT_TOKENS, output_tokens) if isinstance(response, ChatCompletion) and response.choices: + # Keep response_data for backward compatibility span.set_attribute( 'response_data', {'message': response.choices[0].message, 'usage': usage}, ) + # Add semantic convention output messages + output_messages: list[dict[str, Any]] = [] + finish_reasons: list[str] = [] + for choice in response.choices: + finish_reason = choice.finish_reason + if finish_reason: + finish_reasons.append(finish_reason) + output_messages.append(convert_openai_response_to_semconv(choice.message, finish_reason)) + span.set_attribute(OUTPUT_MESSAGES, output_messages) + if finish_reasons: + span.set_attribute(RESPONSE_FINISH_REASONS, finish_reasons) elif isinstance(response, Completion) and response.choices: first_choice = response.choices[0] span.set_attribute( 'response_data', {'finish_reason': first_choice.finish_reason, 'text': first_choice.text, 'usage': usage}, ) + # Add semantic convention output messages for text completion + output_messages_completion: list[dict[str, Any]] = [] + finish_reasons_completion: list[str] = [] + for choice in response.choices: + finish_reason = choice.finish_reason + if finish_reason: + finish_reasons_completion.append(finish_reason) + output_messages_completion.append( + { + 'role': 'assistant', + 'parts': [{'type': 'text', 'content': choice.text}], + 'finish_reason': finish_reason, + } + ) + span.set_attribute(OUTPUT_MESSAGES, output_messages_completion) + if finish_reasons_completion: + span.set_attribute(RESPONSE_FINISH_REASONS, finish_reasons_completion) elif isinstance(response, CreateEmbeddingResponse): span.set_attribute('response_data', 
{'usage': usage}) elif isinstance(response, ImagesResponse): diff --git a/logfire/_internal/integrations/llm_providers/semconv.py b/logfire/_internal/integrations/llm_providers/semconv.py new file mode 100644 index 000000000..0855237ab --- /dev/null +++ b/logfire/_internal/integrations/llm_providers/semconv.py @@ -0,0 +1,28 @@ +"""Gen AI Semantic Convention attribute names. + +These constants follow the OpenTelemetry Gen AI Semantic Conventions. +See: https://opentelemetry.io/docs/specs/semconv/gen-ai/ +""" + +from __future__ import annotations + +# Provider and operation +PROVIDER_NAME = 'gen_ai.provider.name' +OPERATION_NAME = 'gen_ai.operation.name' + +# Model information +REQUEST_MODEL = 'gen_ai.request.model' +RESPONSE_MODEL = 'gen_ai.response.model' + +# Response metadata +RESPONSE_ID = 'gen_ai.response.id' +RESPONSE_FINISH_REASONS = 'gen_ai.response.finish_reasons' + +# Token usage +INPUT_TOKENS = 'gen_ai.usage.input_tokens' +OUTPUT_TOKENS = 'gen_ai.usage.output_tokens' + +# Message content +INPUT_MESSAGES = 'gen_ai.input.messages' +OUTPUT_MESSAGES = 'gen_ai.output.messages' +SYSTEM_INSTRUCTIONS = 'gen_ai.system_instructions' diff --git a/tests/otel_integrations/test_anthropic.py b/tests/otel_integrations/test_anthropic.py index 24cbaca09..237b91649 100644 --- a/tests/otel_integrations/test_anthropic.py +++ b/tests/otel_integrations/test_anthropic.py @@ -160,6 +160,11 @@ def test_sync_messages(instrumented_client: anthropic.Anthropic, exporter: TestE } ) ), + 'gen_ai.provider.name': 'anthropic', + 'gen_ai.operation.name': 'chat', + 'gen_ai.request.model': 'claude-3-haiku-20240307', + 'gen_ai.input.messages': '[{"role":"user","parts":[{"type":"text","content":"What is four plus five?"}]}]', + 'gen_ai.system_instructions': '[{"type":"text","content":"You are a helpful assistant."}]', 'async': False, 'logfire.msg_template': 'Message with {request_data[model]!r}', 'logfire.msg': "Message with 'claude-3-haiku-20240307'", @@ -186,12 +191,22 @@ def test_sync_messages(instrumented_client: anthropic.Anthropic, exporter: TestE } ) ), + 'gen_ai.response.model': 'claude-3-haiku-20240307', + 'gen_ai.response.id': 'test_id', + 'gen_ai.usage.input_tokens': 2, + 'gen_ai.usage.output_tokens': 3, + 'gen_ai.output.messages': '[{"role":"assistant","parts":[{"type":"text","content":"Nine"}]}]', 'logfire.json_schema': IsJson( snapshot( { 'type': 'object', 'properties': { 'request_data': {'type': 'object'}, + 'gen_ai.provider.name': {}, + 'gen_ai.operation.name': {}, + 'gen_ai.request.model': {}, + 'gen_ai.input.messages': {'type': 'array'}, + 'gen_ai.system_instructions': {'type': 'array'}, 'async': {}, 'response_data': { 'type': 'object', @@ -203,6 +218,11 @@ def test_sync_messages(instrumented_client: anthropic.Anthropic, exporter: TestE }, }, }, + 'gen_ai.response.model': {}, + 'gen_ai.response.id': {}, + 'gen_ai.usage.input_tokens': {}, + 'gen_ai.usage.output_tokens': {}, + 'gen_ai.output.messages': {'type': 'array'}, }, } ) @@ -242,6 +262,11 @@ async def test_async_messages(instrumented_async_client: anthropic.AsyncAnthropi 'model': 'claude-3-haiku-20240307', } ), + 'gen_ai.provider.name': 'anthropic', + 'gen_ai.operation.name': 'chat', + 'gen_ai.request.model': 'claude-3-haiku-20240307', + 'gen_ai.input.messages': '[{"role":"user","parts":[{"type":"text","content":"What is four plus five?"}]}]', + 'gen_ai.system_instructions': '[{"type":"text","content":"You are a helpful assistant."}]', 'async': True, 'logfire.msg_template': 'Message with {request_data[model]!r}', 'logfire.msg': "Message with 
'claude-3-haiku-20240307'", @@ -268,11 +293,21 @@ async def test_async_messages(instrumented_async_client: anthropic.AsyncAnthropi } ) ), + 'gen_ai.response.model': 'claude-3-haiku-20240307', + 'gen_ai.response.id': 'test_id', + 'gen_ai.usage.input_tokens': 2, + 'gen_ai.usage.output_tokens': 3, + 'gen_ai.output.messages': '[{"role":"assistant","parts":[{"type":"text","content":"Nine"}]}]', 'logfire.json_schema': IsJson( { 'type': 'object', 'properties': { 'request_data': {'type': 'object'}, + 'gen_ai.provider.name': {}, + 'gen_ai.operation.name': {}, + 'gen_ai.request.model': {}, + 'gen_ai.input.messages': {'type': 'array'}, + 'gen_ai.system_instructions': {'type': 'array'}, 'async': {}, 'response_data': { 'type': 'object', @@ -284,6 +319,11 @@ async def test_async_messages(instrumented_async_client: anthropic.AsyncAnthropi }, }, }, + 'gen_ai.response.model': {}, + 'gen_ai.response.id': {}, + 'gen_ai.usage.input_tokens': {}, + 'gen_ai.usage.output_tokens': {}, + 'gen_ai.output.messages': {'type': 'array'}, }, }, ), @@ -316,12 +356,18 @@ def test_sync_message_empty_response_chunk(instrumented_client: anthropic.Anthro 'code.function': 'test_sync_message_empty_response_chunk', 'code.lineno': 123, 'request_data': '{"max_tokens":1000,"messages":[],"model":"claude-3-haiku-20240307","stream":true,"system":"empty response chunk"}', + 'gen_ai.provider.name': 'anthropic', + 'gen_ai.operation.name': 'chat', + 'gen_ai.request.model': 'claude-3-haiku-20240307', + 'gen_ai.input.messages': '[]', + 'gen_ai.system_instructions': '[{"type":"text","content":"empty response chunk"}]', 'async': False, 'logfire.msg_template': 'Message with {request_data[model]!r}', 'logfire.msg': "Message with 'claude-3-haiku-20240307'", - 'logfire.json_schema': '{"type":"object","properties":{"request_data":{"type":"object"},"async":{}}}', + 'logfire.json_schema': '{"type":"object","properties":{"request_data":{"type":"object"},"gen_ai.provider.name":{},"gen_ai.operation.name":{},"gen_ai.request.model":{},"gen_ai.input.messages":{"type":"array"},"gen_ai.system_instructions":{"type":"array"},"async":{}}}', 'logfire.span_type': 'span', 'logfire.tags': ('LLM',), + 'gen_ai.response.model': 'claude-3-haiku-20240307', }, }, { @@ -340,10 +386,16 @@ def test_sync_message_empty_response_chunk(instrumented_client: anthropic.Anthro 'code.lineno': 123, 'logfire.msg': "streaming response from 'claude-3-haiku-20240307' took 1.00s", 'logfire.span_type': 'log', + 'gen_ai.provider.name': 'anthropic', + 'gen_ai.operation.name': 'chat', + 'gen_ai.request.model': 'claude-3-haiku-20240307', + 'gen_ai.input.messages': '[]', + 'gen_ai.system_instructions': '[{"type":"text","content":"empty response chunk"}]', 'logfire.tags': ('LLM',), 'duration': 1.0, 'response_data': '{"combined_chunk_content":"","chunk_count":0}', - 'logfire.json_schema': '{"type":"object","properties":{"duration":{},"request_data":{"type":"object"},"async":{},"response_data":{"type":"object"}}}', + 'logfire.json_schema': '{"type":"object","properties":{"duration":{},"request_data":{"type":"object"},"gen_ai.provider.name":{},"gen_ai.operation.name":{},"gen_ai.request.model":{},"gen_ai.input.messages":{"type":"array"},"gen_ai.system_instructions":{"type":"array"},"async":{},"response_data":{"type":"object"}}}', + 'gen_ai.response.model': 'claude-3-haiku-20240307', }, }, ] @@ -378,12 +430,18 @@ def test_sync_messages_stream(instrumented_client: anthropic.Anthropic, exporter 'code.function': 'test_sync_messages_stream', 'code.lineno': 123, 'request_data': 
'{"max_tokens":1000,"messages":[{"role":"user","content":"What is four plus five?"}],"model":"claude-3-haiku-20240307","stream":true,"system":"You are a helpful assistant."}', + 'gen_ai.provider.name': 'anthropic', + 'gen_ai.operation.name': 'chat', + 'gen_ai.request.model': 'claude-3-haiku-20240307', + 'gen_ai.input.messages': '[{"role":"user","parts":[{"type":"text","content":"What is four plus five?"}]}]', + 'gen_ai.system_instructions': '[{"type":"text","content":"You are a helpful assistant."}]', 'async': False, 'logfire.msg_template': 'Message with {request_data[model]!r}', 'logfire.msg': "Message with 'claude-3-haiku-20240307'", - 'logfire.json_schema': '{"type":"object","properties":{"request_data":{"type":"object"},"async":{}}}', + 'logfire.json_schema': '{"type":"object","properties":{"request_data":{"type":"object"},"gen_ai.provider.name":{},"gen_ai.operation.name":{},"gen_ai.request.model":{},"gen_ai.input.messages":{"type":"array"},"gen_ai.system_instructions":{"type":"array"},"async":{}}}', 'logfire.span_type': 'span', 'logfire.tags': ('LLM',), + 'gen_ai.response.model': 'claude-3-haiku-20240307', }, }, { @@ -402,10 +460,16 @@ def test_sync_messages_stream(instrumented_client: anthropic.Anthropic, exporter 'code.lineno': 123, 'logfire.msg': "streaming response from 'claude-3-haiku-20240307' took 1.00s", 'logfire.span_type': 'log', + 'gen_ai.provider.name': 'anthropic', + 'gen_ai.operation.name': 'chat', + 'gen_ai.request.model': 'claude-3-haiku-20240307', + 'gen_ai.input.messages': '[{"role":"user","parts":[{"type":"text","content":"What is four plus five?"}]}]', + 'gen_ai.system_instructions': '[{"type":"text","content":"You are a helpful assistant."}]', 'logfire.tags': ('LLM',), 'duration': 1.0, 'response_data': '{"combined_chunk_content":"The answer is secret","chunk_count":2}', - 'logfire.json_schema': '{"type":"object","properties":{"duration":{},"request_data":{"type":"object"},"async":{},"response_data":{"type":"object"}}}', + 'logfire.json_schema': '{"type":"object","properties":{"duration":{},"request_data":{"type":"object"},"gen_ai.provider.name":{},"gen_ai.operation.name":{},"gen_ai.request.model":{},"gen_ai.input.messages":{"type":"array"},"gen_ai.system_instructions":{"type":"array"},"async":{},"response_data":{"type":"object"}}}', + 'gen_ai.response.model': 'claude-3-haiku-20240307', }, }, ] @@ -443,12 +507,18 @@ async def test_async_messages_stream( 'code.function': 'test_async_messages_stream', 'code.lineno': 123, 'request_data': '{"max_tokens":1000,"messages":[{"role":"user","content":"What is four plus five?"}],"model":"claude-3-haiku-20240307","stream":true,"system":"You are a helpful assistant."}', + 'gen_ai.provider.name': 'anthropic', + 'gen_ai.operation.name': 'chat', + 'gen_ai.request.model': 'claude-3-haiku-20240307', + 'gen_ai.input.messages': '[{"role":"user","parts":[{"type":"text","content":"What is four plus five?"}]}]', + 'gen_ai.system_instructions': '[{"type":"text","content":"You are a helpful assistant."}]', 'async': True, 'logfire.msg_template': 'Message with {request_data[model]!r}', 'logfire.msg': "Message with 'claude-3-haiku-20240307'", - 'logfire.json_schema': '{"type":"object","properties":{"request_data":{"type":"object"},"async":{}}}', + 'logfire.json_schema': '{"type":"object","properties":{"request_data":{"type":"object"},"gen_ai.provider.name":{},"gen_ai.operation.name":{},"gen_ai.request.model":{},"gen_ai.input.messages":{"type":"array"},"gen_ai.system_instructions":{"type":"array"},"async":{}}}', 'logfire.span_type': 'span', 
'logfire.tags': ('LLM',), + 'gen_ai.response.model': 'claude-3-haiku-20240307', }, }, { @@ -467,10 +537,16 @@ async def test_async_messages_stream( 'code.lineno': 123, 'logfire.msg': "streaming response from 'claude-3-haiku-20240307' took 1.00s", 'logfire.span_type': 'log', + 'gen_ai.provider.name': 'anthropic', + 'gen_ai.operation.name': 'chat', + 'gen_ai.request.model': 'claude-3-haiku-20240307', + 'gen_ai.input.messages': '[{"role":"user","parts":[{"type":"text","content":"What is four plus five?"}]}]', + 'gen_ai.system_instructions': '[{"type":"text","content":"You are a helpful assistant."}]', 'logfire.tags': ('LLM',), 'duration': 1.0, 'response_data': '{"combined_chunk_content":"The answer is secret","chunk_count":2}', - 'logfire.json_schema': '{"type":"object","properties":{"duration":{},"request_data":{"type":"object"},"async":{},"response_data":{"type":"object"}}}', + 'logfire.json_schema': '{"type":"object","properties":{"duration":{},"request_data":{"type":"object"},"gen_ai.provider.name":{},"gen_ai.operation.name":{},"gen_ai.request.model":{},"gen_ai.input.messages":{"type":"array"},"gen_ai.system_instructions":{"type":"array"},"async":{},"response_data":{"type":"object"}}}', + 'gen_ai.response.model': 'claude-3-haiku-20240307', }, }, ] @@ -504,6 +580,11 @@ def test_tool_messages(instrumented_client: anthropic.Anthropic, exporter: TestE 'model': 'claude-3-haiku-20240307', 'system': 'tool response', }, + 'gen_ai.provider.name': 'anthropic', + 'gen_ai.operation.name': 'chat', + 'gen_ai.request.model': 'claude-3-haiku-20240307', + 'gen_ai.input.messages': [], + 'gen_ai.system_instructions': [{'type': 'text', 'content': 'tool response'}], 'async': False, 'logfire.msg_template': 'Message with {request_data[model]!r}', 'logfire.msg': "Message with 'claude-3-haiku-20240307'", @@ -526,10 +607,27 @@ def test_tool_messages(instrumented_client: anthropic.Anthropic, exporter: TestE } ), }, + 'gen_ai.response.model': 'claude-3-haiku-20240307', + 'gen_ai.response.id': 'test_id', + 'gen_ai.usage.input_tokens': 2, + 'gen_ai.usage.output_tokens': 3, + 'gen_ai.output.messages': [ + { + 'role': 'assistant', + 'parts': [ + {'type': 'tool_call', 'id': 'id', 'name': 'tool', 'arguments': {'param': 'param'}} + ], + } + ], 'logfire.json_schema': { 'type': 'object', 'properties': { 'request_data': {'type': 'object'}, + 'gen_ai.provider.name': {}, + 'gen_ai.operation.name': {}, + 'gen_ai.request.model': {}, + 'gen_ai.input.messages': {'type': 'array'}, + 'gen_ai.system_instructions': {'type': 'array'}, 'async': {}, 'response_data': { 'type': 'object', @@ -537,6 +635,11 @@ def test_tool_messages(instrumented_client: anthropic.Anthropic, exporter: TestE 'usage': {'type': 'object', 'title': 'Usage', 'x-python-datatype': 'PydanticModel'} }, }, + 'gen_ai.response.model': {}, + 'gen_ai.response.id': {}, + 'gen_ai.usage.input_tokens': {}, + 'gen_ai.usage.output_tokens': {}, + 'gen_ai.output.messages': {'type': 'array'}, }, }, }, @@ -562,12 +665,15 @@ def test_unknown_method(instrumented_client: anthropic.Anthropic, exporter: Test 'request_data': '{"max_tokens_to_sample":1000,"model":"claude-2.1","prompt":"prompt"}', 'url': '/v1/complete', 'async': False, + 'gen_ai.provider.name': 'anthropic', + 'gen_ai.request.model': 'claude-2.1', 'logfire.msg_template': 'Anthropic API call to {url!r}', 'logfire.msg': "Anthropic API call to '/v1/complete'", 'code.filepath': 'test_anthropic.py', 'code.function': 'test_unknown_method', 'code.lineno': 123, 'logfire.json_schema': IsStr(), + 'gen_ai.response.model': 'claude-2.1', 
}, } ] diff --git a/tests/otel_integrations/test_openai.py b/tests/otel_integrations/test_openai.py index 7b36ad7ac..82caf6e48 100644 --- a/tests/otel_integrations/test_openai.py +++ b/tests/otel_integrations/test_openai.py @@ -412,76 +412,95 @@ def test_sync_chat_completions(instrumented_client: openai.Client, exporter: Tes 'code.filepath': 'test_openai.py', 'code.function': 'test_sync_chat_completions', 'code.lineno': 123, - 'request_data': ( + 'request_data': { + 'messages': [ + {'role': 'system', 'content': 'You are a helpful assistant.'}, + {'role': 'user', 'content': 'What is four plus five?'}, + ], + 'model': 'gpt-4', + }, + 'gen_ai.provider.name': 'openai', + 'gen_ai.operation.name': 'chat', + 'gen_ai.request.model': 'gpt-4', + 'gen_ai.input.messages': [ { - 'messages': [ - {'role': 'system', 'content': 'You are a helpful assistant.'}, - {'role': 'user', 'content': 'What is four plus five?'}, - ], - 'model': 'gpt-4', + 'role': 'user', + 'parts': [{'type': 'text', 'content': 'What is four plus five?'}], } - ), + ], + 'gen_ai.system_instructions': [{'type': 'text', 'content': 'You are a helpful assistant.'}], 'async': False, 'logfire.msg_template': 'Chat Completion with {request_data[model]!r}', 'logfire.msg': "Chat Completion with 'gpt-4'", 'logfire.span_type': 'span', 'logfire.tags': ('LLM',), 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-4', 'gen_ai.response.model': 'gpt-4', + 'operation.cost': 0.00012, + 'gen_ai.response.id': 'test_id', 'gen_ai.usage.input_tokens': 2, 'gen_ai.usage.output_tokens': 1, - 'operation.cost': 0.00012, - 'response_data': ( + 'response_data': { + 'message': { + 'content': 'Nine', + 'refusal': None, + 'audio': None, + 'annotations': None, + 'role': 'assistant', + 'function_call': None, + 'tool_calls': None, + }, + 'usage': { + 'completion_tokens': 1, + 'prompt_tokens': 2, + 'total_tokens': 3, + 'completion_tokens_details': None, + 'prompt_tokens_details': None, + }, + }, + 'gen_ai.output.messages': [ { - 'message': { - 'content': 'Nine', - 'refusal': None, - 'audio': None, - 'annotations': None, - 'role': 'assistant', - 'function_call': None, - 'tool_calls': None, - }, - 'usage': { - 'completion_tokens': 1, - 'prompt_tokens': 2, - 'total_tokens': 3, - 'completion_tokens_details': None, - 'prompt_tokens_details': None, - }, + 'role': 'assistant', + 'parts': [{'type': 'text', 'content': 'Nine'}], + 'finish_reason': 'stop', } - ), - 'logfire.json_schema': ( - { - 'type': 'object', - 'properties': { - 'request_data': {'type': 'object'}, - 'async': {}, - 'gen_ai.system': {}, - 'gen_ai.request.model': {}, - 'gen_ai.response.model': {}, - 'gen_ai.usage.input_tokens': {}, - 'gen_ai.usage.output_tokens': {}, - 'operation.cost': {}, - 'response_data': { - 'type': 'object', - 'properties': { - 'message': { - 'type': 'object', - 'title': 'ChatCompletionMessage', - 'x-python-datatype': 'PydanticModel', - }, - 'usage': { - 'type': 'object', - 'title': 'CompletionUsage', - 'x-python-datatype': 'PydanticModel', - }, + ], + 'gen_ai.response.finish_reasons': ['stop'], + 'logfire.json_schema': { + 'type': 'object', + 'properties': { + 'request_data': {'type': 'object'}, + 'gen_ai.provider.name': {}, + 'gen_ai.operation.name': {}, + 'gen_ai.request.model': {}, + 'gen_ai.input.messages': {'type': 'array'}, + 'gen_ai.system_instructions': {'type': 'array'}, + 'async': {}, + 'gen_ai.system': {}, + 'gen_ai.response.model': {}, + 'operation.cost': {}, + 'gen_ai.response.id': {}, + 'gen_ai.usage.input_tokens': {}, + 'gen_ai.usage.output_tokens': {}, + 'response_data': 
{ + 'type': 'object', + 'properties': { + 'message': { + 'type': 'object', + 'title': 'ChatCompletionMessage', + 'x-python-datatype': 'PydanticModel', + }, + 'usage': { + 'type': 'object', + 'title': 'CompletionUsage', + 'x-python-datatype': 'PydanticModel', }, }, }, - } - ), + 'gen_ai.output.messages': {'type': 'array'}, + 'gen_ai.response.finish_reasons': {'type': 'array'}, + }, + }, }, } ] @@ -509,76 +528,95 @@ async def test_async_chat_completions(instrumented_async_client: openai.AsyncCli 'code.filepath': 'test_openai.py', 'code.function': 'test_async_chat_completions', 'code.lineno': 123, - 'request_data': ( + 'request_data': { + 'messages': [ + {'role': 'system', 'content': 'You are a helpful assistant.'}, + {'role': 'user', 'content': 'What is four plus five?'}, + ], + 'model': 'gpt-4', + }, + 'gen_ai.provider.name': 'openai', + 'gen_ai.operation.name': 'chat', + 'gen_ai.request.model': 'gpt-4', + 'gen_ai.input.messages': [ { - 'messages': [ - {'role': 'system', 'content': 'You are a helpful assistant.'}, - {'role': 'user', 'content': 'What is four plus five?'}, - ], - 'model': 'gpt-4', + 'role': 'user', + 'parts': [{'type': 'text', 'content': 'What is four plus five?'}], } - ), + ], + 'gen_ai.system_instructions': [{'type': 'text', 'content': 'You are a helpful assistant.'}], 'async': True, 'logfire.msg_template': 'Chat Completion with {request_data[model]!r}', 'logfire.msg': "Chat Completion with 'gpt-4'", 'logfire.span_type': 'span', 'logfire.tags': ('LLM',), 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-4', 'gen_ai.response.model': 'gpt-4', + 'operation.cost': 0.00012, + 'gen_ai.response.id': 'test_id', 'gen_ai.usage.input_tokens': 2, 'gen_ai.usage.output_tokens': 1, - 'operation.cost': 0.00012, - 'response_data': ( + 'response_data': { + 'message': { + 'content': 'Nine', + 'refusal': None, + 'audio': None, + 'annotations': None, + 'role': 'assistant', + 'function_call': None, + 'tool_calls': None, + }, + 'usage': { + 'completion_tokens': 1, + 'prompt_tokens': 2, + 'total_tokens': 3, + 'completion_tokens_details': None, + 'prompt_tokens_details': None, + }, + }, + 'gen_ai.output.messages': [ { - 'message': { - 'content': 'Nine', - 'refusal': None, - 'audio': None, - 'annotations': None, - 'role': 'assistant', - 'function_call': None, - 'tool_calls': None, - }, - 'usage': { - 'completion_tokens': 1, - 'prompt_tokens': 2, - 'total_tokens': 3, - 'completion_tokens_details': None, - 'prompt_tokens_details': None, - }, + 'role': 'assistant', + 'parts': [{'type': 'text', 'content': 'Nine'}], + 'finish_reason': 'stop', } - ), - 'logfire.json_schema': ( - { - 'type': 'object', - 'properties': { - 'request_data': {'type': 'object'}, - 'async': {}, - 'gen_ai.system': {}, - 'gen_ai.request.model': {}, - 'gen_ai.response.model': {}, - 'gen_ai.usage.input_tokens': {}, - 'gen_ai.usage.output_tokens': {}, - 'operation.cost': {}, - 'response_data': { - 'type': 'object', - 'properties': { - 'message': { - 'type': 'object', - 'title': 'ChatCompletionMessage', - 'x-python-datatype': 'PydanticModel', - }, - 'usage': { - 'type': 'object', - 'title': 'CompletionUsage', - 'x-python-datatype': 'PydanticModel', - }, + ], + 'gen_ai.response.finish_reasons': ['stop'], + 'logfire.json_schema': { + 'type': 'object', + 'properties': { + 'request_data': {'type': 'object'}, + 'gen_ai.provider.name': {}, + 'gen_ai.operation.name': {}, + 'gen_ai.request.model': {}, + 'gen_ai.input.messages': {'type': 'array'}, + 'gen_ai.system_instructions': {'type': 'array'}, + 'async': {}, + 'gen_ai.system': {}, + 
'gen_ai.response.model': {}, + 'operation.cost': {}, + 'gen_ai.response.id': {}, + 'gen_ai.usage.input_tokens': {}, + 'gen_ai.usage.output_tokens': {}, + 'response_data': { + 'type': 'object', + 'properties': { + 'message': { + 'type': 'object', + 'title': 'ChatCompletionMessage', + 'x-python-datatype': 'PydanticModel', + }, + 'usage': { + 'type': 'object', + 'title': 'CompletionUsage', + 'x-python-datatype': 'PydanticModel', }, }, }, - } - ), + 'gen_ai.output.messages': {'type': 'array'}, + 'gen_ai.response.finish_reasons': {'type': 'array'}, + }, + }, }, } ] @@ -610,13 +648,25 @@ def test_sync_chat_empty_response_chunk(instrumented_client: openai.Client, expo 'model': 'gpt-4', 'stream': True, }, + 'gen_ai.provider.name': 'openai', + 'gen_ai.operation.name': 'chat', 'gen_ai.request.model': 'gpt-4', + 'gen_ai.input.messages': [], + 'gen_ai.system_instructions': [{'type': 'text', 'content': 'empty response chunk'}], 'async': False, 'logfire.msg_template': 'Chat Completion with {request_data[model]!r}', 'logfire.msg': "Chat Completion with 'gpt-4'", 'logfire.json_schema': { 'type': 'object', - 'properties': {'request_data': {'type': 'object'}, 'gen_ai.request.model': {}, 'async': {}}, + 'properties': { + 'request_data': {'type': 'object'}, + 'gen_ai.provider.name': {}, + 'gen_ai.operation.name': {}, + 'gen_ai.request.model': {}, + 'gen_ai.input.messages': {'type': 'array'}, + 'gen_ai.system_instructions': {'type': 'array'}, + 'async': {}, + }, }, 'logfire.span_type': 'span', 'logfire.tags': ('LLM',), @@ -643,7 +693,11 @@ def test_sync_chat_empty_response_chunk(instrumented_client: openai.Client, expo 'code.lineno': 123, 'logfire.msg': "streaming response from 'gpt-4' took 1.00s", 'gen_ai.request.model': 'gpt-4', + 'gen_ai.provider.name': 'openai', + 'gen_ai.operation.name': 'chat', 'logfire.span_type': 'log', + 'gen_ai.input.messages': [], + 'gen_ai.system_instructions': [{'type': 'text', 'content': 'empty response chunk'}], 'logfire.tags': ('LLM',), 'duration': 1.0, 'response_data': {'combined_chunk_content': '', 'chunk_count': 0}, @@ -652,7 +706,11 @@ def test_sync_chat_empty_response_chunk(instrumented_client: openai.Client, expo 'properties': { 'request_data': {'type': 'object'}, 'gen_ai.request.model': {}, + 'gen_ai.provider.name': {}, + 'gen_ai.operation.name': {}, 'async': {}, + 'gen_ai.input.messages': {'type': 'array'}, + 'gen_ai.system_instructions': {'type': 'array'}, 'duration': {}, 'response_data': {'type': 'object'}, }, @@ -690,13 +748,25 @@ def test_sync_chat_empty_response_choices(instrumented_client: openai.Client, ex 'model': 'gpt-4', 'stream': True, }, + 'gen_ai.provider.name': 'openai', + 'gen_ai.operation.name': 'chat', 'gen_ai.request.model': 'gpt-4', + 'gen_ai.input.messages': [], + 'gen_ai.system_instructions': [{'type': 'text', 'content': 'empty choices in response chunk'}], 'async': False, 'logfire.msg_template': 'Chat Completion with {request_data[model]!r}', 'logfire.msg': "Chat Completion with 'gpt-4'", 'logfire.json_schema': { 'type': 'object', - 'properties': {'request_data': {'type': 'object'}, 'gen_ai.request.model': {}, 'async': {}}, + 'properties': { + 'request_data': {'type': 'object'}, + 'gen_ai.provider.name': {}, + 'gen_ai.operation.name': {}, + 'gen_ai.request.model': {}, + 'gen_ai.input.messages': {'type': 'array'}, + 'gen_ai.system_instructions': {'type': 'array'}, + 'async': {}, + }, }, 'logfire.span_type': 'span', 'logfire.tags': ('LLM',), @@ -723,7 +793,11 @@ def test_sync_chat_empty_response_choices(instrumented_client: openai.Client, ex 
'code.lineno': 123, 'logfire.msg': "streaming response from 'gpt-4' took 1.00s", 'gen_ai.request.model': 'gpt-4', + 'gen_ai.provider.name': 'openai', + 'gen_ai.operation.name': 'chat', 'logfire.span_type': 'log', + 'gen_ai.input.messages': [], + 'gen_ai.system_instructions': [{'type': 'text', 'content': 'empty choices in response chunk'}], 'logfire.tags': ('LLM',), 'duration': 1.0, 'response_data': {'message': None, 'usage': None}, @@ -732,7 +806,11 @@ def test_sync_chat_empty_response_choices(instrumented_client: openai.Client, ex 'properties': { 'request_data': {'type': 'object'}, 'gen_ai.request.model': {}, + 'gen_ai.provider.name': {}, + 'gen_ai.operation.name': {}, 'async': {}, + 'gen_ai.input.messages': {'type': 'array'}, + 'gen_ai.system_instructions': {'type': 'array'}, 'duration': {}, 'response_data': {'type': 'object'}, }, @@ -820,13 +898,25 @@ def test_sync_chat_tool_call_stream(instrumented_client: openai.Client, exporter } ], }, + 'gen_ai.provider.name': 'openai', + 'gen_ai.operation.name': 'chat', 'gen_ai.request.model': 'gpt-4', + 'gen_ai.input.messages': [], + 'gen_ai.system_instructions': [{'type': 'text', 'content': 'streamed tool call'}], 'async': False, 'logfire.msg_template': 'Chat Completion with {request_data[model]!r}', 'logfire.msg': "Chat Completion with 'gpt-4'", 'logfire.json_schema': { 'type': 'object', - 'properties': {'request_data': {'type': 'object'}, 'gen_ai.request.model': {}, 'async': {}}, + 'properties': { + 'request_data': {'type': 'object'}, + 'gen_ai.provider.name': {}, + 'gen_ai.operation.name': {}, + 'gen_ai.request.model': {}, + 'gen_ai.input.messages': {'type': 'array'}, + 'gen_ai.system_instructions': {'type': 'array'}, + 'async': {}, + }, }, 'logfire.tags': ('LLM',), 'logfire.span_type': 'span', @@ -875,7 +965,11 @@ def test_sync_chat_tool_call_stream(instrumented_client: openai.Client, exporter ], }, 'gen_ai.request.model': 'gpt-4', + 'gen_ai.provider.name': 'openai', + 'gen_ai.operation.name': 'chat', 'async': False, + 'gen_ai.input.messages': [], + 'gen_ai.system_instructions': [{'type': 'text', 'content': 'streamed tool call'}], 'duration': 1.0, 'response_data': { 'message': { @@ -912,7 +1006,11 @@ def test_sync_chat_tool_call_stream(instrumented_client: openai.Client, exporter 'properties': { 'request_data': {'type': 'object'}, 'gen_ai.request.model': {}, + 'gen_ai.provider.name': {}, + 'gen_ai.operation.name': {}, 'async': {}, + 'gen_ai.input.messages': {'type': 'array'}, + 'gen_ai.system_instructions': {'type': 'array'}, 'duration': {}, 'response_data': { 'type': 'object', @@ -1036,13 +1134,25 @@ async def test_async_chat_tool_call_stream( } ], }, + 'gen_ai.provider.name': 'openai', + 'gen_ai.operation.name': 'chat', 'gen_ai.request.model': 'gpt-4', + 'gen_ai.input.messages': [], + 'gen_ai.system_instructions': [{'type': 'text', 'content': 'streamed tool call'}], 'async': True, 'logfire.msg_template': 'Chat Completion with {request_data[model]!r}', 'logfire.msg': "Chat Completion with 'gpt-4'", 'logfire.json_schema': { 'type': 'object', - 'properties': {'request_data': {'type': 'object'}, 'gen_ai.request.model': {}, 'async': {}}, + 'properties': { + 'request_data': {'type': 'object'}, + 'gen_ai.provider.name': {}, + 'gen_ai.operation.name': {}, + 'gen_ai.request.model': {}, + 'gen_ai.input.messages': {'type': 'array'}, + 'gen_ai.system_instructions': {'type': 'array'}, + 'async': {}, + }, }, 'logfire.tags': ('LLM',), 'logfire.span_type': 'span', @@ -1091,7 +1201,11 @@ async def test_async_chat_tool_call_stream( ], }, 
'gen_ai.request.model': 'gpt-4', + 'gen_ai.provider.name': 'openai', + 'gen_ai.operation.name': 'chat', 'async': True, + 'gen_ai.input.messages': [], + 'gen_ai.system_instructions': [{'type': 'text', 'content': 'streamed tool call'}], 'duration': 1.0, 'response_data': { 'message': { @@ -1128,7 +1242,11 @@ async def test_async_chat_tool_call_stream( 'properties': { 'request_data': {'type': 'object'}, 'gen_ai.request.model': {}, + 'gen_ai.provider.name': {}, + 'gen_ai.operation.name': {}, 'async': {}, + 'gen_ai.input.messages': {'type': 'array'}, + 'gen_ai.system_instructions': {'type': 'array'}, 'duration': {}, 'response_data': { 'type': 'object', @@ -1203,13 +1321,27 @@ def test_sync_chat_completions_stream(instrumented_client: openai.Client, export 'model': 'gpt-4', 'stream': True, }, + 'gen_ai.provider.name': 'openai', + 'gen_ai.operation.name': 'chat', 'gen_ai.request.model': 'gpt-4', + 'gen_ai.input.messages': [ + {'role': 'user', 'parts': [{'type': 'text', 'content': 'What is four plus five?'}]} + ], + 'gen_ai.system_instructions': [{'type': 'text', 'content': 'You are a helpful assistant.'}], 'async': False, 'logfire.msg_template': 'Chat Completion with {request_data[model]!r}', 'logfire.msg': "Chat Completion with 'gpt-4'", 'logfire.json_schema': { 'type': 'object', - 'properties': {'request_data': {'type': 'object'}, 'gen_ai.request.model': {}, 'async': {}}, + 'properties': { + 'request_data': {'type': 'object'}, + 'gen_ai.provider.name': {}, + 'gen_ai.operation.name': {}, + 'gen_ai.request.model': {}, + 'gen_ai.input.messages': {'type': 'array'}, + 'gen_ai.system_instructions': {'type': 'array'}, + 'async': {}, + }, }, 'logfire.span_type': 'span', 'logfire.tags': ('LLM',), @@ -1239,7 +1371,13 @@ def test_sync_chat_completions_stream(instrumented_client: openai.Client, export 'code.lineno': 123, 'logfire.msg': "streaming response from 'gpt-4' took 1.00s", 'gen_ai.request.model': 'gpt-4', + 'gen_ai.provider.name': 'openai', + 'gen_ai.operation.name': 'chat', 'logfire.span_type': 'log', + 'gen_ai.input.messages': [ + {'role': 'user', 'parts': [{'type': 'text', 'content': 'What is four plus five?'}]} + ], + 'gen_ai.system_instructions': [{'type': 'text', 'content': 'You are a helpful assistant.'}], 'logfire.tags': ('LLM',), 'duration': 1.0, 'response_data': { @@ -1260,7 +1398,11 @@ def test_sync_chat_completions_stream(instrumented_client: openai.Client, export 'properties': { 'request_data': {'type': 'object'}, 'gen_ai.request.model': {}, + 'gen_ai.provider.name': {}, + 'gen_ai.operation.name': {}, 'async': {}, + 'gen_ai.input.messages': {'type': 'array'}, + 'gen_ai.system_instructions': {'type': 'array'}, 'duration': {}, 'response_data': { 'type': 'object', @@ -1315,13 +1457,27 @@ async def test_async_chat_completions_stream( 'model': 'gpt-4', 'stream': True, }, + 'gen_ai.provider.name': 'openai', + 'gen_ai.operation.name': 'chat', 'gen_ai.request.model': 'gpt-4', + 'gen_ai.input.messages': [ + {'role': 'user', 'parts': [{'type': 'text', 'content': 'What is four plus five?'}]} + ], + 'gen_ai.system_instructions': [{'type': 'text', 'content': 'You are a helpful assistant.'}], 'async': True, 'logfire.msg_template': 'Chat Completion with {request_data[model]!r}', 'logfire.msg': "Chat Completion with 'gpt-4'", 'logfire.json_schema': { 'type': 'object', - 'properties': {'request_data': {'type': 'object'}, 'gen_ai.request.model': {}, 'async': {}}, + 'properties': { + 'request_data': {'type': 'object'}, + 'gen_ai.provider.name': {}, + 'gen_ai.operation.name': {}, + 'gen_ai.request.model': 
{}, + 'gen_ai.input.messages': {'type': 'array'}, + 'gen_ai.system_instructions': {'type': 'array'}, + 'async': {}, + }, }, 'logfire.span_type': 'span', 'logfire.tags': ('LLM',), @@ -1351,7 +1507,13 @@ async def test_async_chat_completions_stream( 'code.lineno': 123, 'logfire.msg': "streaming response from 'gpt-4' took 1.00s", 'gen_ai.request.model': 'gpt-4', + 'gen_ai.provider.name': 'openai', + 'gen_ai.operation.name': 'chat', 'logfire.span_type': 'log', + 'gen_ai.input.messages': [ + {'role': 'user', 'parts': [{'type': 'text', 'content': 'What is four plus five?'}]} + ], + 'gen_ai.system_instructions': [{'type': 'text', 'content': 'You are a helpful assistant.'}], 'logfire.tags': ('LLM',), 'duration': 1.0, 'response_data': { @@ -1372,7 +1534,11 @@ async def test_async_chat_completions_stream( 'properties': { 'request_data': {'type': 'object'}, 'gen_ai.request.model': {}, + 'gen_ai.provider.name': {}, + 'gen_ai.operation.name': {}, 'async': {}, + 'gen_ai.input.messages': {'type': 'array'}, + 'gen_ai.system_instructions': {'type': 'array'}, 'duration': {}, 'response_data': { 'type': 'object', @@ -1412,6 +1578,8 @@ def test_completions(instrumented_client: openai.Client, exporter: TestExporter) 'code.function': 'test_completions', 'code.lineno': 123, 'request_data': {'model': 'gpt-3.5-turbo-instruct', 'prompt': 'What is four plus five?'}, + 'gen_ai.provider.name': 'openai', + 'gen_ai.operation.name': 'text_completion', 'async': False, 'logfire.msg_template': 'Completion with {request_data[model]!r}', 'logfire.msg': "Completion with 'gpt-3.5-turbo-instruct'", @@ -1421,6 +1589,7 @@ def test_completions(instrumented_client: openai.Client, exporter: TestExporter) 'gen_ai.request.model': 'gpt-3.5-turbo-instruct', 'gen_ai.response.model': 'gpt-3.5-turbo-instruct', 'gen_ai.usage.input_tokens': 2, + 'gen_ai.response.id': 'test_id', 'gen_ai.usage.output_tokens': 1, 'operation.cost': 5e-06, 'response_data': { @@ -1434,15 +1603,22 @@ def test_completions(instrumented_client: openai.Client, exporter: TestExporter) 'prompt_tokens_details': None, }, }, + 'gen_ai.output.messages': [ + {'role': 'assistant', 'parts': [{'type': 'text', 'content': 'Nine'}], 'finish_reason': 'stop'} + ], + 'gen_ai.response.finish_reasons': ['stop'], 'logfire.json_schema': { 'type': 'object', 'properties': { 'request_data': {'type': 'object'}, + 'gen_ai.provider.name': {}, + 'gen_ai.operation.name': {}, 'async': {}, 'gen_ai.system': {}, 'gen_ai.request.model': {}, 'gen_ai.response.model': {}, 'gen_ai.usage.input_tokens': {}, + 'gen_ai.response.id': {}, 'gen_ai.usage.output_tokens': {}, 'operation.cost': {}, 'response_data': { @@ -1455,6 +1631,8 @@ def test_completions(instrumented_client: openai.Client, exporter: TestExporter) } }, }, + 'gen_ai.output.messages': {'type': 'array'}, + 'gen_ai.response.finish_reasons': {'type': 'array'}, }, }, }, @@ -1480,7 +1658,7 @@ def test_responses_stream(exporter: TestExporter) -> None: assert exporter.exported_spans_as_dict(parse_json_attributes=True) == snapshot( [ { - 'name': 'Responses API with {gen_ai.request.model!r}', + 'name': 'Responses API with {request_data[model]!r}', 'context': {'trace_id': 1, 'span_id': 1, 'is_remote': False}, 'parent': None, 'start_time': 1000000000, @@ -1489,17 +1667,21 @@ def test_responses_stream(exporter: TestExporter) -> None: 'code.filepath': 'test_openai.py', 'code.function': 'test_responses_stream', 'code.lineno': 123, + 'gen_ai.provider.name': 'openai', + 'gen_ai.operation.name': 'chat', 'request_data': {'model': 'gpt-4.1', 'stream': True}, 
'gen_ai.request.model': 'gpt-4.1', 'events': [ {'event.name': 'gen_ai.user.message', 'content': 'What is four plus five?', 'role': 'user'} ], 'async': False, - 'logfire.msg_template': 'Responses API with {gen_ai.request.model!r}', + 'logfire.msg_template': 'Responses API with {request_data[model]!r}', 'logfire.msg': "Responses API with 'gpt-4.1'", 'logfire.json_schema': { 'type': 'object', 'properties': { + 'gen_ai.provider.name': {}, + 'gen_ai.operation.name': {}, 'request_data': {'type': 'object'}, 'gen_ai.request.model': {}, 'events': {'type': 'array'}, @@ -1526,6 +1708,8 @@ def test_responses_stream(exporter: TestExporter) -> None: 'code.function': 'test_responses_stream', 'code.lineno': 123, 'request_data': {'model': 'gpt-4.1', 'stream': True}, + 'gen_ai.provider.name': 'openai', + 'gen_ai.operation.name': 'chat', 'gen_ai.request.model': 'gpt-4.1', 'async': False, 'duration': 1.0, @@ -1545,6 +1729,8 @@ def test_responses_stream(exporter: TestExporter) -> None: 'type': 'object', 'properties': { 'request_data': {'type': 'object'}, + 'gen_ai.provider.name': {}, + 'gen_ai.operation.name': {}, 'gen_ai.request.model': {}, 'async': {}, 'events': {'type': 'array'}, @@ -1584,13 +1770,21 @@ def test_completions_stream(instrumented_client: openai.Client, exporter: TestEx 'prompt': 'What is four plus five?', 'stream': True, }, + 'gen_ai.provider.name': 'openai', + 'gen_ai.operation.name': 'text_completion', 'gen_ai.request.model': 'gpt-3.5-turbo-instruct', 'async': False, 'logfire.msg_template': 'Completion with {request_data[model]!r}', 'logfire.msg': "Completion with 'gpt-3.5-turbo-instruct'", 'logfire.json_schema': { 'type': 'object', - 'properties': {'request_data': {'type': 'object'}, 'gen_ai.request.model': {}, 'async': {}}, + 'properties': { + 'request_data': {'type': 'object'}, + 'gen_ai.provider.name': {}, + 'gen_ai.operation.name': {}, + 'gen_ai.request.model': {}, + 'async': {}, + }, }, 'logfire.span_type': 'span', 'logfire.tags': ('LLM',), @@ -1617,6 +1811,8 @@ def test_completions_stream(instrumented_client: openai.Client, exporter: TestEx 'code.lineno': 123, 'logfire.msg': "streaming response from 'gpt-3.5-turbo-instruct' took 1.00s", 'gen_ai.request.model': 'gpt-3.5-turbo-instruct', + 'gen_ai.provider.name': 'openai', + 'gen_ai.operation.name': 'text_completion', 'logfire.span_type': 'log', 'logfire.tags': ('LLM',), 'duration': 1.0, @@ -1626,6 +1822,8 @@ def test_completions_stream(instrumented_client: openai.Client, exporter: TestEx 'properties': { 'request_data': {'type': 'object'}, 'gen_ai.request.model': {}, + 'gen_ai.provider.name': {}, + 'gen_ai.operation.name': {}, 'async': {}, 'duration': {}, 'response_data': {'type': 'object'}, @@ -1661,6 +1859,8 @@ def test_embeddings(instrumented_client: openai.Client, exporter: TestExporter) 'model': 'text-embedding-3-small', 'encoding_format': 'base64', }, + 'gen_ai.provider.name': 'openai', + 'gen_ai.operation.name': 'embeddings', 'async': False, 'logfire.msg_template': 'Embedding Creation with {request_data[model]!r}', 'logfire.msg': "Embedding Creation with 'text-embedding-3-small'", @@ -1675,6 +1875,8 @@ def test_embeddings(instrumented_client: openai.Client, exporter: TestExporter) 'type': 'object', 'properties': { 'request_data': {'type': 'object'}, + 'gen_ai.provider.name': {}, + 'gen_ai.operation.name': {}, 'async': {}, 'gen_ai.system': {}, 'gen_ai.request.model': {}, @@ -1715,6 +1917,8 @@ def test_images(instrumented_client: openai.Client, exporter: TestExporter) -> N 'code.function': 'test_images', 'code.lineno': 123, 
                     'request_data': {'prompt': 'A picture of a cat.', 'model': 'dall-e-3'},
+                    'gen_ai.provider.name': 'openai',
+                    'gen_ai.operation.name': 'generate_content',
                     'gen_ai.request.model': 'dall-e-3',
                     'async': False,
                     'logfire.msg_template': 'Image Generation with {request_data[model]!r}',
@@ -1735,6 +1939,8 @@ def test_images(instrumented_client: openai.Client, exporter: TestExporter) -> N
                         'type': 'object',
                         'properties': {
                             'request_data': {'type': 'object'},
+                            'gen_ai.provider.name': {},
+                            'gen_ai.operation.name': {},
                             'gen_ai.request.model': {},
                             'async': {},
                             'gen_ai.system': {},
@@ -1840,6 +2046,8 @@ def test_dont_suppress_httpx(exporter: TestExporter) -> None:
                     'code.function': 'test_dont_suppress_httpx',
                     'code.lineno': 123,
                     'request_data': {'model': 'gpt-3.5-turbo-instruct', 'prompt': 'xxx'},
+                    'gen_ai.provider.name': 'openai',
+                    'gen_ai.operation.name': 'text_completion',
                     'async': False,
                     'logfire.msg_template': 'Completion with {request_data[model]!r}',
                     'logfire.msg': "Completion with 'gpt-3.5-turbo-instruct'",
@@ -1849,6 +2057,7 @@ def test_dont_suppress_httpx(exporter: TestExporter) -> None:
                     'gen_ai.request.model': 'gpt-3.5-turbo-instruct',
                     'gen_ai.response.model': 'gpt-3.5-turbo-instruct',
                     'gen_ai.usage.input_tokens': 2,
+                    'gen_ai.response.id': 'test_id',
                     'gen_ai.usage.output_tokens': 1,
                     'operation.cost': 5e-06,
                     'response_data': {
@@ -1862,15 +2071,22 @@ def test_dont_suppress_httpx(exporter: TestExporter) -> None:
                             'prompt_tokens_details': None,
                         },
                     },
+                    'gen_ai.output.messages': [
+                        {'role': 'assistant', 'parts': [{'type': 'text', 'content': 'Nine'}], 'finish_reason': 'stop'}
+                    ],
+                    'gen_ai.response.finish_reasons': ['stop'],
                     'logfire.json_schema': {
                         'type': 'object',
                         'properties': {
                             'request_data': {'type': 'object'},
+                            'gen_ai.provider.name': {},
+                            'gen_ai.operation.name': {},
                             'async': {},
                             'gen_ai.system': {},
                             'gen_ai.request.model': {},
                             'gen_ai.response.model': {},
                             'gen_ai.usage.input_tokens': {},
+                            'gen_ai.response.id': {},
                             'gen_ai.usage.output_tokens': {},
                             'operation.cost': {},
                             'response_data': {
@@ -1883,6 +2099,8 @@ def test_dont_suppress_httpx(exporter: TestExporter) -> None:
                                     }
                                 },
                             },
+                            'gen_ai.output.messages': {'type': 'array'},
+                            'gen_ai.response.finish_reasons': {'type': 'array'},
                         },
                     },
                     'logfire.metrics': {
@@ -1947,6 +2165,8 @@ def test_suppress_httpx(exporter: TestExporter) -> None:
                     'code.function': 'test_suppress_httpx',
                     'code.lineno': 123,
                     'request_data': {'model': 'gpt-3.5-turbo-instruct', 'prompt': 'xxx'},
+                    'gen_ai.provider.name': 'openai',
+                    'gen_ai.operation.name': 'text_completion',
                     'async': False,
                     'logfire.msg_template': 'Completion with {request_data[model]!r}',
                     'logfire.msg': "Completion with 'gpt-3.5-turbo-instruct'",
@@ -1956,6 +2176,7 @@ def test_suppress_httpx(exporter: TestExporter) -> None:
                     'gen_ai.request.model': 'gpt-3.5-turbo-instruct',
                     'gen_ai.response.model': 'gpt-3.5-turbo-instruct',
                     'gen_ai.usage.input_tokens': 2,
+                    'gen_ai.response.id': 'test_id',
                     'gen_ai.usage.output_tokens': 1,
                     'operation.cost': 5e-06,
                     'response_data': {
@@ -1969,15 +2190,22 @@ def test_suppress_httpx(exporter: TestExporter) -> None:
                             'prompt_tokens_details': None,
                         },
                     },
+                    'gen_ai.output.messages': [
+                        {'role': 'assistant', 'parts': [{'type': 'text', 'content': 'Nine'}], 'finish_reason': 'stop'}
+                    ],
+                    'gen_ai.response.finish_reasons': ['stop'],
                     'logfire.json_schema': {
                         'type': 'object',
                         'properties': {
                             'request_data': {'type': 'object'},
+                            'gen_ai.provider.name': {},
+                            'gen_ai.operation.name': {},
                             'async': {},
                             'gen_ai.system': {},
                             'gen_ai.request.model': {},
                             'gen_ai.response.model': {},
                             'gen_ai.usage.input_tokens': {},
+                            'gen_ai.response.id': {},
                             'gen_ai.usage.output_tokens': {},
                             'operation.cost': {},
                             'response_data': {
@@ -1990,6 +2218,8 @@ def test_suppress_httpx(exporter: TestExporter) -> None:
                                     }
                                 },
                             },
+                            'gen_ai.output.messages': {'type': 'array'},
+                            'gen_ai.response.finish_reasons': {'type': 'array'},
                         },
                     },
                 },
@@ -2039,15 +2269,24 @@ def test_create_files(instrumented_client: openai.Client, exporter: TestExporter
                     'request_data': {'purpose': 'fine-tune'},
                     'url': '/files',
                     'async': False,
+                    'gen_ai.provider.name': 'openai',
                     'logfire.msg_template': 'OpenAI API call to {url!r}',
                     'logfire.msg': "OpenAI API call to '/files'",
                     'code.filepath': 'test_openai.py',
                     'code.function': 'test_create_files',
                     'code.lineno': 123,
                     'gen_ai.system': 'openai',
+                    'gen_ai.response.id': 'test_id',
                     'logfire.json_schema': {
                         'type': 'object',
-                        'properties': {'request_data': {'type': 'object'}, 'url': {}, 'async': {}, 'gen_ai.system': {}},
+                        'properties': {
+                            'request_data': {'type': 'object'},
+                            'url': {},
+                            'gen_ai.provider.name': {},
+                            'async': {},
+                            'gen_ai.system': {},
+                            'gen_ai.response.id': {},
+                        },
                     },
                 },
            }
@@ -2072,15 +2311,24 @@ async def test_create_files_async(instrumented_async_client: openai.AsyncClient,
                     'request_data': {'purpose': 'fine-tune'},
                     'url': '/files',
                     'async': True,
+                    'gen_ai.provider.name': 'openai',
                     'logfire.msg_template': 'OpenAI API call to {url!r}',
                     'logfire.msg': "OpenAI API call to '/files'",
                     'code.filepath': 'test_openai.py',
                     'code.function': 'test_create_files_async',
                     'code.lineno': 123,
                     'gen_ai.system': 'openai',
+                    'gen_ai.response.id': 'test_id',
                     'logfire.json_schema': {
                         'type': 'object',
-                        'properties': {'request_data': {'type': 'object'}, 'url': {}, 'async': {}, 'gen_ai.system': {}},
+                        'properties': {
+                            'request_data': {'type': 'object'},
+                            'url': {},
+                            'gen_ai.provider.name': {},
+                            'async': {},
+                            'gen_ai.system': {},
+                            'gen_ai.response.id': {},
+                        },
                     },
                 },
            }
@@ -2117,6 +2365,7 @@ def test_create_assistant(instrumented_client: openai.Client, exporter: TestExpo
                     ),
                     'url': '/assistants',
                     'async': False,
+                    'gen_ai.provider.name': 'openai',
                     'logfire.msg_template': 'OpenAI API call to {url!r}',
                     'logfire.msg': "OpenAI API call to '/assistants'",
                     'code.filepath': 'test_openai.py',
@@ -2125,15 +2374,18 @@ def test_create_assistant(instrumented_client: openai.Client, exporter: TestExpo
                     'gen_ai.system': 'openai',
                     'gen_ai.request.model': 'gpt-4o',
                     'gen_ai.response.model': 'gpt-4-turbo',
+                    'gen_ai.response.id': 'asst_abc123',
                     'logfire.json_schema': {
                         'type': 'object',
                         'properties': {
                             'request_data': {'type': 'object'},
                             'url': {},
+                            'gen_ai.provider.name': {},
                             'async': {},
                             'gen_ai.system': {},
                             'gen_ai.request.model': {},
                             'gen_ai.response.model': {},
+                            'gen_ai.response.id': {},
                         },
                     },
                 },
@@ -2160,15 +2412,24 @@ def test_create_thread(instrumented_client: openai.Client, exporter: TestExporte
                     'request_data': {},
                     'url': '/threads',
                     'async': False,
+                    'gen_ai.provider.name': 'openai',
                     'logfire.msg_template': 'OpenAI API call to {url!r}',
                     'logfire.msg': "OpenAI API call to '/threads'",
                     'code.filepath': 'test_openai.py',
                     'code.function': 'test_create_thread',
                     'code.lineno': 123,
                     'gen_ai.system': 'openai',
+                    'gen_ai.response.id': 'thread_abc123',
                     'logfire.json_schema': {
                         'type': 'object',
-                        'properties': {'request_data': {'type': 'object'}, 'url': {}, 'async': {}, 'gen_ai.system': {}},
+                        'properties': {
+                            'request_data': {'type': 'object'},
+                            'url': {},
+                            'gen_ai.provider.name': {},
+                            'async': {},
+                            'gen_ai.system': {},
+                            'gen_ai.response.id': {},
+                        },
                     },
                 },
            }
@@ -2208,7 +2469,7 @@ def test_responses_api(exporter: TestExporter) -> None:
     assert exporter.exported_spans_as_dict(parse_json_attributes=True) == snapshot(
         [
             {
-                'name': 'Responses API with {gen_ai.request.model!r}',
+                'name': 'Responses API with {request_data[model]!r}',
                 'context': {'trace_id': 1, 'span_id': 1, 'is_remote': False},
                 'parent': None,
                 'start_time': 1000000000,
@@ -2217,9 +2478,11 @@ def test_responses_api(exporter: TestExporter) -> None:
                     'code.filepath': 'test_openai.py',
                     'code.function': 'test_responses_api',
                     'code.lineno': 123,
+                    'gen_ai.provider.name': 'openai',
+                    'gen_ai.operation.name': 'chat',
                     'async': False,
                     'request_data': {'model': 'gpt-4.1', 'stream': False},
-                    'logfire.msg_template': 'Responses API with {gen_ai.request.model!r}',
+                    'logfire.msg_template': 'Responses API with {request_data[model]!r}',
                     'logfire.msg': "Responses API with 'gpt-4.1'",
                     'logfire.tags': ('LLM',),
                     'logfire.span_type': 'span',
@@ -2227,6 +2490,7 @@ def test_responses_api(exporter: TestExporter) -> None:
                     'gen_ai.request.model': 'gpt-4.1',
                     'gen_ai.response.model': 'gpt-4.1-2025-04-14',
                     'gen_ai.usage.input_tokens': 65,
+                    'gen_ai.response.id': 'resp_039e74dd66b112920068dfe10528b8819c82d1214897014964',
                     'gen_ai.usage.output_tokens': 17,
                     'operation.cost': 0.000266,
                     'events': [
@@ -2251,6 +2515,8 @@ def test_responses_api(exporter: TestExporter) -> None:
                     'logfire.json_schema': {
                         'type': 'object',
                         'properties': {
+                            'gen_ai.provider.name': {},
+                            'gen_ai.operation.name': {},
                             'gen_ai.request.model': {},
                             'request_data': {'type': 'object'},
                             'events': {'type': 'array'},
@@ -2258,6 +2524,7 @@ def test_responses_api(exporter: TestExporter) -> None:
                             'gen_ai.system': {},
                             'gen_ai.response.model': {},
                             'gen_ai.usage.input_tokens': {},
+                            'gen_ai.response.id': {},
                             'gen_ai.usage.output_tokens': {},
                             'operation.cost': {},
                         },
@@ -2265,7 +2532,7 @@ def test_responses_api(exporter: TestExporter) -> None:
                 },
             },
             {
-                'name': 'Responses API with {gen_ai.request.model!r}',
+                'name': 'Responses API with {request_data[model]!r}',
                 'context': {'trace_id': 2, 'span_id': 3, 'is_remote': False},
                 'parent': None,
                 'start_time': 3000000000,
@@ -2274,9 +2541,11 @@ def test_responses_api(exporter: TestExporter) -> None:
                     'code.filepath': 'test_openai.py',
                     'code.function': 'test_responses_api',
                     'code.lineno': 123,
+                    'gen_ai.provider.name': 'openai',
+                    'gen_ai.operation.name': 'chat',
                     'async': False,
                     'request_data': {'model': 'gpt-4.1', 'stream': False},
-                    'logfire.msg_template': 'Responses API with {gen_ai.request.model!r}',
+                    'logfire.msg_template': 'Responses API with {request_data[model]!r}',
                     'logfire.msg': "Responses API with 'gpt-4.1'",
                     'logfire.tags': ('LLM',),
                     'logfire.span_type': 'span',
@@ -2284,6 +2553,7 @@ def test_responses_api(exporter: TestExporter) -> None:
                     'gen_ai.request.model': 'gpt-4.1',
                     'gen_ai.response.model': 'gpt-4.1-2025-04-14',
                     'gen_ai.usage.input_tokens': 43,
+                    'gen_ai.response.id': 'resp_039e74dd66b112920068dfe10687b4819cb0bc63819abcde35',
                     'gen_ai.usage.output_tokens': 21,
                     'operation.cost': 0.000254,
                     'events': [
@@ -2319,6 +2589,8 @@ def test_responses_api(exporter: TestExporter) -> None:
                     'logfire.json_schema': {
                         'type': 'object',
                         'properties': {
+                            'gen_ai.provider.name': {},
+                            'gen_ai.operation.name': {},
                             'gen_ai.request.model': {},
                             'request_data': {'type': 'object'},
                             'events': {'type': 'array'},
@@ -2326,6 +2598,7 @@ def test_responses_api(exporter: TestExporter) -> None:
                             'gen_ai.system': {},
                             'gen_ai.response.model': {},
                             'gen_ai.usage.input_tokens': {},
+                            'gen_ai.response.id': {},
                             'gen_ai.usage.output_tokens': {},
                             'operation.cost': {},
                         },
@@ -2380,13 +2653,28 @@ def test_openrouter_streaming_reasoning(exporter: TestExporter) -> None:
                         'model': 'google/gemini-2.5-flash',
                         'stream': True,
                     },
+                    'gen_ai.provider.name': 'openai',
+                    'gen_ai.operation.name': 'chat',
                     'gen_ai.request.model': 'google/gemini-2.5-flash',
+                    'gen_ai.input.messages': [
+                        {
+                            'role': 'user',
+                            'parts': [{'type': 'text', 'content': 'Hello, how are you? (This is a trick question)'}],
+                        }
+                    ],
                     'async': False,
                     'logfire.msg_template': 'Chat Completion with {request_data[model]!r}',
                     'logfire.msg': "Chat Completion with 'google/gemini-2.5-flash'",
                     'logfire.json_schema': {
                         'type': 'object',
-                        'properties': {'request_data': {'type': 'object'}, 'gen_ai.request.model': {}, 'async': {}},
+                        'properties': {
+                            'request_data': {'type': 'object'},
+                            'gen_ai.provider.name': {},
+                            'gen_ai.operation.name': {},
+                            'gen_ai.request.model': {},
+                            'gen_ai.input.messages': {'type': 'array'},
+                            'async': {},
+                        },
                     },
                     'logfire.tags': ('LLM',),
                     'logfire.span_type': 'span',
@@ -2413,7 +2701,15 @@ def test_openrouter_streaming_reasoning(exporter: TestExporter) -> None:
                         'stream': True,
                     },
                     'gen_ai.request.model': 'google/gemini-2.5-flash',
+                    'gen_ai.provider.name': 'openai',
+                    'gen_ai.operation.name': 'chat',
                     'async': False,
+                    'gen_ai.input.messages': [
+                        {
+                            'role': 'user',
+                            'parts': [{'type': 'text', 'content': 'Hello, how are you? (This is a trick question)'}],
+                        }
+                    ],
                     'duration': 1.0,
                     'response_data': {
                         'message': {
@@ -2467,7 +2763,10 @@ def test_openrouter_streaming_reasoning(exporter: TestExporter) -> None:
                         'properties': {
                             'request_data': {'type': 'object'},
                             'gen_ai.request.model': {},
+                            'gen_ai.provider.name': {},
+                            'gen_ai.operation.name': {},
                             'async': {},
+                            'gen_ai.input.messages': {'type': 'array'},
                             'duration': {},
                             'response_data': {
                                 'type': 'object',
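
The snapshot changes above all assert the same two message shapes from the OTel Gen AI semantic conventions: `gen_ai.input.messages` / `gen_ai.output.messages` entries carry a `role` plus a list of typed `parts`, and output messages add a per-message `finish_reason` that the span mirrors in `gen_ai.response.finish_reasons`. A minimal sketch of those shapes in plain Python; the `text_message` helper is hypothetical and exists only for this illustration, not in the instrumentation:

```python
from typing import Any


def text_message(role: str, text: str) -> dict[str, Any]:
    """Hypothetical helper: build the 'role' + 'parts' shape asserted in the snapshots."""
    return {'role': role, 'parts': [{'type': 'text', 'content': text}]}


# Input side, as asserted on the chat spans above:
assert text_message('user', 'What is four plus five?') == {
    'role': 'user',
    'parts': [{'type': 'text', 'content': 'What is four plus five?'}],
}

# Output side: each message also carries a 'finish_reason', and the span repeats
# the reasons as a list under 'gen_ai.response.finish_reasons'.
output = {**text_message('assistant', 'Nine'), 'finish_reason': 'stop'}
assert [output['finish_reason']] == ['stop']
```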