Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
110 changes: 98 additions & 12 deletions logfire/_internal/integrations/llm_providers/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,11 +89,14 @@ def get_endpoint_config(options: FinalRequestOptions) -> EndpointConfig:
OPERATION_NAME: 'chat',
REQUEST_MODEL: json_data.get('model'),
'request_data': {'model': json_data.get('model'), 'stream': stream},
'events': inputs_to_events(
json_data.get('input'),
json_data.get('instructions'),
),
}
input_messages, system_instructions = convert_responses_inputs_to_semconv(
json_data.get('input'), json_data.get('instructions')
)
if input_messages:
span_data[INPUT_MESSAGES] = input_messages
if system_instructions:
span_data[SYSTEM_INSTRUCTIONS] = system_instructions

return EndpointConfig(
message_template='Responses API with {request_data[model]!r}',
Expand Down Expand Up @@ -275,6 +278,91 @@ def convert_openai_response_to_semconv(
return result


def convert_responses_inputs_to_semconv(
    inputs: str | list[dict[str, Any]] | None, instructions: str | None
) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]:
    """Convert Responses API inputs to OTel Gen AI Semantic Convention format.

    Args:
        inputs: The ``input`` argument of a Responses API call — either a bare
            string (shorthand for a single user message) or a list of input
            items (messages, function calls, and function call outputs).
        instructions: The optional ``instructions`` argument (system prompt).

    Returns:
        A ``(input_messages, system_instructions)`` tuple shaped for the
        ``gen_ai.input.messages`` and ``gen_ai.system_instructions`` span
        attributes. Either list may be empty.
    """
    input_messages: list[dict[str, Any]] = []
    system_instructions: list[dict[str, Any]] = []
    if instructions:
        system_instructions.append({'type': 'text', 'content': instructions})
    if isinstance(inputs, str):
        input_messages.append({'role': 'user', 'parts': [{'type': 'text', 'content': inputs}]})
    elif inputs:
        for inp in inputs:
            role, typ, content = inp.get('role', 'user'), inp.get('type'), inp.get('content')
            if typ in (None, 'message') and content:
                parts: list[dict[str, Any]] = []
                if isinstance(content, str):
                    parts.append({'type': 'text', 'content': content})
                elif isinstance(content, list):
                    for item in content:
                        if isinstance(item, dict) and item.get('type') in ('input_text', 'output_text'):
                            # Both Responses API text item flavours collapse to the
                            # generic semconv 'text' part.
                            parts.append({'type': 'text', 'content': item.get('text', '')})
                        elif isinstance(item, dict):
                            # Unknown structured parts (e.g. images) are passed through as-is.
                            parts.append(item)
                        else:
                            parts.append({'type': 'text', 'content': str(item)})
                input_messages.append({'role': role, 'parts': parts})
            elif typ == 'function_call':
                # An assistant turn that requested a tool call.
                input_messages.append(
                    {
                        'role': 'assistant',
                        'parts': [
                            {
                                'type': 'tool_call',
                                'id': inp.get('call_id'),
                                'name': inp.get('name'),
                                'arguments': inp.get('arguments'),
                            }
                        ],
                    }
                )
            elif typ == 'function_call_output':
                # The result of a previously requested tool call.
                msg: dict[str, Any] = {
                    'role': 'tool',
                    'parts': [
                        {'type': 'tool_call_response', 'id': inp.get('call_id'), 'response': inp.get('output')}
                    ],
                }
                if 'name' in inp:
                    msg['name'] = inp['name']
                input_messages.append(msg)
    return input_messages, system_instructions


def convert_responses_outputs_to_semconv(response: Response) -> list[dict[str, Any]]:
    """Convert Responses API outputs to OTel Gen AI Semantic Convention format.

    Args:
        response: The Responses API response; each item of ``response.output``
            must support ``model_dump()``.

    Returns:
        Messages shaped for the ``gen_ai.output.messages`` span attribute.
        Output items that are neither messages nor function calls (e.g.
        reasoning items) are skipped.
    """
    output_messages: list[dict[str, Any]] = []
    for out in response.output:
        # model_dump() serializes the whole model each call, so dump each item once.
        out_dict = out.model_dump()
        typ, content = out_dict.get('type'), out_dict.get('content')
        if typ in (None, 'message') and content:
            parts: list[dict[str, Any]] = []
            if isinstance(content, str):
                parts.append({'type': 'text', 'content': content})
            elif isinstance(content, list):
                for item in content:
                    if isinstance(item, dict) and item.get('type') == 'output_text':
                        parts.append({'type': 'text', 'content': item.get('text', '')})
                    elif isinstance(item, dict):
                        # Unknown structured parts are passed through as-is.
                        parts.append(item)
                    else:
                        parts.append({'type': 'text', 'content': str(item)})
            output_messages.append({'role': 'assistant', 'parts': parts})
        elif typ == 'function_call':
            # A tool call requested by the model.
            output_messages.append(
                {
                    'role': 'assistant',
                    'parts': [
                        {
                            'type': 'tool_call',
                            'id': out_dict.get('call_id'),
                            'name': out_dict.get('name'),
                            'arguments': out_dict.get('arguments'),
                        }
                    ],
                }
            )
    return output_messages


def is_current_agent_span(*span_names: str):
current_span = get_current_span()
return (
Expand Down Expand Up @@ -320,7 +408,9 @@ def get_response_data(self) -> Any:

def get_attributes(self, span_data: dict[str, Any]) -> dict[str, Any]:
response = self.get_response_data()
span_data['events'] = span_data['events'] + responses_output_events(response)
output_messages = convert_responses_outputs_to_semconv(response)
if output_messages:
span_data[OUTPUT_MESSAGES] = output_messages
return span_data


Expand Down Expand Up @@ -442,13 +532,9 @@ def on_response(response: ResponseT, span: LogfireSpan) -> ResponseT:
elif isinstance(response, ImagesResponse):
span.set_attribute('response_data', {'images': response.data})
elif isinstance(response, Response): # pragma: no branch
try:
events = json.loads(span.attributes['events']) # type: ignore
except Exception:
pass
else:
events += responses_output_events(response)
span.set_attribute('events', events)
output_messages = convert_responses_outputs_to_semconv(response)
if output_messages:
span.set_attribute(OUTPUT_MESSAGES, output_messages)

return response

Expand Down
113 changes: 60 additions & 53 deletions tests/otel_integrations/test_openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -1671,8 +1671,8 @@ def test_responses_stream(exporter: TestExporter) -> None:
'gen_ai.operation.name': 'chat',
'request_data': {'model': 'gpt-4.1', 'stream': True},
'gen_ai.request.model': 'gpt-4.1',
'events': [
{'event.name': 'gen_ai.user.message', 'content': 'What is four plus five?', 'role': 'user'}
'gen_ai.input.messages': [
{'role': 'user', 'parts': [{'type': 'text', 'content': 'What is four plus five?'}]}
],
'async': False,
'logfire.msg_template': 'Responses API with {request_data[model]!r}',
Expand All @@ -1684,7 +1684,7 @@ def test_responses_stream(exporter: TestExporter) -> None:
'gen_ai.operation.name': {},
'request_data': {'type': 'object'},
'gen_ai.request.model': {},
'events': {'type': 'array'},
'gen_ai.input.messages': {'type': 'array'},
'async': {},
},
},
Expand Down Expand Up @@ -1712,18 +1712,12 @@ def test_responses_stream(exporter: TestExporter) -> None:
'gen_ai.operation.name': 'chat',
'gen_ai.request.model': 'gpt-4.1',
'async': False,
'gen_ai.input.messages': [
{'role': 'user', 'parts': [{'type': 'text', 'content': 'What is four plus five?'}]}
],
'duration': 1.0,
'events': [
{
'event.name': 'gen_ai.user.message',
'content': 'What is four plus five?',
'role': 'user',
},
{
'event.name': 'gen_ai.assistant.message',
'content': 'Four plus five equals **nine**.',
'role': 'assistant',
},
'gen_ai.output.messages': [
{'role': 'assistant', 'parts': [{'type': 'text', 'content': 'Four plus five equals **nine**.'}]}
],
'logfire.json_schema': {
'type': 'object',
Expand All @@ -1733,8 +1727,9 @@ def test_responses_stream(exporter: TestExporter) -> None:
'gen_ai.operation.name': {},
'gen_ai.request.model': {},
'async': {},
'events': {'type': 'array'},
'gen_ai.input.messages': {'type': 'array'},
'duration': {},
'gen_ai.output.messages': {'type': 'array'},
},
},
'logfire.tags': ('LLM',),
Expand Down Expand Up @@ -2482,6 +2477,13 @@ def test_responses_api(exporter: TestExporter) -> None:
'gen_ai.operation.name': 'chat',
'async': False,
'request_data': {'model': 'gpt-4.1', 'stream': False},
'gen_ai.input.messages': [
{
'role': 'user',
'parts': [{'type': 'text', 'content': 'What is the weather like in Paris today?'}],
}
],
'gen_ai.system_instructions': [{'type': 'text', 'content': 'Be nice'}],
'logfire.msg_template': 'Responses API with {request_data[model]!r}',
'logfire.msg': "Responses API with 'gpt-4.1'",
'logfire.tags': ('LLM',),
Expand All @@ -2493,24 +2495,18 @@ def test_responses_api(exporter: TestExporter) -> None:
'gen_ai.response.id': 'resp_039e74dd66b112920068dfe10528b8819c82d1214897014964',
'gen_ai.usage.output_tokens': 17,
'operation.cost': 0.000266,
'events': [
{'event.name': 'gen_ai.system.message', 'content': 'Be nice', 'role': 'system'},
{
'event.name': 'gen_ai.user.message',
'content': 'What is the weather like in Paris today?',
'role': 'user',
},
'gen_ai.output.messages': [
{
'event.name': 'gen_ai.assistant.message',
'role': 'assistant',
'tool_calls': [
'parts': [
{
'type': 'tool_call',
'id': 'call_uilZSE2qAuMA2NWct72DBwd6',
'type': 'function',
'function': {'name': 'get_weather', 'arguments': '{"location":"Paris, France"}'},
'name': 'get_weather',
'arguments': '{"location":"Paris, France"}',
}
],
},
}
],
'logfire.json_schema': {
'type': 'object',
Expand All @@ -2519,14 +2515,16 @@ def test_responses_api(exporter: TestExporter) -> None:
'gen_ai.operation.name': {},
'gen_ai.request.model': {},
'request_data': {'type': 'object'},
'events': {'type': 'array'},
'gen_ai.input.messages': {'type': 'array'},
'gen_ai.system_instructions': {'type': 'array'},
'async': {},
'gen_ai.system': {},
'gen_ai.response.model': {},
'gen_ai.usage.input_tokens': {},
'gen_ai.response.id': {},
'gen_ai.usage.output_tokens': {},
'operation.cost': {},
'gen_ai.output.messages': {'type': 'array'},
},
},
},
Expand All @@ -2545,6 +2543,33 @@ def test_responses_api(exporter: TestExporter) -> None:
'gen_ai.operation.name': 'chat',
'async': False,
'request_data': {'model': 'gpt-4.1', 'stream': False},
'gen_ai.input.messages': [
{
'role': 'user',
'parts': [{'type': 'text', 'content': 'What is the weather like in Paris today?'}],
},
{
'role': 'assistant',
'parts': [
{
'type': 'tool_call',
'id': 'call_uilZSE2qAuMA2NWct72DBwd6',
'name': 'get_weather',
'arguments': '{"location":"Paris, France"}',
}
],
},
{
'role': 'tool',
'parts': [
{
'type': 'tool_call_response',
'id': 'call_uilZSE2qAuMA2NWct72DBwd6',
'response': 'Rainy',
}
],
},
],
'logfire.msg_template': 'Responses API with {request_data[model]!r}',
'logfire.msg': "Responses API with 'gpt-4.1'",
'logfire.tags': ('LLM',),
Expand All @@ -2556,35 +2581,16 @@ def test_responses_api(exporter: TestExporter) -> None:
'gen_ai.response.id': 'resp_039e74dd66b112920068dfe10687b4819cb0bc63819abcde35',
'gen_ai.usage.output_tokens': 21,
'operation.cost': 0.000254,
'events': [
{
'event.name': 'gen_ai.user.message',
'content': 'What is the weather like in Paris today?',
'role': 'user',
},
'gen_ai.output.messages': [
{
'event.name': 'gen_ai.assistant.message',
'role': 'assistant',
'tool_calls': [
'parts': [
{
'id': 'call_uilZSE2qAuMA2NWct72DBwd6',
'type': 'function',
'function': {'name': 'get_weather', 'arguments': '{"location":"Paris, France"}'},
'type': 'text',
'content': "The weather in Paris today is rainy. If you're planning to go out, don't forget an umbrella!",
}
],
},
{
'event.name': 'gen_ai.tool.message',
'role': 'tool',
'id': 'call_uilZSE2qAuMA2NWct72DBwd6',
'content': 'Rainy',
'name': 'get_weather',
},
{
'event.name': 'gen_ai.assistant.message',
'content': "The weather in Paris today is rainy. If you're planning to go out, don't forget an umbrella!",
'role': 'assistant',
},
}
],
'logfire.json_schema': {
'type': 'object',
Expand All @@ -2593,14 +2599,15 @@ def test_responses_api(exporter: TestExporter) -> None:
'gen_ai.operation.name': {},
'gen_ai.request.model': {},
'request_data': {'type': 'object'},
'events': {'type': 'array'},
'gen_ai.input.messages': {'type': 'array'},
'async': {},
'gen_ai.system': {},
'gen_ai.response.model': {},
'gen_ai.usage.input_tokens': {},
'gen_ai.response.id': {},
'gen_ai.usage.output_tokens': {},
'operation.cost': {},
'gen_ai.output.messages': {'type': 'array'},
},
},
},
Expand Down
Loading