From 67ce692a1bbdadcd0b6613a2ea095ce7d32901e6 Mon Sep 17 00:00:00 2001 From: Adharsh Date: Mon, 8 Dec 2025 17:45:49 +0530 Subject: [PATCH 01/20] Added the metrics support for google generativeai --- .../google_generativeai/__init__.py | 101 ++++++++++++++---- 1 file changed, 82 insertions(+), 19 deletions(-) diff --git a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py index 25ec21a439..d123cd6317 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py +++ b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py @@ -1,6 +1,8 @@ """OpenTelemetry Google Generative AI API instrumentation""" import logging +import os +import time import types from typing import Collection @@ -32,7 +34,9 @@ SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY, LLMRequestTypeValues, SpanAttributes, + Meters ) +from opentelemetry.metrics import Meter, get_meter from opentelemetry.trace import SpanKind, get_tracer from wrapt import wrap_function_wrapper @@ -79,6 +83,7 @@ def _build_from_streaming_response( response: GenerateContentResponse, llm_model, event_logger, + token_histogram, ): complete_response = "" last_chunk = None @@ -93,12 +98,12 @@ def _build_from_streaming_response( emit_choice_events(response, event_logger) else: set_response_attributes(span, complete_response, llm_model) - set_model_response_attributes(span, last_chunk or response, llm_model) + set_model_response_attributes(span, last_chunk or response, llm_model, token_histogram) span.end() async def _abuild_from_streaming_response( - span, response: GenerateContentResponse, llm_model, event_logger + span, response: GenerateContentResponse, llm_model, event_logger, token_histogram ): complete_response = "" last_chunk = None @@ -113,7 +118,7 @@ async def _abuild_from_streaming_response( emit_choice_events(response, event_logger) else: set_response_attributes(span, complete_response, llm_model) - set_model_response_attributes(span, last_chunk if last_chunk else response, llm_model) + set_model_response_attributes(span, last_chunk if last_chunk else response, llm_model, token_histogram) span.end() @@ -128,21 +133,21 @@ def _handle_request(span, args, kwargs, llm_model, event_logger): @dont_throw -def _handle_response(span, response, llm_model, event_logger): +def _handle_response(span, response, llm_model, event_logger, token_histogram): if should_emit_events() and event_logger: emit_choice_events(response, event_logger) else: set_response_attributes(span, response, llm_model) - set_model_response_attributes(span, response, llm_model) + set_model_response_attributes(span, response, llm_model, token_histogram) def _with_tracer_wrapper(func): """Helper for providing tracer for wrapper functions.""" - def _with_tracer(tracer, event_logger, to_wrap): + def _with_tracer(tracer, event_logger, to_wrap, token_histogram, duration_histogram): def wrapper(wrapped, instance, args, kwargs): - return func(tracer, event_logger, to_wrap, wrapped, instance, args, kwargs) + return func(tracer, event_logger, to_wrap, wrapped, instance, args, kwargs, token_histogram, duration_histogram) return wrapper @@ -158,6 +163,8 @@ async def _awrap( instance, args, kwargs, + token_histogram, + duration_histogram, ): """Instruments and calls every function 
defined in TO_WRAP.""" if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value( @@ -186,22 +193,31 @@ async def _awrap( SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value, }, ) - + start_time = time.time() _handle_request(span, args, kwargs, llm_model, event_logger) response = await wrapped(*args, **kwargs) + if duration_histogram: + duration = time.time() - start_time + duration_histogram.record( + duration, + attributes={ + GenAIAttributes.GEN_AI_SYSTEM: "Google", + GenAIAttributes.GEN_AI_RESPONSE_MODEL: llm_model + }, + ) if response: if is_streaming_response(response): return _build_from_streaming_response( - span, response, llm_model, event_logger + span, response, llm_model, event_logger, token_histogram ) elif is_async_streaming_response(response): return _abuild_from_streaming_response( - span, response, llm_model, event_logger + span, response, llm_model, event_logger, token_histogram ) else: - _handle_response(span, response, llm_model, event_logger) + _handle_response(span, response, llm_model, event_logger, token_histogram) span.end() return response @@ -216,6 +232,8 @@ def _wrap( instance, args, kwargs, + token_histogram, + duration_histogram, ): """Instruments and calls every function defined in TO_WRAP.""" if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value( @@ -245,25 +263,54 @@ def _wrap( }, ) + start_time = time.time() _handle_request(span, args, kwargs, llm_model, event_logger) response = wrapped(*args, **kwargs) + if duration_histogram: + duration = time.time() - start_time + duration_histogram.record( + duration, + attributes={ + GenAIAttributes.GEN_AI_SYSTEM: "Google", + GenAIAttributes.GEN_AI_RESPONSE_MODEL: llm_model + }, + ) if response: if is_streaming_response(response): return _build_from_streaming_response( - span, response, llm_model, event_logger + span, response, llm_model, event_logger, token_histogram ) elif is_async_streaming_response(response): return _abuild_from_streaming_response( - span, response, llm_model, event_logger + span, response, llm_model, event_logger, token_histogram ) else: - _handle_response(span, response, llm_model, event_logger) + _handle_response(span, response, llm_model, event_logger, token_histogram) span.end() return response +def is_metrics_enabled() -> bool: + return (os.getenv("TRACELOOP_METRICS_ENABLED") or "true").lower() == "true" + +def _create_metrics(meter: Meter): + token_histogram = meter.create_histogram( + name=Meters.LLM_TOKEN_USAGE, + unit="token", + description="Measures number of input and output tokens used", + ) + + duration_histogram = meter.create_histogram( + name=Meters.LLM_OPERATION_DURATION, + unit="s", + description="GenAI operation duration", + ) + + return token_histogram, duration_histogram + + class GoogleGenerativeAiInstrumentor(BaseInstrumentor): """An instrumentor for Google Generative AI's client library.""" @@ -285,6 +332,12 @@ def _instrument(self, **kwargs): tracer_provider = kwargs.get("tracer_provider") tracer = get_tracer(__name__, __version__, tracer_provider) + meter_provider = kwargs.get("meter_provider") + meter = get_meter(__name__, __version__, meter_provider) + + if is_metrics_enabled(): + token_histogram, duration_histogram = _create_metrics(meter) + event_logger = None if not Config.use_legacy_attributes: logger_provider = kwargs.get("logger_provider") @@ -297,14 +350,24 @@ def _instrument(self, **kwargs): wrap_object = wrapped_method.get("object") wrap_method = wrapped_method.get("method") + wrapper_args = 
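# NOTE (illustrative aside, not part of this patch): the two instruments
# created by _create_metrics() above can be exercised end-to-end with the
# SDK's in-memory reader. A minimal sketch; the meter name "demo", the model
# string, and the recorded values are stand-ins, not taken from this PR.
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import InMemoryMetricReader

reader = InMemoryMetricReader()
provider = MeterProvider(metric_readers=[reader])
meter = provider.get_meter("demo")

token_histogram, duration_histogram = _create_metrics(meter)
token_histogram.record(
    42,
    attributes={"gen_ai.token.type": "input", "gen_ai.response.model": "gemini-2.5-flash"},
)
duration_histogram.record(0.25)

# Each histogram surfaces as one Metric whose data points carry sum/count.
for rm in reader.get_metrics_data().resource_metrics:
    for sm in rm.scope_metrics:
        for metric in sm.metrics:
            print(metric.name, [dp.sum for dp in metric.data.data_points])
# end of aside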
( + tracer, + event_logger, + wrapped_method, + token_histogram, + duration_histogram, + ) + + wrapper = ( + _awrap(*wrapper_args) + if wrap_object == "AsyncModels" + else _wrap(*wrapper_args) + ) + wrap_function_wrapper( wrap_package, f"{wrap_object}.{wrap_method}", - ( - _awrap(tracer, event_logger, wrapped_method) - if wrap_object == "AsyncModels" - else _wrap(tracer, event_logger, wrapped_method) - ), + wrapper, ) def _uninstrument(self, **kwargs): From 53ced7afeccb5bdf01a715c17977846da642dc1a Mon Sep 17 00:00:00 2001 From: Adharsh Date: Mon, 8 Dec 2025 17:46:49 +0530 Subject: [PATCH 02/20] Collect tokens from response --- .../google_generativeai/span_utils.py | 20 ++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/span_utils.py b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/span_utils.py index 384f339982..fae3911ecc 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/span_utils.py +++ b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/span_utils.py @@ -446,7 +446,7 @@ def set_response_attributes(span, response, llm_model): ) -def set_model_response_attributes(span, response, llm_model): +def set_model_response_attributes(span, response, llm_model, token_histogram): if not span.is_recording(): return @@ -469,4 +469,22 @@ def set_model_response_attributes(span, response, llm_model): response.usage_metadata.prompt_token_count, ) + if token_histogram: + token_histogram.record( + response.usage_metadata.prompt_token_count, + attributes={ + GenAIAttributes.GEN_AI_SYSTEM: "Google", + GenAIAttributes.GEN_AI_TOKEN_TYPE: "input", + GenAIAttributes.GEN_AI_RESPONSE_MODEL: llm_model, + } + ) + token_histogram.record( + response.usage_metadata.candidates_token_count, + attributes={ + GenAIAttributes.GEN_AI_SYSTEM: "Google", + GenAIAttributes.GEN_AI_TOKEN_TYPE: "output", + GenAIAttributes.GEN_AI_RESPONSE_MODEL: llm_model, + }, + ) + span.set_status(Status(StatusCode.OK)) From 3ec203e4075d204781fca507a3929bfa014fc59f Mon Sep 17 00:00:00 2001 From: Adharsh Date: Mon, 8 Dec 2025 18:15:10 +0530 Subject: [PATCH 03/20] Done linting --- .../google_generativeai/__init__.py | 27 ++++++++++++++----- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py index d123cd6317..9ec2042db3 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py +++ b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py @@ -147,7 +147,17 @@ def _with_tracer_wrapper(func): def _with_tracer(tracer, event_logger, to_wrap, token_histogram, duration_histogram): def wrapper(wrapped, instance, args, kwargs): - return func(tracer, event_logger, to_wrap, wrapped, instance, args, kwargs, token_histogram, duration_histogram) + return func( + tracer, + event_logger, + to_wrap, + token_histogram, + duration_histogram, + wrapped, + instance, + args, + kwargs, + ) return wrapper @@ -159,12 +169,13 @@ async def _awrap( tracer, 
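# NOTE (illustrative aside, not part of this patch): the span_utils recording
# added in PATCH 02 emits one histogram point per token type. A self-contained
# sketch of that shape; the SimpleNamespace response and the counts are
# stand-ins for a real google-genai response carrying usage_metadata.
from types import SimpleNamespace

def record_token_usage(token_histogram, response, llm_model):
    # The string keys are the values of the GenAIAttributes constants used
    # in PATCH 02 above.
    for token_type, count in (
        ("input", response.usage_metadata.prompt_token_count),
        ("output", response.usage_metadata.candidates_token_count),
    ):
        token_histogram.record(
            count,
            attributes={
                "gen_ai.system": "Google",
                "gen_ai.token.type": token_type,
                "gen_ai.response.model": llm_model,
            },
        )

fake_response = SimpleNamespace(
    usage_metadata=SimpleNamespace(prompt_token_count=12, candidates_token_count=34)
)
# record_token_usage(token_histogram, fake_response, "gemini-2.5-flash")
# end of aside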
event_logger, to_wrap, + token_histogram, + duration_histogram, wrapped, instance, args, kwargs, - token_histogram, - duration_histogram, + ): """Instruments and calls every function defined in TO_WRAP.""" if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value( @@ -228,12 +239,13 @@ def _wrap( tracer, event_logger, to_wrap, + token_histogram, + duration_histogram, wrapped, instance, args, kwargs, - token_histogram, - duration_histogram, + ): """Instruments and calls every function defined in TO_WRAP.""" if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value( @@ -292,9 +304,11 @@ def _wrap( span.end() return response + def is_metrics_enabled() -> bool: return (os.getenv("TRACELOOP_METRICS_ENABLED") or "true").lower() == "true" + def _create_metrics(meter: Meter): token_histogram = meter.create_histogram( name=Meters.LLM_TOKEN_USAGE, @@ -311,7 +325,6 @@ def _create_metrics(meter: Meter): return token_histogram, duration_histogram - class GoogleGenerativeAiInstrumentor(BaseInstrumentor): """An instrumentor for Google Generative AI's client library.""" @@ -353,9 +366,9 @@ def _instrument(self, **kwargs): wrapper_args = ( tracer, event_logger, - wrapped_method, token_histogram, duration_histogram, + wrapped_method, ) wrapper = ( From 012121ea5368ba6e4d10d3ad0680b7b2627cea94 Mon Sep 17 00:00:00 2001 From: Adharsh Date: Mon, 8 Dec 2025 18:26:28 +0530 Subject: [PATCH 04/20] updated the wrapper --- .../instrumentation/google_generativeai/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py index 9ec2042db3..236e80c955 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py +++ b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py @@ -366,9 +366,9 @@ def _instrument(self, **kwargs): wrapper_args = ( tracer, event_logger, + wrapped_method, token_histogram, duration_histogram, - wrapped_method, ) wrapper = ( From 28f61d1eede6a86a258bafc103860a313c752774 Mon Sep 17 00:00:00 2001 From: Adharsh Date: Mon, 8 Dec 2025 19:08:57 +0530 Subject: [PATCH 05/20] Address the suggestion --- .../instrumentation/google_generativeai/__init__.py | 8 ++++---- .../instrumentation/google_generativeai/span_utils.py | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py index 236e80c955..7cb5e93cfc 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py +++ b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py @@ -204,13 +204,13 @@ async def _awrap( SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value, }, ) - start_time = time.time() + start_time = time.perf_counter() _handle_request(span, args, kwargs, llm_model, event_logger) response = await wrapped(*args, **kwargs) if duration_histogram: - duration = time.time() - start_time + duration = 
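# NOTE on the perf_counter change in this patch: time.perf_counter() is a
# monotonic clock, so the computed duration cannot jump or go negative if the
# system wall clock is adjusted mid-call; time.time() gives no such guarantee.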
time.perf_counter() - start_time duration_histogram.record( duration, attributes={ @@ -275,13 +275,13 @@ def _wrap( }, ) - start_time = time.time() + start_time = time.perf_counter() _handle_request(span, args, kwargs, llm_model, event_logger) response = wrapped(*args, **kwargs) if duration_histogram: - duration = time.time() - start_time + duration = time.perf_counter() - start_time duration_histogram.record( duration, attributes={ diff --git a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/span_utils.py b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/span_utils.py index fae3911ecc..39d70deed7 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/span_utils.py +++ b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/span_utils.py @@ -469,7 +469,7 @@ def set_model_response_attributes(span, response, llm_model, token_histogram): response.usage_metadata.prompt_token_count, ) - if token_histogram: + if token_histogram and hasattr(response, "usage_metadata"): token_histogram.record( response.usage_metadata.prompt_token_count, attributes={ From 98655d78784be251b501c4265c1d0e351e7e2eac Mon Sep 17 00:00:00 2001 From: Adharsh Date: Mon, 8 Dec 2025 19:13:55 +0530 Subject: [PATCH 06/20] Address the suggestion --- .../instrumentation/google_generativeai/__init__.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py index 7cb5e93cfc..334b80a07d 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py +++ b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py @@ -348,6 +348,9 @@ def _instrument(self, **kwargs): meter_provider = kwargs.get("meter_provider") meter = get_meter(__name__, __version__, meter_provider) + token_histogram = None + duration_histogram = None + if is_metrics_enabled(): token_histogram, duration_histogram = _create_metrics(meter) From ff67a8196d6571ff2f4cee9bbeb17dbed9f4fc1b Mon Sep 17 00:00:00 2001 From: Adharsh Date: Tue, 9 Dec 2025 18:05:05 +0530 Subject: [PATCH 07/20] Changed the failing testcase --- .../tests/test_new_library_instrumentation.py | 84 +++++++++---------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/packages/opentelemetry-instrumentation-google-generativeai/tests/test_new_library_instrumentation.py b/packages/opentelemetry-instrumentation-google-generativeai/tests/test_new_library_instrumentation.py index fef96d2154..a227c59d6b 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/tests/test_new_library_instrumentation.py +++ b/packages/opentelemetry-instrumentation-google-generativeai/tests/test_new_library_instrumentation.py @@ -1,54 +1,54 @@ """Test that the google-genai library instrumentation works.""" - +import wrapt from opentelemetry.instrumentation.google_generativeai import ( GoogleGenerativeAiInstrumentor, ) +from google.genai.models import Models, AsyncModels + + +def _is_instrumented(func): + """ + OpenTelemetry instrumentations wrap functions using wrapt. 
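    (Aside, independent of google-genai: wrapt replaces the target attribute
    with a wrapt.FunctionWrapper that keeps the original callable in
    __wrapped__. A standalone illustration of the heuristic, with
    illustrative names only:

        import wrapt

        def original():
            return 42

        @wrapt.decorator
        def passthrough(wrapped, instance, args, kwargs):
            return wrapped(*args, **kwargs)

        instrumented = passthrough(original)
        assert isinstance(instrumented, wrapt.FunctionWrapper)
        assert instrumented.__wrapped__ is original
        assert instrumented() == 42
    )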
+ Presence of __wrapped__ or a wrapt wrapper means instrumented. + """ + return hasattr(func, "__wrapped__") or isinstance( + func, + (wrapt.FunctionWrapper, wrapt.BoundFunctionWrapper), + ) -def test_library_instrumentation(): - """Test that the google-genai library gets properly instrumented.""" - # Import the library - from google import genai - from google.genai.models import AsyncModels, Models +def test_google_genai_instrumentation_lifecycle(): + """Validate instrumentation, idempotency, and cleanup.""" - # Set up instrumentor instrumentor = GoogleGenerativeAiInstrumentor() - # Verify methods are not wrapped initially - assert not hasattr(Models.generate_content, '__wrapped__') - assert not hasattr(Models.generate_content_stream, '__wrapped__') - assert not hasattr(AsyncModels.generate_content, '__wrapped__') - assert not hasattr(AsyncModels.generate_content_stream, '__wrapped__') - - try: - instrumentor.instrument() - - # Verify all methods are now wrapped - assert hasattr(Models.generate_content, '__wrapped__') - assert hasattr(Models.generate_content_stream, '__wrapped__') - assert hasattr(AsyncModels.generate_content, '__wrapped__') - assert hasattr(AsyncModels.generate_content_stream, '__wrapped__') - - # Verify they're callable - assert callable(Models.generate_content) - assert callable(Models.generate_content_stream) - assert callable(AsyncModels.generate_content) - assert callable(AsyncModels.generate_content_stream) - - # Test that we can create a client - client = genai.Client(api_key="test_key") - assert client is not None - assert hasattr(client, 'models') - assert isinstance(client.models, Models) - - finally: - instrumentor.uninstrument() - - # Verify methods are unwrapped - assert not hasattr(Models.generate_content, '__wrapped__') - assert not hasattr(Models.generate_content_stream, '__wrapped__') - assert not hasattr(AsyncModels.generate_content, '__wrapped__') - assert not hasattr(AsyncModels.generate_content_stream, '__wrapped__') + # --- ensure clean state --- + instrumentor.uninstrument() + + assert not _is_instrumented(Models.generate_content) + assert not _is_instrumented(Models.generate_content_stream) + assert not _is_instrumented(AsyncModels.generate_content) + assert not _is_instrumented(AsyncModels.generate_content_stream) + + # --- instrument --- + instrumentor.instrument() + + assert _is_instrumented(Models.generate_content) + assert _is_instrumented(Models.generate_content_stream) + assert _is_instrumented(AsyncModels.generate_content) + assert _is_instrumented(AsyncModels.generate_content_stream) + + # --- instrumentation is idempotent --- + instrumentor.instrument() + assert _is_instrumented(Models.generate_content) + + # --- uninstrument --- + instrumentor.uninstrument() + + assert not _is_instrumented(Models.generate_content) + assert not _is_instrumented(Models.generate_content_stream) + assert not _is_instrumented(AsyncModels.generate_content) + assert not _is_instrumented(AsyncModels.generate_content_stream) def test_instrumentation_dependencies(): From c6d2e17cbae883fefa06a08a55ffd5890201c1e2 Mon Sep 17 00:00:00 2001 From: Adharsh Date: Tue, 9 Dec 2025 18:07:00 +0530 Subject: [PATCH 08/20] updated confest file --- .../tests/conftest.py | 49 ++++++++++++++----- 1 file changed, 38 insertions(+), 11 deletions(-) diff --git a/packages/opentelemetry-instrumentation-google-generativeai/tests/conftest.py b/packages/opentelemetry-instrumentation-google-generativeai/tests/conftest.py index 865e331471..4d8ec26cca 100644 --- 
a/packages/opentelemetry-instrumentation-google-generativeai/tests/conftest.py +++ b/packages/opentelemetry-instrumentation-google-generativeai/tests/conftest.py @@ -10,29 +10,39 @@ from opentelemetry.instrumentation.google_generativeai.utils import ( TRACELOOP_TRACE_CONTENT, ) +from opentelemetry.sdk.resources import Resource from opentelemetry.sdk._logs import LoggerProvider from opentelemetry.sdk._logs.export import ( InMemoryLogExporter, SimpleLogRecordProcessor, ) from opentelemetry.sdk.trace import TracerProvider -from opentelemetry.sdk.trace.export import SimpleSpanProcessor -from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter +from opentelemetry.trace import set_tracer_provider +from opentelemetry.sdk.trace.export import ( + SimpleSpanProcessor, + InMemorySpanExporter, +) + +from opentelemetry import metrics +from opentelemetry.sdk.metrics import MeterProvider +from opentelemetry.sdk.metrics.export import InMemoryMetricReader + pytest_plugins = [] -@pytest.fixture(scope="function", name="span_exporter") -def fixture_span_exporter(): +@pytest.fixture(scope="session") +def exporter(): exporter = InMemorySpanExporter() - yield exporter - + processor = SimpleSpanProcessor(exporter) -@pytest.fixture(scope="function", name="tracer_provider") -def fixture_tracer_provider(span_exporter): provider = TracerProvider() - provider.add_span_processor(SimpleSpanProcessor(span_exporter)) - return provider + provider.add_span_processor(processor) + set_tracer_provider(provider) + + GoogleGenerativeAiInstrumentor().instrument() + + return exporter @pytest.fixture(scope="function", name="log_exporter") @@ -51,7 +61,7 @@ def fixture_logger_provider(log_exporter): @pytest.fixture def genai_client(): client = genai.Client(api_key=os.environ["GOOGLE_API_KEY"]) - return client.models + return client @pytest.fixture(scope="function") @@ -66,6 +76,23 @@ def instrument_legacy(tracer_provider): instrumentor.uninstrument() +@pytest.fixture(scope="session") +def metrics_test_context(): + resource = Resource.create() + reader = InMemoryMetricReader() + provider = MeterProvider(metric_readers=[reader], resource=resource) + metrics.set_meter_provider(provider) + GoogleGenerativeAiInstrumentor().instrument(meter_provider=provider) + return provider, reader + + +@pytest.fixture(scope="session", autouse=True) +def clear_metrics_test_context(metrics_test_context): + provider, reader = metrics_test_context + reader.shutdown() + provider.shutdown() + + @pytest.fixture(scope="function") def instrument_with_content(tracer_provider, logger_provider): os.environ.update({TRACELOOP_TRACE_CONTENT: "True"}) From e524d2b590954ce9c4ca61a1ba8ee7c06036d7b9 Mon Sep 17 00:00:00 2001 From: Adharsh Date: Tue, 9 Dec 2025 18:07:48 +0530 Subject: [PATCH 09/20] Added testcases for client --- .../test_client_spans.yaml | 92 ++++++++ .../tests/test_generate_content.py | 199 +++++------------- 2 files changed, 144 insertions(+), 147 deletions(-) create mode 100644 packages/opentelemetry-instrumentation-google-generativeai/tests/cassettes/test_generate_content/test_client_spans.yaml diff --git a/packages/opentelemetry-instrumentation-google-generativeai/tests/cassettes/test_generate_content/test_client_spans.yaml b/packages/opentelemetry-instrumentation-google-generativeai/tests/cassettes/test_generate_content/test_client_spans.yaml new file mode 100644 index 0000000000..3b8a4e851d --- /dev/null +++ 
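Before the new cassette below, an aside on the session-scoped `exporter`
fixture in the conftest above: it is the standard SDK wiring of a
SimpleSpanProcessor feeding an InMemorySpanExporter. A self-contained sketch
of the same wiring; the "demo" and "probe" names are illustrative:

from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter

provider = TracerProvider()
exporter = InMemorySpanExporter()
provider.add_span_processor(SimpleSpanProcessor(exporter))

with provider.get_tracer("demo").start_as_current_span("probe"):
    pass

assert [s.name for s in exporter.get_finished_spans()] == ["probe"]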
b/packages/opentelemetry-instrumentation-google-generativeai/tests/cassettes/test_generate_content/test_client_spans.yaml @@ -0,0 +1,92 @@ +interactions: +- request: + body: '{"contents": [{"parts": [{"text": "What is ai?"}], "role": "user"}]}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, zstd + Connection: + - keep-alive + Content-Length: + - '68' + Content-Type: + - application/json + Host: + - generativelanguage.googleapis.com + user-agent: + - google-genai-sdk/1.52.0 gl-python/3.11.11 + x-goog-api-client: + - google-genai-sdk/1.52.0 gl-python/3.11.11 + x-goog-api-key: + - AIzaSyCYDUVmQyuWr6y7ADadgKF9u-DGsP0OsNA + method: POST + uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent + response: + body: + string: !!binary | + H4sIAAAAAAAC/41Y0XLbuhF991dg9BLZQ2mSNOnk5k11nMRTO/WN1ZvOVH2AyJWEGgQYAJTEm8m/ + 9yxAUqRyb6aZxJFJYLF79uzZhb5dCDHJpSlUIQP5yVvxbzwR4lv8ye+sCWQCXnSP8LCSLpzWpj/f + Bp+xJNCRN02urhYuqI3KldTiFra0VlsyOYnp4vby6kooL6RYOysLsVGkC2E3IrdlVQdywucqri2o + UDkcLESwInckgzJbUcp8pwx5EXYyCEQhKnIb60oRpH9qH4emwlatG+Hoa60ciV1dYqka+DIXt+EZ + /FjbOgivylqnA9LK3G6NCmpPYlObPChrvNDqiYQm6QzWZaJydq2pnHmr9/FBQbnyWDkr5VNaQS6n + ijdnAnALLc22llsStSnI+cApwInK9MH7+cqszEdy9CxBRPKpsAfDAB1iZPyjJGn8W175Yi7E1dVy + R+KDlfotoF1aQUbCsQFSVqwmAZ+fVpPox2oSo8BvOFqKg2ySXSBmDwwhlbyJA6MuTJ8NvMY+hlye + QmNkncyDOKiwYwviYJ0uBogH3WSirPNdwjHC7EVhY8QvYxx/p0Zcy0qulQb28BxRL24R1cowv674 + 39VdmwAOdpFzdhOGvUPRH1dr7MczUXt+zx4N1szFcqfYPniO53uO1Iv7hJjojhDT+7vLDMAjHwBn + ax2CK33igKhkQMyIYeNsKVBKMgbPdFoT76ZjpVEDCJxB3DpZliAz+0R7cg2Ijkw5xQicwvtM0tsu + vn9G3+Nx1MWExIAVgFFWsHpUCIgEbBa0UaAsgUsm1zUT0Y8sP4z5yvbfA4+O912eI3wwFamJtXX4 + 0VLP6+jkiMyejLcIThkQOku5ho9beD697kr8t1gnl+y2r4gQzPQx/f+Z2srD69GZd13tjI7j8xFX + Tj4ixa6DamDioJT7qpt+kqF2kKTe1mDrTHy6exgf+a6tZ/DiqT3remdtz6c1eSiQrZ0nZqqMMpHY + sOe81sBSM0UkC8kYwkUhqyA7CBfFf2sfXUZ2DR2gR6GWSXU4JjJ75awpUUVJIf4S6+WjPaA+xBfr + IHzTCIxu2MoSUBqr7RZFdHlWP39I8ugEYgHPfb32FFLp/Rn1e8ZDRI0PJKOC/3+sZ5nOGD9wpEBA + atOcSomDhXqy6rD6JwQiuVMqvFhLD3PAmTMQi45NPdtzOlDLwUll4oIW7mE6qRpE/a6LGgHP+i50 + f5eksPYgrDx1MUOROYbCIaIdZQ61E1QFqdWyASfFdBcb12pS4KjV5JKzmQBjfdd07ANt9Qd/14Rs + cNCGmxUUISTVDztn6+3Os0QjEVOab+dZqiRe1xVJJkxL6p7mVU/qMZ1/xv4pk5/huIm9w58aEsdw + Ev4sCT2yE7JhtdFZrY0OPiv7iHrqNKNTVhNP1Hao/hSxV76G00N9j+w7tZghNOhSe7DK+nHon+3a + BpX72CBjJ+BDKRVsYjqfWu0az3ND3zqz1EsZIccmop/jeUPWwRpboth1Mzrz5oiFQTw2qI8ynvwZ + +j1L/PXpKXjt1dakEadUpcoTrceDhEgNsYki0wJN0focxcs1CxxLy41MgG1aAWrMWVEoXqUBoan+ + pJt+ks4lFZmizL6AeLw14ZQIyg6xEiDFElUWbCGbdnbqvWfwusrjSpcs6znXTkTpvBRvjpLLAaCI + R7TvDHSgo8wir6EWpkh5JrON88v0E4WNVkesK+XvaAwZuozezAqnuJFhCOTa29i8bqWhfcHrKlmi + mbEKI5llnCi1KJTcGgvBhXvW6rEyf4iM1hGSxYdbVp/HAPHddsAsxK6pLGAJ0RjWRcWorIdmeEqI + dSk7q56oBqly0L51M5pI0UEcjPBEBg4Lh1qKjSUSDZswoMHAnnSsG+nilIcD/mi2/QJrIIaxPDKy + hjcURmE+1iDQcM9PQvO1q2QM7cejmHRoTmhXPG4niY9qylKBOSQNBTzSRy1tB/k9sMla8dAI1xe2 + TLB4G/XWPymtU697PW/nIj37Egt+UXGHSe3xjM4fsSrsQIgYzrs+z4CvcPUWmfe5ZR/jcI5BS2r1 + O5cfe8XtVVSQsDEh3isjW3zeO1kXoH1I0pGdOiNTyckijv059y6ghnaK30e2lkiqr6w7tf7HcyZn + bGjDUAFqSBp7NZZTjAq2RI0/ktur5Ng10sT6lHW5EMiXYtKFcTA3rHwBlcpm48501zurPRjaypIL + P0ulDIu02SDssbl7aeoNBp/a9cNY6tt7vn8wTyJ2mfgKn7ge+GrprM6SniIxY++KOu+ReRgm6HTt + GtwoRID4xvG/1dPIl1soB8gajwWDVXfHo7KyB3KjS2R/W8m7a0fzo8T/9Ep5uij215ZY5kU7zXF/ + 6C5KxGnly22TtWNdgCGY5JGP9UP1N59WCs4rC6MCsu+a+WRw8/7ef/5PdrqvA2Xiy3hpC9Ld8u/d + gglfFvwu3TZ42ePyHw+T/i3OoSMeP7/oDoimJ7UHI+8pSJ68+u8HJhg5yios7ROZa1vHbw5eJ1uD + 7xlGr988f9MuCDZIPXr34s1fX2Y/GPbvcKzSw68gBt9OIMpIMA5lefOv5WSARBj51SFxMQBswte2 + 
7S6MfXzx/NUvFy1mCcbfQEmV8EJhAsHZy/nr2UZLv4sHThyhuo2n24LXXP/t5pVcl4ePubx9Nbt5 + UL/s9ddff51cfL/4H+93HVuEEQAA + headers: + Alt-Svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Content-Encoding: + - gzip + Content-Type: + - application/json; charset=UTF-8 + Date: + - Tue, 09 Dec 2025 12:07:36 GMT + Server: + - scaffolding on HTTPServer2 + Server-Timing: + - gfet4t7; dur=11068 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/packages/opentelemetry-instrumentation-google-generativeai/tests/test_generate_content.py b/packages/opentelemetry-instrumentation-google-generativeai/tests/test_generate_content.py index 7ab4b8888b..b8398df8ef 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/tests/test_generate_content.py +++ b/packages/opentelemetry-instrumentation-google-generativeai/tests/test_generate_content.py @@ -1,154 +1,17 @@ +import pytest +from unittest.mock import MagicMock +from opentelemetry.instrumentation.google_generativeai import ( + GoogleGenerativeAiInstrumentor, +) +from opentelemetry.trace import StatusCode, SpanKind +from opentelemetry.semconv_ai import ( + SpanAttributes, +) from opentelemetry.sdk._logs import LogData + from opentelemetry.semconv._incubating.attributes import ( gen_ai_attributes as GenAIAttributes, ) -from opentelemetry.semconv_ai import SpanAttributes # noqa: F401 - - -def test_gemini_generate_content_legacy( - instrument_legacy, span_exporter, log_exporter, genai_client -): - # This test is working, but since Gemini uses gRPC, - # vcr does not record it, therefore we cannot test this without - # setting the API key in a shared secret store like GitHub secrets - pass - - # genai_client.generate_content( - # "The opposite of hot is", - # ) - # spans = span_exporter.get_finished_spans() - # assert all(span.name == "gemini.generate_content" for span in spans) - - # gemini_span = spans[0] - # assert ( - # gemini_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.content"] - # == "The opposite of hot is\n" - # ) - # assert gemini_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.role"] == "user" - # assert ( - # gemini_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.content") - # == "cold\n" - # ) - # assert ( - # gemini_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.role") - # == "assistant" - # ) - - # assert gemini_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] == 5 - # assert ( - # gemini_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] - # + gemini_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] - # == gemini_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] - # ) - - # assert ( - # gemini_span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] - # == "models/gemini-1.5-flash" - # ) - # assert ( - # gemini_span.attributes[GenAIAttributes.GEN_AI_RESPONSE_MODEL] - # == "models/gemini-1.5-flash" - # ) - - # logs = log_exporter.get_finished_logs() - # assert ( - # len(logs) == 0 - # ), "Assert that it doesn't emit logs when use_legacy_attributes is True" - - -def test_gemini_generate_content_with_events_with_content( - instrument_with_content, span_exporter, log_exporter, genai_client -): - # This test is working, but since Gemini uses gRPC, - # vcr does not record it, therefore we cannot test this without - # setting the API key in a shared secret store like GitHub secrets - pass - - # 
genai_client.generate_content( - # "The opposite of hot is", - # ) - # spans = span_exporter.get_finished_spans() - # assert all(span.name == "gemini.generate_content" for span in spans) - - # gemini_span = spans[0] - - # assert gemini_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] == 5 - # assert ( - # gemini_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] - # + gemini_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] - # == gemini_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] - # ) - - # assert ( - # gemini_span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] - # == "models/gemini-1.5-flash" - # ) - # assert ( - # gemini_span.attributes[GenAIAttributes.GEN_AI_RESPONSE_MODEL] - # == "models/gemini-1.5-flash" - # ) - - # logs = log_exporter.get_finished_logs() - # assert len(logs) == 2 - - # # Validate user message Event - # user_message = {"content": "The opposite of hot is"} - # assert_message_in_logs(logs[0], "gen_ai.user.message", user_message) - - # # Validate the AI response - # ai_response = { - # "index": 0, - # "finish_reason": "STOP", - # "message": {"content": [{"text": "cold\n"}], "role": "model"}, - # } - # assert_message_in_logs(logs[1], "gen_ai.choice", ai_response) - - -def test_gemini_generate_content_with_events_with_no_content( - instrument_with_no_content, span_exporter, log_exporter, genai_client -): - # This test is working, but since Gemini uses gRPC, - # vcr does not record it, therefore we cannot test this without - # setting the API key in a shared secret store like GitHub secrets - pass - - # genai_client.generate_content( - # "The opposite of hot is", - # ) - # spans = span_exporter.get_finished_spans() - # assert all(span.name == "gemini.generate_content" for span in spans) - - # gemini_span = spans[0] - - # assert gemini_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] == 5 - # assert ( - # gemini_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] - # + gemini_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] - # == gemini_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] - # ) - - # assert ( - # gemini_span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] - # == "models/gemini-1.5-flash" - # ) - # assert ( - # gemini_span.attributes[GenAIAttributes.GEN_AI_RESPONSE_MODEL] - # == "models/gemini-1.5-flash" - # ) - - # logs = log_exporter.get_finished_logs() - # assert len(logs) == 2 - - # # Validate user message Event - # assert_message_in_logs(logs[0], "gen_ai.user.message", {}) - - # # Validate the AI response - # ai_response = { - # "index": 0, - # "finish_reason": "STOP", - # "message": {}, - # } - # assert_message_in_logs(logs[1], "gen_ai.choice", ai_response) def assert_message_in_logs(log: LogData, event_name: str, expected_content: dict): @@ -160,3 +23,45 @@ def assert_message_in_logs(log: LogData, event_name: str, expected_content: dict else: assert log.log_record.body assert dict(log.log_record.body) == expected_content + + +@pytest.fixture +def mock_instrumentor(): + instrumentor = GoogleGenerativeAiInstrumentor() + instrumentor.instrument = MagicMock() + instrumentor.uninstrument = MagicMock() + return instrumentor + + +@pytest.mark.vcr +def test_client_spans(exporter, genai_client): + genai_client.chats.create(model="gemini-2.5-flash").send_message("What is ai?") + spans = exporter.get_finished_spans() + + assert len(spans) > 0, "No spans were recorded" + + span = next( + (s for s in spans if s.name == "gemini.generate_content"), + None, + ) + assert span is not None, 
"gemini.generate_content span not found" + + assert span.kind == SpanKind.CLIENT + assert span.status.status_code == StatusCode.OK + + attrs = span.attributes + + assert attrs[SpanAttributes.LLM_SYSTEM] == "Google" + assert attrs[SpanAttributes.LLM_REQUEST_TYPE] == "completion" + assert attrs[GenAIAttributes.GEN_AI_REQUEST_MODEL] == "gemini-2.5-flash" + assert attrs[GenAIAttributes.GEN_AI_RESPONSE_MODEL] == "gemini-2.5-flash" + + assert "gen_ai.prompt.0.content" in attrs + assert attrs["gen_ai.prompt.0.role"] == "user" + + assert "gen_ai.completion.0.content" in attrs + assert attrs["gen_ai.completion.0.role"] == "assistant" + + assert attrs[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] > 0 + assert attrs[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] > 0 + assert attrs[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] > 0 From 1a178c74364fee1de8409cee7003d01348117e56 Mon Sep 17 00:00:00 2001 From: Adharsh Date: Tue, 9 Dec 2025 18:26:14 +0530 Subject: [PATCH 10/20] Added testcases for metrics --- .../tests/conftest.py | 2 + .../tests/test_generate_content.py | 69 +++++++++++++++++++ 2 files changed, 71 insertions(+) diff --git a/packages/opentelemetry-instrumentation-google-generativeai/tests/conftest.py b/packages/opentelemetry-instrumentation-google-generativeai/tests/conftest.py index 4d8ec26cca..9732e5b6d1 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/tests/conftest.py +++ b/packages/opentelemetry-instrumentation-google-generativeai/tests/conftest.py @@ -20,6 +20,8 @@ from opentelemetry.trace import set_tracer_provider from opentelemetry.sdk.trace.export import ( SimpleSpanProcessor, +) +from opentelemetry.sdk.trace.export.in_memory_span_exporter import ( InMemorySpanExporter, ) diff --git a/packages/opentelemetry-instrumentation-google-generativeai/tests/test_generate_content.py b/packages/opentelemetry-instrumentation-google-generativeai/tests/test_generate_content.py index b8398df8ef..8bf33573fe 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/tests/test_generate_content.py +++ b/packages/opentelemetry-instrumentation-google-generativeai/tests/test_generate_content.py @@ -6,6 +6,7 @@ from opentelemetry.trace import StatusCode, SpanKind from opentelemetry.semconv_ai import ( SpanAttributes, + Meters ) from opentelemetry.sdk._logs import LogData @@ -65,3 +66,71 @@ def test_client_spans(exporter, genai_client): assert attrs[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] > 0 assert attrs[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] > 0 assert attrs[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] > 0 + + +@pytest.mark.vcr +def test_generate_metrics(metrics_test_context, genai_client): + _, reader = metrics_test_context + + # ---- Trigger a generic GenAI request ---- + genai_client.chats.create(model="gemini-2.5-flash").send_message("What is ai?") + + metrics_data = reader.get_metrics_data() + resource_metrics = metrics_data.resource_metrics + + # ---- ResourceMetrics ---- + assert resource_metrics, "No ResourceMetrics emitted" + + rm = resource_metrics[0] + assert rm.scope_metrics, "No ScopeMetrics found" + + scope_metrics = rm.scope_metrics[0] + + # ---- Instrumentation scope (generic check) ---- + scope = scope_metrics.scope + assert scope.name, "Instrumentation scope name is missing" + + metrics = {m.name: m for m in scope_metrics.metrics} + + # ---- Required metrics (semantic conventions) ---- + required_metrics = { + Meters.LLM_OPERATION_DURATION, + Meters.LLM_TOKEN_USAGE, + } + assert required_metrics.issubset(metrics.keys()) + + duration_metric = 
metrics[Meters.LLM_OPERATION_DURATION] + + assert duration_metric.unit is not None + assert duration_metric.data.data_points + + duration_dp = duration_metric.data.data_points[0] + + # Minimal semantic validation + assert duration_dp.count >= 1 + assert duration_dp.sum >= 0 + + # Required attributes (values are intentionally not hard-coded) + assert SpanAttributes.LLM_SYSTEM in duration_dp.attributes + assert SpanAttributes.LLM_RESPONSE_MODEL in duration_dp.attributes + + token_metric = metrics[Meters.LLM_TOKEN_USAGE] + + assert token_metric.unit == "token" + assert token_metric.data.data_points + + token_points_by_type = { + dp.attributes.get(GenAIAttributes.GEN_AI_TOKEN_TYPE): dp + for dp in token_metric.data.data_points + } + + # Both input & output tokens must exist + assert {"input", "output"}.issubset(token_points_by_type.keys()) + + for token_type, dp in token_points_by_type.items(): + assert dp.count >= 1 + assert dp.sum >= 0 + + # Required semantic attributes + assert SpanAttributes.LLM_SYSTEM in dp.attributes + assert SpanAttributes.LLM_RESPONSE_MODEL in dp.attributes From e57ec68c84e874844c3bf037a0b4b7785fd22e83 Mon Sep 17 00:00:00 2001 From: Adharsh Date: Tue, 9 Dec 2025 18:26:50 +0530 Subject: [PATCH 11/20] Recorded the tests --- .../test_client_spans.yaml | 78 ++++++++-------- .../test_generate_metrics.yaml | 90 +++++++++++++++++++ 2 files changed, 130 insertions(+), 38 deletions(-) create mode 100644 packages/opentelemetry-instrumentation-google-generativeai/tests/cassettes/test_generate_content/test_generate_metrics.yaml diff --git a/packages/opentelemetry-instrumentation-google-generativeai/tests/cassettes/test_generate_content/test_client_spans.yaml b/packages/opentelemetry-instrumentation-google-generativeai/tests/cassettes/test_generate_content/test_client_spans.yaml index 3b8a4e851d..d986c15c48 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/tests/cassettes/test_generate_content/test_client_spans.yaml +++ b/packages/opentelemetry-instrumentation-google-generativeai/tests/cassettes/test_generate_content/test_client_spans.yaml @@ -25,42 +25,44 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAC/41Y0XLbuhF991dg9BLZQ2mSNOnk5k11nMRTO/WN1ZvOVH2AyJWEGgQYAJTEm8m/ - 9yxAUqRyb6aZxJFJYLF79uzZhb5dCDHJpSlUIQP5yVvxbzwR4lv8ye+sCWQCXnSP8LCSLpzWpj/f - Bp+xJNCRN02urhYuqI3KldTiFra0VlsyOYnp4vby6kooL6RYOysLsVGkC2E3IrdlVQdywucqri2o - UDkcLESwInckgzJbUcp8pwx5EXYyCEQhKnIb60oRpH9qH4emwlatG+Hoa60ciV1dYqka+DIXt+EZ - /FjbOgivylqnA9LK3G6NCmpPYlObPChrvNDqiYQm6QzWZaJydq2pnHmr9/FBQbnyWDkr5VNaQS6n - ijdnAnALLc22llsStSnI+cApwInK9MH7+cqszEdy9CxBRPKpsAfDAB1iZPyjJGn8W175Yi7E1dVy - R+KDlfotoF1aQUbCsQFSVqwmAZ+fVpPox2oSo8BvOFqKg2ySXSBmDwwhlbyJA6MuTJ8NvMY+hlye - QmNkncyDOKiwYwviYJ0uBogH3WSirPNdwjHC7EVhY8QvYxx/p0Zcy0qulQb28BxRL24R1cowv674 - 39VdmwAOdpFzdhOGvUPRH1dr7MczUXt+zx4N1szFcqfYPniO53uO1Iv7hJjojhDT+7vLDMAjHwBn - ax2CK33igKhkQMyIYeNsKVBKMgbPdFoT76ZjpVEDCJxB3DpZliAz+0R7cg2Ijkw5xQicwvtM0tsu - vn9G3+Nx1MWExIAVgFFWsHpUCIgEbBa0UaAsgUsm1zUT0Y8sP4z5yvbfA4+O912eI3wwFamJtXX4 - 0VLP6+jkiMyejLcIThkQOku5ho9beD697kr8t1gnl+y2r4gQzPQx/f+Z2srD69GZd13tjI7j8xFX - Tj4ixa6DamDioJT7qpt+kqF2kKTe1mDrTHy6exgf+a6tZ/DiqT3remdtz6c1eSiQrZ0nZqqMMpHY - sOe81sBSM0UkC8kYwkUhqyA7CBfFf2sfXUZ2DR2gR6GWSXU4JjJ75awpUUVJIf4S6+WjPaA+xBfr - IHzTCIxu2MoSUBqr7RZFdHlWP39I8ugEYgHPfb32FFLp/Rn1e8ZDRI0PJKOC/3+sZ5nOGD9wpEBA - atOcSomDhXqy6rD6JwQiuVMqvFhLD3PAmTMQi45NPdtzOlDLwUll4oIW7mE6qRpE/a6LGgHP+i50 - f5eksPYgrDx1MUOROYbCIaIdZQ61E1QFqdWyASfFdBcb12pS4KjV5JKzmQBjfdd07ANt9Qd/14Rs - 
cNCGmxUUISTVDztn6+3Os0QjEVOab+dZqiRe1xVJJkxL6p7mVU/qMZ1/xv4pk5/huIm9w58aEsdw - Ev4sCT2yE7JhtdFZrY0OPiv7iHrqNKNTVhNP1Hao/hSxV76G00N9j+w7tZghNOhSe7DK+nHon+3a - BpX72CBjJ+BDKRVsYjqfWu0az3ND3zqz1EsZIccmop/jeUPWwRpboth1Mzrz5oiFQTw2qI8ynvwZ - +j1L/PXpKXjt1dakEadUpcoTrceDhEgNsYki0wJN0focxcs1CxxLy41MgG1aAWrMWVEoXqUBoan+ - pJt+ks4lFZmizL6AeLw14ZQIyg6xEiDFElUWbCGbdnbqvWfwusrjSpcs6znXTkTpvBRvjpLLAaCI - R7TvDHSgo8wir6EWpkh5JrON88v0E4WNVkesK+XvaAwZuozezAqnuJFhCOTa29i8bqWhfcHrKlmi - mbEKI5llnCi1KJTcGgvBhXvW6rEyf4iM1hGSxYdbVp/HAPHddsAsxK6pLGAJ0RjWRcWorIdmeEqI - dSk7q56oBqly0L51M5pI0UEcjPBEBg4Lh1qKjSUSDZswoMHAnnSsG+nilIcD/mi2/QJrIIaxPDKy - hjcURmE+1iDQcM9PQvO1q2QM7cejmHRoTmhXPG4niY9qylKBOSQNBTzSRy1tB/k9sMla8dAI1xe2 - TLB4G/XWPymtU697PW/nIj37Egt+UXGHSe3xjM4fsSrsQIgYzrs+z4CvcPUWmfe5ZR/jcI5BS2r1 - O5cfe8XtVVSQsDEh3isjW3zeO1kXoH1I0pGdOiNTyckijv059y6ghnaK30e2lkiqr6w7tf7HcyZn - bGjDUAFqSBp7NZZTjAq2RI0/ktur5Ng10sT6lHW5EMiXYtKFcTA3rHwBlcpm48501zurPRjaypIL - P0ulDIu02SDssbl7aeoNBp/a9cNY6tt7vn8wTyJ2mfgKn7ge+GrprM6SniIxY++KOu+ReRgm6HTt - GtwoRID4xvG/1dPIl1soB8gajwWDVXfHo7KyB3KjS2R/W8m7a0fzo8T/9Ep5uij215ZY5kU7zXF/ - 6C5KxGnly22TtWNdgCGY5JGP9UP1N59WCs4rC6MCsu+a+WRw8/7ef/5PdrqvA2Xiy3hpC9Ld8u/d - gglfFvwu3TZ42ePyHw+T/i3OoSMeP7/oDoimJ7UHI+8pSJ68+u8HJhg5yios7ROZa1vHbw5eJ1uD - 7xlGr988f9MuCDZIPXr34s1fX2Y/GPbvcKzSw68gBt9OIMpIMA5lefOv5WSARBj51SFxMQBswte2 - 7S6MfXzx/NUvFy1mCcbfQEmV8EJhAsHZy/nr2UZLv4sHThyhuo2n24LXXP/t5pVcl4ePubx9Nbt5 - UL/s9ddff51cfL/4H+93HVuEEQAA + H4sIAAAAAAAC/5VY73PbNhL97r8Coy+1NZLGyTXT1l9uNLbP9dVOPJUvuenlPkDkSkREAiwAymYy + +d/7dkFSlK8/5jKdVCHAxe7bt28X/HKi1CTTNje5jhQmF+o/eKLUF/mb15yNZCMW+kd4WGsfD3vT + ny+j39gS6ZlfmixvVYiwH9TGeTWdLn00G5MZXapbWC5LsyWb0XS6+Gg/2mVUJgaVOU8zhVdNUFpt + DJW5chs8ruomklchM/wSTGZNIKxZlXnS0ditqnRWGEtBxUJHhdBUTR5nVyrqsOsex7Y2mS7LVnn6 + tTGeVNFU2GpGLi3UY4Hzjc3KJhd7MB9UaXZ0wb5OEeZ0ekfaWyxcTKdqmbEtdsJYPhEOwTMEr3xT + UkKgCbIeF72Bn0kH11v4l6ym3dHBOQSjdF1792xgjhQs5LQx1uA3UgPXAs4Ig7UH79YlVfPgyn1n + 8x8G2YVVPGrYIbHMUJb0rLICKJDd0sgE+Yxq3ike2Zy8ZJBtcCyMka89CdqBbHC+PQr4lCFS8BdW + 2eFQE2XF2XDAnbbbBouqGdvmwy7hlaeCDochE+RTYlOGyu7lwdoVZYYxmFd615spnBMgY0FqTQE0 + cI0PxBzSGfsoZJtOf3RPKnfwElR7cn6nTo1VwTA0CkFW4ezv06nwEixuQ6QK+eeEq413ldprmNaV + ayw4C9soIT1TJke9mE2rah1hxIaZRAJfLLIPw4XD3/0iJwOe44Gn3GQpQZLlFFZYoE5wspYKWBOH + Rc91iRKKoC+YsfW6qlAEzC7aE5JRuxAMaIA6Iau9cTM+vVUaPAeTzdZiO85NobBzOtd17ED5CTsv + QS1wIMjiY1uTxLe8vUhwvFow8m+19wAQ2Jzi6A+kd/h9JltYAlJ6PnC5GfwXODOpooIkpnKSmKoC + ZVCPlE5QT6RQfYwpCj26XLdAIH4TDp4Lml4b2wWtmWEZq4qU+GJ8/PWz5mzK4e+dgWZoYMOcQ3Cn + K1QrdAaFoM9mqDb2BuRLNEZViI6cvqW4Kc0zNlb6s7PYGWpdQZZKuIjsBio389wbLjgIjscrWeM9 + WIAEuZr5y8cq2+PF6XDNtuirEBazgkKY16Vu2UiX1XAUymWhPdhLHu6bjAO6Tfq2JiVp3zoHcJKC + HiEyA7QQcRUaX3P4QzFlutZrU5poCHGsG8kUF4T9JgqL4FRXguWRNjJDIT/QYdcEgBSEO6+FFiOF + v+leHSu9Ol3e3PL7q+gdPPkryixV0dYOhIms2GOmiJJ3XiJPEtK8RAmU8G0LiTR74N7Hh8r32Au2 + PKFClYeOiCHpCkwxKECDPrPmd7j+u/qYjWQqVTL0uGyP4cBmbftnlMWGPYXh5KMe4LZ/SM53KUNE + 3IQODS4pgjqlxXYxUz8u79QP5+fnSX5en5+/ugA+q1pj67u8BQztTF1BhdKGVdRePXranf01kUQG + rYsQFzxWLcUk9gxYaNafEBSjha4OxWdgPQXAw+0J2+rClC44/I9zlNMalBdK/O0lJVYNCsIc82H1 + /1GgT33KV0fq32ng4tmY4ozs3njODhKYtFKGi1nX5aWAZZDYmwgo6+NuOhu1pFLtrHsqKZdW9Gfg + Xu+R003j4b9nD1j7Nk1sPLcCuIxySIYhacxjVjlGCxKlKCZEeVkSw60F//61QVfrGn+v2atmLcF0 + oo2Waw3vA995dDmod3L0Ps1Jqh9g1On93VlCm8cvTnqg2IsyfKffa4Hc8uQA18ShLw1dCWaPwLki + qkcHXvUHHs66v0u5bRgJSw0DbSlyc06BoFmi0KCTxDprbKgxvaFqW8E1kWDNzeHs0OH6YefQkEn0 + UWgAZCSI0dACSUf/6aqdB9lFX4AbLSTmTiEC4yAPVkdxs59MOPqMRGYPM8/bbtMw+zwMm9Bg7h4E + iGvLyhOGOVeGg7H6DKPXmImRXoxGvbOY7eLaRYQzrKBt2lDq5HhgMqHfcaXrskVbPPh72Y/a72UK + 
+UPvPk4gWR8nx4Mhiiw00jEOQ6GwpZsJefMeIuxC76pLApNTpCw5V/E0xDbwShoGX7r4s0NwJhOF + uLV7lCilwSINCjNpUNE3ncHUiOUnn9+kcdC7hFBqkFAB52vXDZzgPY8XVu/NtnsTMJu6KUd2JOhh + qOx1HWdFtUrlIupeIolqNEX2JJdZf77WfIvx/U1AZkJTcfeWcI4G3K6jtTLMdpknOXA4/6YbmqHS + UvSosJdtk+eGgT6WnlR3zwMrGtb0ILzvRnloWrx5eMSY0iUwPb5a3t3Nr7mV35v8EyZsSy32aOgo + Jk48lhwfsOWWUSMnqV1y9J5rpRomzw9Fyw2HYXI4CongOe0wghc6JRi9oNNBuSPtu4uN+cyhaD9o + Oy4+DRjQpq7NJR/TmG6wpTQbXA7aiwTasomuSmn3VKPRCHppNuh2MAE/8wYZ+1kzoFldiWZuL+IO + 4hWxfwMXKeRTl+ZzN7SjKXBf6tevupFRmr00SJWbIKbMsOkWNyIn22jDLRQGUjwvaCEQ4j7Dkc+4 + deoSIK693FkHLIeGwrWB7KSKCIesr40O3PfMXmdogJ/cmn3CXJpRJfTorjIAupu+u6TKfSPn4cvV + vHMxGX0P+Dr8/u/s8BXBu5L4E0Hlcir77V/7DRO+5YYiXY952+rx3cNkWEVu6RmPz0/6A8T0pAlg + 6D1FzQkavlpMEs8e3Y7sJd8tsPIm2Rp9/Tha/v6777oN0UVdHq29+uHb89n/GA5XONaU4w8jo28m + iFJz3XIoj9f/fpyMkIhHfvVInIwAm6QbQzz28dX5m+9POswSjO/BOZPw2hL6r5m/XryZb0odCjlw + 0tfgbc575sXuW715t7r/5afdp+b+wf7zw81uuZucfD35DXc/SmwaEgAA headers: Alt-Svc: - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 @@ -69,11 +71,11 @@ interactions: Content-Type: - application/json; charset=UTF-8 Date: - - Tue, 09 Dec 2025 12:07:36 GMT + - Tue, 09 Dec 2025 12:45:46 GMT Server: - scaffolding on HTTPServer2 Server-Timing: - - gfet4t7; dur=11068 + - gfet4t7; dur=11009 Transfer-Encoding: - chunked Vary: diff --git a/packages/opentelemetry-instrumentation-google-generativeai/tests/cassettes/test_generate_content/test_generate_metrics.yaml b/packages/opentelemetry-instrumentation-google-generativeai/tests/cassettes/test_generate_content/test_generate_metrics.yaml new file mode 100644 index 0000000000..1ee31e1df6 --- /dev/null +++ b/packages/opentelemetry-instrumentation-google-generativeai/tests/cassettes/test_generate_content/test_generate_metrics.yaml @@ -0,0 +1,90 @@ +interactions: +- request: + body: '{"contents": [{"parts": [{"text": "What is ai?"}], "role": "user"}]}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, zstd + Connection: + - keep-alive + Content-Length: + - '68' + Content-Type: + - application/json + Host: + - generativelanguage.googleapis.com + user-agent: + - google-genai-sdk/1.52.0 gl-python/3.11.11 + x-goog-api-client: + - google-genai-sdk/1.52.0 gl-python/3.11.11 + x-goog-api-key: + - AIzaSyCYDUVmQyuWr6y7ADadgKF9u-DGsP0OsNA + method: POST + uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent + response: + body: + string: !!binary | + H4sIAAAAAAAC/31XTXPbNhC9+1dgdKmtkdU4Tdo0N9fOh6ZJmsZ20mndA0SuRIxBgAFAyUom/71v + F6RMOp52ph4FBBa7b9/bXXw9UGpSaFeaUieKk+fqH6wo9VX+8jfvErmED/0SFhsd0t3e/N/XwW9s + SXTLhyanCxUT7Ee18kFNp6chmZUpjLZqAcvWmjW5gqbT+bW7dqdJmRRV4QPNFI6aqLRaGbKl8iss + 102bKKhYGD6kSipNAb9LlbxatsaWxq1VrYvKOIoqVTopBKcaCri9VknHm2457RoctXanAn1uTSBV + tTW2moFT4tJ0+poC/cCOLAPpm9JvHTuzFSv8pybt4vPplHefzBWCfGtqU9ywL6/F6DBSbOTItKkj + e01OLy3tQ5O10sMwTkdlzQ09v3aM6JT/n74hHZyYKLLXxnFkOhnvFGBWobWUsW4jO5Cq0Z750NgH + 0tGLtatI3UlcjyiLSummCf7W4BgpWCtpZZxJ7KkrLGx7F0fG3gePQOrj6O1GglyU4I1Z7cQFjs87 + LETGTqsm7xaXS9oY3K9BlIDb1jt2QsyADOM7KBRksvkrVwIuppYY4byFJlBSkVz0YYcVIKoOGUKF + MNYUZ7DagoozXNAW1dHI9sDeG+3WLQ7wNQiroBjlDuSP2MOOKrbbNjLzVuO6cyqMIMQWzirvER6j + sKQIQvo24N8MQyFpW+oIBnP+NtpYoQPEqIV9j4VPr/0WUKhPPoC+hxembixEROWRsI4vX7iYSItK + lsR5p1vsKUwCwYH1Oui6xiXMC9oQ0Gl8jIavigUoGIwXvcUdzICYOrCDEL5CSqAlcW+j4b2uAWHO + ojipLqvg23XFjI15N66fcbg7ZZmtmVKFXzvzhVSjExLlkIOakULGIGGGASuSy1I3rCsyAZFUemN8 + ECSGEIu+lWiBYz18+4aRgCtwgb0A1LUXqOuacWUqM6fhCZ8F33AK4SZf6t1cLVB13IYJF9XayMce + CDall8aaJKzMAa2CryV6tTWp8mDZ/2A+Vy8Z9FuNpKGomZXa+VbFCinVfanKdtmEtmsfYBS1Coaj + 
FE5AjVqV2gD3QA+ySMc1ina6nghk1xPnuc6lGRZMrnh75E2vQnwHOE452u7NjZh7TtQMMD3PmJ6q + 2C4jZAUvvvNWyl8b4ZejNqCkO0pbISkDg/0OHNA7LmuHxsUGBatUy1wRIPZWvJAYVq3LYsA1/DUr + bMl0OnoA+LkSb/eOIOnck0zRWh0AP61WBHuoIMx4Lj6Wbrvyf1cQelryxagNDRE4Mlpj15xOElsv + d84tlwTcK8T8SSR6uWtIUnW62Isy4/pOh4Bkg26Hn9A/8OM7suLbFjFr+LunJDpOSdGsWXzsRi9E + DkizswX3UYlprl5kenGCUZxLUhtv0B81vOSSlqRqBAONAwd9NJMowU5X5s7R0/3wHaWVNbfYWOsv + 3h0xLLpGC7bcnDIgkezquAxZKIUOkYsA1K6lHfWdDqWi1/FdasRddOoENAv8gCxRdXO2jyUxHfib + XnbmHktfSQ22AudFCj4L+Ud1+moMa7VrPG5P3ORF6kzVrW8xSXDp45Le3Yp6aEdtX4JcwhXdSDFG + Utt9c5Di1vOuK1lNY3fCwqERrjVu162BjC38kOjFE90xHFqdq08k2LGK4TJ+7Wjc9y5aTDDm/hBh + ox+FOcs1OAe55DD2ga8AfWxDoznyByYdlMUAVNTGBHYUCsKfPHgh6ZlUUhZ58JIBThUYE5AnFMZZ + 1xgtVB9LX3cs8TLkxRtjbRShPBGh7KkqWuGK9IIbEliv3pgV3RPPR+Hx6Z7HHPiAyTP1yvu1HewY + zzcjkl9kkrOJTzK3+Y3JE89WJygfW/b0h8ZLlKeYB8sdf8uKQN/C8FPwwgWgR2UdZeqllqg/3BWR + PKtYn+dB1H7oocIoBGVR0QZuLJ36xiln2b3MshtOU9IkmJ+9xdZtETYKA9WYHu4ZYaWed0o909nS + Vdx3Px6MZaDqhEoOBPCuFvXiEmnRvdLLfqgZzzsyglt1bvTaeSRBqCnZkEO+SD5I4zGDCEoMfJqp + KBW97mzkKW2Mp3HaCaQvg25LlP1ERQ8rJyBlDNoo+gqYxLupqh80RubOkPelzzQ6a2PyNb8mKGyY + ZTJC5tOZsAunUCdYH/vHyJK7faY+y6FGZXvgvSH1YZZnmayGh14gQZfCkOErpHxAnLMcF5/nSzc8 + rbXwhXtAnsSYVNbw+JJH4RKhBa6ck8Gj7Nv+97+zu6dc8Jb4nVb7kmy//Vu/YcJDf6zyQ4G3XVz+ + 8X6y/4qb6BbLjw76C8T0BIPLmt5S0tyr90/HCURVN+nS35A74ykSX55mW4Mn6OjzLycn3Ybkk7aj + byfPTh7PvjMcwRDWweB1Oni4IkrNsxyHcvnir8vJAIk08qtH4mAA2IRnsnWVxj6ePPr154MOswzj + R2jWZLzWhJyZ48fzp8crq2MlF04weuE1FGlR8p7fKv9Ek7tc/P375yfHL97flMfbZ39eTQ6+HfwH + q121MJ8PAAA= + headers: + Alt-Svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Content-Encoding: + - gzip + Content-Type: + - application/json; charset=UTF-8 + Date: + - Tue, 09 Dec 2025 12:45:58 GMT + Server: + - scaffolding on HTTPServer2 + Server-Timing: + - gfet4t7; dur=11375 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 From bbdaa9ea55122c64c26fad4092f4babc73359dcd Mon Sep 17 00:00:00 2001 From: Adharsh Date: Tue, 9 Dec 2025 18:31:35 +0530 Subject: [PATCH 12/20] Addressed the suggestion --- .../cassettes/test_generate_content/test_client_spans.yaml | 2 +- .../cassettes/test_generate_content/test_generate_metrics.yaml | 2 +- .../tests/conftest.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/opentelemetry-instrumentation-google-generativeai/tests/cassettes/test_generate_content/test_client_spans.yaml b/packages/opentelemetry-instrumentation-google-generativeai/tests/cassettes/test_generate_content/test_client_spans.yaml index d986c15c48..d81c024a33 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/tests/cassettes/test_generate_content/test_client_spans.yaml +++ b/packages/opentelemetry-instrumentation-google-generativeai/tests/cassettes/test_generate_content/test_client_spans.yaml @@ -19,7 +19,7 @@ interactions: x-goog-api-client: - google-genai-sdk/1.52.0 gl-python/3.11.11 x-goog-api-key: - - AIzaSyCYDUVmQyuWr6y7ADadgKF9u-DGsP0OsNA + - DUMMY_GOOGLE_API_KEY method: POST uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent response: diff --git a/packages/opentelemetry-instrumentation-google-generativeai/tests/cassettes/test_generate_content/test_generate_metrics.yaml b/packages/opentelemetry-instrumentation-google-generativeai/tests/cassettes/test_generate_content/test_generate_metrics.yaml index 1ee31e1df6..c26b762b88 100644 --- 
a/packages/opentelemetry-instrumentation-google-generativeai/tests/cassettes/test_generate_content/test_generate_metrics.yaml +++ b/packages/opentelemetry-instrumentation-google-generativeai/tests/cassettes/test_generate_content/test_generate_metrics.yaml @@ -19,7 +19,7 @@ interactions: x-goog-api-client: - google-genai-sdk/1.52.0 gl-python/3.11.11 x-goog-api-key: - - AIzaSyCYDUVmQyuWr6y7ADadgKF9u-DGsP0OsNA + - DUMMY_GOOGLE_API_KEY method: POST uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent response: diff --git a/packages/opentelemetry-instrumentation-google-generativeai/tests/conftest.py b/packages/opentelemetry-instrumentation-google-generativeai/tests/conftest.py index 9732e5b6d1..5e83ce52e9 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/tests/conftest.py +++ b/packages/opentelemetry-instrumentation-google-generativeai/tests/conftest.py @@ -129,7 +129,7 @@ def instrument_with_no_content(tracer_provider, logger_provider): @pytest.fixture(scope="module") def vcr_config(): - return {"filter_headers": ["authorization"]} + return {"filter_headers": ["authorization", "x-goog-api-key"]} @pytest.fixture(autouse=True) From 8e70176ced40b6161594cef92ff857476e0ca017 Mon Sep 17 00:00:00 2001 From: Adharsh Date: Tue, 9 Dec 2025 18:59:06 +0530 Subject: [PATCH 13/20] Updated the suggestion --- .../tests/conftest.py | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/opentelemetry-instrumentation-google-generativeai/tests/conftest.py b/packages/opentelemetry-instrumentation-google-generativeai/tests/conftest.py index 5e83ce52e9..d02aae42ae 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/tests/conftest.py +++ b/packages/opentelemetry-instrumentation-google-generativeai/tests/conftest.py @@ -90,6 +90,7 @@ def metrics_test_context(): @pytest.fixture(scope="session", autouse=True) def clear_metrics_test_context(metrics_test_context): + yield provider, reader = metrics_test_context reader.shutdown() provider.shutdown() From 1dcce3e2a97c0cbd84f7c8c653600a3d66ed04f0 Mon Sep 17 00:00:00 2001 From: Adharsh Date: Tue, 6 Jan 2026 17:22:17 +0530 Subject: [PATCH 14/20] Done the suggested change --- .../tests/conftest.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/packages/opentelemetry-instrumentation-google-generativeai/tests/conftest.py b/packages/opentelemetry-instrumentation-google-generativeai/tests/conftest.py index d02aae42ae..595f87fef3 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/tests/conftest.py +++ b/packages/opentelemetry-instrumentation-google-generativeai/tests/conftest.py @@ -34,7 +34,7 @@ @pytest.fixture(scope="session") -def exporter(): +def exporter(metrics_test_context): exporter = InMemorySpanExporter() processor = SimpleSpanProcessor(exporter) @@ -42,11 +42,17 @@ def exporter(): provider.add_span_processor(processor) set_tracer_provider(provider) - GoogleGenerativeAiInstrumentor().instrument() + meter_provider, _ = metrics_test_context + GoogleGenerativeAiInstrumentor().instrument(meter_provider=meter_provider) return exporter +@pytest.fixture(scope="function") +def tracer_provider(): + provider = TracerProvider() + return provider + @pytest.fixture(scope="function", name="log_exporter") def fixture_log_exporter(): exporter = InMemoryLogExporter() @@ -84,7 +90,6 @@ def metrics_test_context(): reader = InMemoryMetricReader() provider = MeterProvider(metric_readers=[reader], resource=resource) metrics.set_meter_provider(provider) - 
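# NOTE on the teardown fix in PATCH 13 above: yielding from a pytest fixture
# defers everything after the yield to teardown, so the reader and provider
# now shut down after the whole session instead of before any test has run.
# The pattern in isolation (names illustrative):
#
#     @pytest.fixture(scope="session")
#     def metrics_sdk():
#         reader = InMemoryMetricReader()
#         provider = MeterProvider(metric_readers=[reader])
#         yield provider, reader   # tests run while suspended here
#         reader.shutdown()        # teardown, after the session finishes
#         provider.shutdown()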
GoogleGenerativeAiInstrumentor().instrument(meter_provider=provider) return provider, reader From e3bbde2480fb39c97b2451d7b0ea826d91ecb7d5 Mon Sep 17 00:00:00 2001 From: Adharsh Date: Tue, 6 Jan 2026 17:31:50 +0530 Subject: [PATCH 15/20] Done linting --- .../tests/conftest.py | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/opentelemetry-instrumentation-google-generativeai/tests/conftest.py b/packages/opentelemetry-instrumentation-google-generativeai/tests/conftest.py index 595f87fef3..7917844866 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/tests/conftest.py +++ b/packages/opentelemetry-instrumentation-google-generativeai/tests/conftest.py @@ -53,6 +53,7 @@ def tracer_provider(): provider = TracerProvider() return provider + @pytest.fixture(scope="function", name="log_exporter") def fixture_log_exporter(): exporter = InMemoryLogExporter() From 851e4e07b5aa10b4d9f28b9abab617eea75f47bb Mon Sep 17 00:00:00 2001 From: Adharsh Date: Mon, 12 Jan 2026 12:04:58 +0530 Subject: [PATCH 16/20] Addressed the review comments --- .../google_generativeai/__init__.py | 15 +++++++-------- .../google_generativeai/span_utils.py | 12 ++++++------ .../tests/test_generate_content.py | 8 ++++---- 3 files changed, 17 insertions(+), 18 deletions(-) diff --git a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py index 334b80a07d..3d7bc850c3 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py +++ b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py @@ -33,8 +33,7 @@ from opentelemetry.semconv_ai import ( SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY, LLMRequestTypeValues, - SpanAttributes, - Meters + SpanAttributes ) from opentelemetry.metrics import Meter, get_meter from opentelemetry.trace import SpanKind, get_tracer @@ -214,8 +213,8 @@ async def _awrap( duration_histogram.record( duration, attributes={ - GenAIAttributes.GEN_AI_SYSTEM: "Google", - GenAIAttributes.GEN_AI_RESPONSE_MODEL: llm_model + "gen_ai.provider.name": "Google", + "gen_ai.response.model": llm_model }, ) if response: @@ -285,8 +284,8 @@ def _wrap( duration_histogram.record( duration, attributes={ - GenAIAttributes.GEN_AI_SYSTEM: "Google", - GenAIAttributes.GEN_AI_RESPONSE_MODEL: llm_model + "gen_ai.provider.name": "Google", + "gen_ai.response.model": llm_model }, ) if response: @@ -311,13 +310,13 @@ def is_metrics_enabled() -> bool: def _create_metrics(meter: Meter): token_histogram = meter.create_histogram( - name=Meters.LLM_TOKEN_USAGE, + name="gen_ai.client.token.usage", unit="token", description="Measures number of input and output tokens used", ) duration_histogram = meter.create_histogram( - name=Meters.LLM_OPERATION_DURATION, + name="gen_ai.client.operation.duration", unit="s", description="GenAI operation duration", ) diff --git a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/span_utils.py b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/span_utils.py index 39d70deed7..5c4b053f91 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/span_utils.py +++ 
b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/span_utils.py @@ -473,17 +473,17 @@ def set_model_response_attributes(span, response, llm_model, token_histogram): token_histogram.record( response.usage_metadata.prompt_token_count, attributes={ - GenAIAttributes.GEN_AI_SYSTEM: "Google", - GenAIAttributes.GEN_AI_TOKEN_TYPE: "input", - GenAIAttributes.GEN_AI_RESPONSE_MODEL: llm_model, + "gen_ai.provider.name": "Google", + "gen_ai.token.type": "input", + "gen_ai.response.model": llm_model, } ) token_histogram.record( response.usage_metadata.candidates_token_count, attributes={ - GenAIAttributes.GEN_AI_SYSTEM: "Google", - GenAIAttributes.GEN_AI_TOKEN_TYPE: "output", - GenAIAttributes.GEN_AI_RESPONSE_MODEL: llm_model, + "gen_ai.provider.name": "Google", + "gen_ai.token.type": "output", + "gen_ai.response.model": llm_model, }, ) diff --git a/packages/opentelemetry-instrumentation-google-generativeai/tests/test_generate_content.py b/packages/opentelemetry-instrumentation-google-generativeai/tests/test_generate_content.py index 8bf33573fe..b1a873db84 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/tests/test_generate_content.py +++ b/packages/opentelemetry-instrumentation-google-generativeai/tests/test_generate_content.py @@ -111,8 +111,8 @@ def test_generate_metrics(metrics_test_context, genai_client): assert duration_dp.sum >= 0 # Required attributes (values are intentionally not hard-coded) - assert SpanAttributes.LLM_SYSTEM in duration_dp.attributes - assert SpanAttributes.LLM_RESPONSE_MODEL in duration_dp.attributes + assert "gen_ai.provider.name" in duration_dp.attributes + assert "gen_ai.response.model" in duration_dp.attributes token_metric = metrics[Meters.LLM_TOKEN_USAGE] @@ -132,5 +132,5 @@ def test_generate_metrics(metrics_test_context, genai_client): assert dp.sum >= 0 # Required semantic attributes - assert SpanAttributes.LLM_SYSTEM in dp.attributes - assert SpanAttributes.LLM_RESPONSE_MODEL in dp.attributes + assert "gen_ai.provider.name" in dp.attributes + assert "gen_ai.response.model" in dp.attributes From 6d332351e66dc2775e6565dc93aee60a5ec6b6e1 Mon Sep 17 00:00:00 2001 From: Adharsh Date: Mon, 12 Jan 2026 12:14:37 +0530 Subject: [PATCH 17/20] Updated the tests --- .../instrumentation/google_generativeai/__init__.py | 2 +- .../tests/test_generate_content.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py index 3d7bc850c3..21f52464a5 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py +++ b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py @@ -33,7 +33,7 @@ from opentelemetry.semconv_ai import ( SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY, LLMRequestTypeValues, - SpanAttributes + SpanAttributes, ) from opentelemetry.metrics import Meter, get_meter from opentelemetry.trace import SpanKind, get_tracer diff --git a/packages/opentelemetry-instrumentation-google-generativeai/tests/test_generate_content.py b/packages/opentelemetry-instrumentation-google-generativeai/tests/test_generate_content.py index b1a873db84..252e196b73 100644 --- 
a/packages/opentelemetry-instrumentation-google-generativeai/tests/test_generate_content.py +++ b/packages/opentelemetry-instrumentation-google-generativeai/tests/test_generate_content.py @@ -94,12 +94,12 @@ def test_generate_metrics(metrics_test_context, genai_client): # ---- Required metrics (semantic conventions) ---- required_metrics = { - Meters.LLM_OPERATION_DURATION, - Meters.LLM_TOKEN_USAGE, + "gen_ai.client.operation.duration", + "gen_ai.client.token.usage", } assert required_metrics.issubset(metrics.keys()) - duration_metric = metrics[Meters.LLM_OPERATION_DURATION] + duration_metric = metrics["gen_ai.client.operation.duration"] assert duration_metric.unit is not None assert duration_metric.data.data_points @@ -120,7 +120,7 @@ def test_generate_metrics(metrics_test_context, genai_client): assert token_metric.data.data_points token_points_by_type = { - dp.attributes.get(GenAIAttributes.GEN_AI_TOKEN_TYPE): dp + dp.attributes.get("gen_ai.token.type"): dp for dp in token_metric.data.data_points } From f07f311447aa5d9f23b101b8ba62438650cb3f85 Mon Sep 17 00:00:00 2001 From: Adharsh Date: Tue, 13 Jan 2026 20:20:00 +0530 Subject: [PATCH 18/20] Updated the review comments --- .../google_generativeai/__init__.py | 8 ++++---- .../google_generativeai/span_utils.py | 12 ++++++------ .../tests/test_generate_content.py | 16 ++++++++-------- 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py index 21f52464a5..a98a257d72 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py +++ b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py @@ -213,8 +213,8 @@ async def _awrap( duration_histogram.record( duration, attributes={ - "gen_ai.provider.name": "Google", - "gen_ai.response.model": llm_model + GenAIAttributes.GEN_AI_PROVIDER_NAME: "Google", + GenAIAttributes.GEN_AI_RESPONSE_MODEL: llm_model }, ) if response: @@ -284,8 +284,8 @@ def _wrap( duration_histogram.record( duration, attributes={ - "gen_ai.provider.name": "Google", - "gen_ai.response.model": llm_model + GenAIAttributes.GEN_AI_PROVIDER_NAME: "Google", + GenAIAttributes.GEN_AI_RESPONSE_MODEL: llm_model }, ) if response: diff --git a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/span_utils.py b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/span_utils.py index 5c4b053f91..0c92bc6840 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/span_utils.py +++ b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/span_utils.py @@ -473,17 +473,17 @@ def set_model_response_attributes(span, response, llm_model, token_histogram): token_histogram.record( response.usage_metadata.prompt_token_count, attributes={ - "gen_ai.provider.name": "Google", - "gen_ai.token.type": "input", - "gen_ai.response.model": llm_model, + GenAIAttributes.GEN_AI_PROVIDER_NAME: "Google", + GenAIAttributes.GEN_AI_TOKEN_TYPE: "input", + GenAIAttributes.GEN_AI_RESPONSE_MODEL: llm_model, } ) token_histogram.record( 
response.usage_metadata.candidates_token_count, attributes={ - "gen_ai.provider.name": "Google", - "gen_ai.token.type": "output", - "gen_ai.response.model": llm_model, + GenAIAttributes.GEN_AI_PROVIDER_NAME: "Google", + GenAIAttributes.GEN_AI_TOKEN_TYPE: "output", + GenAIAttributes.GEN_AI_RESPONSE_MODEL: llm_model, }, ) diff --git a/packages/opentelemetry-instrumentation-google-generativeai/tests/test_generate_content.py b/packages/opentelemetry-instrumentation-google-generativeai/tests/test_generate_content.py index 252e196b73..017cf04417 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/tests/test_generate_content.py +++ b/packages/opentelemetry-instrumentation-google-generativeai/tests/test_generate_content.py @@ -94,12 +94,12 @@ def test_generate_metrics(metrics_test_context, genai_client): # ---- Required metrics (semantic conventions) ---- required_metrics = { - "gen_ai.client.operation.duration", - "gen_ai.client.token.usage", + Meters.LLM_OPERATION_DURATION, + Meters.LLM_TOKEN_USAGE, } assert required_metrics.issubset(metrics.keys()) - duration_metric = metrics["gen_ai.client.operation.duration"] + duration_metric = metrics[Meters.LLM_OPERATION_DURATION] assert duration_metric.unit is not None assert duration_metric.data.data_points @@ -111,8 +111,8 @@ def test_generate_metrics(metrics_test_context, genai_client): assert duration_dp.sum >= 0 # Required attributes (values are intentionally not hard-coded) - assert "gen_ai.provider.name" in duration_dp.attributes - assert "gen_ai.response.model" in duration_dp.attributes + assert GenAIAttributes.GEN_AI_PROVIDER_NAME in duration_dp.attributes + assert GenAIAttributes.GEN_AI_RESPONSE_MODEL in duration_dp.attributes token_metric = metrics[Meters.LLM_TOKEN_USAGE] @@ -120,7 +120,7 @@ def test_generate_metrics(metrics_test_context, genai_client): assert token_metric.data.data_points token_points_by_type = { - dp.attributes.get("gen_ai.token.type"): dp + dp.attributes.get(SpanAttributes.LLM_TOKEN_TYPE): dp for dp in token_metric.data.data_points } @@ -132,5 +132,5 @@ def test_generate_metrics(metrics_test_context, genai_client): assert dp.sum >= 0 # Required semantic attributes - assert "gen_ai.provider.name" in dp.attributes - assert "gen_ai.response.model" in dp.attributes + assert GenAIAttributes.GEN_AI_PROVIDER_NAME in dp.attributes + assert GenAIAttributes.GEN_AI_RESPONSE_MODEL in dp.attributes From c19901c0f90978421d9d7488c0c2f6a4318a25d1 Mon Sep 17 00:00:00 2001 From: Adharsh Date: Tue, 13 Jan 2026 20:47:59 +0530 Subject: [PATCH 19/20] Updated the suggestions --- .../google_generativeai/__init__.py | 133 ++++++++++-------- .../tests/test_generate_content.py | 12 -- 2 files changed, 76 insertions(+), 69 deletions(-) diff --git a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py index a98a257d72..7b77cdfa2a 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py +++ b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py @@ -36,7 +36,7 @@ SpanAttributes, ) from opentelemetry.metrics import Meter, get_meter -from opentelemetry.trace import SpanKind, get_tracer +from opentelemetry.trace import SpanKind, get_tracer, StatusCode from wrapt import wrap_function_wrapper logger = 
logging.getLogger(__name__) @@ -97,7 +97,9 @@ def _build_from_streaming_response( emit_choice_events(response, event_logger) else: set_response_attributes(span, complete_response, llm_model) - set_model_response_attributes(span, last_chunk or response, llm_model, token_histogram) + set_model_response_attributes( + span, last_chunk or response, llm_model, token_histogram + ) span.end() @@ -117,7 +119,9 @@ async def _abuild_from_streaming_response( emit_choice_events(response, event_logger) else: set_response_attributes(span, complete_response, llm_model) - set_model_response_attributes(span, last_chunk if last_chunk else response, llm_model, token_histogram) + set_model_response_attributes( + span, last_chunk if last_chunk else response, llm_model, token_histogram + ) span.end() @@ -144,7 +148,9 @@ def _handle_response(span, response, llm_model, event_logger, token_histogram): def _with_tracer_wrapper(func): """Helper for providing tracer for wrapper functions.""" - def _with_tracer(tracer, event_logger, to_wrap, token_histogram, duration_histogram): + def _with_tracer( + tracer, event_logger, to_wrap, token_histogram, duration_histogram + ): def wrapper(wrapped, instance, args, kwargs): return func( tracer, @@ -174,7 +180,6 @@ async def _awrap( instance, args, kwargs, - ): """Instruments and calls every function defined in TO_WRAP.""" if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value( @@ -205,32 +210,37 @@ async def _awrap( ) start_time = time.perf_counter() _handle_request(span, args, kwargs, llm_model, event_logger) - - response = await wrapped(*args, **kwargs) - - if duration_histogram: - duration = time.perf_counter() - start_time - duration_histogram.record( - duration, - attributes={ - GenAIAttributes.GEN_AI_PROVIDER_NAME: "Google", - GenAIAttributes.GEN_AI_RESPONSE_MODEL: llm_model - }, - ) - if response: - if is_streaming_response(response): - return _build_from_streaming_response( - span, response, llm_model, event_logger, token_histogram - ) - elif is_async_streaming_response(response): - return _abuild_from_streaming_response( - span, response, llm_model, event_logger, token_histogram + try: + response = await wrapped(*args, **kwargs) + + if duration_histogram: + duration = time.perf_counter() - start_time + duration_histogram.record( + duration, + attributes={ + GenAIAttributes.GEN_AI_PROVIDER_NAME: "Google", + GenAIAttributes.GEN_AI_RESPONSE_MODEL: llm_model, + }, ) - else: - _handle_response(span, response, llm_model, event_logger, token_histogram) - - span.end() - return response + if response: + if is_streaming_response(response): + return _build_from_streaming_response( + span, response, llm_model, event_logger, token_histogram + ) + elif is_async_streaming_response(response): + return _abuild_from_streaming_response( + span, response, llm_model, event_logger, token_histogram + ) + else: + _handle_response( + span, response, llm_model, event_logger, token_histogram + ) + + span.end() + return response + except Exception as e: + span.record_exception(e) + span.set_status(StatusCode.ERROR) @_with_tracer_wrapper @@ -244,7 +254,6 @@ def _wrap( instance, args, kwargs, - ): """Instruments and calls every function defined in TO_WRAP.""" if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value( @@ -276,32 +285,37 @@ def _wrap( start_time = time.perf_counter() _handle_request(span, args, kwargs, llm_model, event_logger) - - response = wrapped(*args, **kwargs) - - if duration_histogram: - duration = time.perf_counter() - 
start_time - duration_histogram.record( - duration, - attributes={ - GenAIAttributes.GEN_AI_PROVIDER_NAME: "Google", - GenAIAttributes.GEN_AI_RESPONSE_MODEL: llm_model - }, - ) - if response: - if is_streaming_response(response): - return _build_from_streaming_response( - span, response, llm_model, event_logger, token_histogram - ) - elif is_async_streaming_response(response): - return _abuild_from_streaming_response( - span, response, llm_model, event_logger, token_histogram + try: + response = wrapped(*args, **kwargs) + + if duration_histogram: + duration = time.perf_counter() - start_time + duration_histogram.record( + duration, + attributes={ + GenAIAttributes.GEN_AI_PROVIDER_NAME: "Google", + GenAIAttributes.GEN_AI_RESPONSE_MODEL: llm_model, + }, ) - else: - _handle_response(span, response, llm_model, event_logger, token_histogram) - - span.end() - return response + if response: + if is_streaming_response(response): + return _build_from_streaming_response( + span, response, llm_model, event_logger, token_histogram + ) + elif is_async_streaming_response(response): + return _abuild_from_streaming_response( + span, response, llm_model, event_logger, token_histogram + ) + else: + _handle_response( + span, response, llm_model, event_logger, token_histogram + ) + + span.end() + return response + except Exception as e: + span.record_exception(e) + span.set_status(StatusCode.ERROR) def is_metrics_enabled() -> bool: @@ -327,7 +341,12 @@ def _create_metrics(meter: Meter): class GoogleGenerativeAiInstrumentor(BaseInstrumentor): """An instrumentor for Google Generative AI's client library.""" - def __init__(self, exception_logger=None, use_legacy_attributes=True, upload_base64_image=None): + def __init__( + self, + exception_logger=None, + use_legacy_attributes=True, + upload_base64_image=None, + ): super().__init__() Config.exception_logger = exception_logger Config.use_legacy_attributes = use_legacy_attributes diff --git a/packages/opentelemetry-instrumentation-google-generativeai/tests/test_generate_content.py b/packages/opentelemetry-instrumentation-google-generativeai/tests/test_generate_content.py index 017cf04417..573d7422fa 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/tests/test_generate_content.py +++ b/packages/opentelemetry-instrumentation-google-generativeai/tests/test_generate_content.py @@ -1,8 +1,4 @@ import pytest -from unittest.mock import MagicMock -from opentelemetry.instrumentation.google_generativeai import ( - GoogleGenerativeAiInstrumentor, -) from opentelemetry.trace import StatusCode, SpanKind from opentelemetry.semconv_ai import ( SpanAttributes, @@ -26,14 +22,6 @@ def assert_message_in_logs(log: LogData, event_name: str, expected_content: dict assert dict(log.log_record.body) == expected_content -@pytest.fixture -def mock_instrumentor(): - instrumentor = GoogleGenerativeAiInstrumentor() - instrumentor.instrument = MagicMock() - instrumentor.uninstrument = MagicMock() - return instrumentor - - @pytest.mark.vcr def test_client_spans(exporter, genai_client): genai_client.chats.create(model="gemini-2.5-flash").send_message("What is ai?") From 53da2c5c56076208a9fb727411e89367332b5d5a Mon Sep 17 00:00:00 2001 From: Adharsh Date: Tue, 13 Jan 2026 21:04:06 +0530 Subject: [PATCH 20/20] Updated the suggestions --- .../google_generativeai/__init__.py | 108 +++++++++--------- 1 file changed, 56 insertions(+), 52 deletions(-) diff --git 
a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py index 7b77cdfa2a..dea047e2f5 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py +++ b/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/__init__.py @@ -212,35 +212,37 @@ async def _awrap( _handle_request(span, args, kwargs, llm_model, event_logger) try: response = await wrapped(*args, **kwargs) - - if duration_histogram: - duration = time.perf_counter() - start_time - duration_histogram.record( - duration, - attributes={ - GenAIAttributes.GEN_AI_PROVIDER_NAME: "Google", - GenAIAttributes.GEN_AI_RESPONSE_MODEL: llm_model, - }, - ) - if response: - if is_streaming_response(response): - return _build_from_streaming_response( - span, response, llm_model, event_logger, token_histogram - ) - elif is_async_streaming_response(response): - return _abuild_from_streaming_response( - span, response, llm_model, event_logger, token_histogram - ) - else: - _handle_response( - span, response, llm_model, event_logger, token_histogram - ) - - span.end() - return response except Exception as e: span.record_exception(e) span.set_status(StatusCode.ERROR) + span.end() + raise e + + if duration_histogram: + duration = time.perf_counter() - start_time + duration_histogram.record( + duration, + attributes={ + GenAIAttributes.GEN_AI_PROVIDER_NAME: "Google", + GenAIAttributes.GEN_AI_RESPONSE_MODEL: llm_model, + }, + ) + if response: + if is_streaming_response(response): + return _build_from_streaming_response( + span, response, llm_model, event_logger, token_histogram + ) + elif is_async_streaming_response(response): + return _abuild_from_streaming_response( + span, response, llm_model, event_logger, token_histogram + ) + else: + _handle_response( + span, response, llm_model, event_logger, token_histogram + ) + + span.end() + return response @_with_tracer_wrapper @@ -287,35 +289,37 @@ def _wrap( _handle_request(span, args, kwargs, llm_model, event_logger) try: response = wrapped(*args, **kwargs) - - if duration_histogram: - duration = time.perf_counter() - start_time - duration_histogram.record( - duration, - attributes={ - GenAIAttributes.GEN_AI_PROVIDER_NAME: "Google", - GenAIAttributes.GEN_AI_RESPONSE_MODEL: llm_model, - }, - ) - if response: - if is_streaming_response(response): - return _build_from_streaming_response( - span, response, llm_model, event_logger, token_histogram - ) - elif is_async_streaming_response(response): - return _abuild_from_streaming_response( - span, response, llm_model, event_logger, token_histogram - ) - else: - _handle_response( - span, response, llm_model, event_logger, token_histogram - ) - - span.end() - return response except Exception as e: span.record_exception(e) span.set_status(StatusCode.ERROR) + span.end() + raise e + + if duration_histogram: + duration = time.perf_counter() - start_time + duration_histogram.record( + duration, + attributes={ + GenAIAttributes.GEN_AI_PROVIDER_NAME: "Google", + GenAIAttributes.GEN_AI_RESPONSE_MODEL: llm_model, + }, + ) + if response: + if is_streaming_response(response): + return _build_from_streaming_response( + span, response, llm_model, event_logger, token_histogram + ) + elif is_async_streaming_response(response): + return _abuild_from_streaming_response( + 
span, response, llm_model, event_logger, token_histogram + ) + else: + _handle_response( + span, response, llm_model, event_logger, token_histogram + ) + + span.end() + return response def is_metrics_enabled() -> bool:
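
Editor's note: the sketch below is illustrative only and is not part of the patch series. It shows how the two histograms this series introduces ("gen_ai.client.operation.duration" and "gen_ai.client.token.usage", registered in _create_metrics) can be observed in-process, wiring up an InMemoryMetricReader the same way the updated tests/conftest.py does. The generate_content call itself is elided; any google-genai request made while instrumented is assumed to suffice.

from opentelemetry import metrics
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import InMemoryMetricReader
from opentelemetry.instrumentation.google_generativeai import (
    GoogleGenerativeAiInstrumentor,
)

# Wire a reader we can poll in-process, then hand the provider to the
# instrumentor explicitly -- the same pattern as the exporter fixture above.
reader = InMemoryMetricReader()
provider = MeterProvider(metric_readers=[reader])
metrics.set_meter_provider(provider)
GoogleGenerativeAiInstrumentor().instrument(meter_provider=provider)

# ... issue a generate_content request with the google-genai client here ...

# Histogram data points carry the recorded values plus the
# gen_ai.provider.name / gen_ai.response.model attributes set by the
# instrumentation (token points additionally carry gen_ai.token.type).
data = reader.get_metrics_data()
if data is not None:
    for rm in data.resource_metrics:
        for scope in rm.scope_metrics:
            for metric in scope.metrics:
                if metric.name in (
                    "gen_ai.client.operation.duration",
                    "gen_ai.client.token.usage",
                ):
                    for dp in metric.data.data_points:
                        print(metric.name, dp.sum, dict(dp.attributes))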