Skip to content
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion packages/traceloop-sdk/.flake8
Original file line number Diff line number Diff line change
Expand Up @@ -9,4 +9,6 @@ exclude =
.venv,
.pytest_cache
max-line-length = 120
per-file-ignores = __init__.py:F401
per-file-ignores =
__init__.py:F401
traceloop/sdk/evaluators_generated/*.py:E501
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
traceloop/sdk/evaluators_generated/*.py:E501
traceloop/sdk/generated/**/*.py:E501

456 changes: 286 additions & 170 deletions packages/traceloop-sdk/poetry.lock

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions packages/traceloop-sdk/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,7 @@ mypy = "^1.18.2"
types-requests = "^2.31.0"
types-colorama = "^0.4.15"
pandas-stubs = "*"
datamodel-code-generator = "^0.26.0"

[tool.poetry.group.test.dependencies]
openai = "^1.31.1"
Expand Down
24 changes: 24 additions & 0 deletions packages/traceloop-sdk/traceloop/sdk/evaluator/evaluator.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import httpx
from typing import Dict, Optional, Any, List
from pydantic import ValidationError
from .field_mapping import normalize_task_output, get_field_suggestions, format_field_help

from .model import (
Expand All @@ -11,6 +12,25 @@
)
from .stream_client import SSEClient
from .config import EvaluatorDetails
from ..evaluators_generated import get_request_model


def _validate_evaluator_input(slug: str, input: Dict[str, str]) -> None:
    """Validate *input* against the generated request model for *slug*, if one exists.

    Evaluators without a generated request model are accepted as-is — validation
    is best-effort and only applies when the registry knows the slug.

    Args:
        slug: The evaluator slug (e.g., "pii-detector").
        input: Mapping of input field names to their values.

    Raises:
        ValueError: If the input fails pydantic validation against the
            evaluator's request model.
    """
    model_cls = get_request_model(slug)
    if not model_cls:
        # Unknown slug or no generated model — nothing to validate against.
        return
    try:
        model_cls(**input)
    except ValidationError as e:
        raise ValueError(f"Invalid input for '{slug}': {e}") from e


class Evaluator:
Expand Down Expand Up @@ -94,6 +114,8 @@ async def run_experiment_evaluator(
Returns:
ExecutionResponse: The evaluation result from SSE stream
"""
_validate_evaluator_input(evaluator_slug, input)

request = self._build_evaluator_request(
task_id, experiment_id, experiment_run_id, input, evaluator_version, evaluator_config
)
Expand Down Expand Up @@ -136,6 +158,8 @@ async def trigger_experiment_evaluator(
Returns:
str: The execution_id that can be used to check results later
"""
_validate_evaluator_input(evaluator_slug, input)

request = self._build_evaluator_request(
task_id, experiment_id, experiment_run_id, input, evaluator_version, evaluator_config
)
Expand Down
21 changes: 20 additions & 1 deletion packages/traceloop-sdk/traceloop/sdk/evaluator/model.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,9 @@
import datetime
from typing import Dict, Any, Optional
from typing import Dict, Any, Optional, TypeVar, Type
from pydantic import BaseModel, RootModel

T = TypeVar('T', bound=BaseModel)


class InputExtractor(BaseModel):
source: str
Expand Down Expand Up @@ -42,3 +44,20 @@ class ExecutionResponse(BaseModel):

execution_id: str
result: Dict[str, Any]

def typed_result(self, model: Type[T]) -> T:
    """Materialize the raw ``result`` dict as an instance of *model*.

    The untyped ``result`` payload is unpacked as keyword arguments into the
    given Pydantic model class, so callers get attribute access, validation,
    and IDE autocomplete instead of string-keyed dict lookups.

    Args:
        model: The Pydantic model class the result should be parsed into.

    Returns:
        An instance of *model* built from ``self.result``.

    Example:
        from traceloop.sdk.evaluators_generated import PIIDetectorResponse
        result = await evaluator.run_experiment_evaluator(...)
        pii = result.typed_result(PIIDetectorResponse)
        print(pii.has_pii)  # IDE autocomplete works!
    """
    payload = self.result
    return model(**payload)
162 changes: 162 additions & 0 deletions packages/traceloop-sdk/traceloop/sdk/evaluators_generated/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,162 @@
# generated by datamodel-codegen
# Models for v2/evaluators/execute endpoints from OpenAPI spec
#
# DO NOT EDIT MANUALLY - Regenerate with:
# ./scripts/generate-models.sh /path/to/swagger.json

from .request import (
AgentEfficiencyRequest,
AgentFlowQualityRequest,
AgentGoalAccuracyRequest,
AgentGoalCompletenessRequest,
AgentToolErrorDetectorRequest,
AnswerCompletenessRequest,
AnswerCorrectnessRequest,
AnswerRelevancyRequest,
CharCountRatioRequest,
CharCountRequest,
ContextRelevanceRequest,
ConversationQualityRequest,
FaithfulnessRequest,
InstructionAdherenceRequest,
IntentChangeRequest,
JSONValidatorRequest,
PIIDetectorRequest,
PerplexityRequest,
PlaceholderRegexRequest,
ProfanityDetectorRequest,
PromptInjectionRequest,
PromptPerplexityRequest,
RegexValidatorRequest,
SQLValidatorRequest,
SecretsDetectorRequest,
SemanticSimilarityRequest,
SexismDetectorRequest,
ToneDetectionRequest,
TopicAdherenceRequest,
ToxicityDetectorRequest,
UncertaintyDetectorRequest,
WordCountRatioRequest,
WordCountRequest,
)

from .registry import (
REQUEST_MODELS,
RESPONSE_MODELS,
get_request_model,
get_response_model,
)

from .response import (
AgentEfficiencyResponse,
AgentFlowQualityResponse,
AgentGoalAccuracyResponse,
AgentGoalCompletenessResponse,
AgentToolErrorDetectorResponse,
AnswerCompletenessResponse,
AnswerCorrectnessResponse,
AnswerRelevancyResponse,
CharCountRatioResponse,
CharCountResponse,
ContextRelevanceResponse,
ConversationQualityResponse,
ErrorResponse,
FaithfulnessResponse,
InstructionAdherenceResponse,
IntentChangeResponse,
JSONValidatorResponse,
PIIDetectorResponse,
PerplexityResponse,
PlaceholderRegexResponse,
ProfanityDetectorResponse,
PromptInjectionResponse,
PromptPerplexityResponse,
RegexValidatorResponse,
SQLValidatorResponse,
SecretsDetectorResponse,
SemanticSimilarityResponse,
SexismDetectorResponse,
ToneDetectionResponse,
TopicAdherenceResponse,
ToxicityDetectorResponse,
UncertaintyDetectorResponse,
WordCountRatioResponse,
WordCountResponse,
)

# NOTE(review): this file is generated (see header) — regenerate via
# ./scripts/generate-models.sh rather than editing this list by hand.
# Names are grouped (registry, requests, responses) and kept alphabetical
# within each group.
__all__ = [
    # Registry functions
    "REQUEST_MODELS",
    "RESPONSE_MODELS",
    "get_request_model",
    "get_response_model",
    # Evaluator request models
    "AgentEfficiencyRequest",
    "AgentFlowQualityRequest",
    "AgentGoalAccuracyRequest",
    "AgentGoalCompletenessRequest",
    "AgentToolErrorDetectorRequest",
    "AnswerCompletenessRequest",
    "AnswerCorrectnessRequest",
    "AnswerRelevancyRequest",
    "CharCountRatioRequest",
    "CharCountRequest",
    "ContextRelevanceRequest",
    "ConversationQualityRequest",
    "FaithfulnessRequest",
    "InstructionAdherenceRequest",
    "IntentChangeRequest",
    "JSONValidatorRequest",
    "PIIDetectorRequest",
    "PerplexityRequest",
    "PlaceholderRegexRequest",
    "ProfanityDetectorRequest",
    "PromptInjectionRequest",
    "PromptPerplexityRequest",
    "RegexValidatorRequest",
    "SQLValidatorRequest",
    "SecretsDetectorRequest",
    "SemanticSimilarityRequest",
    "SexismDetectorRequest",
    "ToneDetectionRequest",
    "TopicAdherenceRequest",
    "ToxicityDetectorRequest",
    "UncertaintyDetectorRequest",
    "WordCountRatioRequest",
    "WordCountRequest",
    # Evaluator response models
    "AgentEfficiencyResponse",
    "AgentFlowQualityResponse",
    "AgentGoalAccuracyResponse",
    "AgentGoalCompletenessResponse",
    "AgentToolErrorDetectorResponse",
    "AnswerCompletenessResponse",
    "AnswerCorrectnessResponse",
    "AnswerRelevancyResponse",
    "CharCountRatioResponse",
    "CharCountResponse",
    "ContextRelevanceResponse",
    "ConversationQualityResponse",
    "ErrorResponse",
    "FaithfulnessResponse",
    "InstructionAdherenceResponse",
    "IntentChangeResponse",
    "JSONValidatorResponse",
    "PIIDetectorResponse",
    "PerplexityResponse",
    "PlaceholderRegexResponse",
    "ProfanityDetectorResponse",
    "PromptInjectionResponse",
    "PromptPerplexityResponse",
    "RegexValidatorResponse",
    "SQLValidatorResponse",
    "SecretsDetectorResponse",
    "SemanticSimilarityResponse",
    "SexismDetectorResponse",
    "ToneDetectionResponse",
    "TopicAdherenceResponse",
    "ToxicityDetectorResponse",
    "UncertaintyDetectorResponse",
    "WordCountRatioResponse",
    "WordCountResponse",
]
Loading