1 | 1 | from unitxt.catalog import add_to_catalog
2 | 2 | from unitxt.metrics import (
3 | 3 |     MultiTurnToolCallingMetric,
 | 4 | +    ReflectionToolCallingMetric,
4 | 5 |     ReflectionToolCallingMetricSyntactic,
5 | 6 |     ToolCallingMetric,
6 | 7 |     ToolCallKeyValueExtraction,

48 | 49 |
49 | 50 | add_to_catalog(
50 | 51 |     MultiTurnToolCallingMetric(
51 |  | -        __description__="""Metric that evaluates tool call predictions for the validity with regards to the tools schema."""
 | 52 | +        __description__="""A metric that assesses tool call predictions for their conformity to the tool schema."""
52 | 53 |     ),
53 | 54 |     "metrics.tool_calling.multi_turn.validity",
54 | 55 |     overwrite=True,
55 | 56 | )
56 | 57 |
 | 58 | +add_to_catalog(
 | 59 | +    ReflectionToolCallingMetric(
 | 60 | +        __description__="""A metric that assesses tool call predictions for both syntactic correctness and semantic validity, using predefined checks combined with LLM-based evaluations. For each instance, it returns a score reflecting the overall validity of the call, as well as a breakdown of the specific checks/metrics that passed or failed, including a hallucination check, value format alignment, function selection, and agentic constraint satisfaction. Each of these checks also reports evidence from the input, an explanation describing the reflection decision, a confidence level, and a validity score in the range 1-5 (a higher score indicates a more valid call)."""
 | 61 | +    ),
 | 62 | +    "metrics.tool_calling.reflection",
 | 63 | +    overwrite=True,
 | 64 | +)
 | 65 | +
57 | 66 | add_to_catalog(
58 | 67 |     ReflectionToolCallingMetricSyntactic(
59 |  | -        __description__="""This metric evaluates whether a model's tool call outputs are structurally valid by checking their compliance with the provided tool schema. For each instance, it returns a binary score (True for valid, False for invalid), and aggregates these into a global percentage across all instances. The evaluation covers a wide range of possible issues, including nonexistent functions or parameters, incorrect parameter types, missing required parameters, values outside allowed ranges, JSON schema violations, invalid or empty API specifications, and malformed tool calls. The main reported score, overall_valid (aliased as score), reflects the proportion of calls that are fully valid, making the metric a measure of syntactic and schema-level correctness rather than semantic accuracy."""
 | 68 | +        __description__="""This metric evaluates whether a model's tool call outputs are structurally valid by checking their compliance with the provided tool schema. For each instance, it returns a binary score (True for valid, False for invalid), and aggregates these into a global percentage across all instances. The evaluation covers a wide range of possible issues, including nonexistent functions or parameters, incorrect parameter types, missing required parameters, values outside allowed ranges, JSON schema violations, invalid or empty API specifications, and malformed tool calls. The main reported score, overall_valid (aliased as score), reflects the proportion of calls that are fully valid, making the metric a measure of syntactic and schema-level correctness rather than semantic accuracy. Each result also includes an explanation describing the errors that were detected (if no errors were found, the explanation will be None)."""
60 | 69 |     ),
61 | 70 |     "metrics.tool_calling.reflection.syntactic",
62 | 71 |     overwrite=True,
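The per-instance output described for metrics.tool_calling.reflection above can be pictured roughly as follows. This is a hedged illustration only: the key names are hypothetical placeholders, not the actual field names produced by ReflectionToolCallingMetric; only the structure (an overall score plus per-check evidence, explanation, confidence, and a 1-5 validity) comes from the description.

example_reflection_result = {  # hypothetical shape, not the real output schema
    "score": 0.75,  # overall validity of this instance's tool call
    "checks": {
        "hallucination": {
            "passed": False,
            "evidence": "the call passes a 'user_id' argument that appears nowhere in the input",
            "explanation": "the argument value is not grounded in the user request or tool schema",
            "confidence": 0.9,
            "validity": 2,  # 1-5 scale, higher means more valid
        },
        # analogous entries for value format alignment, function selection,
        # and agentic constraint satisfaction
    },
}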
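Likewise, the syntactic variant's scores can be sketched as below, again with hypothetical field names inferred only from the description (a binary per-instance validity, an explanation that is None when no errors are found, and a global overall_valid percentage aliased as score).

example_syntactic_instance = {  # hypothetical shape, per instance
    "overall_valid": False,  # binary: does the call comply with the tool schema?
    "explanation": "parameter 'units' is not defined in the tool schema",  # None when valid
}
example_syntactic_global = {  # hypothetical shape, aggregated over all instances
    "overall_valid": 0.8,  # fraction of fully valid calls, also reported as "score"
}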
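Once registered, these catalog entries would typically be consumed through unitxt's standard load/evaluate flow. The sketch below is a minimal, hedged example: the card and template names and the prediction format are assumptions for illustration, and the exact way scores are accessed can vary between unitxt versions.

from unitxt.api import evaluate, load_dataset

# Hypothetical card/template names -- substitute a real tool-calling card from the catalog.
dataset = load_dataset(
    card="cards.my_tool_calling_card",
    template="templates.my_tool_calling_template",
    metrics=["metrics.tool_calling.reflection"],  # catalog entry added in this diff
    split="test",
)

# One predicted tool call per test instance (the expected format depends on the task).
predictions = ['{"name": "get_weather", "arguments": {"city": "Paris"}}']

results = evaluate(predictions=predictions, data=dataset)

# Depending on the unitxt version, aggregate scores are exposed via
# results.global_scores or results[0]["score"]["global"].
print(results)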