Skip to content

Commit 2d532be

Browse files
singankit and nick863 authored
PR to unblock release (#3453)
# Description - Gets trace destination from pf config taking into consideration if override is provided in evaluate API - Token is fetched from ml_client since token cache change is not released with pf azure. # All Promptflow Contribution checklist: - [ ] **The pull request does not introduce [breaking changes].** - [ ] **CHANGELOG is updated for new features, bug fixes or other significant changes.** - [ ] **I have read the [contribution guidelines](../CONTRIBUTING.md).** - [ ] **Create an issue and link to the pull request to get dedicated review from promptflow team. Learn more: [suggested workflow](../CONTRIBUTING.md#suggested-workflow).** ## General Guidelines and Best Practices - [ ] Title of the pull request is clear and informative. - [ ] There are a small number of commits, each of which have an informative message. This means that previously merged commits do not appear in the history of the PR. For more information on cleaning up the commits in your PR, [see this page](https://github.com/Azure/azure-powershell/blob/master/documentation/development-docs/cleaning-up-commits.md). ### Testing Guidelines - [ ] Pull request includes test coverage for the included changes. --------- Co-authored-by: nick863 <[email protected]>
1 parent 00c720f commit 2d532be

File tree

6 files changed

+1530
-58
lines changed

6 files changed

+1530
-58
lines changed

src/promptflow-evals/promptflow/evals/evaluate/_eval_run.py

+2-3
Original file line numberDiff line numberDiff line change
@@ -249,8 +249,7 @@ def get_metrics_url(self):
249249
def _get_token(self):
250250
# We have to use lazy import because promptflow.azure
251251
# is an optional dependency.
252-
from promptflow.azure._utils._token_cache import ArmTokenCache
253-
return ArmTokenCache().get_token(self._ml_client._credential)
252+
return self._ml_client._credential.get_token(EvalRun._SCOPE)
254253

255254
def request_with_retry(
256255
self, url: str, method: str, json_dict: Dict[str, Any], headers: Optional[Dict[str, str]] = None
@@ -271,7 +270,7 @@ def request_with_retry(
271270
if headers is None:
272271
headers = {}
273272
headers["User-Agent"] = f"promptflow/{VERSION}"
274-
headers["Authorization"] = f"Bearer {self._get_token()}"
273+
headers["Authorization"] = f"Bearer {self._get_token().token}"
275274
retry = Retry(
276275
total=EvalRun._MAX_RETRIES,
277276
connect=EvalRun._MAX_RETRIES,

src/promptflow-evals/promptflow/evals/evaluate/_evaluate.py

+7-3
Original file line numberDiff line numberDiff line change
@@ -19,8 +19,8 @@
1919
from ._utils import (
2020
_apply_column_mapping,
2121
_log_metrics_and_instance_results,
22-
_trace_destination_from_project_scope,
2322
_write_output,
23+
_trace_destination_from_project_scope,
2424
)
2525

2626

@@ -377,7 +377,6 @@ def _evaluate(
377377
output_path: Optional[str] = None,
378378
**kwargs,
379379
):
380-
trace_destination = _trace_destination_from_project_scope(azure_ai_project) if azure_ai_project else None
381380

382381
input_data_df = _validate_and_load_data(target, data, evaluators, output_path, azure_ai_project, evaluation_name)
383382

@@ -389,9 +388,13 @@ def _evaluate(
389388

390389
# Target Run
391390
pf_client = PFClient(
392-
config={"trace.destination": trace_destination} if trace_destination else None,
391+
config={
392+
"trace.destination": _trace_destination_from_project_scope(azure_ai_project)} if azure_ai_project else None,
393393
user_agent=USER_AGENT,
394394
)
395+
396+
trace_destination = pf_client._config.get_trace_destination()
397+
395398
target_run = None
396399

397400
target_generated_columns = set()
@@ -437,6 +440,7 @@ def _evaluate(
437440
column_mapping=evaluator_config.get(evaluator_name, evaluator_config.get("default", None)),
438441
data=input_data_df if isinstance(batch_run_client, CodeClient) else data,
439442
stream=True,
443+
name=kwargs.get("_run_name"),
440444
)
441445

442446
# get_details needs to be called within BatchRunContext scope in order to have user agent populated

src/promptflow-evals/tests/evals/conftest.py

+8
Original file line numberDiff line numberDiff line change
@@ -136,6 +136,14 @@ def mock_trace_destination_to_cloud(project_scope: dict):
136136
yield
137137

138138

139+
@pytest.fixture
140+
def mock_validate_trace_destination():
141+
"""Mock validate trace destination config to use in unit tests."""
142+
143+
with patch("promptflow._sdk._tracing.TraceDestinationConfig.validate", return_value=None):
144+
yield
145+
146+
139147
@pytest.fixture
140148
def azure_ml_client(project_scope: Dict):
141149
"""The fixture, returning MLClient"""

src/promptflow-evals/tests/evals/e2etests/test_metrics_upload.py

+14-2
Original file line numberDiff line numberDiff line change
@@ -132,7 +132,8 @@ def test_log_artifact(self, setup_data, caplog, tmp_path):
132132
self._assert_no_errors_for_module(caplog.records, EvalRun.__module__)
133133

134134
@pytest.mark.skipif(
135-
condition=not is_live(), reason="promptflow run create files with random names, which cannot be recorded."
135+
condition=not is_live(),
136+
reason="promptflow run create files with random names, which cannot be recorded. See work item 3305909."
136137
)
137138
@pytest.mark.usefixtures("vcr_recording")
138139
def test_e2e_run_target_fn(self, caplog, project_scope, questions_answers_file):
@@ -164,6 +165,11 @@ def test_e2e_run_target_fn(self, caplog, project_scope, questions_answers_file):
164165
)
165166
self._assert_no_errors_for_module(caplog.records, (ev_utils.__name__, EvalRun.__module__))
166167

168+
@pytest.mark.skipif(
169+
condition=not is_live(),
170+
reason="promptflow run create files with random names, which cannot be recorded. See work item 3305909."
171+
)
172+
@pytest.mark.usefixtures("vcr_recording")
167173
def test_e2e_run(self, caplog, project_scope, questions_answers_file):
168174
"""Test evaluation run logging."""
169175
# Make sure that the URL ending in TraceSessions is in the recording, it is not always being recorded.
@@ -180,5 +186,11 @@ def test_e2e_run(self, caplog, project_scope, questions_answers_file):
180186
# captured by caplog. Here we will skip this logger to capture logs.
181187
logger.parent = logging.root
182188
f1_score_eval = F1ScoreEvaluator()
183-
evaluate(data=questions_answers_file, evaluators={"f1": f1_score_eval}, azure_ai_project=project_scope)
189+
# We need the deterministic name of a run, however it cannot be recorded
190+
# into database more then once or the database may be non writable.
191+
# By this reason we will cancel writing to database by mocking it.
192+
# Please uncomment this line for the local tests
193+
# with patch('promptflow._sdk.entities._run.Run._dump'):
194+
evaluate(data=questions_answers_file, evaluators={"f1": f1_score_eval}, azure_ai_project=project_scope,
195+
_run_name="eval_test_run4",)
184196
self._assert_no_errors_for_module(caplog.records, (ev_utils.__name__, EvalRun.__module__))

src/promptflow-evals/tests/evals/unittests/test_evaluate.py

+17
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
from promptflow.evals.evaluate._evaluate import _apply_target_to_data, _rename_columns_conditionally
1515
from promptflow.evals.evaluate._utils import _apply_column_mapping
1616
from promptflow.evals.evaluators import F1ScoreEvaluator, GroundednessEvaluator
17+
from promptflow.evals.evaluate._utils import _trace_destination_from_project_scope
1718

1819

1920
def _get_file(name):
@@ -398,3 +399,19 @@ def test_evaluate_main_entry_guard(self, mock_evaluate, evaluate_test_data_jsonl
398399
)
399400

400401
assert "Please ensure the evaluate API is properly guarded with the '__main__' block" in exc_info.value.args[0]
402+
403+
def test_get_trace_destination(self, mock_validate_trace_destination, mock_project_scope):
404+
pf_client = PFClient()
405+
trace_destination_without_override = pf_client._config.get_trace_destination()
406+
407+
pf_client = PFClient(
408+
config={
409+
"trace.destination": _trace_destination_from_project_scope(mock_project_scope)
410+
if mock_project_scope else None
411+
}
412+
)
413+
414+
trace_destination_with_override = pf_client._config.get_trace_destination()
415+
416+
assert trace_destination_with_override != trace_destination_without_override
417+
assert trace_destination_with_override == _trace_destination_from_project_scope(mock_project_scope)

0 commit comments

Comments
 (0)