2 changes: 2 additions & 0 deletions python/fate_test/scripts/_utils.py
@@ -98,6 +98,8 @@ def _find_testsuite_files(path):
d = DATA_LOAD_HOOK.hook(d)
dataset.append(Data.load(d, suite_path, for_upload=False))
suite.dataset = dataset
# add loader_conf
suite.loader_conf = suite_config.get("loader_conf", "")
# add job status
suite_status = {}
for pair in suite.pairs:
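The new `suite.loader_conf` field simply records whatever string the testsuite config declares, defaulting to `""`. Below is a minimal sketch of how a downstream consumer might resolve and parse it; the `load_loader_conf` helper, the JSON format, and resolving a relative path against the suite file's directory are assumptions for illustration, not code from this PR.

import json
from pathlib import Path


def load_loader_conf(suite):
    # Hypothetical helper (not part of this PR): return the parsed loader
    # configuration referenced by suite.loader_conf, or {} when the suite
    # config omitted the optional "loader_conf" key (the "" default above).
    if not suite.loader_conf:
        return {}
    # Assumption: a relative value is resolved against the suite file's directory.
    conf_path = Path(suite.path).parent / suite.loader_conf
    with conf_path.open() as f:
        return json.load(f)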
50 changes: 41 additions & 9 deletions python/fate_test/scripts/llmsuite_cli.py
@@ -15,6 +15,7 @@
#

import os
import tarfile
import time
import uuid
from datetime import timedelta
@@ -31,6 +32,7 @@
from fate_test.utils import extract_job_status



@click.command("llmsuite")
@click.option('-i', '--include', required=False, type=click.Path(exists=True), multiple=True,
metavar="<include>",
@@ -141,6 +143,8 @@ def run_llmsuite(ctx, include, exclude, algorithm_suite, glob, provider, task_co
echo.stdout_newline()
suite_file = str(suite.path).split("/")[-1]
record_non_success_jobs(suite, suite_file)
# Add pretty_final_summary
#echo.echo(suite.pretty_final_summary(time_consuming, suite_file))
non_success_summary()
echo.farewell()
echo.echo(f"llmsuite namespace: {namespace}", fg='red')
@@ -183,7 +187,6 @@ def _raise(err_msg, status="failed", job_id=None, event=None, time_elapsed=None)
param = Config.load_from_file(conf_path)
mod = _load_module_from_script(script_path)
input_params = signature(mod.main).parameters

try:
# pipeline should return pretrained model path
pretrained_model_path = _run_mod(mod, input_params, config, param,
@@ -213,14 +216,43 @@ def _raise(err_msg, status="failed", job_id=None, event=None, time_elapsed=None)
if job.model_task_name:
model_task_name = job.model_task_name
from lm_eval.utils import apply_template
peft_path = apply_template(job.peft_path_format,
{"fate_base": config.fate_base,
"job_id": job_id[0],
"party_id": guest_party_id,
"model_task_name": model_task_name}
)
job.peft_path = peft_path
echo.echo(f"Evaluating job: {job.job_name} with tasks: {job.tasks}")
if job.requires_untar:
requires_untar = apply_template(job.requires_untar,
{"fate_base": config.fate_base,
"job_id": job_id[0],
"party_id": guest_party_id,
"model_task_name": model_task_name}
)
job.requires_untar = requires_untar

current_dir = job.requires_untar
tar_file_name = "output_model"
tar_file_path = os.path.join(current_dir, tar_file_name)

output_dir = os.path.join(current_dir, "output_models")
os.makedirs(output_dir, exist_ok=True)

try:
with tarfile.open(tar_file_path, "r") as tar_ref:
tar_ref.extractall(output_dir)
except Exception as e:
print(f"解压失败: {e}")
if job.peft_path_format:
peft_path = apply_template(job.peft_path_format,
{"fate_base": config.fate_base,
"job_id": job_id[0],
"party_id": guest_party_id,
"model_task_name": model_task_name}
)
job.peft_path = peft_path
if job.model_weights_format:
model_weights_format = apply_template(job.model_weights_format,
{"fate_base": config.fate_base,
"job_id": job_id[0],
"party_id": guest_party_id,
"model_task_name": model_task_name}
)
job.model_weights_format = model_weights_format
result = run_job_eval(job, eval_conf)
job_results[job_name] = result
except Exception as e:
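The hunk above unpacks the `output_model` archive with a plain `tarfile.extractall()`, which by itself does not guard against archive members that resolve outside the target directory (newer Python releases add a `filter=` argument to harden this). Below is a hedged sketch of a stricter variant that reuses the same `tar_file_path`/`output_dir` values; `safe_extract` is a hypothetical helper name, not code from this PR.

import os
import tarfile


def safe_extract(tar_file_path, output_dir):
    # Sketch only: reject members whose resolved path escapes output_dir
    # (path traversal), then extract as the PR does.
    os.makedirs(output_dir, exist_ok=True)
    root = os.path.realpath(output_dir)
    with tarfile.open(tar_file_path, "r") as tar:
        for member in tar.getmembers():
            target = os.path.realpath(os.path.join(output_dir, member.name))
            if target != root and not target.startswith(root + os.sep):
                raise RuntimeError(f"unsafe path in archive: {member.name}")
        tar.extractall(output_dir)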
6 changes: 5 additions & 1 deletion python/fate_test/scripts/suite_cli.py
@@ -81,6 +81,7 @@ def run_suite(ctx, include, exclude, glob,
echo.echo(f"testsuite namespace: {namespace}", fg='red')
echo.echo("loading testsuites:")
suites = _load_testsuites(includes=include, excludes=exclude, glob=glob, provider=provider)
echo.echo(f"suites: {suites}", fg='blue')
for suite in suites:
_config.jobs_num += len(suite.pipeline_jobs)
echo.echo(f"\tdataset({len(suite.dataset)}) "
@@ -91,7 +92,7 @@
echo.stdout_newline()
# with Clients(config_inst) as client:
client = Clients(config_inst)

echo.echo(f"client: {client}", fg='blue')
for i, suite in enumerate(suites):
# noinspection PyBroadException
try:
@@ -109,14 +110,17 @@
os.environ['enable_pipeline_job_info_callback'] = '1'
try:
time_consuming = _run_pipeline_jobs(config_inst, suite, namespace, data_namespace_mangling, client)
#echo.echo(f"time_consuming: {time_consuming}", fg='blue')
except Exception as e:
raise RuntimeError(f"exception occur while running pipeline jobs for {suite.path}") from e

if not skip_data and clean_data:
_delete_data(client, suite)
echo.echo(f"[{i + 1}/{len(suites)}]elapse {timedelta(seconds=int(time.time() - start))}", fg='red')
if not skip_jobs:
echo.echo(f"time_consuming = {time_consuming}")
suite_file = str(suite.path).split("/")[-1]
echo.echo(f"suite_file = {suite_file}")
echo.echo(suite.pretty_final_summary(time_consuming, suite_file))

except Exception: