 from datetime import datetime
 from functools import lru_cache
 from glob import glob
+import urllib.parse
 
 import pandas as pd
 from jinja2 import Environment, FileSystemLoader
@@ -478,11 +479,15 @@ def format_test_name_for_linewrap(text: str) -> str:
 
 def format_test_status(text: str) -> str:
     """Format the test status for better readability."""
-    color = (
-        "red"
-        if text.lower().startswith("fail")
-        else "orange" if text.lower() in ("error", "broken", "pending") else "green"
-    )
+    if text.lower().startswith("fail"):
+        color = "red"
+    elif text.lower() == "skipped":
+        color = "grey"
+    elif text.lower() in ("success", "ok", "passed", "pass"):
+        color = "green"
+    else:
+        color = "orange"
+
     return f'<span style="font-weight: bold; color: {color}">{text}</span>'
 
 
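As a quick, illustrative sanity check of the new branching — a standalone mirror of the logic above for this note, not code from the PR itself:

def _expected_color(text: str) -> str:
    # Mirrors the branching introduced above, for illustration only.
    if text.lower().startswith("fail"):
        return "red"
    if text.lower() == "skipped":
        return "grey"
    if text.lower() in ("success", "ok", "passed", "pass"):
        return "green"
    return "orange"

# "error", "broken", and "pending" all fall through to orange.
assert _expected_color("FAILED") == "red"
assert _expected_color("skipped") == "grey"
assert _expected_color("OK") == "green"
assert _expected_color("pending") == "orange"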
@@ -510,6 +515,101 @@ def format_results_as_html_table(results) -> str:
     return html
 
 
+def backfill_skipped_statuses(
+    job_statuses: pd.DataFrame, pr_number: int, branch: str, commit_sha: str
+):
+    """
+    Fill in the job statuses for skipped jobs.
+    """
+
+    if pr_number == 0:
+        ref_param = f"REF={branch}"
+        workflow_name = "MasterCI"
+    else:
+        ref_param = f"PR={pr_number}"
+        workflow_name = "PR"
+
+    status_file = f"result_{workflow_name.lower()}.json"
+    s3_path = f"https://{S3_BUCKET}.s3.amazonaws.com/{ref_param.replace('=', 's/')}/{commit_sha}/{status_file}"
+    response = requests.get(s3_path)
+
+    if response.status_code != 200:
+        return job_statuses
+
+    status_data = response.json()
+    skipped_jobs = []
+    for job in status_data["results"]:
+        if job["status"] == "skipped" and len(job["links"]) > 0:
+            skipped_jobs.append(
+                {
+                    "job_name": job["name"],
+                    "job_status": job["status"],
+                    "message": job["info"],
+                    "results_link": job["links"][0],
+                }
+            )
+
+    return pd.concat([job_statuses, pd.DataFrame(skipped_jobs)], ignore_index=True)
+
+
+def get_build_report_links(
+    job_statuses: pd.DataFrame, pr_number: int, branch: str, commit_sha: str
+):
+    """
+    Get the build report links for the given PR number, branch, and commit SHA.
+
+    First checks if a build job submitted a success or skipped status.
+    If not available, it guesses the links.
+    """
+    build_job_names = [
+        "Build (amd_release)",
+        "Build (arm_release)",
+        "Docker server image",
+        "Docker keeper image",
+    ]
+    build_report_links = {}
+
+    for job in job_statuses.itertuples():
+        if (
+            job.job_name in build_job_names
+            and job.job_status
+            in (
+                "success",
+                "skipped",
+            )
+            and job.results_link
+        ):
+            build_report_links[job.job_name] = job.results_link
+
+    if 0 < len(build_report_links) < len(build_job_names):
+        # Only have some of the build jobs, guess the rest.
+        # (It was straightforward to force the build jobs to always appear in the cache,
+        # however doing the same for the docker image jobs is difficult.)
+        ref_job, ref_link = list(build_report_links.items())[0]
+        link_template = ref_link.replace(
+            urllib.parse.quote(ref_job, safe=""), "{job_name}"
+        )
+        for job in build_job_names:
+            if job not in build_report_links:
+                build_report_links[job] = link_template.format(job_name=job)
+        return build_report_links
+
+    # No cache or build result was found, guess the links
+    if pr_number == 0:
+        ref_param = f"REF={branch}"
+        workflow_name = "MasterCI"
+    else:
+        ref_param = f"PR={pr_number}"
+        workflow_name = "PR"
+
+    build_report_link_base = f"https://{S3_BUCKET}.s3.amazonaws.com/json.html?{ref_param}&sha={commit_sha}&name_0={urllib.parse.quote(workflow_name, safe='')}"
+    build_report_links = {
+        job_name: f"{build_report_link_base}&name_1={urllib.parse.quote(job_name, safe='')}"
+        for job_name in build_job_names
+    }
+    return build_report_links
+
+
 def parse_args() -> argparse.Namespace:
     parser = argparse.ArgumentParser(description="Create a combined CI report.")
     parser.add_argument(  # Need the full URL rather than just the ID to query the databases
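The two helpers added above build S3 URLs from string templates; the sketch below shows the URL shapes they end up producing, using placeholder bucket, PR, and SHA values (assumptions for illustration, not values taken from this PR):

import urllib.parse

S3_BUCKET = "example-ci-bucket"  # placeholder; the real constant is defined elsewhere in the module
pr_number, commit_sha = 123, "abcdef0"
ref_param = f"PR={pr_number}"  # "REF=<branch>" on master builds

# backfill_skipped_statuses: "PR=123" becomes the "PRs/123" prefix of the status file path.
status_url = (
    f"https://{S3_BUCKET}.s3.amazonaws.com/"
    f"{ref_param.replace('=', 's/')}/{commit_sha}/result_pr.json"
)
print(status_url)
# -> https://example-ci-bucket.s3.amazonaws.com/PRs/123/abcdef0/result_pr.json

# get_build_report_links fallback: one json.html link per build job, with the
# workflow and job names URL-encoded into the name_0/name_1 query parameters.
base = (
    f"https://{S3_BUCKET}.s3.amazonaws.com/json.html?{ref_param}"
    f"&sha={commit_sha}&name_0={urllib.parse.quote('PR', safe='')}"
)
print(f"{base}&name_1={urllib.parse.quote('Build (amd_release)', safe='')}")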
@@ -626,6 +726,10 @@ def create_workflow_report(
     except Exception as e:
         pr_info_html = e
 
+    fail_results["job_statuses"] = backfill_skipped_statuses(
+        fail_results["job_statuses"], pr_number, branch_name, commit_sha
+    )
+
     high_cve_count = 0
     if not cves_not_checked and len(fail_results["docker_images_cves"]) > 0:
         high_cve_count = (
@@ -666,6 +770,9 @@ def create_workflow_report(
             ),
             "pr_new_fails": len(fail_results["pr_new_fails"]),
         },
+        "build_report_links": get_build_report_links(
+            fail_results["job_statuses"], pr_number, branch_name, commit_sha
+        ),
         "ci_jobs_status_html": format_results_as_html_table(
             fail_results["job_statuses"]
         ),
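For context, a minimal, hypothetical example of how a template could consume the new build_report_links context entry (the markup is illustrative; the actual report template is not part of this diff):

from jinja2 import Environment

env = Environment(autoescape=True)
template = env.from_string(
    "<ul>{% for name, link in build_report_links.items() %}"
    '<li><a href="{{ link }}">{{ name }}</a></li>'
    "{% endfor %}</ul>"
)
print(
    template.render(
        build_report_links={"Build (amd_release)": "https://example.invalid/report"}
    )
)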