@@ -314,7 +314,7 @@ def get_build_info(self, build_ids: List[str], project: str, test_type: str, que
 
         # Remove already found builds from the search list
         build_ids = [bid for bid in build_ids if bid not in builds]
-        
+
         if not build_ids:
             logger.info("All builds found in cache")
             return builds
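For reference, the hunk above is the cache-first branch of `get_build_info`: build IDs that are already cached are dropped from the query list, and the method returns early when nothing remains to fetch. A minimal standalone sketch of that pattern, assuming a plain dict cache and a placeholder fetch step (neither is the script's real implementation):

```python
from typing import Dict, List

def get_builds(build_ids: List[str], cache: Dict[str, dict]) -> Dict[str, dict]:
    # Start with whatever is already cached
    builds = {bid: cache[bid] for bid in build_ids if bid in cache}

    # Remove already found builds from the search list
    remaining = [bid for bid in build_ids if bid not in builds]
    if not remaining:
        return builds  # all builds found in cache, nothing left to query

    # Fetch only the missing builds (placeholder for the real Develocity API call)
    for bid in remaining:
        builds[bid] = {"id": bid}
    return builds
```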
@@ -733,7 +733,7 @@ def print_summary(problematic_tests: Dict[str, Dict], flaky_regressions: Dict[st
 
     # Combine and sort all test cases by failure rate
     all_problem_cases = []
-    
+
     # Process problematic quarantined tests
     if len(problematic_tests) > 0:
         print(f"Found {len(problematic_tests)} tests that have been quarantined for a while and are still flaky.")
@@ -742,15 +742,15 @@ def print_summary(problematic_tests: Dict[str, Dict], flaky_regressions: Dict[st
                 total_runs = test_case.outcome_distribution.total
                 method_name = test_case.name.split('.')[-1]
                 if total_runs > 0:
-                    failure_rate = (test_case.outcome_distribution.failed + 
+                    failure_rate = (test_case.outcome_distribution.failed +
                                     test_case.outcome_distribution.flaky) / total_runs
                     all_problem_cases.append({
                         'class': full_class_name,
                         'method': method_name,
                         'failure_rate': failure_rate,
                         'total_runs': total_runs
                     })
-    
+
     # Process flaky regressions
     if len(flaky_regressions) > 0:
         print(f"Found {len(flaky_regressions)} tests that have started recently failing.")
@@ -763,8 +763,8 @@ def print_summary(problematic_tests: Dict[str, Dict], flaky_regressions: Dict[st
             })
 
     # Sort by failure rate descending
-    sorted_cases = sorted(all_problem_cases, 
-                          key=lambda x: x['failure_rate'], 
+    sorted_cases = sorted(all_problem_cases,
+                          key=lambda x: x['failure_rate'],
                           reverse=True)
 
     # Group by class
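Taken together, the two hunks above compute a per-test failure rate as (failed + flaky) / total and then sort the collected cases by that rate in descending order. A rough illustration of the same calculation, using a simplified stand-in for the script's outcome-distribution type and made-up counts:

```python
from dataclasses import dataclass

@dataclass
class OutcomeDistribution:
    # Simplified stand-in for the script's per-test outcome counts
    passed: int = 0
    failed: int = 0
    flaky: int = 0

    @property
    def total(self) -> int:
        return self.passed + self.failed + self.flaky

def failure_rate(dist: OutcomeDistribution) -> float:
    # Failed and flaky runs both count against the test
    return (dist.failed + dist.flaky) / dist.total if dist.total > 0 else 0.0

cases = {
    "FooTest.testA": OutcomeDistribution(passed=30, failed=5, flaky=5),  # 25%
    "BarTest.testB": OutcomeDistribution(passed=4, failed=6),            # 60%
}

# Sort by failure rate descending, mirroring the sorted(...) call in the diff
for name, dist in sorted(cases.items(), key=lambda kv: failure_rate(kv[1]), reverse=True):
    print(f"{name}: {failure_rate(dist):.2%} over {dist.total} runs")
```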
@@ -790,15 +790,12 @@ def main():
     token = None
     if os.environ.get("DEVELOCITY_ACCESS_TOKEN"):
         token = os.environ.get("DEVELOCITY_ACCESS_TOKEN")
-    elif os.environ.get("GE_ACCESS_TOKEN"):
-        # Special case for when we run in GHA
-        token = os.environ.get("GE_ACCESS_TOKEN").removeprefix("ge.apache.org=")
     else:
         print("No auth token was specified. You must set DEVELOCITY_ACCESS_TOKEN to your personal access token.")
         exit(1)
 
     # Configuration
-    BASE_URL = "https://ge.apache.org"
+    BASE_URL = "https://develocity.apache.org"
     PROJECT = "kafka"
     QUARANTINE_THRESHOLD_DAYS = 7
     MIN_FAILURE_RATE = 0.1
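This hunk removes the GE_ACCESS_TOKEN fallback and repoints the base URL at develocity.apache.org, so the script is now driven solely by DEVELOCITY_ACCESS_TOKEN plus the constants above. A hedged sketch of how a caller might exercise that configuration; the `requests` session and the `/api/builds` call are illustrative and not taken from the script itself:

```python
import os
import sys

import requests  # assumed dependency for this sketch; the script builds its own client

# Read the personal access token exactly as main() now does: no GE_ACCESS_TOKEN fallback
token = os.environ.get("DEVELOCITY_ACCESS_TOKEN")
if not token:
    print("No auth token was specified. You must set DEVELOCITY_ACCESS_TOKEN to your personal access token.")
    sys.exit(1)

BASE_URL = "https://develocity.apache.org"

# Develocity access tokens are typically sent as a Bearer token; the endpoint and
# parameters below are placeholders, not the report script's real queries
session = requests.Session()
session.headers.update({"Authorization": f"Bearer {token}"})
response = session.get(f"{BASE_URL}/api/builds", params={"maxBuilds": 5})
response.raise_for_status()
print(response.json())
```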
@@ -862,7 +859,7 @@ def main():
             print(f"\nRecent Executions (last {len(details['recent_executions'])} runs):")
             for entry in sorted(details['recent_executions'], key=lambda x: x.timestamp)[-5:]:
                 print(f"  {entry.timestamp.strftime('%Y-%m-%d %H:%M')} - {entry.outcome}")
-            
+
         # Print Cleared Tests
         print("\n## Cleared Tests (Ready for Unquarantine)")
         if not cleared_tests:
@@ -881,19 +878,19 @@ def main():
             print(f"\nRecent Executions (last {len(details['recent_executions'])} runs):")
             for entry in sorted(details['recent_executions'], key=lambda x: x.timestamp):
                 print(f"  {entry.timestamp.strftime('%Y-%m-%d %H:%M')} - {entry.outcome}")
-            
+
         # Print Defective Tests
         print("\n## High-Priority Quarantined Tests")
         if not problematic_tests:
             print("No high-priority quarantined tests found.")
         else:
             print("These are tests which have been quarantined for several days and need attention.")
             sorted_tests = sorted(
-                problematic_tests.items(), 
+                problematic_tests.items(),
                 key=lambda x: (x[1]['failure_rate'], x[1]['days_quarantined']),
                 reverse=True
             )
-            
+
             print(f"\nFound {len(sorted_tests)} high-priority quarantined test classes:")
             for full_class_name, details in sorted_tests:
                 class_result = details['container_result']
@@ -916,21 +913,21 @@ def main():
                     key=lambda x: (x.outcome_distribution.failed + x.outcome_distribution.flaky) / x.outcome_distribution.total if x.outcome_distribution.total > 0 else 0,
                     reverse=True
                 )
-                
+
                 for test_method in sorted_methods:
                     total_runs = test_method.outcome_distribution.total
                     if total_runs > 0:
                         failure_rate = (test_method.outcome_distribution.failed + test_method.outcome_distribution.flaky) / total_runs
-                        
+
                         # Extract the method name from the full test name
                         method_name = test_method.name.split('.')[-1]
-                        
+
                         print(f"\n→ {method_name}")
                         print(f"  Failure Rate: {failure_rate:.2%}")
                         print(f"  Runs: {total_runs:3d} | Failed: {test_method.outcome_distribution.failed:3d} | "
                               f"Flaky: {test_method.outcome_distribution.flaky:3d} | "
                               f"Passed: {test_method.outcome_distribution.passed:3d}")
-                        
+
                         # Show test method timeline
                         if test_method.timeline:
                             print(f"\n  Recent Executions (last {min(3, len(test_method.timeline))} of {len(test_method.timeline)} runs):")
@@ -939,7 +936,7 @@ def main():
                             for entry in sorted(test_method.timeline, key=lambda x: x.timestamp)[-3:]:
                                 date_str = entry.timestamp.strftime('%Y-%m-%d %H:%M')
                                 print(f"  {date_str:<17} {entry.outcome:<10} {entry.build_id}")
-                        
+
     except Exception as e:
         logger.exception("Error occurred during report generation")
         print(f"Error occurred: {str(e)}")