1313
1414import browser_common
1515import common
16+ from color_runner import BufferingMixin
1617from common import errlog
1718
1819from tools import emprofile , utils
19- from tools .colored_logger import CYAN , GREEN , RED , with_color
2020from tools .utils import WINDOWS
2121
2222EMTEST_VISUALIZE = os .getenv ('EMTEST_VISUALIZE' )
@@ -106,43 +106,11 @@ def __init__(self, max_cores, options):
106106 self .max_cores = max_cores
107107 self .max_failures = options .max_failures
108108 self .failing_and_slow_first = options .failing_and_slow_first
109- self .progress_counter = 0
110109
111110 def addTest (self , test ):
112111 super ().addTest (test )
113112 test .is_parallel = True
114113
115- def printOneResult (self , res ):
116- self .progress_counter += 1
117- progress = f'[{ self .progress_counter } /{ self .num_tests } ] '
118-
119- if res .test_result == 'success' :
120- msg = 'ok'
121- color = GREEN
122- elif res .test_result == 'errored' :
123- msg = 'ERROR'
124- color = RED
125- elif res .test_result == 'failed' :
126- msg = 'FAIL'
127- color = RED
128- elif res .test_result == 'skipped' :
129- reason = res .skipped [0 ][1 ]
130- msg = f"skipped '{ reason } '"
131- color = CYAN
132- elif res .test_result == 'unexpected success' :
133- msg = 'unexpected success'
134- color = RED
135- elif res .test_result == 'expected failure' :
136- color = RED
137- msg = 'expected failure'
138- else :
139- assert False , f'unhandled test result { res .test_result } '
140-
141- if res .test_result != 'skipped' :
142- msg += f' ({ res .elapsed :.2f} s)'
143-
144- errlog (f'{ with_color (CYAN , progress )} { res .test } ... { with_color (color , msg )} ' )
145-
146114 def run (self , result ):
147115 # The 'spawn' method is used on windows and it can be useful to set this on
148116 # all platforms when debugging multiprocessing issues. Without this we
@@ -151,6 +119,12 @@ def run(self, result):
151119 # issues.
152120 # multiprocessing.set_start_method('spawn')
153121
122+ # No need to worry about stdout/stderr buffering since we are not
123+ # actually running the test here, only setting the results.
124+ buffer = result .buffer
125+ result .buffer = False
126+
127+ result .core_time = 0
154128 tests = self .get_sorted_tests ()
155129 self .num_tests = self .countTestCases ()
156130 contains_browser_test = any (test .is_browser_test () for test in tests )
@@ -176,23 +150,23 @@ def run(self, result):
176150 allowed_failures_counter = manager .Value ('i' , self .max_failures )
177151
178152 results = []
179- args = ((t , allowed_failures_counter , result . buffer ) for t in tests )
153+ args = ((t , allowed_failures_counter , buffer ) for t in tests )
180154 for res in pool .imap_unordered (run_test , args , chunksize = 1 ):
181155 # results may be None if # of allowed errors was exceeded
182156 # and the harness aborted.
183157 if res :
184158 if res .test_result not in ['success' , 'skipped' ] and allowed_failures_counter is not None :
185159 # Signal existing multiprocess pool runners so that they can exit early if needed.
186160 allowed_failures_counter .value -= 1
187- self . printOneResult ( res )
161+ res . integrate_result ( result )
188162 results .append (res )
189163
190164 # Send a task to each worker to tear down the browser and server. This
191165 # relies on the implementation detail in the worker pool that all workers
192166 # are cycled through once.
193167 num_tear_downs = sum ([pool .apply (tear_down , ()) for i in range (use_cores )])
194168 # Assert the assumed behavior above hasn't changed.
195- if num_tear_downs != use_cores :
169+ if num_tear_downs != use_cores and not buffer :
196170 errlog (f'Expected { use_cores } teardowns, got { num_tear_downs } ' )
197171
198172 if self .failing_and_slow_first :
@@ -218,7 +192,9 @@ def update_test_results_to(test_name):
218192
219193 json .dump (previous_test_run_results , open (common .PREVIOUS_TEST_RUN_RESULTS_FILE , 'w' ), indent = 2 )
220194
221- return self .combine_results (result , results )
195+ if EMTEST_VISUALIZE :
196+ self .visualize_results (results )
197+ return result
222198
223199 def get_sorted_tests (self ):
224200 """A list of this suite's tests, sorted with the @is_slow_test tests first.
@@ -237,45 +213,29 @@ def test_key(test):
237213
238214 return sorted (self , key = test_key , reverse = True )
239215
240- def combine_results (self , result , buffered_results ):
241- errlog ('' )
242- errlog ('DONE: combining results on main thread' )
243- errlog ('' )
216+ def visualize_results (self , results ):
217+ assert EMTEST_VISUALIZE
244218 # Sort the results back into alphabetical order. Running the tests in
245219 # parallel causes mis-orderings, this makes the results more readable.
246- results = sorted (buffered_results , key = lambda res : str (res .test ))
247- result .core_time = 0
220+ results = sorted (results , key = lambda res : str (res .test ))
248221
249222 # shared data structures are hard in the python multi-processing world, so
250223 # use a file to share the flaky test information across test processes.
251224 flaky_tests = open (common .flaky_tests_log_filename ).read ().split () if os .path .isfile (common .flaky_tests_log_filename ) else []
252225 # Extract only the test short names
253226 flaky_tests = [x .split ('.' )[- 1 ] for x in flaky_tests ]
254227
255- # The next integrateResult loop will print a *lot* of lines really fast. This
256- # will cause a Python exception being thrown when attempting to print to
257- # stderr, if stderr is in nonblocking mode, like it is on Buildbot CI:
258- # See https://github.com/buildbot/buildbot/issues/8659
259- # To work around that problem, set stderr to blocking mode before printing.
260- if not WINDOWS :
261- os .set_blocking (sys .stderr .fileno (), True )
262-
263228 for r in results :
264- # Integrate the test result to the global test result object
265- r .integrateResult (result )
266229 r .log_test_run_for_visualization (flaky_tests )
267230
268231 # Generate the parallel test run visualization
269- if EMTEST_VISUALIZE :
270- emprofile .create_profiling_graph (utils .path_from_root ('out/graph' ))
271- # Cleanup temp files that were used for the visualization
272- emprofile .delete_profiler_logs ()
273- utils .delete_file (common .flaky_tests_log_filename )
274-
275- return result
232+ emprofile .create_profiling_graph (utils .path_from_root ('out/graph' ))
233+ # Cleanup temp files that were used for the visualization
234+ emprofile .delete_profiler_logs ()
235+ utils .delete_file (common .flaky_tests_log_filename )
276236
277237
278- class BufferedParallelTestResult (unittest .TestResult ):
238+ class BufferedParallelTestResult (BufferingMixin , unittest .TestResult ):
279239 """A picklable struct used to communicate test results across processes
280240 """
281241 def __init__ (self ):
@@ -293,15 +253,12 @@ def test_short_name(self):
293253 def addDuration (self , test , elapsed ):
294254 self .test_duration = elapsed
295255
296- def integrateResult (self , overall_results ):
256+ def integrate_result (self , overall_results ):
297257 """This method gets called on the main thread once the buffered result
298- is received. It add the buffered result to the overall result."""
258+ is received. It adds the buffered result to the overall result."""
299259 # The exception info objects that we are adding here have already
300260 # been turned into strings so make _exc_info_to_string into a no-op.
301261 overall_results ._exc_info_to_string = lambda x , _y : x
302- # No need to worry about stdout/stderr buffering since are a not
303- # actually running the test here, only setting the results.
304- overall_results .buffer = False
305262 overall_results .startTest (self .test )
306263 if self .test_result == 'success' :
307264 overall_results .addSuccess (self .test )
@@ -321,7 +278,8 @@ def integrateResult(self, overall_results):
321278 overall_results .core_time += self .test_duration
322279
323280 def log_test_run_for_visualization (self , flaky_tests ):
324- if EMTEST_VISUALIZE and (self .test_result != 'skipped' or self .test_duration > 0.2 ):
281+ assert EMTEST_VISUALIZE
282+ if self .test_result != 'skipped' or self .test_duration > 0.2 :
325283 test_result = self .test_result
326284 if test_result == 'success' and self .test_short_name () in flaky_tests :
327285 test_result = 'warnings'
0 commit comments