@@ -13,10 +13,10 @@
 
 import browser_common
 import common
+from color_runner import BufferingMixin
 from common import errlog
 
 from tools import emprofile, utils
-from tools.colored_logger import CYAN, GREEN, RED, with_color
 from tools.utils import WINDOWS
 
 EMTEST_VISUALIZE = os.getenv('EMTEST_VISUALIZE')
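Note on the imports hunk: the inline colored-logger helpers are replaced by a `BufferingMixin` from `color_runner`, whose implementation is not part of this diff. The sketch below only illustrates the cooperative-mixin pattern that the new `BufferedParallelTestResult(BufferingMixin, unittest.TestResult)` base list relies on: the mixin is listed first so its hook overrides run before `unittest.TestResult`'s and delegate via `super()`. All names here are hypothetical.

```python
# Illustrative only: color_runner.BufferingMixin is not shown in this diff.
# This demonstrates the mixin-before-base pattern used by
# BufferedParallelTestResult(BufferingMixin, unittest.TestResult).
import unittest


class RecordingMixin:
  """Hypothetical mixin that notes each outcome, then defers to the base."""

  def addSuccess(self, test):
    self.last_outcome = 'success'  # runs first thanks to MRO order
    super().addSuccess(test)       # then unittest.TestResult does its part

  def addFailure(self, test, err):
    self.last_outcome = 'failed'
    super().addFailure(test, err)


class RecordingResult(RecordingMixin, unittest.TestResult):
  pass


class Passing(unittest.TestCase):
  def runTest(self):
    pass


result = RecordingResult()
Passing().run(result)
assert result.last_outcome == 'success' and result.wasSuccessful()
```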
@@ -118,43 +118,11 @@ def __init__(self, max_cores, options):
     self.max_cores = max_cores
     self.max_failures = options.max_failures
     self.failing_and_slow_first = options.failing_and_slow_first
-    self.progress_counter = 0
 
   def addTest(self, test):
     super().addTest(test)
     test.is_parallel = True
 
-  def printOneResult(self, res):
-    self.progress_counter += 1
-    progress = f'[{self.progress_counter}/{self.num_tests}] '
-
-    if res.test_result == 'success':
-      msg = 'ok'
-      color = GREEN
-    elif res.test_result == 'errored':
-      msg = 'ERROR'
-      color = RED
-    elif res.test_result == 'failed':
-      msg = 'FAIL'
-      color = RED
-    elif res.test_result == 'skipped':
-      reason = res.skipped[0][1]
-      msg = f"skipped '{reason}'"
-      color = CYAN
-    elif res.test_result == 'unexpected success':
-      msg = 'unexpected success'
-      color = RED
-    elif res.test_result == 'expected failure':
-      color = RED
-      msg = 'expected failure'
-    else:
-      assert False, f'unhandled test result {res.test_result}'
-
-    if res.test_result != 'skipped':
-      msg += f' ({res.elapsed:.2f}s)'
-
-    errlog(f'{with_color(CYAN, progress)}{res.test} ... {with_color(color, msg)}')
-
   def run(self, result):
     # The 'spawn' method is used on windows and it can be useful to set this on
     # all platforms when debugging multiprocessing issues. Without this we
@@ -163,6 +131,12 @@ def run(self, result):
     # issues.
     # multiprocessing.set_start_method('spawn')
 
+    # No need to worry about stdout/stderr buffering since we are not
+    # actually running the test here, only setting the results.
+    buffer = result.buffer
+    result.buffer = False
+
+    result.core_time = 0
     tests = self.get_sorted_tests()
     self.num_tests = self.countTestCases()
     contains_browser_test = any(test.is_browser_test() for test in tests)
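The `buffer` bookkeeping above leans on `unittest.TestResult.buffer`, a standard-library flag: while it is set, output printed during a test is captured, discarded for passing tests, and attached to the report for failing ones. Since this suite runs tests in worker processes, the main-thread result clears the flag and forwards the saved value to the workers instead (via `args` in the next hunk). A minimal self-contained demonstration of the flag, with made-up test names:

```python
# Standard unittest behavior: with buffer=True, per-test stdout/stderr is
# captured; it is dropped for passing tests and replayed for failing ones.
import unittest


class Noisy(unittest.TestCase):
  def test_pass(self):
    print('swallowed: the test passes, so this output is dropped')

  def test_fail(self):
    print('kept: this line is replayed inside the failure report')
    self.fail('boom')


if __name__ == '__main__':
  suite = unittest.defaultTestLoader.loadTestsFromTestCase(Noisy)
  unittest.TextTestRunner(buffer=True, verbosity=2).run(suite)
```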
@@ -189,20 +163,20 @@ def run(self, result):
       lock = manager.Lock()
 
       results = []
-      args = ((t, allowed_failures_counter, lock, result.buffer) for t in tests)
+      args = ((t, allowed_failures_counter, lock, buffer) for t in tests)
       for res in pool.imap_unordered(run_test, args, chunksize=1):
         # results may be None if the number of allowed errors was exceeded
         # and the harness aborted.
         if res:
-          self.printOneResult(res)
+          res.integrate_result(result)
           results.append(res)
 
       # Send a task to each worker to tear down the browser and server. This
       # relies on the implementation detail in the worker pool that all workers
       # are cycled through once.
       num_tear_downs = sum([pool.apply(tear_down, ()) for i in range(use_cores)])
       # Assert the assumed behavior above hasn't changed.
-      if num_tear_downs != use_cores:
+      if num_tear_downs != use_cores and not buffer:
         errlog(f'Expected {use_cores} teardowns, got {num_tear_downs}')
 
       if self.failing_and_slow_first:
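`imap_unordered` is what lets results stream back in completion order: each worker hands over its picklable result object as soon as it finishes, regardless of submission order. A minimal sketch of the same pattern, with `run_one` standing in for the harness's `run_test`:

```python
# Sketch of the Pool.imap_unordered pattern used above. run_one stands in
# for the real run_test; it just returns something picklable.
import multiprocessing


def run_one(name):
  # Pretend to run a test and report a (name, outcome) pair back.
  return (name, 'ok')


if __name__ == '__main__':
  names = ['test_hello', 'test_sse', 'test_canvas']
  # chunksize=1 keeps scheduling granular: a slow test only occupies one
  # worker rather than delaying a whole batch of queued tests.
  with multiprocessing.Pool(processes=2) as pool:
    for res in pool.imap_unordered(run_one, names, chunksize=1):
      print(res)  # arrives in completion order, not submission order
```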
@@ -228,7 +202,9 @@ def update_test_results_to(test_name):
 
       json.dump(previous_test_run_results, open(common.PREVIOUS_TEST_RUN_RESULTS_FILE, 'w'), indent=2)
 
-    return self.combine_results(result, results)
+    if EMTEST_VISUALIZE:
+      self.visualize_results(results)
+    return result
 
   def get_sorted_tests(self):
     """A list of this suite's tests, sorted with the @is_slow_test tests first.
@@ -247,45 +223,29 @@ def test_key(test):
 
     return sorted(self, key=test_key, reverse=True)
 
-  def combine_results(self, result, buffered_results):
-    errlog('')
-    errlog('DONE: combining results on main thread')
-    errlog('')
+  def visualize_results(self, results):
+    assert EMTEST_VISUALIZE
     # Sort the results back into alphabetical order. Running the tests in
     # parallel causes mis-orderings; this makes the results more readable.
-    results = sorted(buffered_results, key=lambda res: str(res.test))
-    result.core_time = 0
+    results = sorted(results, key=lambda res: str(res.test))
 
     # Shared data structures are hard in the Python multi-processing world, so
     # use a file to share the flaky test information across test processes.
     flaky_tests = open(common.flaky_tests_log_filename).read().split() if os.path.isfile(common.flaky_tests_log_filename) else []
     # Extract only the test short names
     flaky_tests = [x.split('.')[-1] for x in flaky_tests]
 
-    # The next integrateResult loop will print a *lot* of lines really fast. This
-    # will cause a Python exception being thrown when attempting to print to
-    # stderr, if stderr is in nonblocking mode, like it is on Buildbot CI:
-    # See https://github.com/buildbot/buildbot/issues/8659
-    # To work around that problem, set stderr to blocking mode before printing.
-    if not WINDOWS:
-      os.set_blocking(sys.stderr.fileno(), True)
-
     for r in results:
-      # Integrate the test result to the global test result object
-      r.integrateResult(result)
       r.log_test_run_for_visualization(flaky_tests)
 
     # Generate the parallel test run visualization
-    if EMTEST_VISUALIZE:
-      emprofile.create_profiling_graph(utils.path_from_root('out/graph'))
-      # Cleanup temp files that were used for the visualization
-      emprofile.delete_profiler_logs()
-      utils.delete_file(common.flaky_tests_log_filename)
-
-    return result
+    emprofile.create_profiling_graph(utils.path_from_root('out/graph'))
+    # Clean up temp files that were used for the visualization
+    emprofile.delete_profiler_logs()
+    utils.delete_file(common.flaky_tests_log_filename)
 
 
-class BufferedParallelTestResult(unittest.TestResult):
+class BufferedParallelTestResult(BufferingMixin, unittest.TestResult):
   """A picklable struct used to communicate test results across processes
   """
   def __init__(self):
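`BufferedParallelTestResult` has to cross a process boundary, which is why the docstring stresses picklability: live traceback objects do not pickle, so failure details must be flattened to strings on the worker before they travel back. A small standalone illustration of that constraint:

```python
# Why picklability matters here: traceback objects cannot cross a process
# boundary, so failure details are formatted to strings on the worker side.
import pickle
import traceback

try:
  raise ValueError('boom')
except ValueError as e:
  tb = e.__traceback__
  text = traceback.format_exc()  # plain string: safe to pickle

pickle.dumps(text)  # fine
try:
  pickle.dumps(tb)
except TypeError as err:
  print('tracebacks do not pickle:', err)
```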
@@ -303,15 +263,12 @@ def test_short_name(self):
   def addDuration(self, test, elapsed):
     self.test_duration = elapsed
 
-  def integrateResult(self, overall_results):
+  def integrate_result(self, overall_results):
     """This method gets called on the main thread once the buffered result
-    is received. It add the buffered result to the overall result."""
+    is received. It adds the buffered result to the overall result."""
     # The exception info objects that we are adding here have already
     # been turned into strings so make _exc_info_to_string into a no-op.
     overall_results._exc_info_to_string = lambda x, _y: x
-    # No need to worry about stdout/stderr buffering since we are not
-    # actually running the test here, only setting the results.
-    overall_results.buffer = False
     overall_results.startTest(self.test)
     if self.test_result == 'success':
       overall_results.addSuccess(self.test)
@@ -331,7 +288,8 @@ def integrateResult(self, overall_results):
     overall_results.core_time += self.test_duration
 
   def log_test_run_for_visualization(self, flaky_tests):
-    if EMTEST_VISUALIZE and (self.test_result != 'skipped' or self.test_duration > 0.2):
+    assert EMTEST_VISUALIZE
+    if self.test_result != 'skipped' or self.test_duration > 0.2:
       test_result = self.test_result
       if test_result == 'success' and self.test_short_name() in flaky_tests:
         test_result = 'warnings'
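For context, `integrate_result` is a replay: the worker-side outcome is folded into the main-thread result by re-invoking the standard `TestResult` hooks, with `_exc_info_to_string` stubbed out because the error text is already a plain string. A standalone sketch of that pattern (names here are illustrative, not the harness's API):

```python
# Standalone sketch of the replay pattern integrate_result uses: re-invoke
# the standard TestResult hooks on the main thread, bypassing exc_info
# formatting since the worker already produced plain strings.
import unittest


class Dummy(unittest.TestCase):
  def runTest(self):
    pass


def replay(main_result, test, outcome, err_text=None):
  # The worker pre-formatted the traceback, so hand the string through as-is.
  main_result._exc_info_to_string = lambda err, _test: err
  main_result.startTest(test)
  if outcome == 'success':
    main_result.addSuccess(test)
  elif outcome == 'failed':
    main_result.addFailure(test, err_text)
  main_result.stopTest(test)


result = unittest.TestResult()
replay(result, Dummy(), 'failed', 'AssertionError: pre-formatted on a worker')
print(result.testsRun, len(result.failures))  # 1 1
```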