
Commit 760872e

Authored Oct 16, 2024
gh-125451: Fix deadlock in ProcessPoolExecutor shutdown (#125492)
There was a deadlock when `ProcessPoolExecutor` shut down at the same time that a queueing thread was handling an error while processing a task. Don't use `_shutdown_lock` to protect the `_ThreadWakeup` pipes -- use an internal lock instead. This fixes the ordering deadlock where the `ExecutorManagerThread` holds the `_shutdown_lock` and joins the queueing thread, while the queueing thread is trying to acquire the `_shutdown_lock` as it closes the `_ThreadWakeup`.
1 parent d83fcf8 · commit 760872e
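
The ordering deadlock described above is a classic lock-order inversion: one thread holds a lock while joining a second thread, and that second thread needs the same lock before it can exit. Below is a minimal, hypothetical sketch of the pattern in plain threading code -- none of these names come from CPython, and timeouts are added so the demo terminates instead of hanging:

import threading

shutdown_lock = threading.Lock()

def queueing_thread():
    # Stands in for the queue-feeder thread: it needs shutdown_lock to finish.
    if shutdown_lock.acquire(timeout=2):
        shutdown_lock.release()
    else:
        print("queueing thread: still waiting for shutdown_lock (deadlock)")

worker = threading.Thread(target=queueing_thread)

# Stands in for the manager thread: it holds shutdown_lock while joining the
# queueing thread. Without the timeouts, this join would never return, because
# the queueing thread can only finish after the lock is released.
with shutdown_lock:
    worker.start()
    worker.join(timeout=3)

worker.join()
print("demo finished")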

3 files changed, +23 −32 lines changed

Lib/concurrent/futures/process.py (+21 −29)
@@ -68,27 +68,31 @@
 class _ThreadWakeup:
     def __init__(self):
         self._closed = False
+        self._lock = threading.Lock()
         self._reader, self._writer = mp.Pipe(duplex=False)
 
     def close(self):
-        # Please note that we do not take the shutdown lock when
+        # Please note that we do not take the self._lock when
         # calling clear() (to avoid deadlocking) so this method can
         # only be called safely from the same thread as all calls to
-        # clear() even if you hold the shutdown lock. Otherwise we
+        # clear() even if you hold the lock. Otherwise we
         # might try to read from the closed pipe.
-        if not self._closed:
-            self._closed = True
-            self._writer.close()
-            self._reader.close()
+        with self._lock:
+            if not self._closed:
+                self._closed = True
+                self._writer.close()
+                self._reader.close()
 
     def wakeup(self):
-        if not self._closed:
-            self._writer.send_bytes(b"")
+        with self._lock:
+            if not self._closed:
+                self._writer.send_bytes(b"")
 
     def clear(self):
-        if not self._closed:
-            while self._reader.poll():
-                self._reader.recv_bytes()
+        if self._closed:
+            raise RuntimeError('operation on closed _ThreadWakeup')
+        while self._reader.poll():
+            self._reader.recv_bytes()
 
 
 def _python_exit():
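
To see why the new per-instance lock matters, here is a hedged, self-contained sketch (not the stdlib class itself, and the names are hypothetical) of a simplified _ThreadWakeup-style object being woken from one thread while another thread closes it; the lock guarantees wakeup() never touches a pipe that close() has already torn down:

import multiprocessing as mp
import threading
import time

class PipeWakeup:
    """Simplified, hypothetical stand-in for the patched _ThreadWakeup."""

    def __init__(self):
        self._closed = False
        self._lock = threading.Lock()
        self._reader, self._writer = mp.Pipe(duplex=False)

    def wakeup(self):
        # The lock serializes against close(), so we never write to a pipe
        # that is being torn down.
        with self._lock:
            if not self._closed:
                self._writer.send_bytes(b"")

    def close(self):
        with self._lock:
            if not self._closed:
                self._closed = True
                self._writer.close()
                self._reader.close()

wakeup = PipeWakeup()

def spam_wakeups():
    for _ in range(1_000):
        wakeup.wakeup()          # becomes a silent no-op once closed

t = threading.Thread(target=spam_wakeups)
t.start()
time.sleep(0.001)
wakeup.close()                   # races with the wakeup() calls above
t.join()
print("no OSError: wakeup() after close() is ignored")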
@@ -167,10 +171,8 @@ def __init__(self, work_id, fn, args, kwargs):
 
 class _SafeQueue(Queue):
     """Safe Queue set exception to the future object linked to a job"""
-    def __init__(self, max_size=0, *, ctx, pending_work_items, shutdown_lock,
-                 thread_wakeup):
+    def __init__(self, max_size=0, *, ctx, pending_work_items, thread_wakeup):
         self.pending_work_items = pending_work_items
-        self.shutdown_lock = shutdown_lock
         self.thread_wakeup = thread_wakeup
         super().__init__(max_size, ctx=ctx)
 
@@ -179,8 +181,7 @@ def _on_queue_feeder_error(self, e, obj):
             tb = format_exception(type(e), e, e.__traceback__)
             e.__cause__ = _RemoteTraceback('\n"""\n{}"""'.format(''.join(tb)))
             work_item = self.pending_work_items.pop(obj.work_id, None)
-            with self.shutdown_lock:
-                self.thread_wakeup.wakeup()
+            self.thread_wakeup.wakeup()
             # work_item can be None if another process terminated. In this
             # case, the executor_manager_thread fails all work_items
             # with BrokenProcessPool
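
For context on when _on_queue_feeder_error fires at all: if an argument submitted to the pool cannot be pickled, the queue-feeder thread hits the error, wakes the manager thread, and the exception is set on the affected future. A minimal sketch of that user-visible behavior (a threading.Lock is used here only as a convenient unpicklable argument):

import threading
from concurrent.futures import ProcessPoolExecutor

def hold(lock):
    # Never actually runs: the argument fails to pickle before dispatch.
    lock.acquire()

if __name__ == "__main__":
    with ProcessPoolExecutor(max_workers=1) as executor:
        future = executor.submit(hold, threading.Lock())
        try:
            future.result(timeout=30)
        except Exception as exc:
            # The pickling error from the feeder thread surfaces here.
            print("future failed:", type(exc).__name__, exc)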
@@ -296,12 +297,10 @@ def __init__(self, executor):
         # if there is no pending work item.
         def weakref_cb(_,
                        thread_wakeup=self.thread_wakeup,
-                       shutdown_lock=self.shutdown_lock,
                        mp_util_debug=mp.util.debug):
             mp_util_debug('Executor collected: triggering callback for'
                           ' QueueManager wakeup')
-            with shutdown_lock:
-                thread_wakeup.wakeup()
+            thread_wakeup.wakeup()
 
         self.executor_reference = weakref.ref(executor, weakref_cb)
 
@@ -429,11 +428,6 @@ def wait_result_broken_or_wakeup(self):
         elif wakeup_reader in ready:
             is_broken = False
 
-        # No need to hold the _shutdown_lock here because:
-        # 1. we're the only thread to use the wakeup reader
-        # 2. we're also the only thread to call thread_wakeup.close()
-        # 3. we want to avoid a possible deadlock when both reader and writer
-        #    would block (gh-105829)
         self.thread_wakeup.clear()
 
         return result_item, is_broken, cause
@@ -721,10 +715,9 @@ def __init__(self, max_workers=None, mp_context=None,
         # as it could result in a deadlock if a worker process dies with the
         # _result_queue write lock still acquired.
         #
-        # _shutdown_lock must be locked to access _ThreadWakeup.close() and
-        # .wakeup(). Care must also be taken to not call clear or close from
-        # more than one thread since _ThreadWakeup.clear() is not protected by
-        # the _shutdown_lock
+        # Care must be taken to only call clear and close from the
+        # executor_manager_thread, since _ThreadWakeup.clear() is not protected
+        # by a lock.
         self._executor_manager_thread_wakeup = _ThreadWakeup()
 
         # Create communication channels for the executor
@@ -735,7 +728,6 @@ def __init__(self, max_workers=None, mp_context=None,
         self._call_queue = _SafeQueue(
             max_size=queue_size, ctx=self._mp_context,
             pending_work_items=self._pending_work_items,
-            shutdown_lock=self._shutdown_lock,
             thread_wakeup=self._executor_manager_thread_wakeup)
         # Killed worker processes can produce spurious "broken pipe"
         # tracebacks in the queue's own worker thread. But we detect killed

Lib/test/test_concurrent_futures/test_shutdown.py (−3)
@@ -253,9 +253,6 @@ def test_cancel_futures_wait_false(self):
 
 
 class ProcessPoolShutdownTest(ExecutorShutdownTest):
-    # gh-125451: 'lock' cannot be serialized, the test is broken
-    # and hangs randomly
-    @unittest.skipIf(True, "broken test")
     def test_processes_terminate(self):
         def acquire_lock(lock):
             lock.acquire()
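
The re-enabled test above exercises the scenario this commit fixes. Below is a hedged, repro-style sketch of the same shape -- submitting multiprocessing primitives, which the queue-feeder thread cannot pickle, and then shutting down immediately. Before the fix this pattern could occasionally deadlock; with the fix it should always complete:

import multiprocessing as mp
from concurrent.futures import ProcessPoolExecutor

def acquire_lock(lock):
    lock.acquire()

if __name__ == "__main__":
    ctx = mp.get_context("spawn")
    executor = ProcessPoolExecutor(max_workers=3, mp_context=ctx)
    lock = ctx.Lock()
    # Each submit fails in the queue-feeder thread: multiprocessing locks
    # cannot be pickled onto the call queue.
    futures = [executor.submit(acquire_lock, lock) for _ in range(3)]
    # Shut down right away, racing with the feeder thread's error handling.
    executor.shutdown(wait=True, cancel_futures=True)
    print("shutdown completed without deadlock")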
New NEWS entry (+2)
@@ -0,0 +1,2 @@
+Fix deadlock when :class:`concurrent.futures.ProcessPoolExecutor` shuts down
+concurrently with an error when feeding a job to a worker process.
