Fix Jupyter kernel shutdown causing nested atexit callbacks that leave the Task in a running state

This commit is contained in:
allegroai 2021-11-17 19:08:24 +02:00
parent 05ae7ec47b
commit 3f7d271c10

View File

@ -470,7 +470,7 @@ class Task(_Task):
is_sub_process_task_id = cls.__get_master_id_task_id() is_sub_process_task_id = cls.__get_master_id_task_id()
# we could not find a task ID, revert to old stub behaviour # we could not find a task ID, revert to old stub behaviour
if not is_sub_process_task_id: if not is_sub_process_task_id:
return _TaskStub() return _TaskStub() # noqa
elif running_remotely() and not get_is_master_node(): elif running_remotely() and not get_is_master_node():
# make sure we only do it once per process # make sure we only do it once per process
@ -867,7 +867,6 @@ class Task(_Task):
:param str task_name: The full name or partial name of the Tasks to match within the specified :param str task_name: The full name or partial name of the Tasks to match within the specified
``project_name`` (or all projects if ``project_name`` is ``None``). ``project_name`` (or all projects if ``project_name`` is ``None``).
This method supports regular expressions for name matching. (Optional) This method supports regular expressions for name matching. (Optional)
:param list(str) task_ids: list of unique task id string (if exists other parameters are ignored)
:param str project_name: project name (str) the task belongs to (use None for all projects) :param str project_name: project name (str) the task belongs to (use None for all projects)
:param str task_name: task name (str) within the selected project :param str task_name: task name (str) within the selected project
Return any partial match of task_name, regular expressions matching is also supported Return any partial match of task_name, regular expressions matching is also supported
@ -3119,17 +3118,24 @@ class Task(_Task):
""" """
# protect sub-process at_exit # protect sub-process at_exit
if self._at_exit_called: if self._at_exit_called:
is_sub_process = self.__is_subprocess()
# if we are called twice (signal in the middle of the shutdown), # if we are called twice (signal in the middle of the shutdown),
# make sure we flush stdout, this is the best we can do. _nested_shutdown_call = bool(self._at_exit_called == get_current_thread_id())
if self._at_exit_called == get_current_thread_id() and self._logger and self.__is_subprocess(): if _nested_shutdown_call and not is_sub_process:
self._logger.set_flush_period(None) # if we were called again in the main thread on the main process, let's try again
# noinspection PyProtectedMember # make sure we only do this once
self._logger._close_stdout_handler(wait=True)
self._at_exit_called = True self._at_exit_called = True
return else:
# make sure we flush stdout, this is the best we can do.
# from here only a single thread can re-enter if _nested_shutdown_call and self._logger and is_sub_process:
self._at_exit_called = get_current_thread_id() # noinspection PyProtectedMember
self._logger._close_stdout_handler(wait=True)
self._at_exit_called = True
# if we get here, we should do nothing and leave
return
else:
# from here only a single thread can re-enter
self._at_exit_called = get_current_thread_id()
# disable lock on signal callbacks, to avoid deadlocks. # disable lock on signal callbacks, to avoid deadlocks.
if self.__exit_hook and self.__exit_hook.signal is not None: if self.__exit_hook and self.__exit_hook.signal is not None: