forked from goodboy/tractor
Always cancel service nursery last

The channel server should be torn down *before* the rpc task/service nursery. Do this explicitly even in the root's main task to avoid a strange hang I found in the pubsub tests. Start dropping the `warnings.warn()` usage.

branch: drop_warn
parent 7115d6c3bd
commit f60321a35a
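For a rough picture of the teardown order this commit enforces (rpc tasks, then the channel server, and the service nursery last), here is a minimal trio sketch. `SketchActor` and everything inside it are hypothetical stand-ins that only borrow the attribute and method names visible in the diff below (`cancel_rpc_tasks`, `cancel_server`, `_server_down`, `_service_n`); it is not tractor's actual `Actor` implementation.

# Minimal sketch (NOT tractor's real Actor) of the teardown ordering:
# 1. cancel ongoing rpc tasks, 2. stop the channel server and wait for it
# to come down, 3. cancel the service nursery last.
import trio


class SketchActor:
    def __init__(self):
        self._server_down = trio.Event()
        self._server_cs = None   # set by the server task once it starts
        self._service_n = None   # set by the "root" task below

    async def _serve_forever(self):
        # stand-in for the channel server loop
        with trio.CancelScope() as cs:
            self._server_cs = cs
            try:
                await trio.sleep_forever()
            finally:
                # signal that the server is fully torn down
                self._server_down.set()

    async def cancel_rpc_tasks(self):
        print("1. ongoing rpc tasks cancelled")

    def cancel_server(self):
        print("2. channel server told to stop")
        self._server_cs.cancel()

    async def cancel(self):
        # kill all ongoing tasks
        await self.cancel_rpc_tasks()

        # stop channel server *before* nuking the service nursery
        self.cancel_server()
        await self._server_down.wait()

        # cancel all rpc tasks permanently: the service nursery goes last
        if self._service_n:
            self._service_n.cancel_scope.cancel()
            print("3. service nursery cancelled last")


async def main():
    actor = SketchActor()
    async with trio.open_nursery() as service_n:
        actor._service_n = service_n
        service_n.start_soon(actor._serve_forever)
        await trio.sleep(0.1)   # let the server task start
        await actor.cancel()    # prints steps 1, 2, 3 in order


trio.run(main)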
@@ -150,7 +150,7 @@ async def _invoke(
         except KeyError:
             # If we're cancelled before the task returns then the
             # cancel scope will not have been inserted yet
-            log.warn(
+            log.warning(
                 f"Task {func} likely errored or cancelled before it started")
         finally:
             if not actor._rpc_tasks:
@@ -520,7 +520,7 @@ class Actor:
             # deadlock and other weird behaviour)
             if func != self.cancel:
                 if isinstance(cs, Exception):
-                    log.warn(f"Task for RPC func {func} failed with"
+                    log.warning(f"Task for RPC func {func} failed with"
                              f"{cs}")
                 else:
                     # mark that we have ongoing rpc tasks
@@ -547,7 +547,7 @@ class Actor:
             log.debug(
                 f"{chan} for {chan.uid} disconnected, cancelling tasks"
             )
-            self.cancel_rpc_tasks(chan)
+            await self.cancel_rpc_tasks(chan)
 
         except trio.ClosedResourceError:
             log.error(f"{chan} form {chan.uid} broke")
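The hunk above also adds a missing `await`: calling an `async def` method without awaiting it only builds a coroutine object, its body never runs, and CPython emits a "coroutine ... was never awaited" warning when the object is garbage collected. A tiny hypothetical snippet (not tractor code) showing the bug class:

# Hypothetical demo of the bug class fixed above: an un-awaited async call.
import trio


async def cancel_rpc_tasks():
    print("rpc tasks actually cancelled")


async def main():
    cancel_rpc_tasks()        # bug: returns a coroutine, body never runs,
                              # later warns "coroutine ... was never awaited"
    await cancel_rpc_tasks()  # fix: the body runs to completion


trio.run(main)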
@@ -757,7 +757,7 @@ class Actor:
 
             # tear down all lifetime contexts
             # api idea: ``tractor.open_context()``
-            log.warn("Closing all actor lifetime contexts")
+            log.warning("Closing all actor lifetime contexts")
             self._lifetime_stack.close()
 
             # Unregister actor from the arbiter
@@ -855,14 +855,14 @@ class Actor:
         # kill all ongoing tasks
         await self.cancel_rpc_tasks()
 
-        # cancel all rpc tasks permanently
-        if self._service_n:
-            self._service_n.cancel_scope.cancel()
-
         # stop channel server
         self.cancel_server()
         await self._server_down.wait()
 
+        # cancel all rpc tasks permanently
+        if self._service_n:
+            self._service_n.cancel_scope.cancel()
+
         log.warning(f"{self.uid} was sucessfullly cancelled")
         self._cancel_complete.set()
         return True
@@ -1095,6 +1095,7 @@ async def _start_actor(
             # XXX: the actor is cancelled when this context is complete
             # given that there are no more active peer channels connected
             actor.cancel_server()
+            actor._service_n.cancel_scope.cancel()
 
             # unset module state
             _state._current_actor = None