
Always cancel service nursery last

The channel server should be torn down *before* the rpc
task/service nursery. Do this explicitly even in the root's main task
to avoid a strange hang I found in the pubsub tests. Start dropping
the deprecated `log.warn()` usage.
Tyler Goodlet 2020-10-14 13:46:05 -04:00
parent 7115d6c3bd
commit f60321a35a
1 changed file with 9 additions and 8 deletions
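
To make the ordering concrete, here is a minimal, self-contained trio sketch of the teardown sequence the commit message describes (cancel rpc work, stop the channel server and wait for it to exit, and only then cancel the service nursery). It does not use tractor's real types; every name in it (`channel_server`, `server_down`, `service_n`) is made up for illustration.

# A trio-only sketch of the teardown ordering above; names are invented
# for illustration and are not tractor's actual internals.
import trio


async def run_actor() -> None:
    server_down = trio.Event()

    async def channel_server(task_status=trio.TASK_STATUS_IGNORED):
        # stand-in for the transport/channel server task
        with trio.CancelScope() as server_scope:
            task_status.started(server_scope)
            try:
                await trio.sleep_forever()
            finally:
                server_down.set()  # signal that the server has fully exited

    async with trio.open_nursery() as service_n:
        server_scope = await service_n.start(channel_server)

        # ... outstanding rpc tasks would be cancelled first here ...

        # 1. stop the channel server and wait for it to wind down
        server_scope.cancel()
        await server_down.wait()

        # 2. only then cancel the service nursery -- always last
        service_n.cancel_scope.cancel()


trio.run(run_actor)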


@@ -150,7 +150,7 @@ async def _invoke(
         except KeyError:
             # If we're cancelled before the task returns then the
             # cancel scope will not have been inserted yet
-            log.warn(
+            log.warning(
                 f"Task {func} likely errored or cancelled before it started")
         finally:
             if not actor._rpc_tasks:
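
On the `log.warn` → `log.warning` swap that recurs through this diff: tractor's loggers are built on the stdlib `logging` module, where `Logger.warn` is only a deprecated alias for `Logger.warning` that fires a `DeprecationWarning` via `warnings.warn()` internally. A quick stdlib-only check (no tractor involved):

# Stdlib-only demonstration: Logger.warn is a deprecated alias that goes
# through warnings.warn() internally; Logger.warning is the supported call.
import logging
import warnings

logging.basicConfig(level=logging.WARNING)
log = logging.getLogger("demo")

with warnings.catch_warnings():
    warnings.simplefilter("error", DeprecationWarning)
    log.warning("fine: the supported spelling")
    try:
        log.warn("deprecated alias")  # raises DeprecationWarning here
    except DeprecationWarning as err:
        print("caught:", err)
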
@@ -520,7 +520,7 @@ class Actor:
             # deadlock and other weird behaviour)
             if func != self.cancel:
                 if isinstance(cs, Exception):
-                    log.warn(f"Task for RPC func {func} failed with"
+                    log.warning(f"Task for RPC func {func} failed with"
                                f"{cs}")
                 else:
                     # mark that we have ongoing rpc tasks
@@ -547,7 +547,7 @@ class Actor:
                 log.debug(
                     f"{chan} for {chan.uid} disconnected, cancelling tasks"
                 )
-                self.cancel_rpc_tasks(chan)
+                await self.cancel_rpc_tasks(chan)
         except trio.ClosedResourceError:
             log.error(f"{chan} form {chan.uid} broke")
@@ -757,7 +757,7 @@ class Actor:
         # tear down all lifetime contexts
         # api idea: ``tractor.open_context()``
-        log.warn("Closing all actor lifetime contexts")
+        log.warning("Closing all actor lifetime contexts")
         self._lifetime_stack.close()

         # Unregister actor from the arbiter
@@ -855,14 +855,14 @@ class Actor:
         # kill all ongoing tasks
         await self.cancel_rpc_tasks()

-        # cancel all rpc tasks permanently
-        if self._service_n:
-            self._service_n.cancel_scope.cancel()

         # stop channel server
         self.cancel_server()
         await self._server_down.wait()

+        # cancel all rpc tasks permanently
+        if self._service_n:
+            self._service_n.cancel_scope.cancel()

-        log.warn(f"{self.uid} was sucessfullly cancelled")
+        log.warning(f"{self.uid} was sucessfullly cancelled")
         self._cancel_complete.set()
         return True
@@ -1095,6 +1095,7 @@ async def _start_actor(
         # XXX: the actor is cancelled when this context is complete
         # given that there are no more active peer channels connected
         actor.cancel_server()
+        actor._service_n.cancel_scope.cancel()

         # unset module state
         _state._current_actor = None