forked from goodboy/tractor

commit 7ddc4db041
@@ -150,7 +150,7 @@ async def _invoke(
         except KeyError:
             # If we're cancelled before the task returns then the
             # cancel scope will not have been inserted yet
-            log.warn(
+            log.warning(
                 f"Task {func} likely errored or cancelled before it started")
         finally:
             if not actor._rpc_tasks:
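Note on the `log.warn` -> `log.warning` renames in this and the later hunks: in the stdlib `logging` API, `Logger.warn` is only a deprecated alias for `Logger.warning`, and the alias has been dropped from newer Python releases. A standalone sketch with plain `logging` (not tractor's `log` wrapper) showing the difference:

    # Plain stdlib logging; .warn is a deprecated alias for .warning and is
    # absent on recent Python versions.
    import logging
    import warnings

    logging.basicConfig(level=logging.WARNING)
    log = logging.getLogger("demo")

    log.warning("preferred spelling")

    if hasattr(log, "warn"):  # alias removed in newer Python releases
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            log.warn("deprecated alias")
        print([str(w.message) for w in caught])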
@@ -520,7 +520,7 @@ class Actor:
                     # deadlock and other weird behaviour)
                     if func != self.cancel:
                         if isinstance(cs, Exception):
-                            log.warn(f"Task for RPC func {func} failed with"
+                            log.warning(f"Task for RPC func {func} failed with"
                                      f"{cs}")
                         else:
                             # mark that we have ongoing rpc tasks
@@ -547,7 +547,7 @@ class Actor:
                     log.debug(
                         f"{chan} for {chan.uid} disconnected, cancelling tasks"
                     )
-                    self.cancel_rpc_tasks(chan)
+                    await self.cancel_rpc_tasks(chan)
 
         except trio.ClosedResourceError:
             log.error(f"{chan} form {chan.uid} broke")
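The added `await` above matters because, as the change implies, `cancel_rpc_tasks()` is an `async def` method here: calling it without `await` only creates a coroutine object, its body never runs, and CPython reports a "coroutine ... was never awaited" RuntimeWarning. A minimal sketch with a stand-in `cancel_rpc_tasks`, not tractor's actual method:

    import trio


    async def cancel_rpc_tasks() -> None:
        # stand-in for the real async cleanup method
        print("rpc tasks cancelled")


    async def main() -> None:
        cancel_rpc_tasks()        # bug: builds a coroutine object, body never runs;
                                  # CPython warns "coroutine ... was never awaited"
        await cancel_rpc_tasks()  # fix: the cleanup actually executes


    trio.run(main)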
@@ -757,7 +757,7 @@ class Actor:
 
             # tear down all lifetime contexts
             # api idea: ``tractor.open_context()``
-            log.warn("Closing all actor lifetime contexts")
+            log.warning("Closing all actor lifetime contexts")
             self._lifetime_stack.close()
 
             # Unregister actor from the arbiter
@@ -855,14 +855,14 @@ class Actor:
             # kill all ongoing tasks
             await self.cancel_rpc_tasks()
 
-            # cancel all rpc tasks permanently
-            if self._service_n:
-                self._service_n.cancel_scope.cancel()
-
             # stop channel server
             self.cancel_server()
             await self._server_down.wait()
 
+            # cancel all rpc tasks permanently
+            if self._service_n:
+                self._service_n.cancel_scope.cancel()
+
         log.warning(f"{self.uid} was sucessfullly cancelled")
         self._cancel_complete.set()
         return True
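With the move above, the service nursery's scope is only hard-cancelled after the channel server has been stopped and `_server_down` has fired. A minimal trio sketch of that "wait for confirmation, then cancel" ordering; the `serve` task, the `stop`/`down` events, and the nursery layout here are made up for illustration and are not tractor's actual internals:

    import trio


    async def serve(stop: trio.Event, down: trio.Event) -> None:
        # stand-in for a server task: run until asked to stop, then
        # confirm teardown by setting `down`.
        try:
            await stop.wait()
        finally:
            down.set()


    async def main() -> None:
        stop, down = trio.Event(), trio.Event()
        async with trio.open_nursery() as service_n:
            service_n.start_soon(serve, stop, down)

            # graceful first: ask the server to stop and wait until it
            # reports that it is actually down...
            stop.set()
            await down.wait()

            # ...then cancel whatever service tasks remain.
            service_n.cancel_scope.cancel()


    trio.run(main)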
@@ -1083,18 +1083,12 @@ async def _start_actor(
         try:
             result = await main()
         except (Exception, trio.MultiError) as err:
-            try:
-                log.exception("Actor crashed:")
-                await _debug._maybe_enter_pm(err)
+            log.exception("Actor crashed:")
+            await _debug._maybe_enter_pm(err)
 
-                raise
-
-            finally:
-                await actor.cancel()
-
-        # XXX: the actor is cancelled when this context is complete
-        # given that there are no more active peer channels connected
-        actor.cancel_server()
+            raise
+        finally:
+            await actor.cancel()
 
     # unset module state
     _state._current_actor = None
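Flattening the nested `try` above also changes when `actor.cancel()` runs: attached to the outer `try`, the `finally` now fires on the success path too, not only after the `except` branch. A tiny control-flow sketch, where the `print` calls stand in for `main()`, the crash logging, and `actor.cancel()`:

    def flow(fail: bool) -> None:
        try:
            if fail:
                raise RuntimeError("boom")
            print("main() returned normally")
        except Exception:
            print("logged the crash, re-raising")
            raise
        finally:
            # with the flattened layout this runs on both paths
            print("actor.cancel() equivalent")


    for fail in (False, True):
        try:
            flow(fail)
        except RuntimeError:
            pass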
@@ -234,8 +234,8 @@ def main(listener_fd, alive_r, preload, main_path=None, sys_path=None):
                             os.close(child_w)
                         else:
                             # This shouldn't happen really
-                            warnings.warn('forkserver: waitpid returned '
-                                          'unexpected pid %d' % pid)
+                            warnings.warning('forkserver: waitpid returned '
+                                             'unexpected pid %d' % pid)
 
                 if listener in rfds:
                     # Incoming fork request
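For reference, this rename is not symmetric with the `logging` ones above: the stdlib `warnings` module only exposes `warnings.warn()`, and there is no `warnings.warning()` callable on the module. A quick check:

    import warnings

    # `warn` is the module-level entry point for emitting a warning...
    warnings.warn("forkserver: waitpid returned unexpected pid %d" % 12345)

    # ...while no `warning` attribute exists on the module.
    print(hasattr(warnings, "warning"))  # False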
@@ -275,7 +275,7 @@ async def open_nursery() -> typing.AsyncGenerator[ActorNursery, None]:
                 # ria_nursery scope end
 
         # XXX: do we need a `trio.Cancelled` catch here as well?
-        except (Exception, trio.MultiError) as err:
+        except (Exception, trio.MultiError, trio.Cancelled) as err:
             # If actor-local error was raised while waiting on
             # ".run_in_actor()" actors then we also want to cancel all
             # remaining sub-actors (due to our lone strategy:
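The widened `except` matters because `trio.Cancelled` inherits from `BaseException`, not `Exception`, so the previous clause did not catch a bare cancellation. A minimal check of the hierarchy:

    import trio

    # Cancelled is deliberately kept out of the Exception hierarchy so that
    # broad `except Exception` handlers let cancellation propagate.
    print(issubclass(trio.Cancelled, BaseException))  # True
    print(issubclass(trio.Cancelled, Exception))      # False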