Compare commits
No commits in common. "6120e99d7ec8a8721f1a1f96bd5df32f2f631158" and "649c5e75049bc110c6f06d5dc05552b8d840d423" have entirely different histories.
6120e99d7e...649c5e7504
@@ -253,6 +253,7 @@ async def open_root_actor(
             logger.cancel("Shutting down root actor")
             await actor.cancel()
     finally:
+        _state._current_actor = None
         logger.runtime("Root actor terminated")

@@ -199,8 +199,8 @@ async def _invoke(
     except BaseExceptionGroup:
         # if a context error was set then likely
         # the multierror was raised due to that
-        if ctx._remote_ctx_error is not None:
-            raise ctx._remote_ctx_error from None
+        if ctx._error is not None:
+            raise ctx._error from None

         raise

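Both sides of this hunk re-raise a remote failure previously stashed on the context when the nursery's `BaseExceptionGroup` surfaces; they differ only in the attribute name (`_remote_ctx_error` on the left, `_error` on the right). A minimal standalone sketch of that pattern, assuming Python 3.11+ for the builtin `BaseExceptionGroup` (the `Ctx` class is an illustrative stand-in, not tractor's API):

# sketch: prefer a pre-recorded context error over the exception
# group raised while the task tree is torn down (Python 3.11+)
class Ctx:
    _error: BaseException | None = None

def reraise_ctx_error(ctx: Ctx) -> None:
    try:
        raise BaseExceptionGroup('teardown', [RuntimeError('task died')])
    except BaseExceptionGroup:
        if ctx._error is not None:
            # 'from None' keeps the noisy group out of the traceback
            raise ctx._error from None
        raise

ctx = Ctx()
ctx._error = ValueError('the real reason')
try:
    reraise_ctx_error(ctx)
except ValueError as err:
    print(f'unmasked: {err}')  # the stored error, not the group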
@@ -27,8 +27,7 @@ from typing import (
     Optional,
     Callable,
     AsyncGenerator,
-    AsyncIterator,
-    TYPE_CHECKING,
+    AsyncIterator
 )

 import warnings
@@ -42,10 +41,6 @@ from .log import get_logger
 from .trionics import broadcast_receiver, BroadcastReceiver

-
-if TYPE_CHECKING:
-    from ._portal import Portal
-

 log = get_logger(__name__)

@@ -75,7 +70,7 @@ class MsgStream(trio.abc.Channel):
     '''
     def __init__(
         self,
-        ctx: Context,  # typing: ignore # noqa
+        ctx: 'Context',  # typing: ignore # noqa
         rx_chan: trio.MemoryReceiveChannel,
         _broadcaster: Optional[BroadcastReceiver] = None,

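The quoted `'Context'` annotation here pairs with the `TYPE_CHECKING` import dropped in the hunks above: both idioms break an import cycle, one by importing the name only while type checking, the other by never importing it at all. A small sketch of the two styles side by side (the `some_pkg._portal` path is made up for illustration):

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # only evaluated by type checkers, so no runtime import cycle
    from some_pkg._portal import Portal  # hypothetical module path

class Context:
    ...

class MsgStream:
    # a string "forward reference": checkers resolve it lazily and
    # the interpreter never imports anything for it
    def __init__(self, ctx: 'Context') -> None:
        self._ctx = ctx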
@@ -88,9 +83,6 @@ class MsgStream(trio.abc.Channel):
         self._eoc: bool = False
         self._closed: bool = False

-    def ctx(self) -> Context:
-        return self._ctx
-
     # delegate directly to underlying mem channel
     def receive_nowait(self):
         msg = self._rx_chan.receive_nowait()
@@ -286,6 +278,7 @@ class MsgStream(trio.abc.Channel):
     @asynccontextmanager
     async def subscribe(
         self,
+
     ) -> AsyncIterator[BroadcastReceiver]:
         '''
         Allocate and return a ``BroadcastReceiver`` which delegates
@@ -342,8 +335,8 @@ class MsgStream(trio.abc.Channel):
         Send a message over this stream to the far end.

         '''
-        if self._ctx._remote_ctx_error:
-            raise self._ctx._remote_ctx_error  # from None
+        if self._ctx._error:
+            raise self._ctx._error  # from None

         if self._closed:
             raise trio.ClosedResourceError('This stream was already closed')
@@ -382,10 +375,9 @@ class Context:
     _remote_func_type: Optional[str] = None

     # only set on the caller side
-    _portal: Optional[Portal] = None  # type: ignore # noqa
-    _stream: Optional[MsgStream] = None
+    _portal: Optional['Portal'] = None  # type: ignore # noqa
     _result: Optional[Any] = False
-    _remote_ctx_error: Optional[BaseException] = None
+    _error: Optional[BaseException] = None

     # status flags
     _cancel_called: bool = False
@@ -398,7 +390,7 @@ class Context:
     # only set on the callee side
     _scope_nursery: Optional[trio.Nursery] = None

-    _backpressure: bool = True
+    _backpressure: bool = False

     async def send_yield(self, data: Any) -> None:

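The `_backpressure` default decides what happens when a receiver lags its sender. In bare trio terms the two settings map roughly onto a blocking `send()` versus a `send_nowait()` that errors out when the buffer is full; a sketch of that distinction, not tractor's actual feeder code:

import trio

async def feed(
    tx: trio.MemorySendChannel,
    msg: dict,
    backpressure: bool,
) -> None:
    if backpressure:
        # block this task until the receiver drains the buffer
        await tx.send(msg)
    else:
        try:
            tx.send_nowait(msg)
        except trio.WouldBlock:
            # buffer full: surface the overrun instead of waiting
            raise RuntimeError(f'receiver too slow for {msg}')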
@@ -443,26 +435,21 @@
         # (currently) that other portal APIs (``Portal.run()``,
         # ``.run_in_actor()``) do their own error checking at the point
         # of the call and result processing.
+        log.error(
+            f'Remote context error for {self.chan.uid}:{self.cid}:\n'
+            f'{msg["error"]["tb_str"]}'
+        )
         error = unpack_error(msg, self.chan)
         if (
-            isinstance(error, ContextCancelled)
+            isinstance(error, ContextCancelled) and
+            self._cancel_called
         ):
-            log.cancel(
-                f'Remote context error for {self.chan.uid}:{self.cid}:\n'
-                f'{msg["error"]["tb_str"]}'
-            )
-            if self._cancel_called:
-                # this is an expected cancel request response message
-                # and we don't need to raise it in scope since it will
-                # potentially override a real error
-                return
-        else:
-            log.error(
-                f'Remote context error for {self.chan.uid}:{self.cid}:\n'
-                f'{msg["error"]["tb_str"]}'
-            )
+            # this is an expected cancel request response message
+            # and we don't need to raise it in scope since it will
+            # potentially override a real error
+            return

-        self._remote_ctx_error = error
+        self._error = error

         # TODO: tempted to **not** do this by-reraising in a
         # nursery and instead cancel a surrounding scope, detect
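Folding `self._cancel_called` into the `if` condition makes the rule explicit: a `ContextCancelled` arriving after we ourselves requested cancellation is an expected acknowledgement and must not overwrite a real error. A toy version of that guard (stand-in class and function names, not tractor's):

class ContextCancelled(Exception):
    ...

def record_remote_error(
    error: BaseException,
    cancel_called: bool,
) -> BaseException | None:
    # expected response to our own cancel request: swallow it so it
    # can't mask a genuine failure recorded around the same time
    if isinstance(error, ContextCancelled) and cancel_called:
        return None
    return error

assert record_remote_error(ContextCancelled(), cancel_called=True) is None
assert record_remote_error(ValueError('boom'), cancel_called=True) is not None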
@@ -470,7 +457,7 @@
         if self._scope_nursery:

             async def raiser():
-                raise self._remote_ctx_error from None
+                raise self._error from None

             # from trio.testing import wait_all_tasks_blocked
             # await wait_all_tasks_blocked()
@@ -496,7 +483,6 @@
         log.cancel(f'Cancelling {side} side of context to {self.chan.uid}')

         self._cancel_called = True
-        ipc_broken: bool = False

         if side == 'caller':
             if not self._portal:
@@ -514,14 +500,7 @@
             # NOTE: we're telling the far end actor to cancel a task
             # corresponding to *this actor*. The far end local channel
             # instance is passed to `Actor._cancel_task()` implicitly.
-            try:
-                await self._portal.run_from_ns(
-                    'self',
-                    '_cancel_task',
-                    cid=cid,
-                )
-            except trio.BrokenResourceError:
-                ipc_broken = True
+            await self._portal.run_from_ns('self', '_cancel_task', cid=cid)

         if cs.cancelled_caught:
             # XXX: there's no way to know if the remote task was indeed
@@ -537,10 +516,7 @@
                     "Timed out on cancelling remote task "
                     f"{cid} for {self._portal.channel.uid}")

-            elif ipc_broken:
-                log.cancel(
-                    "Transport layer was broken before cancel request "
-                    f"{cid} for {self._portal.channel.uid}")
+
         # callee side remote task
         else:
             self._cancel_msg = msg
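The left side's dropped `try`/`except trio.BrokenResourceError` tolerated the transport dying before the cancel request went out, downgrading it to an `ipc_broken` log line instead of a crash. A sketch of that tolerance around the same `run_from_ns()` call (the `portal` argument is assumed to be a connected tractor portal):

import trio

async def request_cancel(portal, cid: str) -> bool:
    '''Ask the far end to cancel task `cid`; report transport death
    instead of raising, mirroring the left side's `ipc_broken` flag.
    '''
    try:
        await portal.run_from_ns('self', '_cancel_task', cid=cid)
        return True
    except trio.BrokenResourceError:
        # channel already gone: the remote task is dead or unreachable
        return False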
@@ -628,7 +604,6 @@
             ctx=self,
             rx_chan=ctx._recv_chan,
         ) as stream:
-            self._stream = stream

             if self._portal:
                 self._portal._streams.add(stream)
@@ -670,22 +645,25 @@

         if not self._recv_chan._closed:  # type: ignore

-            def consume(
-                msg: dict,
+            # wait for a final context result consuming
+            # and discarding any bi dir stream msgs still
+            # in transit from the far end.
+            while True:

-            ) -> Optional[dict]:
+                msg = await self._recv_chan.receive()
                 try:
-                    return msg['return']
+                    self._result = msg['return']
+                    break
                 except KeyError as msgerr:

                     if 'yield' in msg:
                         # far end task is still streaming to us so discard
                         log.warning(f'Discarding stream delivered {msg}')
-                        return
+                        continue

                     elif 'stop' in msg:
                         log.debug('Remote stream terminated')
-                        return
+                        continue

                     # internal error should never get here
                     assert msg.get('cid'), (
@@ -695,25 +673,6 @@
                     msg, self._portal.channel
                 ) from msgerr

-        # wait for a final context result consuming
-        # and discarding any bi dir stream msgs still
-        # in transit from the far end.
-        if self._stream:
-            async with self._stream.subscribe() as bstream:
-                async for msg in bstream:
-                    result = consume(msg)
-                    if result:
-                        self._result = result
-                        break
-
-        if not self._result:
-            while True:
-                msg = await self._recv_chan.receive()
-                result = consume(msg)
-                if result:
-                    self._result = result
-                    break
-
         return self._result

     async def started(
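Taken together, the last two hunks replace the left side's `consume()` helper and broadcast-replay fallback with a single inline receive loop on the right: keep pulling messages, discarding in-flight 'yield'/'stop' traffic, until the final 'return' arrives. That drain pattern can be exercised standalone with a plain trio memory channel (a sketch, not tractor's code):

import trio

async def drain_for_result(rx: trio.MemoryReceiveChannel) -> object:
    # keep receiving, dropping stream traffic, until the result msg
    while True:
        msg = await rx.receive()
        try:
            return msg['return']
        except KeyError:
            if 'yield' in msg:
                continue  # far end still streaming; discard
            if 'stop' in msg:
                continue  # stream closed; result still pending
            raise  # unknown msg shape: surface it

async def main() -> None:
    tx, rx = trio.open_memory_channel(8)
    for msg in ({'yield': 1}, {'yield': 2}, {'stop': True}, {'return': 42}):
        await tx.send(msg)
    assert await drain_for_result(rx) == 42

trio.run(main)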