'''
``async with ():`` inlined context-stream cancellation testing.

Verify that we raise errors when streams are opened prior to
sync-opening a ``tractor.Context`` beforehand.

'''
from itertools import count
import math
import platform
from pprint import pformat
from typing import (
    Callable,
)

import pytest
import trio
import tractor
from tractor import (
    Actor,
    Context,
    current_actor,
)
from tractor._exceptions import (
    StreamOverrun,
    ContextCancelled,
)
from tractor._state import current_ipc_ctx

from tractor._testing import (
    tractor_test,
    expect_ctxc,
)

# ``Context`` semantics are as follows,
# ------------------------------------
# - standard setup/teardown:
#   ``Portal.open_context()`` starts a new
#   remote task context in another actor. The target actor's task must
#   call ``Context.started()`` to unblock this entry on the caller side.
#   The callee task executes until complete and returns a final value
#   which is delivered to the caller side and retrieved via
#   ``Context.result()``.

# - cancel termination:
#   context can be cancelled on either side where either end's task can
#   call ``Context.cancel()`` which raises a local ``trio.Cancelled``
#   and sends a task cancel request to the remote task which in turn
#   raises a ``trio.Cancelled`` in that scope, catches it, and re-raises
#   as ``ContextCancelled``. This is then caught by
#   ``Portal.open_context()``'s exit and we get a graceful termination
#   of the linked tasks.

# - error termination:
#   error is caught after all context-cancel-scope tasks are cancelled
#   via regular ``trio`` cancel scope semantics, error is sent to other
#   side and unpacked as a `RemoteActorError`.


# ``Context.open_stream() as stream: MsgStream:`` msg semantics are:
# -----------------------------------------------------------------
# - either side can ``.send()`` which emits 'yield' msgs and delivers
#   a value to the ``MsgStream.receive()`` call.

# - stream closure: one end relays a 'stop' message which terminates an
#   ongoing ``MsgStream`` iteration.

# - cancel/error termination: as per the context semantics above but
#   with implicit stream closure on the cancelling end.
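
# A minimal sketch of the protocol described above (illustration only;
# these helpers are not used by any test below and the particular values
# passed through `.started()`/`return` are arbitrary assumptions): the
# child unblocks the parent's `Portal.open_context()` entry via
# `.started()`, optionally streams, then returns a final value which the
# parent retrieves via `Context.result()`.

@tractor.context
async def _example_child(
    ctx: Context,
) -> str:
    # unblock the parent's `Portal.open_context()` entry
    await ctx.started('setup-done')

    # echo anything the parent sends until it closes its stream end
    async with ctx.open_stream() as stream:
        async for msg in stream:
            await stream.send(msg)

    # final value, delivered to the parent via `Context.result()`
    return 'final-value'


async def _example_parent(
    portal: tractor.Portal,
) -> None:
    async with portal.open_context(_example_child) as (ctx, first):
        assert first == 'setup-done'

        async with ctx.open_stream() as stream:
            await stream.send('ping')
            assert await stream.receive() == 'ping'

        # stream closed above -> the child's echo loop ends and it
        # returns; collect that final value (alternatively either side
        # can call `ctx.cancel()` as per the cancel-termination notes).
        assert await ctx.result() == 'final-value'
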

_state: bool = False


@tractor.context
async def too_many_starteds(
    ctx: Context,
) -> None:
    '''
    Call ``Context.started()`` more than once (an error).

    '''
    await ctx.started()
    try:
        await ctx.started()
    except RuntimeError:
        raise


@tractor.context
async def not_started_but_stream_opened(
    ctx: Context,
) -> None:
    '''
    Enter ``Context.open_stream()`` without calling ``.started()``.

    '''
    try:
        async with ctx.open_stream():
            assert 0
    except RuntimeError:
        raise


@pytest.mark.parametrize(
    'target',
    [
        too_many_starteds,
        not_started_but_stream_opened,
    ],
    ids='misuse_type={}'.format,
)
def test_started_misuse(
    target: Callable,
    debug_mode: bool,
):
    async def main():
        async with tractor.open_nursery(
            debug_mode=debug_mode,
        ) as an:
            portal = await an.start_actor(
                target.__name__,
                enable_modules=[__name__],
            )

            async with portal.open_context(target) as (ctx, sent):
                await trio.sleep(1)

    with pytest.raises(tractor.RemoteActorError):
        trio.run(main)

@tractor.context
async def simple_setup_teardown(

    ctx: Context,
    data: int,
    block_forever: bool = False,

) -> None:

    # startup phase
    global _state
    _state = True

    assert current_ipc_ctx() is ctx

    # signal to parent that we're up
    await ctx.started(data + 1)

    try:
        if block_forever:
            # block until cancelled
            await trio.sleep_forever()
        else:
            return 'yo'
    finally:
        _state = False


async def assert_state(value: bool):
    global _state
    assert _state == value

@pytest.mark.parametrize(
    'error_parent',
    [False, ValueError, KeyboardInterrupt],
)
@pytest.mark.parametrize(
    'callee_blocks_forever',
    [False, True],
    ids=lambda item: f'callee_blocks_forever={item}'
)
@pytest.mark.parametrize(
    'pointlessly_open_stream',
    [False, True],
    ids=lambda item: f'open_stream={item}'
)
def test_simple_context(
    error_parent,
    callee_blocks_forever,
    pointlessly_open_stream,
    debug_mode: bool,
):
    timeout = 1.5 if not platform.system() == 'Windows' else 4

    async def main():

        with trio.fail_after(timeout):
            async with tractor.open_nursery(
                debug_mode=debug_mode,
            ) as an:
                portal = await an.start_actor(
                    'simple_context',
                    enable_modules=[__name__],
                )

                try:
                    async with (
                        portal.open_context(
                            simple_setup_teardown,
                            data=10,
                            block_forever=callee_blocks_forever,
                        ) as (ctx, sent),
                    ):
                        assert current_ipc_ctx() is ctx
                        assert sent == 11

                        if callee_blocks_forever:
                            await portal.run(assert_state, value=True)
                        else:
                            assert await ctx.result() == 'yo'

                        if pointlessly_open_stream:
                            async with ctx.open_stream():
                                if error_parent:
                                    raise error_parent

                                if callee_blocks_forever:
                                    await ctx.cancel()
                                else:
                                    # in this case the stream will send a
                                    # 'stop' msg to the far end which needs
                                    # to be ignored
                                    pass

                        else:
                            if error_parent:
                                raise error_parent

                            # cancel AFTER we open a stream
                            # to avoid a cancel raised inside
                            # `.open_stream()`
                            await ctx.cancel()

                finally:
                    # after cancellation
                    if not error_parent:
                        await portal.run(assert_state, value=False)

                    # shut down daemon
                    await portal.cancel_actor()

    if error_parent:
        try:
            trio.run(main)

        except error_parent:
            pass

        except BaseExceptionGroup as beg:
            # XXX: on windows it seems we may have to expect the group error
            from tractor._exceptions import is_multi_cancelled
            assert is_multi_cancelled(beg)

    else:
        trio.run(main)

@pytest.mark.parametrize(
    'callee_returns_early',
    [True, False],
    ids=lambda item: f'callee_returns_early={item}'
)
@pytest.mark.parametrize(
    'cancel_method',
    ['ctx', 'portal'],
    ids=lambda item: f'cancel_method={item}'
)
@pytest.mark.parametrize(
    'chk_ctx_result_before_exit',
    [True, False],
    ids=lambda item: f'chk_ctx_result_before_exit={item}'
)
def test_caller_cancels(
    cancel_method: str,
    chk_ctx_result_before_exit: bool,
    callee_returns_early: bool,
    debug_mode: bool,
):
    '''
    Verify that when the opening side of a context (aka the caller)
    cancels that context, the ctx does not raise a cancelled when
    either calling `.result()` or on context exit.

    '''
    async def check_canceller(
        ctx: Context,
    ) -> None:
        actor: Actor = current_actor()
        uid: tuple = actor.uid
        _ctxc: ContextCancelled|None = None

        if (
            cancel_method == 'portal'
            and not callee_returns_early
        ):
            try:
                res = await ctx.result()
                assert 0, 'Portal cancel should raise!'

            except ContextCancelled as ctxc:
                # with trio.CancelScope(shield=True):
                #     await tractor.pause()
                _ctxc = ctxc
                assert ctx.chan._cancel_called
                assert ctxc.canceller == uid
                assert ctxc is ctx.maybe_error

        # NOTE: should not ever raise even in the `ctx`
        # case since self-cancellation should swallow the ctxc
        # silently!
        else:
            try:
                res = await ctx.result()
            except ContextCancelled as ctxc:
                pytest.fail(f'should not have raised ctxc\n{ctxc}')

        # we actually get a result
        if callee_returns_early:
            assert res == 'yo'
            assert ctx.outcome is res
            assert ctx.maybe_error is None

        else:
            err: Exception = ctx.outcome
            assert isinstance(err, ContextCancelled)
            assert (
                tuple(err.canceller)
                ==
                uid
            )
            assert (
                err
                is ctx.maybe_error
                is ctx._remote_error
            )
            if le := ctx._local_error:
                assert err is le

            # else:
            # TODO: what should this be then?
            # not defined until block closes right?
            #
            # await tractor.pause()
            # assert ctx._local_error is None

        # TODO: don't need this right?
        # if _ctxc:
        #     raise _ctxc

    async def main():

        async with tractor.open_nursery(
            debug_mode=debug_mode,
        ) as an:
            portal = await an.start_actor(
                'simple_context',
                enable_modules=[__name__],
            )
            timeout: float = (
                0.5
                if not callee_returns_early
                else 2
            )
            with trio.fail_after(timeout):
                async with (
                    expect_ctxc(
                        yay=(
                            not callee_returns_early
                            and cancel_method == 'portal'
                        )
                    ),

                    portal.open_context(
                        simple_setup_teardown,
                        data=10,
                        block_forever=not callee_returns_early,
                    ) as (ctx, sent),
                ):

                    if callee_returns_early:
                        # ensure we block long enough before sending
                        # a cancel such that the callee has already
                        # returned its result.
                        await trio.sleep(0.5)

                    if cancel_method == 'ctx':
                        print('cancelling with `Context.cancel()`')
                        await ctx.cancel()

                    elif cancel_method == 'portal':
                        print('cancelling with `Portal.cancel_actor()`')
                        await portal.cancel_actor()

                    else:
                        pytest.fail(
                            f'Unknown `cancel_method={cancel_method} ?'
                        )

                    if chk_ctx_result_before_exit:
                        await check_canceller(ctx)

            if not chk_ctx_result_before_exit:
                await check_canceller(ctx)

            if cancel_method != 'portal':
                await portal.cancel_actor()

            # XXX NOTE XXX: non-normal yet purposeful
            # test-specific ctxc suppression is implemented!
            #
            # WHY: the `.cancel_actor()` case (cancel_method='portal')
            # will cause both:
            #  * the `ctx.result()` inside `.open_context().__aexit__()`
            #  * AND the `ctx.result()` inside `check_canceller()`
            # to raise ctxc.
            #
            # which should in turn cause `ctx._scope` to
            # catch any cancellation?
            if (
                not callee_returns_early
                and cancel_method != 'portal'
            ):
                assert not ctx._scope.cancelled_caught

    trio.run(main)

# basic stream terminations:
# - callee context closes without using stream
# - caller context closes without using stream
# - caller context calls `Context.cancel()` while streaming
#   is ongoing resulting in callee being cancelled
#   (a condensed sketch of this case is included just below)
# - callee calls `Context.cancel()` while streaming and caller
#   sees stream terminated in `RemoteActorError`

# TODO: future possible features
# - restart request: far end raises `ContextRestart`
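
# A condensed sketch of the 'caller cancels while streaming' case above
# (illustration only; this helper is not used by any test below and it
# reuses the `expect_cancelled` echo-server defined further down):

async def _example_cancel_while_streaming(
    portal: tractor.Portal,
) -> None:
    async with portal.open_context(expect_cancelled) as (ctx, _):
        async with ctx.open_stream() as stream:
            await stream.send('yo')
            assert await stream.receive() == 'yo'

            # request remote-task cancellation mid-stream; the child's
            # RPC task sees a `trio.Cancelled` which comes back as a
            # `ContextCancelled` and is absorbed by `open_context()`'s
            # exit since this side requested the cancel.
            await ctx.cancel()
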

@tractor.context
async def close_ctx_immediately(

    ctx: Context,

) -> None:

    await ctx.started()
    global _state

    async with ctx.open_stream():
        pass


@tractor_test
async def test_callee_closes_ctx_after_stream_open(
    debug_mode: bool,
):
    '''
    callee context closes without using stream.

    This should result in a msg sequence
    |_<root>_
             |_<fast_stream_closer>

                <= {'started': <Any>, 'cid': <str>}
                <= {'stop': True, 'cid': <str>}
                <= {'result': Any, ..}

                (ignored by child)
                => {'stop': True, 'cid': <str>}

    '''
    async with tractor.open_nursery(
        debug_mode=debug_mode,
    ) as an:
        portal = await an.start_actor(
            'fast_stream_closer',
            enable_modules=[__name__],
        )

        with trio.fail_after(0.5):
            async with portal.open_context(
                close_ctx_immediately,

                # flag to avoid waiting on the final result
                # cancel_on_exit=True,

            ) as (ctx, sent):
                assert sent is None

                with trio.fail_after(0.4):
                    async with ctx.open_stream() as stream:

                        # should fall through since ``StopAsyncIteration``
                        # should be raised through translation of
                        # a ``trio.EndOfChannel`` by
                        # ``trio.abc.ReceiveChannel.__anext__()``
                        async for _ in stream:
                            # trigger failure if we DO NOT
                            # get an EOC!
                            assert 0
                        else:
                            # verify stream is now closed
                            try:
                                with trio.fail_after(0.3):
                                    await stream.receive()
                            except trio.EndOfChannel:
                                pass

                # TODO: we should just raise the closed resource err
                # directly here to enforce not allowing a re-open
                # of a stream to the context (at least until a time of
                # if/when we decide that's a good idea?)
                try:
                    with trio.fail_after(0.5):
                        async with ctx.open_stream() as stream:
                            pass
                except trio.ClosedResourceError:
                    pass

        await portal.cancel_actor()

@tractor.context
async def expect_cancelled(
    ctx: Context,

) -> None:
    global _state
    _state = True

    await ctx.started()

    try:
        async with ctx.open_stream() as stream:
            async for msg in stream:
                await stream.send(msg)  # echo server

    except trio.Cancelled:

        # on ctx.cancel() the internal RPC scope is cancelled but
        # never caught until the func exits.
        assert ctx._scope.cancel_called
        assert not ctx._scope.cancelled_caught

        # should be the RPC cmd request for `._cancel_task()`
        assert ctx._cancel_msg
        # which, has not yet resolved to an error outcome
        # since this rpc func has not yet exited.
        assert not ctx.maybe_error
        assert not ctx._final_result_is_set()

        # debug REPL if needed
        # with trio.CancelScope(shield=True):
        #     await tractor.pause()

        # expected case
        _state = False
        raise

    else:
        assert 0, "callee wasn't cancelled !?"


@pytest.mark.parametrize(
    'use_ctx_cancel_method',
    [False, True],
)
@tractor_test
async def test_caller_closes_ctx_after_callee_opens_stream(
    use_ctx_cancel_method: bool,
    debug_mode: bool,
):
    '''
    caller context closes without using/opening stream

    '''
    async with tractor.open_nursery(
        debug_mode=debug_mode,
    ) as an:

        root: Actor = current_actor()
        portal = await an.start_actor(
            'ctx_cancelled',
            enable_modules=[__name__],
        )

        async with portal.open_context(
            expect_cancelled,
        ) as (ctx, sent):
            assert sent is None

            await portal.run(assert_state, value=True)

            # call `ctx.cancel()` explicitly
            if use_ctx_cancel_method:
                await ctx.cancel()

                # NOTE: means the local side `ctx._scope` will
                # have been cancelled by a ctxc ack and thus
                # `._scope.cancelled_caught` should be set.
                try:
                    async with ctx.open_stream() as stream:
                        async for msg in stream:
                            pass

                except tractor.ContextCancelled as ctxc:
                    # XXX: the cause is US since we call
                    # `Context.cancel()` just above!
                    assert (
                        ctxc.canceller
                        ==
                        current_actor().uid
                        ==
                        root.uid
                    )

                    # XXX: must be propagated to __aexit__
                    # and should be silently absorbed there
                    # since we called `.cancel()` just above ;)
                    raise

                else:
                    assert 0, "Should have context cancelled?"

                # channel should still be up
                assert portal.channel.connected()

                # ctx is closed here
                await portal.run(
                    assert_state,
                    value=False,
                )

            else:
                try:
                    with trio.fail_after(0.2):
                        await ctx.result()
                        assert 0, "Callee should have blocked!?"
                except trio.TooSlowError:
                    # NO-OP -> since already called above
                    await ctx.cancel()

        # NOTE: local scope should have absorbed the cancellation since
        # in this case we call `ctx.cancel()` and the local
        # `._scope` does not get `.cancel_called` and thus
        # `.cancelled_caught` neither will ever be set.
        if use_ctx_cancel_method:
            assert not ctx._scope.cancelled_caught

        # rxed ctxc response from far end
        assert ctx.cancel_acked
        assert (
            ctx._remote_error
            is ctx._local_error
            is ctx.maybe_error
            is ctx.outcome
        )

        try:
            async with ctx.open_stream() as stream:
                async for msg in stream:
                    pass
        except tractor.ContextCancelled:
            pass
        else:
            assert 0, "Should have received closed resource error?"

        # ctx is closed here
        await portal.run(assert_state, value=False)

        # channel should not have been destroyed yet, only the
        # inter-actor-task context
        assert portal.channel.connected()

        # teardown the actor
        await portal.cancel_actor()


@tractor_test
async def test_multitask_caller_cancels_from_nonroot_task(
    debug_mode: bool,
):
    async with tractor.open_nursery(
        debug_mode=debug_mode,
    ) as an:
        portal = await an.start_actor(
            'ctx_cancelled',
            enable_modules=[__name__],
        )

        async with portal.open_context(
            expect_cancelled,
        ) as (ctx, sent):

            await portal.run(assert_state, value=True)
            assert sent is None

            async with ctx.open_stream() as stream:

                async def send_msg_then_cancel():
                    await stream.send('yo')
                    await portal.run(assert_state, value=True)
                    await ctx.cancel()
                    await portal.run(assert_state, value=False)

                async with trio.open_nursery() as n:
                    n.start_soon(send_msg_then_cancel)

                    try:
                        async for msg in stream:
                            assert msg == 'yo'

                    except tractor.ContextCancelled:
                        raise  # XXX: must be propagated to __aexit__

            # channel should still be up
            assert portal.channel.connected()

            # ctx is closed here
            await portal.run(assert_state, value=False)

        # channel should not have been destroyed yet, only the
        # inter-actor-task context
        assert portal.channel.connected()

        # teardown the actor
        await portal.cancel_actor()

@tractor.context
async def cancel_self(

    ctx: Context,

) -> None:
    global _state
    _state = True

    # since we call this, the below `.open_stream()` should always
    # error!
    await ctx.cancel()

    # should inline raise immediately
    try:
        async with ctx.open_stream():
            pass
    # except tractor.ContextCancelled:
    except RuntimeError:
        # suppress for now so we can do checkpoint tests below
        print('Got expected runtime error for stream-after-cancel')

    else:
        raise RuntimeError('Context didnt cancel itself?!')

    # check that ``trio.Cancelled`` is now raised on any further
    # checkpoints since the self cancel above will have cancelled
    # the `Context._scope.cancel_scope: trio.CancelScope`
    try:
        with trio.fail_after(0.1):
            await trio.sleep_forever()
    except trio.Cancelled:
        raise

    except trio.TooSlowError:
        # should never get here
        assert 0

    raise RuntimeError('Context didnt cancel itself?!')


@tractor_test
async def test_callee_cancels_before_started(
    debug_mode: bool,
):
    '''
    Callee calls `Context.cancel()` while streaming and caller
    sees stream terminated in `ContextCancelled`.

    '''
    async with tractor.open_nursery(
        debug_mode=debug_mode,
    ) as an:
        portal = await an.start_actor(
            'cancels_self',
            enable_modules=[__name__],
        )
        try:

            async with portal.open_context(
                cancel_self,
            ) as (ctx, sent):
                async with ctx.open_stream():
                    await trio.sleep_forever()

        # raises a special cancel signal
        except tractor.ContextCancelled as ce:
            _ce = ce  # for debug on crash
            ce.boxed_type == trio.Cancelled

            # the traceback should be informative
            assert 'itself' in ce.tb_str
            assert ce.tb_str == ce.msgdata['tb_str']

        # teardown the actor
        await portal.cancel_actor()

@tractor.context
async def never_open_stream(

    ctx: Context,

) -> None:
    '''
    Context which never opens a stream and blocks.

    '''
    await ctx.started()
    await trio.sleep_forever()


@tractor.context
async def keep_sending_from_callee(

    ctx: Context,
    msg_buffer_size: int|None = None,

) -> None:
    '''
    Send endlessly on the callee stream.

    '''
    await ctx.started()
    async with ctx.open_stream(
        msg_buffer_size=msg_buffer_size,
    ) as stream:
        for msg in count():
            print(f'callee sending {msg}')
            await stream.send(msg)
            await trio.sleep(0.01)


@pytest.mark.parametrize(
    'overrun_by',
    [
        ('caller', 1, never_open_stream),
        ('callee', 0, keep_sending_from_callee),
    ],
    ids=[
        ('caller_1buf_never_open_stream'),
        ('callee_0buf_keep_sending_from_callee'),
    ]
)
def test_one_end_stream_not_opened(
    overrun_by: tuple[str, int, Callable],
    debug_mode: bool,
):
    '''
    This should exemplify the bug from:
    https://github.com/goodboy/tractor/issues/265

    '''
    overrunner, buf_size_increase, entrypoint = overrun_by
    from tractor._runtime import Actor
    buf_size = buf_size_increase + Actor.msg_buffer_size

    async def main():
        async with tractor.open_nursery(
            debug_mode=debug_mode,
        ) as an:
            portal = await an.start_actor(
                entrypoint.__name__,
                enable_modules=[__name__],
            )

            with trio.fail_after(1):
                async with portal.open_context(
                    entrypoint,
                ) as (ctx, sent):
                    assert sent is None

                    if 'caller' in overrunner:

                        async with ctx.open_stream() as stream:

                            # itersend +1 msg more than the buffer size
                            # to cause the most basic overrun.
                            for i in range(buf_size):
                                print(f'sending {i}')
                                await stream.send(i)

                            else:
                                # expect overrun error to be relayed back
                                # and this sleep interrupted
                                await trio.sleep_forever()

                    else:
                        # callee overruns caller case so we do nothing here
                        await trio.sleep_forever()

            await portal.cancel_actor()

    # 2 overrun cases and the no overrun case (which pushes right up to
    # the msg limit)
    if (
        overrunner == 'caller'
    ):
        with pytest.raises(tractor.RemoteActorError) as excinfo:
            trio.run(main)

        assert excinfo.value.boxed_type == StreamOverrun

    elif overrunner == 'callee':
        with pytest.raises(tractor.RemoteActorError) as excinfo:
            trio.run(main)

        # TODO: embedded remote errors so that we can verify the source
        # error? the callee delivers an error which is an overrun
        # wrapped in a remote actor error.
        assert excinfo.value.boxed_type == tractor.RemoteActorError

    else:
        trio.run(main)

@tractor.context
async def echo_back_sequence(

    ctx: Context,
    seq: list[int],
    wait_for_cancel: bool,
    allow_overruns_side: str,
    be_slow: bool = False,
    msg_buffer_size: int = 1,

) -> None:
    '''
    Send endlessly on the callee stream using a small buffer size
    setting on the context to simulate backlogging that would normally
    cause overruns.

    '''
    # NOTE: ensure that if the caller is expecting to cancel this task
    # that we stay echoing much longer than they are so we don't
    # return early instead of receive the cancel msg.
    total_batches: int = (
        1000 if wait_for_cancel
        else 6
    )

    await ctx.started()
    # await tractor.pause()
    async with ctx.open_stream(
        msg_buffer_size=msg_buffer_size,

        # literally the point of this test XD
        allow_overruns=(allow_overruns_side in {'child', 'both'}),
    ) as stream:

        # ensure mem chan settings are correct
        assert (
            ctx._send_chan._state.max_buffer_size
            ==
            msg_buffer_size
        )

        seq = list(seq)  # bleh, msgpack sometimes ain't decoded right
        for i in range(total_batches):
            print(f'starting new stream batch {i} iter in child')
            batch = []

            # EoC case, delay a little instead of hot
            # iter-stopping (since apparently py3.11+ can do that
            # faster than a ctxc can be sent) on the async for
            # loop when child was requested to ctxc.
            if (
                stream.closed
                or
                ctx.cancel_called
            ):
                print('child stream already closed!?!')
                await trio.sleep(0.05)
                continue

            async for msg in stream:
                batch.append(msg)
                if batch == seq:
                    break

                if be_slow:
                    await trio.sleep(0.05)

                print('callee waiting on next')

            print(f'callee echoing back latest batch\n{batch}')
            for msg in batch:
                print(f'callee sending msg\n{msg}')
                await stream.send(msg)

    try:
        return 'yo'
    finally:
        print(
            'exiting callee with context:\n'
            f'{pformat(ctx)}\n'
        )


@pytest.mark.parametrize(
    # aka the side that will / should raise
    # and overrun under normal conditions.
    'allow_overruns_side',
    ['parent', 'child', 'none', 'both'],
    ids=lambda item: f'allow_overruns_side={item}'
)
@pytest.mark.parametrize(
    # aka the side that will / should raise
    # and overrun under normal conditions.
    'slow_side',
    ['parent', 'child'],
    ids=lambda item: f'slow_side={item}'
)
@pytest.mark.parametrize(
    'cancel_ctx',
    [True, False],
    ids=lambda item: f'cancel_ctx={item}'
)
def test_maybe_allow_overruns_stream(
    cancel_ctx: bool,
    slow_side: str,
    allow_overruns_side: str,

    # conftest wide
    loglevel: str,
    debug_mode: bool,
):
    '''
    Demonstrate small overruns of each task back and forth
    on a stream not raising any errors by default by setting
    the ``allow_overruns=True``.

    The original idea here was to show that if you set the feeder mem
    chan to a size smaller than the # of msgs sent you could not
    get a `StreamOverrun` crash plus maybe get all the msgs that were
    sent. The problem with the "real backpressure" case is that due to
    the current arch it can result in the msg loop being blocked and thus
    blocking cancellation - which is like super bad. So instead this test
    had to be adjusted to more or less just "not send overrun errors" so
    as to handle the case where the sender just moreso cares about not
    getting errored out when it sends too fast..

    '''
    async def main():
        async with tractor.open_nursery(
            debug_mode=debug_mode,
        ) as an:
            portal = await an.start_actor(
                'callee_sends_forever',
                enable_modules=[__name__],
                loglevel=loglevel,
                debug_mode=debug_mode,
            )

            # stream-sequence batch info with send delay to determine
            # approx timeout determining whether test has hung.
            total_batches: int = 2
            num_items: int = 10
            seq = list(range(num_items))
            parent_send_delay: float = 0.16
            timeout: float = math.ceil(
                total_batches * num_items * parent_send_delay
            )
            with trio.fail_after(timeout):
                async with portal.open_context(
                    echo_back_sequence,
                    seq=seq,
                    wait_for_cancel=cancel_ctx,
                    be_slow=(slow_side == 'child'),
                    allow_overruns_side=allow_overruns_side,

                ) as (ctx, sent):
                    assert sent is None

                    async with ctx.open_stream(
                        msg_buffer_size=1 if slow_side == 'parent' else None,
                        allow_overruns=(allow_overruns_side in {'parent', 'both'}),
                    ) as stream:

                        for _ in range(total_batches):
                            for msg in seq:
                                # print(f'root tx {msg}')
                                await stream.send(msg)
                                if slow_side == 'parent':
                                    # NOTE: we make the parent slightly
                                    # slower, when it is slow, to make sure
                                    # that in the overruns everywhere case
                                    await trio.sleep(parent_send_delay)

                            batch = []
                            async for msg in stream:
                                print(f'root rx {msg}')
                                batch.append(msg)
                                if batch == seq:
                                    break

                    if cancel_ctx:
                        # cancel the remote task
                        print('Requesting `ctx.cancel()` in parent!')
                        await ctx.cancel()

                    res: str|ContextCancelled = await ctx.result()

                    if cancel_ctx:
                        assert isinstance(res, ContextCancelled)
                        assert tuple(res.canceller) == current_actor().uid

                    else:
                        print(f'RX ROOT SIDE RESULT {res}')
                        assert res == 'yo'

            # cancel the daemon
            await portal.cancel_actor()

    if (
        allow_overruns_side == 'both'
        or slow_side == allow_overruns_side
    ):
        trio.run(main)

    elif (
        slow_side != allow_overruns_side
    ):

        with pytest.raises(tractor.RemoteActorError) as excinfo:
            trio.run(main)

        err = excinfo.value

        if (
            allow_overruns_side == 'none'
        ):
            # depends on timing; it is racy which side will
            # overrun first :sadkitty:

            # NOTE: i tried to isolate to a deterministic case here
            # based on timing, but i was kinda wasted, and i don't
            # think it's sane to catch them..
            assert err.boxed_type in (
                tractor.RemoteActorError,
                StreamOverrun,
            )

        elif (
            slow_side == 'child'
        ):
            assert err.boxed_type == StreamOverrun

        elif slow_side == 'parent':
            assert err.boxed_type == tractor.RemoteActorError
            assert 'StreamOverrun' in err.tb_str
            assert err.tb_str == err.msgdata['tb_str']

        else:
            # if this hits the logic blocks from above are not
            # exhaustive..
            pytest.fail('PARAMETRIZED CASE GEN PROBLEM YO')
2024-03-11 14:24:44 +00:00
|
|
|
|
|
|
|
|
|
|
|
def test_ctx_with_self_actor(
|
|
|
|
loglevel: str,
|
|
|
|
debug_mode: bool,
|
|
|
|
):
|
|
|
|
'''
|
|
|
|
NOTE: for now this is an INVALID OP!
|
|
|
|
|
|
|
|
BUT, eventually presuming we add a "side" key to `Actor.get_context()`,
|
|
|
|
we might be able to get this working symmetrically, but should we??
|
|
|
|
|
|
|
|
Open a context back to the same actor and ensure all cancellation
|
|
|
|
and error semantics hold the same.
|
|
|
|
|
|
|
|
'''
|
|
|
|
async def main():
|
|
|
|
async with tractor.open_nursery(
|
|
|
|
debug_mode=debug_mode,
|
|
|
|
enable_modules=[__name__],
|
|
|
|
) as an:
|
|
|
|
assert an
|
|
|
|
async with (
|
|
|
|
tractor.find_actor('root') as portal,
|
|
|
|
portal.open_context(
|
|
|
|
expect_cancelled,
|
|
|
|
# echo_back_sequence,
|
|
|
|
# seq=seq,
|
|
|
|
# wait_for_cancel=cancel_ctx,
|
|
|
|
# be_slow=(slow_side == 'child'),
|
|
|
|
# allow_overruns_side=allow_overruns_side,
|
|
|
|
|
|
|
|
) as (ctx, sent),
|
|
|
|
ctx.open_stream() as ipc,
|
|
|
|
):
|
|
|
|
assert sent is None
|
|
|
|
|
|
|
|
seq = list(range(10))
|
|
|
|
for i in seq:
|
|
|
|
await ipc.send(i)
|
|
|
|
rx: int = await ipc.receive()
|
|
|
|
assert rx == i
|
|
|
|
|
|
|
|
await ctx.cancel()
|
|
|
|
|
|
|
|
with pytest.raises(RuntimeError) as excinfo:
|
|
|
|
trio.run(main)
|
|
|
|
|
|
|
|
assert 'Invalid Operation' in repr(excinfo.value)
|