'''
Codify the cancellation request semantics in terms
of one remote actor cancelling another.

'''
# from contextlib import asynccontextmanager as acm
import itertools

import pytest
import trio
import tractor
from tractor import (  # typing
    Actor,
    current_actor,
    open_nursery,
    Portal,
    Context,
    ContextCancelled,
)


# XXX TODO cases:
# - [ ] peer cancelled itself - so other peers should
#   get errors reflecting that the peer was itself the .canceller?

# - [x] WE cancelled the peer and thus should not see any raised
#   `ContextCancelled` as it should be reaped silently?
#   => pretty sure `test_context_stream_semantics::test_caller_cancels()`
#   already covers this case?

# - [x] INTER-PEER: some arbitrary remote peer cancels via
#   Portal.cancel_actor().
#   => all other connected peers should get that cancel requesting peer's
#   uid in the ctx-cancelled error msg raised in all open ctxs
#   with that peer.

# - [ ] PEER-FAILS-BY-CHILD-ERROR: peer spawned a sub-actor which
#   (also) spawned a failing task which was unhandled and
#   propagated up to the immediate parent - the peer to the actor
#   that also spawned a remote task in that same peer-parent.


# def test_self_cancel():
#     '''
#     2 cases:
#     - calls `Actor.cancel()` locally in some task
#     - calls `LocalPortal.cancel_actor()` ?
#
#     '''
#     ...
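
# NOTE: a rough, heavily-hedged sketch for filling in the stub above;
# left commented out since the exact local self-cancel API surface
# (`Actor.cancel()` / `LocalPortal.cancel_actor()` signatures) is an
# assumption here and would need verifying against the current runtime
# before enabling.
#
# def test_self_cancel(debug_mode: bool):
#
#     async def main():
#         async with tractor.open_nursery(
#             debug_mode=debug_mode,
#         ):
#             # case 1: some task in the root actor cancels its own
#             # runtime "locally".
#             await current_actor().cancel()
#
#     # cancelling yourself should still tear down gracefully,
#     # i.e. no error should escape `trio.run()`.
#     trio.run(main)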


@tractor.context
async def sleep_forever(
    ctx: Context,
    expect_ctxc: bool = False,

) -> None:
    '''
    Sync the context, open a stream then just sleep.

    Allow checking for (context) cancellation locally.

    '''
    try:
        await ctx.started()
        async with ctx.open_stream():
            await trio.sleep_forever()

    except BaseException as berr:

        # TODO: it'd sure be nice to be able to inject our own
        # `ContextCancelled` here instead of a `trio.Cancelled`
        # so that our runtime can expect it and this "user code"
        # would be able to tell the diff between a generic trio
        # cancel and a tractor runtime-IPC cancel.
        if expect_ctxc:
            assert isinstance(berr, trio.Cancelled)

        raise
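
# NOTE: `test_peer_canceller()` below opens this endpoint in the
# 'sleeper' subactor with `expect_ctxc=True`, so the `trio.Cancelled`
# assert above is exercised when the 'canceller' peer actor-cancels it.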


@tractor.context
async def error_before_started(
    ctx: Context,
) -> None:
    '''
    This simulates exactly an original bug discovered in:
    https://github.com/pikers/piker/issues/244

    Cancel a context **before** any underlying error is raised so
    as to trigger a local reception of a ``ContextCancelled`` which
    SHOULD NOT be re-raised in the local surrounding ``Context``
    *iff* the cancel was requested by **this** (callee) side of
    the context.

    '''
    async with tractor.wait_for_actor('sleeper') as p2:
        async with (
            p2.open_context(sleep_forever) as (peer_ctx, first),
            peer_ctx.open_stream(),
        ):
            # NOTE: this WAS inside an @acm body but i factored it
            # out and just put it inline here since i don't think
            # the mngr part really matters, though maybe it could?
            try:
                # XXX NOTE XXX: THIS sends an UNSERIALIZABLE TYPE which
                # should raise a `TypeError` and **NOT BE SWALLOWED** by
                # the surrounding try/finally (normally inside the
                # body of some acm)..
                await ctx.started(object())
                # yield

            finally:
                # XXX: previously this would trigger local
                # ``ContextCancelled`` to be received and raised in the
                # local context overriding any local error due to logic
                # inside ``_invoke()`` which checked for an error set on
                # ``Context._error`` and raised it in a cancellation
                # scenario.
                # ------
                # The problem is you can have a remote cancellation that
                # is part of a local error and we shouldn't raise
                # ``ContextCancelled`` **iff** we **were not** the side
                # of the context to initiate it, i.e.
                # ``Context._cancel_called`` should **NOT** have been
                # set. The special logic to handle this case is now
                # inside ``Context._maybe_raise_from_remote_msg()`` XD
                await peer_ctx.cancel()
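
# NOTE: the `TypeError` raised by `ctx.started(object())` above is the
# remote error which the test below expects to see boxed in the
# resulting `RemoteActorError` (via `excinfo.value.type == TypeError`).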


def test_do_not_swallow_error_before_started_by_remote_contextcancelled(
    debug_mode: bool,
):
    '''
    Verify that an error raised in a remote context which itself
    opens YET ANOTHER remote context, which it then cancels, does not
    override the original error that caused the cancellation of the
    secondary context.

    '''
    async def main():
        async with tractor.open_nursery(
            debug_mode=debug_mode,
        ) as n:
            portal = await n.start_actor(
                'errorer',
                enable_modules=[__name__],
            )
            await n.start_actor(
                'sleeper',
                enable_modules=[__name__],
            )

            async with (
                portal.open_context(
                    error_before_started
                ) as (ctx, sent),
            ):
                await trio.sleep_forever()

    with pytest.raises(tractor.RemoteActorError) as excinfo:
        trio.run(main)

    assert excinfo.value.type == TypeError


@tractor.context
async def sleep_a_bit_then_cancel_peer(
    ctx: Context,
    peer_name: str = 'sleeper',
    cancel_after: float = .5,

) -> None:
    '''
    Connect to peer, sleep as per input delay, cancel the peer.

    '''
    peer: Portal
    async with tractor.wait_for_actor(peer_name) as peer:
        await ctx.started()
        await trio.sleep(cancel_after)
        await peer.cancel_actor()


@tractor.context
async def stream_ints(
    ctx: Context,
):
    await ctx.started()
    async with ctx.open_stream() as stream:
        for i in itertools.count():
            await stream.send(i)
            await trio.sleep(0.01)
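
# NOTE: the endpoint above is the simple "infinite int producer" which
# `stream_from_peer()` below consumes; it never returns on its own and
# so only ever exits via (runtime) cancellation.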


@tractor.context
async def stream_from_peer(
    ctx: Context,
    peer_name: str = 'sleeper',
) -> None:

    peer: Portal
    try:
        async with (
            tractor.wait_for_actor(peer_name) as peer,
            peer.open_context(stream_ints) as (peer_ctx, first),
            peer_ctx.open_stream() as stream,
        ):
            await ctx.started()

            # XXX QUESTIONS & TODO: for further details around this
            # in the longer run..
            # https://github.com/goodboy/tractor/issues/368
            # - should we raise `ContextCancelled` or `Cancelled` (rn
            #   it does the latter) and should/could it be implemented
            #   as a general injection override for `trio` such
            #   that ANY next checkpoint would raise the "cancel
            #   error type" of choice?
            # - should the `ContextCancelled` bubble from
            #   all `Context` and `MsgStream` apis wherein it
            #   prolly makes the most sense to make it
            #   a `trio.Cancelled` subtype?
            # - what about IPC-transport specific errors, should
            #   they bubble from the async for and trigger
            #   other special cases?
            #
            # NOTE: current ctl flow:
            # - stream raises `trio.EndOfChannel` and
            #   exits the loop
            # - `.open_context()` will raise the ctxc received
            #   from the sleeper.
            async for msg in stream:
                assert msg is not None
                print(msg)

    # NOTE: cancellation of the (sleeper) peer should always
    # cause a `ContextCancelled` raise in this streaming
    # actor.
    except ContextCancelled as ctxc:
        ctxerr = ctxc

        assert peer_ctx._remote_error is ctxerr
        assert peer_ctx._remote_error.msgdata == ctxerr.msgdata

        # the peer ctx is the canceller even though its canceller
        # is the "canceller" XD
        assert peer_name in peer_ctx.canceller

        assert "canceller" in ctxerr.canceller

        # caller peer should not be the cancel requester
        assert not ctx.cancel_called
        assert not ctx.cancel_acked

        # XXX can NEVER BE TRUE since `._invoke` only
        # sets this AFTER the nursery block this task
        # was started in, exits.
        assert not ctx._scope.cancelled_caught

        # we never requested cancellation, it was the 'canceller'
        # peer.
        assert not peer_ctx.cancel_called
        assert not peer_ctx.cancel_acked

        # the `.open_context()` exit definitely caught
        # a cancellation in the internal `Context._scope` since
        # likely the runtime called `_deliver_msg()` after
        # receiving the remote error from the streaming task.
        assert not peer_ctx._scope.cancelled_caught

        # TODO / NOTE `.canceller` won't have been set yet
        # here because that machinery is inside
        # `.open_context().__aexit__()` BUT, if we had
        # a way to know immediately (from the last
        # checkpoint) that cancellation was due to
        # a remote, we COULD assert this here..see,
        # https://github.com/goodboy/tractor/issues/368
        #
        # assert 'canceller' in ctx.canceller

        # root/parent actor task should NEVER HAVE cancelled us!
        assert not ctx.canceller

        raise

        # TODO: IN THEORY we could have other cases depending on
        # who cancels first, the root actor or the canceller peer?
        #
        # 1- when the peer request is first then the `.canceller`
        #    field should obvi be set to the 'canceller' uid,
        #
        # 2- if the root DOES req cancel then we should see the same
        #    `trio.Cancelled` implicitly raised
        # assert ctx.canceller[0] == 'root'
        # assert peer_ctx.canceller[0] == 'sleeper'

    raise RuntimeError('Never triggered local `ContextCancelled` ?!?')


@pytest.mark.parametrize(
    'error_during_ctxerr_handling',
    [False, True],
    ids=lambda item: f'rte_during_ctxerr={item}',
)
def test_peer_canceller(
    error_during_ctxerr_handling: bool,
    debug_mode: bool,
):
    '''
    Verify that a cancellation triggered by an in-actor-tree peer
    results in cancelled errors in all other actors which have
    opened contexts to that same actor.

    legend:
    name>
        a "play button" that indicates a new runtime instance,
        an individual actor with `name`.

    .subname>
        a subactor whose parent should be on some previous
        line and be less indented.

    .actor0> ()-> .actor1>
        an inter-actor task context opened (by `async with
        Portal.open_context()`) from actor0 *into* actor1.

    .actor0> ()<=> .actor1>
        an inter-actor task context opened (as above)
        from actor0 *into* actor1 which INCLUDES an additional
        stream open using `async with Context.open_stream()`.


    ------ - ------
    supervision view
    ------ - ------
    root>
     .sleeper> TODO: SOME SYNTAX SHOWING JUST SLEEPING
     .just_caller> ()=> .sleeper>
     .canceller> ()-> .sleeper>
        TODO: how to denote calling `Portal.cancel_actor()`

    In this case a `ContextCancelled` with `.canceller` set to the
    requesting actor, in this case 'canceller', should be relayed
    to all other actors who have also opened a (remote task)
    context with that now cancelled actor.

    ------ - ------
    task view
    ------ - ------
    So there are 5 contexts open in total with 3 from the root to
    its children and 2 from children to their peers:
    1. root> ()-> .sleeper>
    2. root> ()-> .streamer>
    3. root> ()-> .canceller>

    4. .streamer> ()<=> .sleep>
    5. .canceller> ()-> .sleeper>
        - calls `Portal.cancel_actor()`

    '''
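
    # Expected outcomes (see the `trio.run()` assertions at the bottom
    # of this test):
    # - with `error_during_ctxerr_handling` set, the simulated RTE
    #   raised while handling the ctxc is what bubbles out of
    #   `trio.run()`,
    # - otherwise the relayed `ContextCancelled` (with the 'canceller'
    #   peer's uid as `.canceller`) is raised.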

    async def main():
        async with tractor.open_nursery(
            # NOTE: to halt the peer tasks on ctxc, uncomment this.
            debug_mode=debug_mode,
        ) as an:
            canceller: Portal = await an.start_actor(
                'canceller',
                enable_modules=[__name__],
            )
            sleeper: Portal = await an.start_actor(
                'sleeper',
                enable_modules=[__name__],
            )
            just_caller: Portal = await an.start_actor(
                'just_caller',  # but i just met her?
                enable_modules=[__name__],
            )
            root: Actor = current_actor()

            try:
                async with (
                    sleeper.open_context(
                        sleep_forever,
                        expect_ctxc=True,
                    ) as (sleeper_ctx, sent),

                    just_caller.open_context(
                        stream_from_peer,
                    ) as (caller_ctx, sent),

                    canceller.open_context(
                        sleep_a_bit_then_cancel_peer,
                    ) as (canceller_ctx, sent),

                ):
                    ctxs: dict[str, Context] = {
                        'sleeper': sleeper_ctx,
                        'caller': caller_ctx,
                        'canceller': canceller_ctx,
                    }

                    try:
                        print('PRE CONTEXT RESULT')
                        res = await sleeper_ctx.result()
                        assert res

                        # should never get here
                        pytest.fail(
                            'Context.result() did not raise ctx-cancelled?'
                        )

                    # should always raise since this root task does
                    # not request the sleeper cancellation ;)
                    except ContextCancelled as ctxerr:
                        print(
                            'CAUGHT REMOTE CONTEXT CANCEL\n\n'
                            f'{ctxerr}\n'
                        )

                        # canceller and caller peers should not
                        # have been remotely cancelled.
                        assert canceller_ctx.canceller is None
                        assert caller_ctx.canceller is None

                        # we were not the actor, our peer was
                        assert not sleeper_ctx.cancel_acked

                        assert ctxerr.canceller[0] == 'canceller'

                        # XXX NOTE XXX: since THIS `ContextCancelled`
                        # HAS NOT YET bubbled up to the
                        # `sleeper.open_context().__aexit__()` this
                        # value is not yet set, however outside this
                        # block it should be.
                        assert not sleeper_ctx._scope.cancelled_caught

                        # CASE_1: error-during-ctxc-handling,
                        if error_during_ctxerr_handling:
                            raise RuntimeError('Simulated error during teardown')

                        # CASE_2: standard teardown inside `.open_context()` block
                        raise

                    # XXX SHOULD NEVER EVER GET HERE XXX
                    except BaseException as berr:
                        raise

                        # XXX if needed to debug failure
                        # _err = berr
                        # await tractor.pause()
                        # await trio.sleep_forever()

                        pytest.fail(
                            'did not rx ctxc ?!?\n\n'
                            f'{berr}\n'
                        )

                    else:
                        pytest.fail(
                            'did not rx ctxc ?!?\n\n'
                            f'{ctxs}\n'
                        )

            except (
                ContextCancelled,
                RuntimeError,
            ) as loc_err:
                _loc_err = loc_err

                # NOTE: the main state to check on `Context` is:
                # - `.cancel_called` (bool of whether this side
                #   requested)
                # - `.cancel_acked` (bool of whether a ctxc
                #   response was received due to cancel req).
                # - `.maybe_error` (highest prio error to raise
                #   locally)
                # - `.outcome` (final error or result value)
                # - `.canceller` (uid of cancel-causing actor-task)
                # - `._remote_error` (any `RemoteActorError`
                #   instance from other side of context)
                # - `._local_error` (any error caught inside the
                #   `.open_context()` block).
                #
                # XXX: Deprecated and internal only
                # - `.cancelled_caught` (maps to nursery cs)
                #   - now just use `._scope.cancelled_caught`
                #     since it maps to the internal nursery cs
                #
                # TODO: are we really planning to use this tho?
                # - `._cancel_msg` (any msg that caused the
                #   cancel)

                # CASE_1: error-during-ctxc-handling,
                # - far end cancels due to peer 'canceller',
                # - `ContextCancelled` relayed to this scope,
                # - inside `.open_context()` ctxc is caught and
                #   a rte raised instead
                #
                # => block should raise the rte but all peers
                #    should be cancelled by US.
                #
                if error_during_ctxerr_handling:
                    # since we do a rte reraise above, the
                    # `.open_context()` error handling should have
                    # raised a local rte, thus the internal
                    # `.open_context()` enterer task's
                    # cancel-scope should have raised the RTE, NOT
                    # a `trio.Cancelled` due to a local
                    # `._scope.cancel()` call.
                    assert not sleeper_ctx._scope.cancelled_caught

                    assert isinstance(loc_err, RuntimeError)
                    print(f'_loc_err: {_loc_err}\n')
                    # assert sleeper_ctx._local_error is _loc_err
                    assert not (
                        loc_err
                        is sleeper_ctx.maybe_error
                        is sleeper_ctx.outcome
                        is sleeper_ctx._remote_error
                    )

                    # NOTE: this root actor task should have
                    # called `Context.cancel()` on the
                    # `.__aexit__()` to every opened ctx.
                    for name, ctx in ctxs.items():

                        # this root actor task should have
                        # cancelled all opened contexts except the
                        # sleeper which is obvi cancelled by the
                        # "canceller" peer.
                        re = ctx._remote_error
                        le = ctx._local_error

                        assert ctx.cancel_called

                        if ctx is sleeper_ctx:
                            assert 'canceller' in re.canceller
                            assert 'sleeper' in ctx.canceller

                        if ctx is canceller_ctx:
                            assert (
                                re.canceller
                                ==
                                root.uid
                            )

                        else:  # the other 2 ctxs
                            assert (
                                re.canceller
                                ==
                                canceller.channel.uid
                            )

                    # since the sleeper errors while handling a
                    # peer-cancelled (by ctxc) scenario, we expect
                    # that the `.open_context()` block DOES call
                    # `.cancel()` (despite in this test case it
                    # being unnecessary).
                    assert (
                        sleeper_ctx.cancel_called
                        and
                        not sleeper_ctx.cancel_acked
                    )

                # CASE_2: standard teardown inside `.open_context()` block
                # - far end cancels due to peer 'canceller',
                # - `ContextCancelled` relayed to this scope and
                #   raised locally without any raise-during-handle,
                #
                # => inside `.open_context()` ctxc is raised and
                #    propagated
                #
                else:
                    # since sleeper_ctx.result() IS called above
                    # we should have (silently) absorbed the
                    # corresponding `ContextCancelled` for it and
                    # `._scope.cancel()` should never have been
                    # called.
                    assert not sleeper_ctx._scope.cancelled_caught

                    assert isinstance(loc_err, ContextCancelled)

                    # the received remote error's `.canceller`
                    # will of course be the "canceller" actor BUT
                    # the canceller set on the local handle to
                    # `sleeper_ctx` will be the "sleeper" uid
                    # since it's the actor that relayed us the
                    # error which was **caused** by the
                    # "canceller".
                    assert 'sleeper' in sleeper_ctx.canceller
                    assert 'canceller' == loc_err.canceller[0]

                    # the sleeper's remote error is the error bubbled
                    # out of the context-stack above!
                    final_err = sleeper_ctx.outcome
                    assert (
                        final_err is loc_err
                        is sleeper_ctx.maybe_error
                        is sleeper_ctx._remote_error
                    )

                    for name, ctx in ctxs.items():

                        re: BaseException|None = ctx._remote_error
                        le: BaseException|None = ctx._local_error
                        err = ctx.maybe_error
                        out = ctx.outcome

                        # every ctx should error!
                        assert out is err

                        # the recorded local error should always be
                        # the same as the one raised by the
                        # `sleeper_ctx.result()` call
                        assert (
                            le
                            and
                            le is loc_err
                        )

                        # root doesn't cancel sleeper since it's
                        # cancelled by its peer.
                        if ctx is sleeper_ctx:
                            assert re
                            assert (
                                ctx._remote_error
                                is ctx.maybe_error
                                is ctx.outcome
                                is ctx._local_error
                            )

                            assert not ctx.cancel_called
                            assert not ctx.cancel_acked

                            # since sleeper_ctx.result() IS called
                            # above we should have (silently)
                            # absorbed the corresponding
                            # `ContextCancelled` for it and thus
                            # the logic inside `.cancelled_caught`
                            # should trigger!
                            assert not ctx._scope.cancelled_caught

                        elif ctx in (
                            caller_ctx,
                            canceller_ctx,
                        ):
                            assert not ctx._remote_error

                            # neither of the `caller/canceller_ctx` should
                            # have called `ctx.cancel()` bc the
                            # canceller's task internally issues
                            # a `Portal.cancel_actor()` to the
                            # sleeper and thus never should call
                            # `ctx.cancel()` per se UNLESS the
                            # sleeper's `.result()` call above
                            # ctxc exception results in the
                            # canceller's
                            # `.open_context().__aexit__()` error
                            # handling kicking in BEFORE a remote
                            # error is delivered - which since
                            # we're asserting what we are above,
                            # that should normally be the case
                            # right?
                            #
                            assert not ctx.cancel_called
                            #
                            # assert ctx.cancel_called
                            # orig ^

                            # TODO: figure out the details of this..?
                            # if you look, the `._local_error` here
                            # is a multi of ctxc + 2 Cancelleds?
                            # assert not ctx._scope.cancelled_caught

                            assert (
                                not ctx.cancel_called
                                and not ctx.cancel_acked
                            )
                            assert not ctx._scope.cancelled_caught

                            # elif ctx is canceller_ctx:
                            #     assert not ctx._remote_error

                            # XXX NOTE XXX: ONLY the canceller
                            # will get a self-cancelled outcome
                            # whilst everyone else gets
                            # a peer-caused cancellation!
                            #
                            # TODO: really we should avoid calling
                            # .cancel() whenever an interpeer
                            # cancel takes place since each
                            # reception of a ctxc

                        else:
                            pytest.fail(
                                'Uhh wut ctx is this?\n'
                                f'{ctx}\n'
                            )

                # TODO: do we even need this flag?
                # -> each context should have received
                #    a silently absorbed context cancellation
                #    in its remote nursery scope.
                # assert ctx.chan.uid == ctx.canceller

                # NOTE: when an inter-peer cancellation
                # occurred, we DO NOT expect this
                # root-actor-task to have requested a cancel of
                # the context since cancellation was caused by
                # the "canceller" peer and thus
                # `Context.cancel()` SHOULD NOT have been
                # called inside
                # `Portal.open_context().__aexit__()`.
                assert not (
                    sleeper_ctx.cancel_called
                    or
                    sleeper_ctx.cancel_acked
                )

                # XXX NOTE XXX: and see matching comment above but,
                # the `._scope` is only set by `trio` AFTER the
                # `.open_context()` block has exited and should be
                # set in both outcomes including the case where
                # ctx-cancel handling itself errors.
                assert not sleeper_ctx._scope.cancelled_caught
                assert _loc_err is sleeper_ctx._local_error
                assert (
                    sleeper_ctx.outcome
                    is sleeper_ctx.maybe_error
                    is sleeper_ctx._remote_error
                )

                raise  # always to ensure teardown

    if error_during_ctxerr_handling:
        with pytest.raises(RuntimeError) as excinfo:
            trio.run(main)

    else:
        with pytest.raises(ContextCancelled) as excinfo:
            trio.run(main)

        assert excinfo.value.type == ContextCancelled
        assert excinfo.value.canceller[0] == 'canceller'


@tractor.context
async def basic_echo_server(
    ctx: Context,
    peer_name: str = 'stepbro',

) -> None:
    '''
    Just the simplest `MsgStream` echo server which resays what
    you told it but with its uid in front ;)

    '''
    actor: Actor = tractor.current_actor()
    uid: tuple = actor.uid
    await ctx.started(uid)
    async with ctx.open_stream() as ipc:
        async for msg in ipc:

            # repack msg pair with our uid
            # as first element.
            (
                client_uid,
                i,
            ) = msg
            resp: tuple = (
                uid,
                i,
            )
            # OOF! looks like my runtime-error is causing a lockup
            # assert 0
            await ipc.send(resp)
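
# NOTE: the paired client side of the echo server above is
# `tell_little_bro()` further below, which asserts the `(uid, i)`
# repacking done here on every round-trip.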


@tractor.context
async def serve_subactors(
    ctx: Context,
    peer_name: str,

) -> None:
    async with open_nursery() as an:
        await ctx.started(peer_name)
        async with ctx.open_stream() as reqs:
            async for msg in reqs:
                peer_name: str = msg
                peer: Portal = await an.start_actor(
                    name=peer_name,
                    enable_modules=[__name__],
                )
                print(
                    'Spawning new subactor\n'
                    f'{peer_name}\n'
                    f'|_{peer}\n'
                )
                await reqs.send((
                    peer.chan.uid,
                    peer.chan.raddr,
                ))

        print('Spawner exiting spawn serve loop!')


@tractor.context
async def client_req_subactor(
    ctx: Context,
    peer_name: str,

    # used to simulate a user causing an error to be raised
    # directly in thread (like a KBI) to better replicate the
    # case where a `modden` CLI client would hang after requesting
    # a `Context.cancel()` to `bigd`'s wks spawner.
    reraise_on_cancel: str|None = None,

) -> None:
    # TODO: other cases to do with sub lifetimes:
    # -[ ] test that we can have the server spawn a sub
    #   that lives longer than the ctx with this client.
    # -[ ] test that

    # open ctx with peer spawn server and ask it to spawn a little
    # bro which we'll then connect and stream with.
    async with (
        tractor.find_actor(
            name='spawn_server',
            raise_on_none=True,

            # TODO: we should be isolating this from other runs!
            # => ideally so we can eventually use something like
            #    `pytest-xdist` Bo
            # registry_addrs=bigd._reg_addrs,
        ) as spawner,

        spawner.open_context(
            serve_subactors,
            peer_name=peer_name,
        ) as (spawner_ctx, first),
    ):
        assert first == peer_name
        await ctx.started(
            'yup i had brudder',
        )

        async with spawner_ctx.open_stream() as reqs:

            # send single spawn request to the server
            await reqs.send(peer_name)
            with trio.fail_after(3):
                (
                    sub_uid,
                    sub_raddr,
                ) = await reqs.receive()

            await tell_little_bro(
                actor_name=sub_uid[0],
                caller='client',
            )

            # TODO: test different scope-layers of
            # cancellation?
            # with trio.CancelScope() as cs:
            try:
                await trio.sleep_forever()

            # TODO: would be super nice to have a special injected
            # cancel type here (maybe just our ctxc) but using
            # some native mechanism in `trio` :p
            except (
                trio.Cancelled
            ) as err:
                _err = err
                if reraise_on_cancel:
                    errtype = globals()['__builtins__'][reraise_on_cancel]
                    assert errtype
                    to_reraise: BaseException = errtype()
                    print(f'client re-raising on cancel: {repr(to_reraise)}')
                    raise err

                raise

            # if cs.cancelled_caught:
            #     print('client handling expected KBI!')
            #     await ctx.
            #     await trio.sleep(
            #     await tractor.pause()
            #     await spawner_ctx.cancel()

            # cancel spawned sub-actor directly?
            # await sub_ctx.cancel()

            # maybe cancel runtime?
            # await sub.cancel_actor()


async def tell_little_bro(
    actor_name: str,
    caller: str = '',
):
    # contact target actor, do a stream dialog.
    async with (
        tractor.wait_for_actor(
            name=actor_name
        ) as lb,
        lb.open_context(
            basic_echo_server,
        ) as (sub_ctx, first),
        sub_ctx.open_stream() as echo_ipc,
    ):
        actor: Actor = current_actor()
        uid: tuple = actor.uid
        for i in range(100):
            msg: tuple = (
                uid,
                i,
            )
            await echo_ipc.send(msg)
            resp = await echo_ipc.receive()
            print(
                f'{caller} => {actor_name}: {msg}\n'
                f'{caller} <= {actor_name}: {resp}\n'
            )
            (
                sub_uid,
                _i,
            ) = resp
            assert sub_uid != uid
            assert _i == i


@pytest.mark.parametrize(
    'raise_client_error',
    [None, 'KeyboardInterrupt'],
)
def test_peer_spawns_and_cancels_service_subactor(
    debug_mode: bool,
    raise_client_error: str,
    reg_addr: tuple[str, int],
):
    # NOTE: this tests for the modden `mod wks open piker` bug
    # discovered as part of implementing workspace ctx
    # open-.pause()-ctx.cancel() as part of the CLI..

    # -> start actor-tree (server) that offers sub-actor spawns via
    #    context API
    # -> start another full actor-tree (client) which requests to the first to
    #    spawn over its `@context` ep / api.
    # -> client actor cancels the context and should exit gracefully
    #    and the server's spawned child should cancel and terminate!
    peer_name: str = 'little_bro'

    async def main():
        async with tractor.open_nursery(
            # NOTE: to halt the peer tasks on ctxc, uncomment this.
            debug_mode=debug_mode,
            registry_addrs=[reg_addr],
        ) as an:
            server: Portal = await an.start_actor(
                (server_name := 'spawn_server'),
                enable_modules=[__name__],
            )
            print(f'Spawned `{server_name}`')

            client: Portal = await an.start_actor(
                client_name := 'client',
                enable_modules=[__name__],
            )
            print(f'Spawned `{client_name}`')

            try:
                async with (
                    server.open_context(
                        serve_subactors,
                        peer_name=peer_name,
                    ) as (spawn_ctx, first),

                    client.open_context(
                        client_req_subactor,
                        peer_name=peer_name,
                        reraise_on_cancel=raise_client_error,
                    ) as (client_ctx, client_says),
                ):
                    print(
                        f'Server says: {first}\n'
                        f'Client says: {client_says}\n'
                    )

                    # attach to client-requested-to-spawn
                    # (grandchild of this root actor) "little_bro"
                    # and ensure we can also use it as an echo
                    # server.
                    async with tractor.wait_for_actor(
                        name=peer_name,
                    ) as sub:
                        assert sub

                        print(
                            'Sub-spawn came online\n'
                            f'portal: {sub}\n'
                            f'.uid: {sub.actor.uid}\n'
                            f'chan.raddr: {sub.chan.raddr}\n'
                        )
                        await tell_little_bro(
                            actor_name=peer_name,
                            caller='root',
                        )

                    # signal client to raise a KBI
                    await client_ctx.cancel()
                    print('root cancelled client, checking that sub-spawn is down')

                    async with tractor.find_actor(
                        name=peer_name,
                    ) as sub:
                        assert not sub

                    print('root cancelling server/client sub-actors')

                    # await tractor.pause()
                    res = await client_ctx.result(hide_tb=False)
                    assert isinstance(res, ContextCancelled)
                    assert client_ctx.cancel_acked
                    assert res.canceller == current_actor().uid

                    await spawn_ctx.cancel()
                    # await server.cancel_actor()

            # since we called `.cancel_actor()`, `.cancel_acked`
            # will not be set on the ctx bc `ctx.cancel()` was not
            # called directly for this context.
            except ContextCancelled as ctxc:
                print('caught ctxc from contexts!')
                assert ctxc.canceller == current_actor().uid
                assert ctxc is spawn_ctx.outcome
                assert ctxc is spawn_ctx.maybe_error
                raise

            assert spawn_ctx.cancel_acked
            assert client_ctx.cancel_acked

            await client.cancel_actor()
            await server.cancel_actor()

            # WOA WOA WOA! we need this to close..!!!??
            # that's super bad XD

            # TODO: why isn't this working!?!?
            # we're now outside the `.open_context()` block so
            # the internal `Context._scope: CancelScope` should be
            # gracefully "closed" ;)

            # assert spawn_ctx.cancelled_caught

    trio.run(main)