forked from goodboy/tractor

Merge pull request #333 from goodboy/exceptiongroups
`ExceptionGroup`s and `trio >= 0.22`

commit a0f6668ce8

@@ -27,7 +27,18 @@ async def main():

            # retrieve results
            async with p0.open_stream_from(breakpoint_forever) as stream:
-               await p1.run(name_error)
+
+               # triggers the first name error
+               try:
+                   await p1.run(name_error)
+               except tractor.RemoteActorError as rae:
+                   assert rae.type is NameError
+
+               async for i in stream:
+
+                   # a second time try the failing subactor and this time
+                   # let the error propagate up to the parent/nursery.
+                   await p1.run(name_error)


 if __name__ == '__main__':

@@ -12,18 +12,31 @@ async def breakpoint_forever():
     while True:
         await tractor.breakpoint()

+        # NOTE: if the test never sent 'q'/'quit' commands
+        # on the pdb repl, without this checkpoint line the
+        # repl would spin in this actor forever.
+        # await trio.sleep(0)


 async def spawn_until(depth=0):
     """"A nested nursery that triggers another ``NameError``.
     """
     async with tractor.open_nursery() as n:
         if depth < 1:
-            # await n.run_in_actor('breakpoint_forever', breakpoint_forever)
-            await n.run_in_actor(
+
+            await n.run_in_actor(breakpoint_forever)
+
+            p = await n.run_in_actor(
                 name_error,
                 name='name_error'
             )
+            await trio.sleep(0.5)
+            # rx and propagate error from child
+            await p.result()

         else:
+            # recursive call to spawn another process branching layer of
+            # the tree
             depth -= 1
             await n.run_in_actor(
                 spawn_until,

@@ -53,6 +66,7 @@ async def main():
     """
     async with tractor.open_nursery(
         debug_mode=True,
+        # loglevel='cancel',
     ) as n:

         # spawn both actors

@@ -67,8 +81,16 @@ async def main():
             name='spawner1',
         )

+        # TODO: test this case as well where the parent doesn't see
+        # the sub-actor errors by default and instead expect a user
+        # ctrl-c to kill the root.
+        with trio.move_on_after(3):
+            await trio.sleep_forever()

         # gah still an issue here.
         await portal.result()
+
+        # should never get here
         await portal1.result()

@@ -0,0 +1,25 @@
Add support for ``trio >= 0.22`` and support for the new Python 3.11
``[Base]ExceptionGroup`` from `pep 654`_ via the backported
`exceptiongroup`_ package and some final fixes to the debug mode
subsystem.

This port ended up driving some (hopefully) final fixes to our debugger
subsystem including the solution to all lingering stdstreams locking
race-conditions and deadlock scenarios. This includes extending the
debugger test suite as well as cancellation and ``asyncio`` mode cases.
Some of the notable details:

- always reverting to the ``trio`` SIGINT handler when leaving debug
  mode.
- bypassing child attempts to acquire the debug lock when detected
  to be amidst actor-runtime-cancellation.
- allowing the root actor to cancel local but IPC-stale subactor
  request-tasks for the debug lock when in a "no IPC peers" state.

Further we refined our ``ActorNursery`` semantics to be more similar to
``trio`` in the sense that parent task errors are always packed into the
actor-nursery emitted exception group and adjusted all tests and
examples accordingly.

.. _pep 654: https://peps.python.org/pep-0654/#handling-exception-groups
.. _exceptiongroup: https://github.com/python-trio/exceptiongroup
|
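
To make the new semantics concrete, here is a minimal sketch (not taken
from this changeset; the failing-task names, error message and the shape
of ``main()`` are illustrative assumptions) of how multiple subactor
failures now surface to the root task as a single ``BaseExceptionGroup``
that can be filtered with ``.subgroup()``:

    import trio
    import tractor

    # on Python < 3.11 the backported type is needed; 3.11+ has it builtin
    from exceptiongroup import BaseExceptionGroup


    async def boom(msg: str) -> None:
        # each spawned subactor simply raises
        raise RuntimeError(msg)


    async def main() -> None:
        async with tractor.open_nursery() as n:
            # two failing "run in actor" subactors -> two boxed errors
            await n.run_in_actor(boom, msg='first', name='boom1')
            await n.run_in_actor(boom, msg='second', name='boom2')


    if __name__ == '__main__':
        try:
            trio.run(main)
        except BaseExceptionGroup as beg:
            # each subactor failure arrives boxed as a ``RemoteActorError``
            boxed = beg.subgroup(tractor.RemoteActorError)
            assert boxed is not None
            for err in boxed.exceptions:
                print(f'{err.type=}')  # boxed source type, RuntimeError here
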
setup.py

@@ -44,9 +44,10 @@ setup(
         # trio related
         # proper range spec:
         # https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/#id5
-        'trio >= 0.20, < 0.22',
+        'trio >= 0.22',
         'async_generator',
         'trio_typing',
+        'exceptiongroup',

         # tooling
         'tricycle',
|
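
The new ``exceptiongroup`` requirement is the PyPI backport of the
PEP 654 types; on 3.11+ the same names exist as builtins, so a consumer
that wants to avoid the dependency there could guard the import (a
sketch of such a guard, not something this diff adds):

    import sys

    if sys.version_info < (3, 11):
        # PyPI backport of PEP 654's exception group types
        from exceptiongroup import BaseExceptionGroup, ExceptionGroup  # noqa: F401
    # on 3.11+ ``BaseExceptionGroup`` / ``ExceptionGroup`` are builtins;
    # the backport also just re-exports the builtins there, so an
    # unconditional import (as tractor does) works everywhere.
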
@@ -8,6 +8,10 @@ import platform
 import time
 from itertools import repeat

+from exceptiongroup import (
+    BaseExceptionGroup,
+    ExceptionGroup,
+)
 import pytest
 import trio
 import tractor

@@ -56,29 +60,49 @@ def test_remote_error(arb_addr, args_err):
             arbiter_addr=arb_addr,
         ) as nursery:

+            # on a remote type error caused by bad input args
+            # this should raise directly which means we **don't** get
+            # an exception group outside the nursery since the error
+            # here and the far end task error are one in the same?
             portal = await nursery.run_in_actor(
                 assert_err, name='errorer', **args
             )

             # get result(s) from main task
             try:
+                # this means the root actor will also raise a local
+                # parent task error and thus an eg will propagate out
+                # of this actor nursery.
                 await portal.result()
             except tractor.RemoteActorError as err:
                 assert err.type == errtype
                 print("Look Maa that actor failed hard, hehh")
                 raise

-    with pytest.raises(tractor.RemoteActorError) as excinfo:
-        trio.run(main)
+    # ensure boxed errors
+    if args:
+        with pytest.raises(tractor.RemoteActorError) as excinfo:
+            trio.run(main)

-    # ensure boxed error is correct
-    assert excinfo.value.type == errtype
+        assert excinfo.value.type == errtype
+
+    else:
+        # the root task will also error on the `.result()` call
+        # so we expect an error from there AND the child.
+        with pytest.raises(BaseExceptionGroup) as excinfo:
+            trio.run(main)
+
+        # ensure boxed errors
+        for exc in excinfo.value.exceptions:
+            assert exc.type == errtype


 def test_multierror(arb_addr):
-    """Verify we raise a ``trio.MultiError`` out of a nursery where
+    '''
+    Verify we raise a ``BaseExceptionGroup`` out of a nursery where
     more than one actor errors.
-    """
+
+    '''
     async def main():
         async with tractor.open_nursery(
             arbiter_addr=arb_addr,
|
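
The two shapes the test expects (a bare boxed error vs. a group of them)
can be normalized with a tiny helper; this is a hypothetical sketch for
illustration, not a utility added by this diff:

    from exceptiongroup import BaseExceptionGroup
    import tractor


    def boxed_errors(exc: BaseException) -> list[tractor.RemoteActorError]:
        # normalize: a bare boxed error, or a (possibly nested) group of them
        if isinstance(exc, tractor.RemoteActorError):
            return [exc]

        if isinstance(exc, BaseExceptionGroup):
            flat: list[tractor.RemoteActorError] = []
            for sub in exc.exceptions:
                flat.extend(boxed_errors(sub))
            return flat

        return []

With that, both branches above reduce to asserting that every entry's
``.type`` matches ``errtype``.
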
@ -95,10 +119,10 @@ def test_multierror(arb_addr):
|
||||||
print("Look Maa that first actor failed hard, hehh")
|
print("Look Maa that first actor failed hard, hehh")
|
||||||
raise
|
raise
|
||||||
|
|
||||||
# here we should get a `trio.MultiError` containing exceptions
|
# here we should get a ``BaseExceptionGroup`` containing exceptions
|
||||||
# from both subactors
|
# from both subactors
|
||||||
|
|
||||||
with pytest.raises(trio.MultiError):
|
with pytest.raises(BaseExceptionGroup):
|
||||||
trio.run(main)
|
trio.run(main)
|
||||||
|
|
||||||
|
|
||||||
|
@ -107,7 +131,7 @@ def test_multierror(arb_addr):
|
||||||
'num_subactors', range(25, 26),
|
'num_subactors', range(25, 26),
|
||||||
)
|
)
|
||||||
def test_multierror_fast_nursery(arb_addr, start_method, num_subactors, delay):
|
def test_multierror_fast_nursery(arb_addr, start_method, num_subactors, delay):
|
||||||
"""Verify we raise a ``trio.MultiError`` out of a nursery where
|
"""Verify we raise a ``BaseExceptionGroup`` out of a nursery where
|
||||||
more than one actor errors and also with a delay before failure
|
more than one actor errors and also with a delay before failure
|
||||||
to test failure during an ongoing spawning.
|
to test failure during an ongoing spawning.
|
||||||
"""
|
"""
|
||||||
|
@ -123,10 +147,11 @@ def test_multierror_fast_nursery(arb_addr, start_method, num_subactors, delay):
|
||||||
delay=delay
|
delay=delay
|
||||||
)
|
)
|
||||||
|
|
||||||
with pytest.raises(trio.MultiError) as exc_info:
|
# with pytest.raises(trio.MultiError) as exc_info:
|
||||||
|
with pytest.raises(BaseExceptionGroup) as exc_info:
|
||||||
trio.run(main)
|
trio.run(main)
|
||||||
|
|
||||||
assert exc_info.type == tractor.MultiError
|
assert exc_info.type == ExceptionGroup
|
||||||
err = exc_info.value
|
err = exc_info.value
|
||||||
exceptions = err.exceptions
|
exceptions = err.exceptions
|
||||||
|
|
||||||
|
@ -214,8 +239,8 @@ async def test_cancel_infinite_streamer(start_method):
|
||||||
[
|
[
|
||||||
# daemon actors sit idle while single task actors error out
|
# daemon actors sit idle while single task actors error out
|
||||||
(1, tractor.RemoteActorError, AssertionError, (assert_err, {}), None),
|
(1, tractor.RemoteActorError, AssertionError, (assert_err, {}), None),
|
||||||
(2, tractor.MultiError, AssertionError, (assert_err, {}), None),
|
(2, BaseExceptionGroup, AssertionError, (assert_err, {}), None),
|
||||||
(3, tractor.MultiError, AssertionError, (assert_err, {}), None),
|
(3, BaseExceptionGroup, AssertionError, (assert_err, {}), None),
|
||||||
|
|
||||||
# 1 daemon actor errors out while single task actors sleep forever
|
# 1 daemon actor errors out while single task actors sleep forever
|
||||||
(3, tractor.RemoteActorError, AssertionError, (sleep_forever, {}),
|
(3, tractor.RemoteActorError, AssertionError, (sleep_forever, {}),
|
||||||
|
@ -226,7 +251,7 @@ async def test_cancel_infinite_streamer(start_method):
|
||||||
(do_nuthin, {}), (assert_err, {'delay': 1}, True)),
|
(do_nuthin, {}), (assert_err, {'delay': 1}, True)),
|
||||||
# daemon complete quickly delay while single task
|
# daemon complete quickly delay while single task
|
||||||
# actors error after brief delay
|
# actors error after brief delay
|
||||||
(3, tractor.MultiError, AssertionError,
|
(3, BaseExceptionGroup, AssertionError,
|
||||||
(assert_err, {'delay': 1}), (do_nuthin, {}, False)),
|
(assert_err, {'delay': 1}), (do_nuthin, {}, False)),
|
||||||
],
|
],
|
||||||
ids=[
|
ids=[
|
||||||
|
@ -293,7 +318,7 @@ async def test_some_cancels_all(num_actors_and_errs, start_method, loglevel):
|
||||||
# should error here with a ``RemoteActorError`` or ``MultiError``
|
# should error here with a ``RemoteActorError`` or ``MultiError``
|
||||||
|
|
||||||
except first_err as err:
|
except first_err as err:
|
||||||
if isinstance(err, tractor.MultiError):
|
if isinstance(err, BaseExceptionGroup):
|
||||||
assert len(err.exceptions) == num_actors
|
assert len(err.exceptions) == num_actors
|
||||||
for exc in err.exceptions:
|
for exc in err.exceptions:
|
||||||
if isinstance(exc, tractor.RemoteActorError):
|
if isinstance(exc, tractor.RemoteActorError):
|
||||||
|
@ -337,7 +362,7 @@ async def spawn_and_error(breadth, depth) -> None:
|
||||||
@tractor_test
|
@tractor_test
|
||||||
async def test_nested_multierrors(loglevel, start_method):
|
async def test_nested_multierrors(loglevel, start_method):
|
||||||
'''
|
'''
|
||||||
Test that failed actor sets are wrapped in `trio.MultiError`s. This
|
Test that failed actor sets are wrapped in `BaseExceptionGroup`s. This
|
||||||
test goes only 2 nurseries deep but we should eventually have tests
|
test goes only 2 nurseries deep but we should eventually have tests
|
||||||
for arbitrary n-depth actor trees.
|
for arbitrary n-depth actor trees.
|
||||||
|
|
||||||
|
@ -365,7 +390,7 @@ async def test_nested_multierrors(loglevel, start_method):
|
||||||
breadth=subactor_breadth,
|
breadth=subactor_breadth,
|
||||||
depth=depth,
|
depth=depth,
|
||||||
)
|
)
|
||||||
except trio.MultiError as err:
|
except BaseExceptionGroup as err:
|
||||||
assert len(err.exceptions) == subactor_breadth
|
assert len(err.exceptions) == subactor_breadth
|
||||||
for subexc in err.exceptions:
|
for subexc in err.exceptions:
|
||||||
|
|
||||||
|
@ -383,10 +408,10 @@ async def test_nested_multierrors(loglevel, start_method):
|
||||||
assert subexc.type in (
|
assert subexc.type in (
|
||||||
tractor.RemoteActorError,
|
tractor.RemoteActorError,
|
||||||
trio.Cancelled,
|
trio.Cancelled,
|
||||||
trio.MultiError
|
BaseExceptionGroup,
|
||||||
)
|
)
|
||||||
|
|
||||||
elif isinstance(subexc, trio.MultiError):
|
elif isinstance(subexc, BaseExceptionGroup):
|
||||||
for subsub in subexc.exceptions:
|
for subsub in subexc.exceptions:
|
||||||
|
|
||||||
if subsub in (tractor.RemoteActorError,):
|
if subsub in (tractor.RemoteActorError,):
|
||||||
|
@ -394,7 +419,7 @@ async def test_nested_multierrors(loglevel, start_method):
|
||||||
|
|
||||||
assert type(subsub) in (
|
assert type(subsub) in (
|
||||||
trio.Cancelled,
|
trio.Cancelled,
|
||||||
trio.MultiError,
|
BaseExceptionGroup,
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
assert isinstance(subexc, tractor.RemoteActorError)
|
assert isinstance(subexc, tractor.RemoteActorError)
|
||||||
|
@ -406,13 +431,13 @@ async def test_nested_multierrors(loglevel, start_method):
|
||||||
if is_win():
|
if is_win():
|
||||||
if isinstance(subexc, tractor.RemoteActorError):
|
if isinstance(subexc, tractor.RemoteActorError):
|
||||||
assert subexc.type in (
|
assert subexc.type in (
|
||||||
trio.MultiError,
|
BaseExceptionGroup,
|
||||||
tractor.RemoteActorError
|
tractor.RemoteActorError
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
assert isinstance(subexc, trio.MultiError)
|
assert isinstance(subexc, BaseExceptionGroup)
|
||||||
else:
|
else:
|
||||||
assert subexc.type is trio.MultiError
|
assert subexc.type is ExceptionGroup
|
||||||
else:
|
else:
|
||||||
assert subexc.type in (
|
assert subexc.type in (
|
||||||
tractor.RemoteActorError,
|
tractor.RemoteActorError,
|
||||||
|
|
|
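
Because each nursery level wraps its children's failures, the group that
reaches the top mirrors the actor tree; ``BaseExceptionGroup.split()``
(available on the backport and on 3.11+) can then partition branches by
type. A standalone illustration with constructed exceptions (a plain
``ValueError`` stands in for ``trio.Cancelled``, which cannot be
instantiated directly outside the run loop):

    from exceptiongroup import ExceptionGroup

    # a two-level tree: one branch errored, the other "cancelled"
    tree = ExceptionGroup(
        'actor tree',
        [
            ExceptionGroup('spawner0', [RuntimeError('child failed')]),
            ExceptionGroup('spawner1', [ValueError('pretend-cancelled')]),
        ],
    )

    matched, rest = tree.split(ValueError)
    assert matched is not None  # the branch holding only "cancellations"
    assert rest is not None     # still holds the RuntimeError branch
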
@ -10,6 +10,7 @@ TODO:
|
||||||
- wonder if any of it'll work on OS X?
|
- wonder if any of it'll work on OS X?
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
import itertools
|
||||||
from os import path
|
from os import path
|
||||||
from typing import Optional
|
from typing import Optional
|
||||||
import platform
|
import platform
|
||||||
|
@ -485,10 +486,12 @@ def test_multi_subactors(
|
||||||
# 2nd name_error failure
|
# 2nd name_error failure
|
||||||
child.expect(r"\(Pdb\+\+\)")
|
child.expect(r"\(Pdb\+\+\)")
|
||||||
|
|
||||||
assert_before(child, [
|
# TODO: will we ever get the race where this crash will show up?
|
||||||
"Attaching to pdb in crashed actor: ('name_error_1'",
|
# blocklist strat now prevents this crash
|
||||||
"NameError",
|
# assert_before(child, [
|
||||||
])
|
# "Attaching to pdb in crashed actor: ('name_error_1'",
|
||||||
|
# "NameError",
|
||||||
|
# ])
|
||||||
|
|
||||||
if ctlc:
|
if ctlc:
|
||||||
do_ctlc(child)
|
do_ctlc(child)
|
||||||
|
@ -580,14 +583,14 @@ def test_multi_daemon_subactors(
|
||||||
|
|
||||||
child.expect(r"\(Pdb\+\+\)")
|
child.expect(r"\(Pdb\+\+\)")
|
||||||
|
|
||||||
# there is a race for which subactor will acquire
|
# there can be a race for which subactor will acquire
|
||||||
# the root's tty lock first
|
# the root's tty lock first so anticipate either crash
|
||||||
|
# message on the first entry.
|
||||||
before = str(child.before.decode())
|
|
||||||
|
|
||||||
bp_forever_msg = "Attaching pdb to actor: ('bp_forever'"
|
bp_forever_msg = "Attaching pdb to actor: ('bp_forever'"
|
||||||
name_error_msg = "NameError"
|
name_error_msg = "NameError: name 'doggypants' is not defined"
|
||||||
|
|
||||||
|
before = str(child.before.decode())
|
||||||
if bp_forever_msg in before:
|
if bp_forever_msg in before:
|
||||||
next_msg = name_error_msg
|
next_msg = name_error_msg
|
||||||
|
|
||||||
|
@ -609,9 +612,7 @@ def test_multi_daemon_subactors(
|
||||||
|
|
||||||
child.sendline('c')
|
child.sendline('c')
|
||||||
child.expect(r"\(Pdb\+\+\)")
|
child.expect(r"\(Pdb\+\+\)")
|
||||||
before = str(child.before.decode())
|
assert_before(child, [next_msg])
|
||||||
|
|
||||||
assert next_msg in before
|
|
||||||
|
|
||||||
# XXX: hooray the root clobbering the child here was fixed!
|
# XXX: hooray the root clobbering the child here was fixed!
|
||||||
# IMO, this demonstrates the true power of SC system design.
|
# IMO, this demonstrates the true power of SC system design.
|
||||||
|
@ -630,32 +631,50 @@ def test_multi_daemon_subactors(
|
||||||
if ctlc:
|
if ctlc:
|
||||||
do_ctlc(child)
|
do_ctlc(child)
|
||||||
|
|
||||||
|
# expect another breakpoint actor entry
|
||||||
|
child.sendline('c')
|
||||||
|
child.expect(r"\(Pdb\+\+\)")
|
||||||
|
assert_before(child, [bp_forever_msg])
|
||||||
|
|
||||||
|
if ctlc:
|
||||||
|
do_ctlc(child)
|
||||||
|
|
||||||
|
# should crash with the 2nd name error (simulates
|
||||||
|
# a retry) and then the root eventually (boxed) errors
|
||||||
|
# after 1 or more further bp actor entries.
|
||||||
|
|
||||||
|
child.sendline('c')
|
||||||
|
child.expect(r"\(Pdb\+\+\)")
|
||||||
|
assert_before(child, [name_error_msg])
|
||||||
|
|
||||||
# wait for final error in root
|
# wait for final error in root
|
||||||
|
# where it crashes with boxed error
|
||||||
while True:
|
while True:
|
||||||
|
|
||||||
child.sendline('c')
|
|
||||||
child.expect(r"\(Pdb\+\+\)")
|
|
||||||
before = str(child.before.decode())
|
|
||||||
try:
|
try:
|
||||||
|
child.sendline('c')
|
||||||
# root error should be packed as remote error
|
child.expect(r"\(Pdb\+\+\)")
|
||||||
assert "_exceptions.RemoteActorError: ('name_error'" in before
|
assert_before(
|
||||||
|
child,
|
||||||
|
[bp_forever_msg]
|
||||||
|
)
|
||||||
|
except AssertionError:
|
||||||
break
|
break
|
||||||
|
|
||||||
except AssertionError:
|
# child.sendline('c')
|
||||||
assert bp_forever_msg in before
|
# assert_before(
|
||||||
|
|
||||||
if ctlc:
|
# child.sendline('c')
|
||||||
do_ctlc(child)
|
assert_before(
|
||||||
|
child,
|
||||||
|
[
|
||||||
|
# boxed error raised in root task
|
||||||
|
"Attaching to pdb in crashed actor: ('root'",
|
||||||
|
"_exceptions.RemoteActorError: ('name_error'",
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
try:
|
child.sendline('c')
|
||||||
child.sendline('c')
|
child.expect(pexpect.EOF)
|
||||||
child.expect(pexpect.EOF)
|
|
||||||
|
|
||||||
except TIMEOUT:
|
|
||||||
# Failed to exit using continue..?
|
|
||||||
child.sendline('q')
|
|
||||||
child.expect(pexpect.EOF)
|
|
||||||
|
|
||||||
|
|
||||||
@has_nested_actors
|
@has_nested_actors
|
||||||
|
@ -683,49 +702,64 @@ def test_multi_subactors_root_errors(
|
||||||
# continue again to catch 2nd name error from
|
# continue again to catch 2nd name error from
|
||||||
# actor 'name_error_1' (which is 2nd depth).
|
# actor 'name_error_1' (which is 2nd depth).
|
||||||
child.sendline('c')
|
child.sendline('c')
|
||||||
|
|
||||||
|
# due to block list strat from #337, this will no longer
|
||||||
|
# propagate before the root errors and cancels the spawner sub-tree.
|
||||||
child.expect(r"\(Pdb\+\+\)")
|
child.expect(r"\(Pdb\+\+\)")
|
||||||
|
|
||||||
|
# only if the blocking condition doesn't kick in fast enough
|
||||||
|
before = str(child.before.decode())
|
||||||
|
if "Debug lock blocked for ['name_error_1'" not in before:
|
||||||
|
|
||||||
|
assert_before(child, [
|
||||||
|
"Attaching to pdb in crashed actor: ('name_error_1'",
|
||||||
|
"NameError",
|
||||||
|
])
|
||||||
|
|
||||||
|
if ctlc:
|
||||||
|
do_ctlc(child)
|
||||||
|
|
||||||
|
child.sendline('c')
|
||||||
|
child.expect(r"\(Pdb\+\+\)")
|
||||||
|
|
||||||
|
# check if the spawner crashed or was blocked from debug
|
||||||
|
# and if this intermediary attached check the boxed error
|
||||||
|
before = str(child.before.decode())
|
||||||
|
if "Attaching to pdb in crashed actor: ('spawn_error'" in before:
|
||||||
|
|
||||||
|
assert_before(child, [
|
||||||
|
# boxed error from spawner's child
|
||||||
|
"RemoteActorError: ('name_error_1'",
|
||||||
|
"NameError",
|
||||||
|
])
|
||||||
|
|
||||||
|
if ctlc:
|
||||||
|
do_ctlc(child)
|
||||||
|
|
||||||
|
child.sendline('c')
|
||||||
|
child.expect(r"\(Pdb\+\+\)")
|
||||||
|
|
||||||
|
# expect a root actor crash
|
||||||
assert_before(child, [
|
assert_before(child, [
|
||||||
"Attaching to pdb in crashed actor: ('name_error_1'",
|
|
||||||
"NameError",
|
|
||||||
])
|
|
||||||
|
|
||||||
if ctlc:
|
|
||||||
do_ctlc(child)
|
|
||||||
|
|
||||||
child.sendline('c')
|
|
||||||
child.expect(r"\(Pdb\+\+\)")
|
|
||||||
assert_before(child, [
|
|
||||||
"Attaching to pdb in crashed actor: ('spawn_error'",
|
|
||||||
# boxed error from previous step
|
|
||||||
"RemoteActorError: ('name_error_1'",
|
|
||||||
"NameError",
|
|
||||||
])
|
|
||||||
|
|
||||||
if ctlc:
|
|
||||||
do_ctlc(child)
|
|
||||||
|
|
||||||
child.sendline('c')
|
|
||||||
child.expect(r"\(Pdb\+\+\)")
|
|
||||||
assert_before(child, [
|
|
||||||
"Attaching to pdb in crashed actor: ('root'",
|
|
||||||
# boxed error from previous step
|
|
||||||
"RemoteActorError: ('name_error'",
|
"RemoteActorError: ('name_error'",
|
||||||
"NameError",
|
"NameError",
|
||||||
|
|
||||||
|
# error from root actor and root task that created top level nursery
|
||||||
|
"Attaching to pdb in crashed actor: ('root'",
|
||||||
|
"AssertionError",
|
||||||
])
|
])
|
||||||
|
|
||||||
# warnings assert we probably don't need
|
|
||||||
# assert "Cancelling nursery in ('spawn_error'," in before
|
|
||||||
|
|
||||||
if ctlc:
|
|
||||||
do_ctlc(child)
|
|
||||||
|
|
||||||
# continue again
|
|
||||||
child.sendline('c')
|
child.sendline('c')
|
||||||
child.expect(pexpect.EOF)
|
child.expect(pexpect.EOF)
|
||||||
|
|
||||||
before = str(child.before.decode())
|
assert_before(child, [
|
||||||
# error from root actor and root task that created top level nursery
|
# "Attaching to pdb in crashed actor: ('root'",
|
||||||
assert "AssertionError" in before
|
# boxed error from previous step
|
||||||
|
"RemoteActorError: ('name_error'",
|
||||||
|
"NameError",
|
||||||
|
"AssertionError",
|
||||||
|
'assert 0',
|
||||||
|
])
|
||||||
|
|
||||||
|
|
||||||
@has_nested_actors
|
@has_nested_actors
|
||||||
|
@ -750,24 +784,31 @@ def test_multi_nested_subactors_error_through_nurseries(
|
||||||
|
|
||||||
timed_out_early: bool = False
|
timed_out_early: bool = False
|
||||||
|
|
||||||
for i in range(12):
|
for send_char in itertools.cycle(['c', 'q']):
|
||||||
try:
|
try:
|
||||||
child.expect(r"\(Pdb\+\+\)")
|
child.expect(r"\(Pdb\+\+\)")
|
||||||
child.sendline('c')
|
child.sendline(send_char)
|
||||||
time.sleep(0.1)
|
time.sleep(0.01)
|
||||||
|
|
||||||
except EOF:
|
except EOF:
|
||||||
|
|
||||||
# race conditions on how fast the continue is sent?
|
|
||||||
print(f"Failed early on {i}?")
|
|
||||||
timed_out_early = True
|
|
||||||
break
|
break
|
||||||
else:
|
|
||||||
child.expect(pexpect.EOF)
|
|
||||||
|
|
||||||
if not timed_out_early:
|
assert_before(child, [
|
||||||
before = str(child.before.decode())
|
|
||||||
assert "NameError" in before
|
# boxed source errors
|
||||||
|
"NameError: name 'doggypants' is not defined",
|
||||||
|
"tractor._exceptions.RemoteActorError: ('name_error'",
|
||||||
|
"bdb.BdbQuit",
|
||||||
|
|
||||||
|
# first level subtrees
|
||||||
|
"tractor._exceptions.RemoteActorError: ('spawner0'",
|
||||||
|
# "tractor._exceptions.RemoteActorError: ('spawner1'",
|
||||||
|
|
||||||
|
# propagation of errors up through nested subtrees
|
||||||
|
"tractor._exceptions.RemoteActorError: ('spawn_until_0'",
|
||||||
|
"tractor._exceptions.RemoteActorError: ('spawn_until_1'",
|
||||||
|
"tractor._exceptions.RemoteActorError: ('spawn_until_2'",
|
||||||
|
])
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.timeout(15)
|
@pytest.mark.timeout(15)
|
||||||
|
|
|
@ -8,6 +8,7 @@ import builtins
|
||||||
import itertools
|
import itertools
|
||||||
import importlib
|
import importlib
|
||||||
|
|
||||||
|
from exceptiongroup import BaseExceptionGroup
|
||||||
import pytest
|
import pytest
|
||||||
import trio
|
import trio
|
||||||
import tractor
|
import tractor
|
||||||
|
@ -409,11 +410,12 @@ def test_trio_error_cancels_intertask_chan(arb_addr):
|
||||||
# should trigger remote actor error
|
# should trigger remote actor error
|
||||||
await portal.result()
|
await portal.result()
|
||||||
|
|
||||||
with pytest.raises(RemoteActorError) as excinfo:
|
with pytest.raises(BaseExceptionGroup) as excinfo:
|
||||||
trio.run(main)
|
trio.run(main)
|
||||||
|
|
||||||
# ensure boxed error is correct
|
# ensure boxed errors
|
||||||
assert excinfo.value.type == Exception
|
for exc in excinfo.value.exceptions:
|
||||||
|
assert exc.type == Exception
|
||||||
|
|
||||||
|
|
||||||
def test_trio_closes_early_and_channel_exits(arb_addr):
|
def test_trio_closes_early_and_channel_exits(arb_addr):
|
||||||
|
@ -442,11 +444,12 @@ def test_aio_errors_and_channel_propagates_and_closes(arb_addr):
|
||||||
# should trigger remote actor error
|
# should trigger remote actor error
|
||||||
await portal.result()
|
await portal.result()
|
||||||
|
|
||||||
with pytest.raises(RemoteActorError) as excinfo:
|
with pytest.raises(BaseExceptionGroup) as excinfo:
|
||||||
trio.run(main)
|
trio.run(main)
|
||||||
|
|
||||||
# ensure boxed error is correct
|
# ensure boxed errors
|
||||||
assert excinfo.value.type == Exception
|
for exc in excinfo.value.exceptions:
|
||||||
|
assert exc.type == Exception
|
||||||
|
|
||||||
|
|
||||||
@tractor.context
|
@tractor.context
|
||||||
|
|
|
@ -11,15 +11,15 @@ from conftest import tractor_test
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.trio
|
@pytest.mark.trio
|
||||||
async def test_no_arbitter():
|
async def test_no_runtime():
|
||||||
"""An arbitter must be established before any nurseries
|
"""An arbitter must be established before any nurseries
|
||||||
can be created.
|
can be created.
|
||||||
|
|
||||||
(In other words ``tractor.open_root_actor()`` must be engaged at
|
(In other words ``tractor.open_root_actor()`` must be engaged at
|
||||||
some point?)
|
some point?)
|
||||||
"""
|
"""
|
||||||
with pytest.raises(RuntimeError):
|
with pytest.raises(RuntimeError):
|
||||||
with tractor.open_nursery():
|
async with tractor.find_actor('doggy'):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -62,7 +62,10 @@ async def test_lifetime_stack_wipes_tmpfile(
|
||||||
)
|
)
|
||||||
).result()
|
).result()
|
||||||
|
|
||||||
except tractor.RemoteActorError:
|
except (
|
||||||
|
tractor.RemoteActorError,
|
||||||
|
tractor.BaseExceptionGroup,
|
||||||
|
):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
# tmp file should have been wiped by
|
# tmp file should have been wiped by
|
||||||
|
|
|
@ -18,7 +18,7 @@
|
||||||
tractor: structured concurrent "actors".
|
tractor: structured concurrent "actors".
|
||||||
|
|
||||||
"""
|
"""
|
||||||
from trio import MultiError
|
from exceptiongroup import BaseExceptionGroup
|
||||||
|
|
||||||
from ._clustering import open_actor_cluster
|
from ._clustering import open_actor_cluster
|
||||||
from ._ipc import Channel
|
from ._ipc import Channel
|
||||||
|
@ -62,7 +62,7 @@ __all__ = [
|
||||||
'ContextCancelled',
|
'ContextCancelled',
|
||||||
'ModuleNotExposed',
|
'ModuleNotExposed',
|
||||||
'MsgStream',
|
'MsgStream',
|
||||||
'MultiError',
|
'BaseExceptionGroup',
|
||||||
'Portal',
|
'Portal',
|
||||||
'ReceiveMsgStream',
|
'ReceiveMsgStream',
|
||||||
'RemoteActorError',
|
'RemoteActorError',
|
||||||
|
|
|
@ -25,6 +25,7 @@ import signal
|
||||||
from functools import partial
|
from functools import partial
|
||||||
from contextlib import asynccontextmanager as acm
|
from contextlib import asynccontextmanager as acm
|
||||||
from typing import (
|
from typing import (
|
||||||
|
Any,
|
||||||
Optional,
|
Optional,
|
||||||
Callable,
|
Callable,
|
||||||
AsyncIterator,
|
AsyncIterator,
|
||||||
|
@ -75,8 +76,12 @@ class Lock:
|
||||||
# placeholder for function to set a ``trio.Event`` on debugger exit
|
# placeholder for function to set a ``trio.Event`` on debugger exit
|
||||||
# pdb_release_hook: Optional[Callable] = None
|
# pdb_release_hook: Optional[Callable] = None
|
||||||
|
|
||||||
|
_trio_handler: Callable[
|
||||||
|
[int, Optional[FrameType]], Any
|
||||||
|
] | int | None = None
|
||||||
|
|
||||||
# actor-wide variable pointing to current task name using debugger
|
# actor-wide variable pointing to current task name using debugger
|
||||||
local_task_in_debug: Optional[str] = None
|
local_task_in_debug: str | None = None
|
||||||
|
|
||||||
# NOTE: set by the current task waiting on the root tty lock from
|
# NOTE: set by the current task waiting on the root tty lock from
|
||||||
# the CALLER side of the `lock_tty_for_child()` context entry-call
|
# the CALLER side of the `lock_tty_for_child()` context entry-call
|
||||||
|
@ -105,19 +110,16 @@ class Lock:
|
||||||
@classmethod
|
@classmethod
|
||||||
def shield_sigint(cls):
|
def shield_sigint(cls):
|
||||||
cls._orig_sigint_handler = signal.signal(
|
cls._orig_sigint_handler = signal.signal(
|
||||||
signal.SIGINT,
|
signal.SIGINT,
|
||||||
shield_sigint,
|
shield_sigint,
|
||||||
)
|
)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def unshield_sigint(cls):
|
def unshield_sigint(cls):
|
||||||
if cls._orig_sigint_handler is not None:
|
# always restore ``trio``'s sigint handler. see notes below in
|
||||||
# restore original sigint handler
|
# the pdb factory about the nightmare that is that code swapping
|
||||||
signal.signal(
|
# out the handler when the repl activates...
|
||||||
signal.SIGINT,
|
signal.signal(signal.SIGINT, cls._trio_handler)
|
||||||
cls._orig_sigint_handler
|
|
||||||
)
|
|
||||||
|
|
||||||
cls._orig_sigint_handler = None
|
cls._orig_sigint_handler = None
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
|
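
The handler juggling in ``shield_sigint()`` / ``unshield_sigint()`` above
boils down to the standard save/swap/restore pattern from the ``signal``
module; roughly (a sketch of the pattern only, not the actual ``Lock``
methods):

    import signal

    # stash whatever handler the runtime (e.g. ``trio``) installed at startup
    _original = signal.getsignal(signal.SIGINT)


    def _shielded(signum, frame):
        print('ignoring SIGINT while the debugger REPL is active')


    def shield() -> None:
        # swap in the debugger-aware handler
        signal.signal(signal.SIGINT, _shielded)


    def unshield() -> None:
        # always restore the originally captured handler on leaving debug
        # mode, instead of whatever pdb may have installed in the meantime
        signal.signal(signal.SIGINT, _original)
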
@ -363,7 +365,7 @@ async def wait_for_parent_stdin_hijack(
|
||||||
|
|
||||||
) as (ctx, val):
|
) as (ctx, val):
|
||||||
|
|
||||||
log.pdb('locked context')
|
log.debug('locked context')
|
||||||
assert val == 'Locked'
|
assert val == 'Locked'
|
||||||
|
|
||||||
async with ctx.open_stream() as stream:
|
async with ctx.open_stream() as stream:
|
||||||
|
@ -382,15 +384,14 @@ async def wait_for_parent_stdin_hijack(
|
||||||
# sync with callee termination
|
# sync with callee termination
|
||||||
assert await ctx.result() == "pdb_unlock_complete"
|
assert await ctx.result() == "pdb_unlock_complete"
|
||||||
|
|
||||||
log.pdb('unlocked context')
|
log.debug('exiting child side locking task context')
|
||||||
|
|
||||||
except ContextCancelled:
|
except ContextCancelled:
|
||||||
log.warning('Root actor cancelled debug lock')
|
log.warning('Root actor cancelled debug lock')
|
||||||
|
|
||||||
finally:
|
finally:
|
||||||
log.pdb(f"Exiting debugger for actor {actor_uid}")
|
|
||||||
Lock.local_task_in_debug = None
|
Lock.local_task_in_debug = None
|
||||||
log.pdb(f"Child {actor_uid} released parent stdio lock")
|
log.debug('Exiting debugger from child')
|
||||||
|
|
||||||
|
|
||||||
def mk_mpdb() -> tuple[MultiActorPdb, Callable]:
|
def mk_mpdb() -> tuple[MultiActorPdb, Callable]:
|
||||||
|
@ -423,9 +424,8 @@ async def _breakpoint(
|
||||||
|
|
||||||
'''
|
'''
|
||||||
__tracebackhide__ = True
|
__tracebackhide__ = True
|
||||||
|
|
||||||
pdb, undo_sigint = mk_mpdb()
|
|
||||||
actor = tractor.current_actor()
|
actor = tractor.current_actor()
|
||||||
|
pdb, undo_sigint = mk_mpdb()
|
||||||
task_name = trio.lowlevel.current_task().name
|
task_name = trio.lowlevel.current_task().name
|
||||||
|
|
||||||
# TODO: is it possible to debug a trio.Cancelled except block?
|
# TODO: is it possible to debug a trio.Cancelled except block?
|
||||||
|
@ -449,7 +449,10 @@ async def _breakpoint(
|
||||||
# Recurrence entry case: this task already has the lock and
|
# Recurrence entry case: this task already has the lock and
|
||||||
# is likely recurrently entering a breakpoint
|
# is likely recurrently entering a breakpoint
|
||||||
if Lock.local_task_in_debug == task_name:
|
if Lock.local_task_in_debug == task_name:
|
||||||
# noop on recurrent entry case
|
# noop on recurrent entry case but we want to trigger
|
||||||
|
# a checkpoint to allow other actors to error-propagate and
|
||||||
|
# potentially avoid infinite re-entries in some subactor.
|
||||||
|
await trio.lowlevel.checkpoint()
|
||||||
return
|
return
|
||||||
|
|
||||||
# if **this** actor is already in debug mode block here
|
# if **this** actor is already in debug mode block here
|
||||||
|
@ -468,10 +471,13 @@ async def _breakpoint(
|
||||||
# root nursery so that the debugger can continue to run without
|
# root nursery so that the debugger can continue to run without
|
||||||
# being restricted by the scope of a new task nursery.
|
# being restricted by the scope of a new task nursery.
|
||||||
|
|
||||||
# NOTE: if we want to debug a trio.Cancelled triggered exception
|
# TODO: if we want to debug a trio.Cancelled triggered exception
|
||||||
# we have to figure out how to avoid having the service nursery
|
# we have to figure out how to avoid having the service nursery
|
||||||
# cancel on this task start? I *think* this works below?
|
# cancel on this task start? I *think* this works below:
|
||||||
# actor._service_n.cancel_scope.shield = shield
|
# ```python
|
||||||
|
# actor._service_n.cancel_scope.shield = shield
|
||||||
|
# ```
|
||||||
|
# but not entirely sure if that's a sane way to implement it?
|
||||||
try:
|
try:
|
||||||
with trio.CancelScope(shield=True):
|
with trio.CancelScope(shield=True):
|
||||||
await actor._service_n.start(
|
await actor._service_n.start(
|
||||||
|
@ -480,6 +486,13 @@ async def _breakpoint(
|
||||||
)
|
)
|
||||||
except RuntimeError:
|
except RuntimeError:
|
||||||
Lock.release()
|
Lock.release()
|
||||||
|
|
||||||
|
if actor._cancel_called:
|
||||||
|
# service nursery won't be usable and we
|
||||||
|
# don't want to lock up the root either way since
|
||||||
|
# we're in (the midst of) cancellation.
|
||||||
|
return
|
||||||
|
|
||||||
raise
|
raise
|
||||||
|
|
||||||
elif is_root_process():
|
elif is_root_process():
|
||||||
|
@ -530,10 +543,6 @@ async def _breakpoint(
|
||||||
# # last_f = frame.f_back
|
# # last_f = frame.f_back
|
||||||
# # last_f.f_globals['__tracebackhide__'] = True
|
# # last_f.f_globals['__tracebackhide__'] = True
|
||||||
# # signal.signal = pdbpp.hideframe(signal.signal)
|
# # signal.signal = pdbpp.hideframe(signal.signal)
|
||||||
# signal.signal(
|
|
||||||
# signal.SIGINT,
|
|
||||||
# orig_handler
|
|
||||||
# )
|
|
||||||
|
|
||||||
|
|
||||||
def shield_sigint(
|
def shield_sigint(
|
||||||
|
@ -544,7 +553,7 @@ def shield_sigint(
|
||||||
|
|
||||||
) -> None:
|
) -> None:
|
||||||
'''
|
'''
|
||||||
Specialized debugger compatible SIGINT handler.
|
Specialized, debugger-aware SIGINT handler.
|
||||||
|
|
||||||
In children we always ignore to avoid deadlocks since cancellation
|
In children we always ignore to avoid deadlocks since cancellation
|
||||||
should always be managed by the parent supervising actor. The root
|
should always be managed by the parent supervising actor. The root
|
||||||
|
@ -601,6 +610,8 @@ def shield_sigint(
|
||||||
# which has already terminated to unlock.
|
# which has already terminated to unlock.
|
||||||
and any_connected
|
and any_connected
|
||||||
):
|
):
|
||||||
|
# we are root and some actor is in debug mode
|
||||||
|
# if uid_in_debug is not None:
|
||||||
name = uid_in_debug[0]
|
name = uid_in_debug[0]
|
||||||
if name != 'root':
|
if name != 'root':
|
||||||
log.pdb(
|
log.pdb(
|
||||||
|
@ -611,6 +622,22 @@ def shield_sigint(
|
||||||
log.pdb(
|
log.pdb(
|
||||||
"Ignoring SIGINT while in debug mode"
|
"Ignoring SIGINT while in debug mode"
|
||||||
)
|
)
|
||||||
|
elif (
|
||||||
|
is_root_process()
|
||||||
|
):
|
||||||
|
log.pdb(
|
||||||
|
"Ignoring SIGINT since debug mode is enabled"
|
||||||
|
)
|
||||||
|
|
||||||
|
# revert back to ``trio`` handler asap!
|
||||||
|
Lock.unshield_sigint()
|
||||||
|
if (
|
||||||
|
Lock._root_local_task_cs_in_debug
|
||||||
|
and not Lock._root_local_task_cs_in_debug.cancel_called
|
||||||
|
):
|
||||||
|
Lock._root_local_task_cs_in_debug.cancel()
|
||||||
|
|
||||||
|
# raise KeyboardInterrupt
|
||||||
|
|
||||||
# child actor that has locked the debugger
|
# child actor that has locked the debugger
|
||||||
elif not is_root_process():
|
elif not is_root_process():
|
||||||
|
@ -636,10 +663,9 @@ def shield_sigint(
|
||||||
# https://github.com/goodboy/tractor/issues/320
|
# https://github.com/goodboy/tractor/issues/320
|
||||||
# elif debug_mode():
|
# elif debug_mode():
|
||||||
|
|
||||||
else:
|
else: # XXX: shouldn't ever get here?
|
||||||
log.pdb(
|
print("WTFWTFWTF")
|
||||||
"Ignoring SIGINT since debug mode is enabled"
|
raise KeyboardInterrupt
|
||||||
)
|
|
||||||
|
|
||||||
# NOTE: currently (at least on ``fancycompleter`` 0.9.2)
|
# NOTE: currently (at least on ``fancycompleter`` 0.9.2)
|
||||||
# it looks to be that the last command that was run (eg. ll)
|
# it looks to be that the last command that was run (eg. ll)
|
||||||
|
|
|
@ -27,6 +27,7 @@ import importlib
|
||||||
import builtins
|
import builtins
|
||||||
import traceback
|
import traceback
|
||||||
|
|
||||||
|
import exceptiongroup as eg
|
||||||
import trio
|
import trio
|
||||||
|
|
||||||
|
|
||||||
|
@ -52,9 +53,6 @@ class RemoteActorError(Exception):
|
||||||
self.type = suberror_type
|
self.type = suberror_type
|
||||||
self.msgdata = msgdata
|
self.msgdata = msgdata
|
||||||
|
|
||||||
# TODO: a trio.MultiError.catch like context manager
|
|
||||||
# for catching underlying remote errors of a particular type
|
|
||||||
|
|
||||||
|
|
||||||
class InternalActorError(RemoteActorError):
|
class InternalActorError(RemoteActorError):
|
||||||
"""Remote internal ``tractor`` error indicating
|
"""Remote internal ``tractor`` error indicating
|
||||||
|
@ -123,10 +121,12 @@ def unpack_error(
|
||||||
err_type=RemoteActorError
|
err_type=RemoteActorError
|
||||||
|
|
||||||
) -> Exception:
|
) -> Exception:
|
||||||
"""Unpack an 'error' message from the wire
|
'''
|
||||||
|
Unpack an 'error' message from the wire
|
||||||
into a local ``RemoteActorError``.
|
into a local ``RemoteActorError``.
|
||||||
|
|
||||||
"""
|
'''
|
||||||
|
__tracebackhide__ = True
|
||||||
error = msg['error']
|
error = msg['error']
|
||||||
|
|
||||||
tb_str = error.get('tb_str', '')
|
tb_str = error.get('tb_str', '')
|
||||||
|
@ -139,7 +139,12 @@ def unpack_error(
|
||||||
suberror_type = trio.Cancelled
|
suberror_type = trio.Cancelled
|
||||||
|
|
||||||
else: # try to lookup a suitable local error type
|
else: # try to lookup a suitable local error type
|
||||||
for ns in [builtins, _this_mod, trio]:
|
for ns in [
|
||||||
|
builtins,
|
||||||
|
_this_mod,
|
||||||
|
eg,
|
||||||
|
trio,
|
||||||
|
]:
|
||||||
try:
|
try:
|
||||||
suberror_type = getattr(ns, type_name)
|
suberror_type = getattr(ns, type_name)
|
||||||
break
|
break
|
||||||
|
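
The namespace scan above is the whole trick for rebuilding a remote
exception locally: the wire message carries (roughly) the type *name*
plus a traceback string, so the receiver searches a few candidate
modules for a class of that name. A standalone sketch of that lookup
(the helper name and the ``Exception`` fallback are illustrative, not
part of tractor's API):

    import builtins
    import exceptiongroup as eg
    import trio


    def lookup_error_type(type_name: str) -> type[BaseException]:
        # mirror the loop in ``unpack_error()``: scan known namespaces
        for ns in (builtins, eg, trio):
            subtype = getattr(ns, type_name, None)
            if isinstance(subtype, type) and issubclass(subtype, BaseException):
                return subtype
        # nothing matched: fall back to a generic local wrapper
        return Exception


    assert lookup_error_type('ExceptionGroup') is eg.ExceptionGroup
    assert lookup_error_type('Cancelled') is trio.Cancelled
    assert lookup_error_type('NotARealError') is Exception
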
@@ -158,12 +163,15 @@ def unpack_error(


 def is_multi_cancelled(exc: BaseException) -> bool:
-    """Predicate to determine if a ``trio.MultiError`` contains only
-    ``trio.Cancelled`` sub-exceptions (and is likely the result of
-    cancelling a collection of subtasks.
-
-    """
-    return not trio.MultiError.filter(
-        lambda exc: exc if not isinstance(exc, trio.Cancelled) else None,
-        exc,
-    )
+    '''
+    Predicate to determine if a possible ``eg.BaseExceptionGroup`` contains
+    only ``trio.Cancelled`` sub-exceptions (and is likely the result of
+    cancelling a collection of subtasks.
+
+    '''
+    if isinstance(exc, eg.BaseExceptionGroup):
+        return exc.subgroup(
+            lambda exc: isinstance(exc, trio.Cancelled)
+        ) is not None
+
+    return False
|
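
``subgroup()`` walks the (possibly nested) group and returns a new group
containing just the matching leaves, or ``None`` when nothing matches,
which is what the predicate above keys off of. A small standalone
demonstration with constructed exceptions:

    from exceptiongroup import ExceptionGroup

    beg = ExceptionGroup(
        'demo',
        [ValueError('a'), ExceptionGroup('inner', [KeyError('b')])],
    )

    # only the matching leaves survive; nesting is preserved
    only_keys = beg.subgroup(lambda exc: isinstance(exc, KeyError))
    assert only_keys is not None
    assert isinstance(only_keys.exceptions[0], ExceptionGroup)

    # no TypeError anywhere in the tree -> None
    assert beg.subgroup(lambda exc: isinstance(exc, TypeError)) is None
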
@ -52,17 +52,17 @@ log = get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
def _unwrap_msg(
|
def _unwrap_msg(
|
||||||
|
|
||||||
msg: dict[str, Any],
|
msg: dict[str, Any],
|
||||||
channel: Channel
|
channel: Channel
|
||||||
|
|
||||||
) -> Any:
|
) -> Any:
|
||||||
|
__tracebackhide__ = True
|
||||||
try:
|
try:
|
||||||
return msg['return']
|
return msg['return']
|
||||||
except KeyError:
|
except KeyError:
|
||||||
# internal error should never get here
|
# internal error should never get here
|
||||||
assert msg.get('cid'), "Received internal error at portal?"
|
assert msg.get('cid'), "Received internal error at portal?"
|
||||||
raise unpack_error(msg, channel)
|
raise unpack_error(msg, channel) from None
|
||||||
|
|
||||||
|
|
||||||
class MessagingError(Exception):
|
class MessagingError(Exception):
|
||||||
|
@ -136,6 +136,7 @@ class Portal:
|
||||||
Return the result(s) from the remote actor's "main" task.
|
Return the result(s) from the remote actor's "main" task.
|
||||||
|
|
||||||
'''
|
'''
|
||||||
|
# __tracebackhide__ = True
|
||||||
# Check for non-rpc errors slapped on the
|
# Check for non-rpc errors slapped on the
|
||||||
# channel for which we always raise
|
# channel for which we always raise
|
||||||
exc = self.channel._exc
|
exc = self.channel._exc
|
||||||
|
@ -460,7 +461,6 @@ class Portal:
|
||||||
# sure it's worth being pedantic:
|
# sure it's worth being pedantic:
|
||||||
# Exception,
|
# Exception,
|
||||||
# trio.Cancelled,
|
# trio.Cancelled,
|
||||||
# trio.MultiError,
|
|
||||||
# KeyboardInterrupt,
|
# KeyboardInterrupt,
|
||||||
|
|
||||||
) as err:
|
) as err:
|
||||||
|
|
|
@ -23,15 +23,22 @@ from functools import partial
|
||||||
import importlib
|
import importlib
|
||||||
import logging
|
import logging
|
||||||
import os
|
import os
|
||||||
|
import signal
|
||||||
from typing import (
|
from typing import (
|
||||||
Optional,
|
Optional,
|
||||||
)
|
)
|
||||||
import typing
|
import typing
|
||||||
import warnings
|
import warnings
|
||||||
|
|
||||||
|
|
||||||
|
from exceptiongroup import BaseExceptionGroup
|
||||||
import trio
|
import trio
|
||||||
|
|
||||||
from ._runtime import Actor, Arbiter, async_main
|
from ._runtime import (
|
||||||
|
Actor,
|
||||||
|
Arbiter,
|
||||||
|
async_main,
|
||||||
|
)
|
||||||
from . import _debug
|
from . import _debug
|
||||||
from . import _spawn
|
from . import _spawn
|
||||||
from . import _state
|
from . import _state
|
||||||
|
@ -74,14 +81,19 @@ async def open_root_actor(
|
||||||
rpc_module_paths: Optional[list] = None,
|
rpc_module_paths: Optional[list] = None,
|
||||||
|
|
||||||
) -> typing.Any:
|
) -> typing.Any:
|
||||||
"""Async entry point for ``tractor``.
|
'''
|
||||||
|
Runtime init entry point for ``tractor``.
|
||||||
|
|
||||||
"""
|
'''
|
||||||
# Override the global debugger hook to make it play nice with
|
# Override the global debugger hook to make it play nice with
|
||||||
# ``trio``, see:
|
# ``trio``, see:
|
||||||
# https://github.com/python-trio/trio/issues/1155#issuecomment-742964018
|
# https://github.com/python-trio/trio/issues/1155#issuecomment-742964018
|
||||||
os.environ['PYTHONBREAKPOINT'] = 'tractor._debug._set_trace'
|
os.environ['PYTHONBREAKPOINT'] = 'tractor._debug._set_trace'
|
||||||
|
|
||||||
|
# attempt to retrieve ``trio``'s sigint handler and stash it
|
||||||
|
# on our debugger lock state.
|
||||||
|
_debug.Lock._trio_handler = signal.getsignal(signal.SIGINT)
|
||||||
|
|
||||||
# mark top most level process as root actor
|
# mark top most level process as root actor
|
||||||
_state._runtime_vars['_is_root'] = True
|
_state._runtime_vars['_is_root'] = True
|
||||||
|
|
||||||
|
@ -205,7 +217,10 @@ async def open_root_actor(
|
||||||
try:
|
try:
|
||||||
yield actor
|
yield actor
|
||||||
|
|
||||||
except (Exception, trio.MultiError) as err:
|
except (
|
||||||
|
Exception,
|
||||||
|
BaseExceptionGroup,
|
||||||
|
) as err:
|
||||||
|
|
||||||
entered = await _debug._maybe_enter_pm(err)
|
entered = await _debug._maybe_enter_pm(err)
|
||||||
|
|
||||||
|
|
|
@ -25,21 +25,23 @@ from itertools import chain
|
||||||
import importlib
|
import importlib
|
||||||
import importlib.util
|
import importlib.util
|
||||||
import inspect
|
import inspect
|
||||||
import uuid
|
import signal
|
||||||
|
import sys
|
||||||
from typing import (
|
from typing import (
|
||||||
Any, Optional,
|
Any, Optional,
|
||||||
Union, TYPE_CHECKING,
|
Union, TYPE_CHECKING,
|
||||||
Callable,
|
Callable,
|
||||||
)
|
)
|
||||||
|
import uuid
|
||||||
from types import ModuleType
|
from types import ModuleType
|
||||||
import sys
|
|
||||||
import os
|
import os
|
||||||
from contextlib import ExitStack
|
from contextlib import ExitStack
|
||||||
import warnings
|
import warnings
|
||||||
|
|
||||||
|
from async_generator import aclosing
|
||||||
|
from exceptiongroup import BaseExceptionGroup
|
||||||
import trio # type: ignore
|
import trio # type: ignore
|
||||||
from trio_typing import TaskStatus
|
from trio_typing import TaskStatus
|
||||||
from async_generator import aclosing
|
|
||||||
|
|
||||||
from ._ipc import Channel
|
from ._ipc import Channel
|
||||||
from ._streaming import Context
|
from ._streaming import Context
|
||||||
|
@ -194,7 +196,7 @@ async def _invoke(
|
||||||
res = await coro
|
res = await coro
|
||||||
await chan.send({'return': res, 'cid': cid})
|
await chan.send({'return': res, 'cid': cid})
|
||||||
|
|
||||||
except trio.MultiError:
|
except BaseExceptionGroup:
|
||||||
# if a context error was set then likely
|
# if a context error was set then likely
|
||||||
# the multierror was raised due to that
|
# the multierror was raised due to that
|
||||||
if ctx._error is not None:
|
if ctx._error is not None:
|
||||||
|
@ -266,7 +268,7 @@ async def _invoke(
|
||||||
|
|
||||||
except (
|
except (
|
||||||
Exception,
|
Exception,
|
||||||
trio.MultiError
|
BaseExceptionGroup,
|
||||||
) as err:
|
) as err:
|
||||||
|
|
||||||
if not is_multi_cancelled(err):
|
if not is_multi_cancelled(err):
|
||||||
|
@ -349,7 +351,7 @@ def _get_mod_abspath(module):
|
||||||
|
|
||||||
async def try_ship_error_to_parent(
|
async def try_ship_error_to_parent(
|
||||||
channel: Channel,
|
channel: Channel,
|
||||||
err: Union[Exception, trio.MultiError],
|
err: Union[Exception, BaseExceptionGroup],
|
||||||
|
|
||||||
) -> None:
|
) -> None:
|
||||||
with trio.CancelScope(shield=True):
|
with trio.CancelScope(shield=True):
|
||||||
|
@ -708,6 +710,14 @@ class Actor:
|
||||||
log.runtime(f"No more channels for {chan.uid}")
|
log.runtime(f"No more channels for {chan.uid}")
|
||||||
self._peers.pop(uid, None)
|
self._peers.pop(uid, None)
|
||||||
|
|
||||||
|
log.runtime(f"Peers is {self._peers}")
|
||||||
|
|
||||||
|
# No more channels to other actors (at all) registered
|
||||||
|
# as connected.
|
||||||
|
if not self._peers:
|
||||||
|
log.runtime("Signalling no more peer channel connections")
|
||||||
|
self._no_more_peers.set()
|
||||||
|
|
||||||
# NOTE: block this actor from acquiring the
|
# NOTE: block this actor from acquiring the
|
||||||
# debugger-TTY-lock since we have no way to know if we
|
# debugger-TTY-lock since we have no way to know if we
|
||||||
# cancelled it and further there is no way to ensure the
|
# cancelled it and further there is no way to ensure the
|
||||||
|
@ -721,23 +731,16 @@ class Actor:
|
||||||
# if a now stale local task has the TTY lock still
|
# if a now stale local task has the TTY lock still
|
||||||
# we cancel it to allow servicing other requests for
|
# we cancel it to allow servicing other requests for
|
||||||
# the lock.
|
# the lock.
|
||||||
|
db_cs = pdb_lock._root_local_task_cs_in_debug
|
||||||
if (
|
if (
|
||||||
pdb_lock._root_local_task_cs_in_debug
|
db_cs
|
||||||
and not pdb_lock._root_local_task_cs_in_debug.cancel_called
|
and not db_cs.cancel_called
|
||||||
):
|
):
|
||||||
log.warning(
|
log.warning(
|
||||||
f'STALE DEBUG LOCK DETECTED FOR {uid}'
|
f'STALE DEBUG LOCK DETECTED FOR {uid}'
|
||||||
)
|
)
|
||||||
# TODO: figure out why this breaks tests..
|
# TODO: figure out why this breaks tests..
|
||||||
# pdb_lock._root_local_task_cs_in_debug.cancel()
|
db_cs.cancel()
|
||||||
|
|
||||||
log.runtime(f"Peers is {self._peers}")
|
|
||||||
|
|
||||||
# No more channels to other actors (at all) registered
|
|
||||||
# as connected.
|
|
||||||
if not self._peers:
|
|
||||||
log.runtime("Signalling no more peer channel connections")
|
|
||||||
self._no_more_peers.set()
|
|
||||||
|
|
||||||
# XXX: is this necessary (GC should do it)?
|
# XXX: is this necessary (GC should do it)?
|
||||||
if chan.connected():
|
if chan.connected():
|
||||||
|
@ -1228,6 +1231,10 @@ async def async_main(
|
||||||
and when cancelled effectively cancels the actor.
|
and when cancelled effectively cancels the actor.
|
||||||
|
|
||||||
'''
|
'''
|
||||||
|
# attempt to retrieve ``trio``'s sigint handler and stash it
|
||||||
|
# on our debugger lock state.
|
||||||
|
_debug.Lock._trio_handler = signal.getsignal(signal.SIGINT)
|
||||||
|
|
||||||
registered_with_arbiter = False
|
registered_with_arbiter = False
|
||||||
try:
|
try:
|
||||||
|
|
||||||
|
@ -1549,7 +1556,10 @@ async def process_messages(
|
||||||
partial(_invoke, actor, cid, chan, func, kwargs),
|
partial(_invoke, actor, cid, chan, func, kwargs),
|
||||||
name=funcname,
|
name=funcname,
|
||||||
)
|
)
|
||||||
except (RuntimeError, trio.MultiError):
|
except (
|
||||||
|
RuntimeError,
|
||||||
|
BaseExceptionGroup,
|
||||||
|
):
|
||||||
# avoid reporting a benign race condition
|
# avoid reporting a benign race condition
|
||||||
# during actor runtime teardown.
|
# during actor runtime teardown.
|
||||||
nursery_cancelled_before_task = True
|
nursery_cancelled_before_task = True
|
||||||
|
@ -1594,7 +1604,10 @@ async def process_messages(
|
||||||
# transport **was** disconnected
|
# transport **was** disconnected
|
||||||
return True
|
return True
|
||||||
|
|
||||||
except (Exception, trio.MultiError) as err:
|
except (
|
||||||
|
Exception,
|
||||||
|
BaseExceptionGroup,
|
||||||
|
) as err:
|
||||||
if nursery_cancelled_before_task:
|
if nursery_cancelled_before_task:
|
||||||
sn = actor._service_n
|
sn = actor._service_n
|
||||||
assert sn and sn.cancel_scope.cancel_called
|
assert sn and sn.cancel_scope.cancel_called
|
||||||
|
|
|
@ -31,6 +31,7 @@ from typing import (
|
||||||
)
|
)
|
||||||
from collections.abc import Awaitable
|
from collections.abc import Awaitable
|
||||||
|
|
||||||
|
from exceptiongroup import BaseExceptionGroup
|
||||||
import trio
|
import trio
|
||||||
from trio_typing import TaskStatus
|
from trio_typing import TaskStatus
|
||||||
|
|
||||||
|
@ -139,6 +140,7 @@ async def exhaust_portal(
|
||||||
If the main task is an async generator do our best to consume
|
If the main task is an async generator do our best to consume
|
||||||
what's left of it.
|
what's left of it.
|
||||||
'''
|
'''
|
||||||
|
__tracebackhide__ = True
|
||||||
try:
|
try:
|
||||||
log.debug(f"Waiting on final result from {actor.uid}")
|
log.debug(f"Waiting on final result from {actor.uid}")
|
||||||
|
|
||||||
|
@ -146,8 +148,11 @@ async def exhaust_portal(
|
||||||
# always be established and shutdown using a context manager api
|
# always be established and shutdown using a context manager api
|
||||||
final = await portal.result()
|
final = await portal.result()
|
||||||
|
|
||||||
except (Exception, trio.MultiError) as err:
|
except (
|
||||||
# we reraise in the parent task via a ``trio.MultiError``
|
Exception,
|
||||||
|
BaseExceptionGroup,
|
||||||
|
) as err:
|
||||||
|
# we reraise in the parent task via a ``BaseExceptionGroup``
|
||||||
return err
|
return err
|
||||||
except trio.Cancelled as err:
|
except trio.Cancelled as err:
|
||||||
# lol, of course we need this too ;P
|
# lol, of course we need this too ;P
|
||||||
|
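
Tying the pieces together: ``exhaust_portal()`` hands any failure back
as a value, ``cancel_on_completion()`` stores it in the shared ``errors``
dict, and the actor nursery finally re-raises everything as one group.
A condensed sketch of that collect-then-raise shape (names are
illustrative, this is not the nursery code itself):

    from exceptiongroup import BaseExceptionGroup


    def maybe_raise_collected(
        errors: dict[tuple[str, str], BaseException],
    ) -> None:
        # re-raise every collected subactor/parent failure at once,
        # trio-nursery style, instead of only the first one
        if errors:
            raise BaseExceptionGroup(
                'actor nursery errored with:',
                list(errors.values()),
            )


    errors: dict[tuple[str, str], BaseException] = {}
    errors[('worker0', 'some-uuid')] = RuntimeError('boom')
    try:
        maybe_raise_collected(errors)
    except BaseExceptionGroup as beg:
        assert len(beg.exceptions) == 1
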
@ -175,7 +180,7 @@ async def cancel_on_completion(
|
||||||
'''
|
'''
|
||||||
# if this call errors we store the exception for later
|
# if this call errors we store the exception for later
|
||||||
# in ``errors`` which will be reraised inside
|
# in ``errors`` which will be reraised inside
|
||||||
# a MultiError and we still send out a cancel request
|
# an exception group and we still send out a cancel request
|
||||||
result = await exhaust_portal(portal, actor)
|
result = await exhaust_portal(portal, actor)
|
||||||
if isinstance(result, Exception):
|
if isinstance(result, Exception):
|
||||||
errors[actor.uid] = result
|
errors[actor.uid] = result
|
||||||
|
|
|
@ -22,7 +22,6 @@ from typing import (
|
||||||
Optional,
|
Optional,
|
||||||
Any,
|
Any,
|
||||||
)
|
)
|
||||||
from collections.abc import Mapping
|
|
||||||
|
|
||||||
import trio
|
import trio
|
||||||
|
|
||||||
|
@ -46,30 +45,6 @@ def current_actor(err_on_no_runtime: bool = True) -> 'Actor': # type: ignore #
|
||||||
return _current_actor
|
return _current_actor
|
||||||
|
|
||||||
|
|
||||||
_conc_name_getters = {
|
|
||||||
'task': trio.lowlevel.current_task,
|
|
||||||
'actor': current_actor
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
class ActorContextInfo(Mapping):
|
|
||||||
"Dyanmic lookup for local actor and task names"
|
|
||||||
_context_keys = ('task', 'actor')
|
|
||||||
|
|
||||||
def __len__(self):
|
|
||||||
return len(self._context_keys)
|
|
||||||
|
|
||||||
def __iter__(self):
|
|
||||||
return iter(self._context_keys)
|
|
||||||
|
|
||||||
def __getitem__(self, key: str) -> str:
|
|
||||||
try:
|
|
||||||
return _conc_name_getters[key]().name # type: ignore
|
|
||||||
except RuntimeError:
|
|
||||||
# no local actor/task context initialized yet
|
|
||||||
return f'no {key} context'
|
|
||||||
|
|
||||||
|
|
||||||
def is_main_process() -> bool:
|
def is_main_process() -> bool:
|
||||||
"""Bool determining if this actor is running in the top-most process.
|
"""Bool determining if this actor is running in the top-most process.
|
||||||
"""
|
"""
|
||||||
|
|
|
@ -18,6 +18,7 @@
|
||||||
``trio`` inspired apis and helpers
|
``trio`` inspired apis and helpers
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
from contextlib import asynccontextmanager as acm
|
||||||
from functools import partial
|
from functools import partial
|
||||||
import inspect
|
import inspect
|
||||||
from typing import (
|
from typing import (
|
||||||
|
@ -27,8 +28,8 @@ from typing import (
|
||||||
import typing
|
import typing
|
||||||
import warnings
|
import warnings
|
||||||
|
|
||||||
|
from exceptiongroup import BaseExceptionGroup
|
||||||
import trio
|
import trio
|
||||||
from async_generator import asynccontextmanager
|
|
||||||
|
|
||||||
from ._debug import maybe_wait_for_debugger
|
from ._debug import maybe_wait_for_debugger
|
||||||
from ._state import current_actor, is_main_process
|
from ._state import current_actor, is_main_process
|
||||||
|
@ -82,7 +83,7 @@ class ActorNursery:
|
||||||
actor: Actor,
|
actor: Actor,
|
||||||
ria_nursery: trio.Nursery,
|
ria_nursery: trio.Nursery,
|
||||||
da_nursery: trio.Nursery,
|
da_nursery: trio.Nursery,
|
||||||
errors: dict[tuple[str, str], Exception],
|
errors: dict[tuple[str, str], BaseException],
|
||||||
) -> None:
|
) -> None:
|
||||||
# self.supervisor = supervisor # TODO
|
# self.supervisor = supervisor # TODO
|
||||||
self._actor: Actor = actor
|
self._actor: Actor = actor
|
||||||
|
@@ -294,13 +295,17 @@
         self._join_procs.set()


-@asynccontextmanager
+@acm
 async def _open_and_supervise_one_cancels_all_nursery(
     actor: Actor,

 ) -> typing.AsyncGenerator[ActorNursery, None]:

+    # TODO: yay or nay?
+    # __tracebackhide__ = True
+
     # the collection of errors retreived from spawned sub-actors
-    errors: dict[tuple[str, str], Exception] = {}
+    errors: dict[tuple[str, str], BaseException] = {}

     # This is the outermost level "deamon actor" nursery. It is awaited
     # **after** the below inner "run in actor nursery". This allows for

@@ -333,19 +338,17 @@ async def _open_and_supervise_one_cancels_all_nursery(
                     # after we yield upwards
                     yield anursery

+                    # When we didn't error in the caller's scope,
+                    # signal all process-monitor-tasks to conduct
+                    # the "hard join phase".
                     log.runtime(
                         f"Waiting on subactors {anursery._children} "
                         "to complete"
                     )
-
-                    # Last bit before first nursery block ends in the case
-                    # where we didn't error in the caller's scope
-
-                    # signal all process monitor tasks to conduct
-                    # hard join phase.
                     anursery._join_procs.set()

-                except BaseException as err:
+                except BaseException as inner_err:
+                    errors[actor.uid] = inner_err

                     # If we error in the root but the debugger is
                     # engaged we don't want to prematurely kill (and

@@ -362,49 +365,42 @@ async def _open_and_supervise_one_cancels_all_nursery(
                     # worry more are coming).
                     anursery._join_procs.set()

-                    try:
-                        # XXX: hypothetically an error could be
-                        # raised and then a cancel signal shows up
-                        # slightly after in which case the `else:`
-                        # block here might not complete? For now,
-                        # shield both.
-                        with trio.CancelScope(shield=True):
-                            etype = type(err)
-                            if etype in (
-                                trio.Cancelled,
-                                KeyboardInterrupt
-                            ) or (
-                                is_multi_cancelled(err)
-                            ):
-                                log.cancel(
-                                    f"Nursery for {current_actor().uid} "
-                                    f"was cancelled with {etype}")
-                            else:
-                                log.exception(
-                                    f"Nursery for {current_actor().uid} "
-                                    f"errored with {err}, ")
-
-                            # cancel all subactors
-                            await anursery.cancel()
-
-                    except trio.MultiError as merr:
-                        # If we receive additional errors while waiting on
-                        # remaining subactors that were cancelled,
-                        # aggregate those errors with the original error
-                        # that triggered this teardown.
-                        if err not in merr.exceptions:
-                            raise trio.MultiError(merr.exceptions + [err])
-                        else:
-                            raise
+                    # XXX: hypothetically an error could be
+                    # raised and then a cancel signal shows up
+                    # slightly after in which case the `else:`
+                    # block here might not complete? For now,
+                    # shield both.
+                    with trio.CancelScope(shield=True):
+                        etype = type(inner_err)
+                        if etype in (
+                            trio.Cancelled,
+                            KeyboardInterrupt
+                        ) or (
+                            is_multi_cancelled(inner_err)
+                        ):
+                            log.cancel(
+                                f"Nursery for {current_actor().uid} "
+                                f"was cancelled with {etype}")
+                        else:
+                            log.exception(
+                                f"Nursery for {current_actor().uid} "
+                                f"errored with")
+
+                        # cancel all subactors
+                        await anursery.cancel()

             # ria_nursery scope end

-        # XXX: do we need a `trio.Cancelled` catch here as well?
-        # this is the catch around the ``.run_in_actor()`` nursery
+        # TODO: this is the handler around the ``.run_in_actor()``
+        # nursery. Ideally we can drop this entirely in the future as
+        # the whole ``.run_in_actor()`` API should be built "on top of"
+        # this lower level spawn-request-cancel "daemon actor" API where
+        # a local in-actor task nursery is used with one-to-one task
+        # + `await Portal.run()` calls and the results/errors are
+        # handled directly (inline) and errors by the local nursery.
         except (
             Exception,
-            trio.MultiError,
+            BaseExceptionGroup,
             trio.Cancelled

         ) as err:

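``is_multi_cancelled()`` is defined elsewhere in tractor and is not shown in this diff; purely as a hedged sketch, a predicate of that flavour could be written against the PEP 654 ``split()`` API roughly like this (illustrative name, not the real helper):

    from exceptiongroup import BaseExceptionGroup
    import trio

    def only_cancelled(exc: BaseException) -> bool:
        # true when ``exc`` is a group whose leaf exceptions are all
        # ``trio.Cancelled``
        if isinstance(exc, BaseExceptionGroup):
            _matched, rest = exc.split(trio.Cancelled)
            return rest is None
        return False
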
@@ -436,18 +432,20 @@ async def _open_and_supervise_one_cancels_all_nursery(
                 with trio.CancelScope(shield=True):
                     await anursery.cancel()

-            # use `MultiError` as needed
+            # use `BaseExceptionGroup` as needed
             if len(errors) > 1:
-                raise trio.MultiError(tuple(errors.values()))
+                raise BaseExceptionGroup(
+                    'tractor.ActorNursery errored with',
+                    tuple(errors.values()),
+                )
             else:
                 raise list(errors.values())[0]

-        # ria_nursery scope end - nursery checkpoint
-
-    # after nursery exit
+        # da_nursery scope end - nursery checkpoint
+        # final exit


-@asynccontextmanager
+@acm
 async def open_nursery(
     **kwargs,

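From a caller's perspective the change means multiple collected subactor failures now surface as one ``BaseExceptionGroup``, while a single failure is still re-raised bare per the ``else:`` branch above. A rough usage sketch with a made-up failing task function (not taken from the repo):

    from exceptiongroup import BaseExceptionGroup
    import tractor
    import trio

    async def boom(msg: str) -> None:
        raise RuntimeError(msg)

    async def main():
        try:
            async with tractor.open_nursery() as n:
                await n.run_in_actor(boom, name='a', msg='first')
                await n.run_in_actor(boom, name='b', msg='second')

        except BaseExceptionGroup as beg:
            # each entry is expected to be a remote error wrapper
            for err in beg.exceptions:
                print(f'subactor failed: {err!r}')

    if __name__ == '__main__':
        trio.run(main)
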
@@ -18,12 +18,14 @@
 Log like a forester!

 """
+from collections.abc import Mapping
 import sys
 import logging
 import colorlog  # type: ignore
-from typing import Optional

-from ._state import ActorContextInfo
+import trio

+from ._state import current_actor


 _proj_name: str = 'tractor'

@@ -36,7 +38,8 @@ LOG_FORMAT = (
     # "{bold_white}{log_color}{asctime}{reset}"
     "{log_color}{asctime}{reset}"
     " {bold_white}{thin_white}({reset}"
-    "{thin_white}{actor}, {process}, {task}){reset}{bold_white}{thin_white})"
+    "{thin_white}{actor_name}[{actor_uid}], "
+    "{process}, {task}){reset}{bold_white}{thin_white})"
     " {reset}{log_color}[{reset}{bold_log_color}{levelname}{reset}{log_color}]"
     " {log_color}{name}"
     " {thin_white}{filename}{log_color}:{reset}{thin_white}{lineno}{log_color}"

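The two new fields ``{actor_name}`` and ``{actor_uid}`` are brace-style format fields that must exist as attributes on each log record, which is what supplying an ``extra`` mapping accomplishes. A minimal stdlib-only sketch of that mechanism (names here are illustrative):

    import logging

    fmt = "{asctime} ({actor_name}[{actor_uid}], {process}) [{levelname}] {message}"
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(fmt, style='{'))

    log = logging.getLogger('demo')
    log.addHandler(handler)
    log.setLevel(logging.INFO)

    # the extra mapping supplies the custom fields as record attributes
    log.info('hello', extra={'actor_name': 'root', 'actor_uid': 'a1b2c3'})
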
@@ -136,6 +139,37 @@ class StackLevelAdapter(logging.LoggerAdapter):
         )


+_conc_name_getters = {
+    'task': lambda: trio.lowlevel.current_task().name,
+    'actor': lambda: current_actor(),
+    'actor_name': lambda: current_actor().name,
+    'actor_uid': lambda: current_actor().uid[1][:6],
+}
+
+
+class ActorContextInfo(Mapping):
+    "Dyanmic lookup for local actor and task names"
+    _context_keys = (
+        'task',
+        'actor',
+        'actor_name',
+        'actor_uid',
+    )
+
+    def __len__(self):
+        return len(self._context_keys)
+
+    def __iter__(self):
+        return iter(self._context_keys)
+
+    def __getitem__(self, key: str) -> str:
+        try:
+            return _conc_name_getters[key]()
+        except RuntimeError:
+            # no local actor/task context initialized yet
+            return f'no {key} context'
+
+
 def get_logger(

     name: str = None,
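The reason ``ActorContextInfo`` can stay a plain ``Mapping`` is that stdlib ``logging`` iterates a ``LoggerAdapter``'s ``extra`` mapping while building every record, so ``__getitem__`` (and with it the ``_conc_name_getters`` lambdas) runs on each log call and the values stay live. A tiny stdlib-only demonstration of that behaviour (made-up names):

    from collections.abc import Mapping
    import itertools
    import logging

    _counter = itertools.count(1)

    class DynamicInfo(Mapping):
        _keys = ('tick',)

        def __len__(self):
            return len(self._keys)

        def __iter__(self):
            return iter(self._keys)

        def __getitem__(self, key):
            # evaluated once per emitted record
            return next(_counter)

    logging.basicConfig(format='{tick} {message}', style='{', level=logging.INFO)
    log = logging.LoggerAdapter(logging.getLogger('demo'), DynamicInfo())
    log.info('first')   # -> "1 first"
    log.info('second')  # -> "2 second"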