Increase debugger poll delay back to prior value

If we make it too fast, a nursery with debug-mode children can cancel
too quickly and cause some test failures. This is likely not a big deal
anyway, since the purpose of this poll/check is human interaction
and the current delay isn't really noticeable.

Decrease log levels in the debug module to avoid console noise when it is
in use, and add some more detailed comments around the new debugger lock
points.
agpl
Tyler Goodlet 2021-12-10 11:54:27 -05:00
parent 4f411d6926
commit a38a983225
3 changed files with 16 additions and 11 deletions

View File

@ -405,9 +405,11 @@ def test_multi_daemon_subactors(spawn, loglevel):
def test_multi_subactors_root_errors(spawn):
"""Multiple subactors, both erroring and breakpointing as well as
'''
Multiple subactors, both erroring and breakpointing as well as
a nested subactor erroring.
"""
'''
child = spawn('multi_subactor_root_errors')
# scan for the pdbpp prompt
@ -578,10 +580,6 @@ def test_different_debug_mode_per_actor(
assert "Attaching to pdb in crashed actor: ('debugged_boi'" in before
assert "RuntimeError" in before
# the crash boi should not have made a debugger request but
# instead crashed completely
assert "tractor._exceptions.RemoteActorError: ('crash_boi'" in before
child.sendline('c')
child.expect(pexpect.EOF)
@ -592,5 +590,9 @@ def test_different_debug_mode_per_actor(
# msg reported back from the debug mode actor is processed.
# assert "tractor._exceptions.RemoteActorError: ('debugged_boi'" in before
assert "tractor._exceptions.RemoteActorError: ('crash_boi'" in before
# the crash boi should not have made a debugger request but
# instead crashed completely
assert "tractor._exceptions.RemoteActorError: ('crash_boi'" in before
assert "RuntimeError" in before

View File

@ -252,7 +252,7 @@ async def _hijack_stdin_for_child(
# indicate to child that we've locked stdio
await ctx.started('Locked')
log.pdb(f"Actor {subactor_uid} ACQUIRED stdin hijack lock")
log.debug(f"Actor {subactor_uid} acquired stdin hijack lock")
# wait for unlock pdb by child
async with ctx.open_stream() as stream:
@ -577,7 +577,7 @@ async def acquire_debug_lock(
async def maybe_wait_for_debugger(
poll_steps: int = 2,
poll_delay: float = 0.01,
poll_delay: float = 0.1,
child_in_debug: bool = False,
) -> None:
@ -604,7 +604,7 @@ async def maybe_wait_for_debugger(
if _global_actor_in_debug:
sub_in_debug = tuple(_global_actor_in_debug)
log.warning(
log.debug(
'Root polling for debug')
with trio.CancelScope(shield=True):
@ -621,7 +621,7 @@ async def maybe_wait_for_debugger(
(debug_complete and
not debug_complete.is_set())
):
log.warning(
log.debug(
'Root has errored but pdb is in use by '
f'child {sub_in_debug}\n'
'Waiting on tty lock to release..')
@ -631,6 +631,6 @@ async def maybe_wait_for_debugger(
await trio.sleep(poll_delay)
continue
else:
log.warning(
log.debug(
'Root acquired TTY LOCK'
)

View File

@ -350,9 +350,12 @@ async def _open_and_supervise_one_cancels_all_nursery(
) as err:
# XXX: yet another guard before allowing the cancel
# sequence in case a (single) child is in debug.
await maybe_wait_for_debugger(
child_in_debug=anursery._at_least_one_child_in_debug
)
# If actor-local error was raised while waiting on
# ".run_in_actor()" actors then we also want to cancel all
# remaining sub-actors (due to our lone strategy: