Compare commits: 4d675deb24 ... 4bdf7f79f2

23 Commits (SHA1):

4bdf7f79f2
a123809558
2901274189
77d79a28b6
fd3a777cd6
36e0c3473c
4feba68409
46d7737522
0db3dda269
92c70f7986
5c571ec522
6ee07b21e4
62ea085f01
6888984e3f
ea2f5a5da3
0de779012d
352b8b866a
4b381ff656
751ba476f9
78305e8808
7d041e056b
8d2cf6c245
48d67f5902

@@ -10,7 +10,6 @@ TODO:
 - wonder if any of it'll work on OS X?
 
 """
-from functools import partial
 import itertools
 from typing import Optional
 import platform
@@ -27,10 +26,6 @@ from pexpect.exceptions import (
 from tractor._testing import (
     examples_dir,
 )
-from tractor.devx._debug import (
-    _pause_msg,
-    _crash_msg,
-)
 from conftest import (
     _ci_env,
 )
@@ -128,52 +123,20 @@ def expect(
         raise
 
 
-def in_prompt_msg(
-    prompt: str,
-    parts: list[str],
-
-    pause_on_false: bool = False,
-    print_prompt_on_false: bool = True,
-
-) -> bool:
-    '''
-    Predicate check if (the prompt's) std-streams output has all
-    `str`-parts in it.
-
-    Can be used in test asserts for bulk matching expected
-    log/REPL output for a given `pdb` interact point.
-
-    '''
-    for part in parts:
-        if part not in prompt:
-
-            if pause_on_false:
-                import pdbp
-                pdbp.set_trace()
-
-            if print_prompt_on_false:
-                print(prompt)
-
-            return False
-
-    return True
-
-
 def assert_before(
     child,
     patts: list[str],
 
-    **kwargs,
-
 ) -> None:
 
-    # as in before the prompt end
-    before: str = str(child.before.decode())
-    assert in_prompt_msg(
-        prompt=before,
-        parts=patts,
-
-        **kwargs
-    )
+    before = str(child.before.decode())
+
+    for patt in patts:
+        try:
+            assert patt in before
+        except AssertionError:
+            print(before)
+            raise
 
 
 @pytest.fixture(
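The `in_prompt_msg()`/`assert_before()` pair on the left-hand side of this hunk is what the remaining test hunks below lean on; a minimal, hypothetical usage sketch follows (the example script name and prompt regex are assumptions, and `spawn`/`assert_before` are this test module's own fixture and helper, not new API):

```python
# hypothetical sketch: bulk-match expected REPL output in a pexpect-driven
# test using the helpers shown in the hunk above (names assumed from the diff).
from tractor.devx._debug import _crash_msg

PROMPT = r"\(Pdb\+\)"  # assumed pdbp prompt pattern


def test_root_crash_shows_repl(spawn):  # `spawn` = this suite's pexpect fixture
    child = spawn('root_actor_error')   # example script name is an assumption
    child.expect(PROMPT)

    # every part must appear in the output captured before the prompt
    assert_before(
        child,
        [_crash_msg, "('root'"],
    )

    child.sendline('c')  # let the actor tree crash out and exit
```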
@@ -232,10 +195,7 @@ def test_root_actor_error(spawn, user_in_out):
     before = str(child.before.decode())
 
     # make sure expected logging and error arrives
-    assert in_prompt_msg(
-        before,
-        [_crash_msg, "('root'"]
-    )
+    assert "Attaching to pdb in crashed actor: ('root'" in before
     assert 'AssertionError' in before
 
     # send user command
@@ -372,10 +332,7 @@ def test_subactor_error(
     child.expect(PROMPT)
 
     before = str(child.before.decode())
-    assert in_prompt_msg(
-        before,
-        [_crash_msg, "('name_error'"]
-    )
+    assert "Attaching to pdb in crashed actor: ('name_error'" in before
 
     if do_next:
         child.sendline('n')
@@ -396,15 +353,9 @@ def test_subactor_error(
     before = str(child.before.decode())
 
     # root actor gets debugger engaged
-    assert in_prompt_msg(
-        before,
-        [_crash_msg, "('root'"]
-    )
+    assert "Attaching to pdb in crashed actor: ('root'" in before
     # error is a remote error propagated from the subactor
-    assert in_prompt_msg(
-        before,
-        [_crash_msg, "('name_error'"]
-    )
+    assert "RemoteActorError: ('name_error'" in before
 
     # another round
     if ctlc:
@@ -429,10 +380,7 @@ def test_subactor_breakpoint(
     child.expect(PROMPT)
 
     before = str(child.before.decode())
-    assert in_prompt_msg(
-        before,
-        [_pause_msg, "('breakpoint_forever'"]
-    )
+    assert "Attaching pdb to actor: ('breakpoint_forever'" in before
 
     # do some "next" commands to demonstrate recurrent breakpoint
     # entries
@@ -448,10 +396,7 @@ def test_subactor_breakpoint(
     child.sendline('continue')
     child.expect(PROMPT)
     before = str(child.before.decode())
-    assert in_prompt_msg(
-        before,
-        [_pause_msg, "('breakpoint_forever'"]
-    )
+    assert "Attaching pdb to actor: ('breakpoint_forever'" in before
 
     if ctlc:
         do_ctlc(child)
@@ -496,10 +441,7 @@ def test_multi_subactors(
     child.expect(PROMPT)
 
     before = str(child.before.decode())
-    assert in_prompt_msg(
-        before,
-        [_pause_msg, "('breakpoint_forever'"]
-    )
+    assert "Attaching pdb to actor: ('breakpoint_forever'" in before
 
     if ctlc:
         do_ctlc(child)
@@ -519,10 +461,7 @@ def test_multi_subactors(
     # first name_error failure
     child.expect(PROMPT)
     before = str(child.before.decode())
-    assert in_prompt_msg(
-        before,
-        [_crash_msg, "('name_error'"]
-    )
+    assert "Attaching to pdb in crashed actor: ('name_error'" in before
     assert "NameError" in before
 
     if ctlc:
@@ -548,10 +487,7 @@ def test_multi_subactors(
     child.sendline('c')
     child.expect(PROMPT)
     before = str(child.before.decode())
-    assert in_prompt_msg(
-        before,
-        [_pause_msg, "('breakpoint_forever'"]
-    )
+    assert "Attaching pdb to actor: ('breakpoint_forever'" in before
 
     if ctlc:
         do_ctlc(child)
@@ -591,12 +527,9 @@ def test_multi_subactors(
     child.expect(PROMPT)
     before = str(child.before.decode())
 
-    assert_before(
-        child, [
+    assert_before(child, [
         # debugger attaches to root
-        # "Attaching to pdb in crashed actor: ('root'",
-        _crash_msg,
-        "('root'",
+        "Attaching to pdb in crashed actor: ('root'",
 
         # expect a multierror with exceptions for each sub-actor
         "RemoteActorError: ('breakpoint_forever'",
@@ -604,8 +537,7 @@ def test_multi_subactors(
         "RemoteActorError: ('spawn_error'",
         "RemoteActorError: ('name_error_1'",
         'bdb.BdbQuit',
-    ]
-    )
+    ])
 
     if ctlc:
         do_ctlc(child)
@@ -642,22 +574,15 @@ def test_multi_daemon_subactors(
     # the root's tty lock first so anticipate either crash
     # message on the first entry.
 
-    bp_forev_parts = [_pause_msg, "('bp_forever'"]
-    bp_forev_in_msg = partial(
-        in_prompt_msg,
-        parts=bp_forev_parts,
-    )
+    bp_forever_msg = "Attaching pdb to actor: ('bp_forever'"
 
     name_error_msg = "NameError: name 'doggypants' is not defined"
-    name_error_parts = [name_error_msg]
 
     before = str(child.before.decode())
-    if bp_forev_in_msg(prompt=before):
-        next_parts = name_error_parts
+    if bp_forever_msg in before:
+        next_msg = name_error_msg
 
     elif name_error_msg in before:
-        next_parts = bp_forev_parts
+        next_msg = bp_forever_msg
 
     else:
         raise ValueError("Neither log msg was found !?")
@@ -674,10 +599,7 @@ def test_multi_daemon_subactors(
 
     child.sendline('c')
     child.expect(PROMPT)
-    assert_before(
-        child,
-        next_parts,
-    )
+    assert_before(child, [next_msg])
 
     # XXX: hooray the root clobbering the child here was fixed!
     # IMO, this demonstrates the true power of SC system design.
@@ -701,15 +623,9 @@ def test_multi_daemon_subactors(
     child.expect(PROMPT)
 
     try:
-        assert_before(
-            child,
-            bp_forev_parts,
-        )
+        assert_before(child, [bp_forever_msg])
     except AssertionError:
-        assert_before(
-            child,
-            name_error_parts,
-        )
+        assert_before(child, [name_error_msg])
 
     else:
         if ctlc:
@@ -721,10 +637,7 @@ def test_multi_daemon_subactors(
 
     child.sendline('c')
     child.expect(PROMPT)
-    assert_before(
-        child,
-        name_error_parts,
-    )
+    assert_before(child, [name_error_msg])
 
     # wait for final error in root
     # where it crashs with boxed error
@@ -734,7 +647,7 @@ def test_multi_daemon_subactors(
             child.expect(PROMPT)
             assert_before(
                 child,
-                bp_forev_parts
+                [bp_forever_msg]
             )
         except AssertionError:
             break
@@ -743,9 +656,7 @@ def test_multi_daemon_subactors(
         child,
         [
             # boxed error raised in root task
-            # "Attaching to pdb in crashed actor: ('root'",
-            _crash_msg,
-            "('root'",
+            "Attaching to pdb in crashed actor: ('root'",
             "_exceptions.RemoteActorError: ('name_error'",
         ]
     )
@@ -859,7 +770,7 @@ def test_multi_nested_subactors_error_through_nurseries(
 
     child = spawn('multi_nested_subactors_error_up_through_nurseries')
 
-    # timed_out_early: bool = False
+    timed_out_early: bool = False
 
     for send_char in itertools.cycle(['c', 'q']):
         try:
@@ -960,14 +871,11 @@ def test_root_nursery_cancels_before_child_releases_tty_lock(
 
     if not timed_out_early:
         before = str(child.before.decode())
-        assert_before(
-            child,
-            [
+        assert_before(child, [
             "tractor._exceptions.RemoteActorError: ('spawner0'",
             "tractor._exceptions.RemoteActorError: ('name_error'",
             "NameError: name 'doggypants' is not defined",
-            ],
-        )
+        ])
 
 
 def test_root_cancels_child_context_during_startup(
@@ -1001,10 +909,8 @@ def test_different_debug_mode_per_actor(
 
     # only one actor should enter the debugger
     before = str(child.before.decode())
-    assert in_prompt_msg(
-        before,
-        [_crash_msg, "('debugged_boi'", "RuntimeError"],
-    )
+    assert "Attaching to pdb in crashed actor: ('debugged_boi'" in before
+    assert "RuntimeError" in before
 
     if ctlc:
         do_ctlc(child)
@@ -868,9 +868,6 @@ class Context:
 
         # TODO: maybe we should also call `._res_scope.cancel()` if it
         # exists to support cancelling any drain loop hangs?
-        # NOTE: this usage actually works here B)
-        # from .devx._debug import breakpoint
-        # await breakpoint()
 
     # TODO: add to `Channel`?
     @property
@@ -1,19 +1,18 @@
 # tractor: structured concurrent "actors".
 # Copyright 2018-eternity Tyler Goodlet.
 
-# This program is free software: you can redistribute it and/or
-# modify it under the terms of the GNU Affero General Public License
-# as published by the Free Software Foundation, either version 3 of
-# the License, or (at your option) any later version.
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
 
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Affero General Public License for more details.
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
 
-# You should have received a copy of the GNU Affero General Public
-# License along with this program. If not, see
-# <https://www.gnu.org/licenses/>.
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 """
 Multi-core debugging for da peeps!
@@ -21,19 +20,14 @@ Multi-core debugging for da peeps!
 """
 from __future__ import annotations
 import bdb
-from contextlib import (
-    asynccontextmanager as acm,
-    contextmanager as cm,
-    nullcontext,
-)
+import os
+import sys
+import signal
 from functools import (
     partial,
     cached_property,
 )
-import os
-import signal
-import sys
-import traceback
+from contextlib import asynccontextmanager as acm
 from typing import (
     Any,
     Callable,
@@ -45,31 +39,27 @@ from types import FrameType
 import pdbp
 import tractor
 import trio
-from trio.lowlevel import current_task
 from trio_typing import (
     TaskStatus,
     # Task,
 )
 
-from ..log import get_logger
-from .._state import (
-    current_actor,
+from .log import get_logger
+from ._discovery import get_root
+from ._state import (
     is_root_process,
     debug_mode,
 )
-from .._exceptions import (
+from ._exceptions import (
     is_multi_cancelled,
     ContextCancelled,
 )
-from .._ipc import Channel
+from ._ipc import Channel
 
 log = get_logger(__name__)
 
 
-__all__ = [
-    'breakpoint',
-    'post_mortem',
-]
+__all__ = ['breakpoint', 'post_mortem']
 
 
 class Lock:
@@ -242,7 +232,7 @@ async def _acquire_debug_lock_from_root_task(
     to the ``pdb`` repl.
 
     '''
-    task_name: str = current_task().name
+    task_name: str = trio.lowlevel.current_task().name
     we_acquired: bool = False
 
     log.runtime(
@@ -327,13 +317,14 @@ async def lock_tty_for_child(
     highly reliable at releasing the mutex complete!
 
     '''
-    task_name: str = current_task().name
+    task_name = trio.lowlevel.current_task().name
 
     if tuple(subactor_uid) in Lock._blocked:
         log.warning(
             f'Actor {subactor_uid} is blocked from acquiring debug lock\n'
             f"remote task: {task_name}:{subactor_uid}"
         )
-        ctx._enter_debugger_on_cancel: bool = False
+        ctx._enter_debugger_on_cancel = False
         await ctx.cancel(f'Debug lock blocked for {subactor_uid}')
         return 'pdb_lock_blocked'
@@ -384,14 +375,12 @@ async def wait_for_parent_stdin_hijack(
 
     This function is used by any sub-actor to acquire mutex access to
     the ``pdb`` REPL and thus the root's TTY for interactive debugging
-    (see below inside ``pause()``). It can be used to ensure that
+    (see below inside ``_pause()``). It can be used to ensure that
     an intermediate nursery-owning actor does not clobber its children
     if they are in debug (see below inside
     ``maybe_wait_for_debugger()``).
 
     '''
-    from .._discovery import get_root
-
     with trio.CancelScope(shield=True) as cs:
         Lock._debugger_request_cs = cs
 
@@ -401,7 +390,7 @@ async def wait_for_parent_stdin_hijack(
             # this syncs to child's ``Context.started()`` call.
             async with portal.open_context(
 
-                lock_tty_for_child,
+                tractor._debug.lock_tty_for_child,
                 subactor_uid=actor_uid,
 
             ) as (ctx, val):
@@ -410,13 +399,11 @@ async def wait_for_parent_stdin_hijack(
                 assert val == 'Locked'
 
                 async with ctx.open_stream() as stream:
-                    try:
                     # unblock local caller
 
+                    try:
                         assert Lock.local_pdb_complete
                         task_status.started(cs)
 
-                        # wait for local task to exit and
-                        # release the REPL
                         await Lock.local_pdb_complete.wait()
 
                     finally:
@ -454,6 +441,171 @@ def mk_mpdb() -> tuple[MultiActorPdb, Callable]:
|
||||||
return pdb, Lock.unshield_sigint
|
return pdb, Lock.unshield_sigint
|
||||||
|
|
||||||
|
|
||||||
|
async def _pause(
|
||||||
|
|
||||||
|
debug_func: Callable | None = None,
|
||||||
|
release_lock_signal: trio.Event | None = None,
|
||||||
|
|
||||||
|
# TODO:
|
||||||
|
# shield: bool = False
|
||||||
|
task_status: TaskStatus[trio.Event] = trio.TASK_STATUS_IGNORED
|
||||||
|
|
||||||
|
) -> None:
|
||||||
|
'''
|
||||||
|
A pause point (more commonly known as a "breakpoint") interrupt
|
||||||
|
instruction for engaging a blocking debugger instance to
|
||||||
|
conduct manual console-based-REPL-interaction from within
|
||||||
|
`tractor`'s async runtime, normally from some single-threaded
|
||||||
|
and currently executing actor-hosted-`trio`-task in some
|
||||||
|
(remote) process.
|
||||||
|
|
||||||
|
NOTE: we use the semantics "pause" since it better encompasses
|
||||||
|
the entirety of the necessary global-runtime-state-mutation any
|
||||||
|
actor-task must access and lock in order to get full isolated
|
||||||
|
control over the process tree's root TTY:
|
||||||
|
https://en.wikipedia.org/wiki/Breakpoint
|
||||||
|
|
||||||
|
'''
|
||||||
|
__tracebackhide__ = True
|
||||||
|
actor = tractor.current_actor()
|
||||||
|
pdb, undo_sigint = mk_mpdb()
|
||||||
|
task_name = trio.lowlevel.current_task().name
|
||||||
|
|
||||||
|
# TODO: is it possible to debug a trio.Cancelled except block?
|
||||||
|
# right now it seems like we can kinda do with by shielding
|
||||||
|
# around ``tractor.breakpoint()`` but not if we move the shielded
|
||||||
|
# scope here???
|
||||||
|
# with trio.CancelScope(shield=shield):
|
||||||
|
# await trio.lowlevel.checkpoint()
|
||||||
|
|
||||||
|
if (
|
||||||
|
not Lock.local_pdb_complete
|
||||||
|
or Lock.local_pdb_complete.is_set()
|
||||||
|
):
|
||||||
|
Lock.local_pdb_complete = trio.Event()
|
||||||
|
|
||||||
|
# TODO: need a more robust check for the "root" actor
|
||||||
|
if (
|
||||||
|
not is_root_process()
|
||||||
|
and actor._parent_chan # a connected child
|
||||||
|
):
|
||||||
|
|
||||||
|
if Lock.local_task_in_debug:
|
||||||
|
|
||||||
|
# Recurrence entry case: this task already has the lock and
|
||||||
|
# is likely recurrently entering a breakpoint
|
||||||
|
if Lock.local_task_in_debug == task_name:
|
||||||
|
# noop on recurrent entry case but we want to trigger
|
||||||
|
# a checkpoint to allow other actors error-propagate and
|
||||||
|
# potetially avoid infinite re-entries in some subactor.
|
||||||
|
await trio.lowlevel.checkpoint()
|
||||||
|
return
|
||||||
|
|
||||||
|
# if **this** actor is already in debug mode block here
|
||||||
|
# waiting for the control to be released - this allows
|
||||||
|
# support for recursive entries to `tractor.breakpoint()`
|
||||||
|
log.warning(f"{actor.uid} already has a debug lock, waiting...")
|
||||||
|
|
||||||
|
await Lock.local_pdb_complete.wait()
|
||||||
|
await trio.sleep(0.1)
|
||||||
|
|
||||||
|
# mark local actor as "in debug mode" to avoid recurrent
|
||||||
|
# entries/requests to the root process
|
||||||
|
Lock.local_task_in_debug = task_name
|
||||||
|
|
||||||
|
# this **must** be awaited by the caller and is done using the
|
||||||
|
# root nursery so that the debugger can continue to run without
|
||||||
|
# being restricted by the scope of a new task nursery.
|
||||||
|
|
||||||
|
# TODO: if we want to debug a trio.Cancelled triggered exception
|
||||||
|
# we have to figure out how to avoid having the service nursery
|
||||||
|
# cancel on this task start? I *think* this works below:
|
||||||
|
# ```python
|
||||||
|
# actor._service_n.cancel_scope.shield = shield
|
||||||
|
# ```
|
||||||
|
# but not entirely sure if that's a sane way to implement it?
|
||||||
|
try:
|
||||||
|
with trio.CancelScope(shield=True):
|
||||||
|
await actor._service_n.start(
|
||||||
|
wait_for_parent_stdin_hijack,
|
||||||
|
actor.uid,
|
||||||
|
)
|
||||||
|
Lock.repl = pdb
|
||||||
|
except RuntimeError:
|
||||||
|
Lock.release()
|
||||||
|
|
||||||
|
if actor._cancel_called:
|
||||||
|
# service nursery won't be usable and we
|
||||||
|
# don't want to lock up the root either way since
|
||||||
|
# we're in (the midst of) cancellation.
|
||||||
|
return
|
||||||
|
|
||||||
|
raise
|
||||||
|
|
||||||
|
elif is_root_process():
|
||||||
|
|
||||||
|
# we also wait in the root-parent for any child that
|
||||||
|
# may have the tty locked prior
|
||||||
|
# TODO: wait, what about multiple root tasks acquiring it though?
|
||||||
|
if Lock.global_actor_in_debug == actor.uid:
|
||||||
|
# re-entrant root process already has it: noop.
|
||||||
|
return
|
||||||
|
|
||||||
|
# XXX: since we need to enter pdb synchronously below,
|
||||||
|
# we have to release the lock manually from pdb completion
|
||||||
|
# callbacks. Can't think of a nicer way then this atm.
|
||||||
|
if Lock._debug_lock.locked():
|
||||||
|
log.warning(
|
||||||
|
'Root actor attempting to shield-acquire active tty lock'
|
||||||
|
f' owned by {Lock.global_actor_in_debug}')
|
||||||
|
|
||||||
|
# must shield here to avoid hitting a ``Cancelled`` and
|
||||||
|
# a child getting stuck bc we clobbered the tty
|
||||||
|
with trio.CancelScope(shield=True):
|
||||||
|
await Lock._debug_lock.acquire()
|
||||||
|
else:
|
||||||
|
# may be cancelled
|
||||||
|
await Lock._debug_lock.acquire()
|
||||||
|
|
||||||
|
Lock.global_actor_in_debug = actor.uid
|
||||||
|
Lock.local_task_in_debug = task_name
|
||||||
|
Lock.repl = pdb
|
||||||
|
|
||||||
|
try:
|
||||||
|
# breakpoint()
|
||||||
|
if debug_func is None:
|
||||||
|
# assert release_lock_signal, (
|
||||||
|
# 'Must pass `release_lock_signal: trio.Event` if no '
|
||||||
|
# 'trace func provided!'
|
||||||
|
# )
|
||||||
|
print(f"{actor.uid} ENTERING WAIT")
|
||||||
|
task_status.started()
|
||||||
|
|
||||||
|
# with trio.CancelScope(shield=True):
|
||||||
|
# await release_lock_signal.wait()
|
||||||
|
|
||||||
|
else:
|
||||||
|
# block here one (at the appropriate frame *up*) where
|
||||||
|
# ``breakpoint()`` was awaited and begin handling stdio.
|
||||||
|
log.debug("Entering the synchronous world of pdb")
|
||||||
|
debug_func(actor, pdb)
|
||||||
|
|
||||||
|
except bdb.BdbQuit:
|
||||||
|
Lock.release()
|
||||||
|
raise
|
||||||
|
|
||||||
|
# XXX: apparently we can't do this without showing this frame
|
||||||
|
# in the backtrace on first entry to the REPL? Seems like an odd
|
||||||
|
# behaviour that should have been fixed by now. This is also why
|
||||||
|
# we scrapped all the @cm approaches that were tried previously.
|
||||||
|
# finally:
|
||||||
|
# __tracebackhide__ = True
|
||||||
|
# # frame = sys._getframe()
|
||||||
|
# # last_f = frame.f_back
|
||||||
|
# # last_f.f_globals['__tracebackhide__'] = True
|
||||||
|
# # signal.signal = pdbp.hideframe(signal.signal)
|
||||||
|
|
||||||
|
|
||||||
def shield_sigint_handler(
|
def shield_sigint_handler(
|
||||||
signum: int,
|
signum: int,
|
||||||
frame: 'frame', # type: ignore # noqa
|
frame: 'frame', # type: ignore # noqa
|
||||||
|
@@ -473,7 +625,7 @@ def shield_sigint_handler(
 
     uid_in_debug: tuple[str, str] | None = Lock.global_actor_in_debug
 
-    actor = current_actor()
+    actor = tractor.current_actor()
     # print(f'{actor.uid} in HANDLER with ')
 
     def do_cancel():
@@ -612,62 +764,27 @@ def shield_sigint_handler(
     # https://github.com/prompt-toolkit/python-prompt-toolkit/blob/c2c6af8a0308f9e5d7c0e28cb8a02963fe0ce07a/prompt_toolkit/patch_stdout.py
 
 
-_pause_msg: str = 'Attaching to pdb REPL in actor'
-
-
 def _set_trace(
     actor: tractor.Actor | None = None,
     pdb: MultiActorPdb | None = None,
-    shield: bool = False,
-
-    extra_frames_up_when_async: int = 1,
 ):
-    __tracebackhide__: bool = True
-    actor: tractor.Actor = actor or current_actor()
+    __tracebackhide__ = True
+    actor: tractor.Actor = actor or tractor.current_actor()
 
-    # always start 1 level up from THIS in user code.
-    frame: FrameType|None
-    if frame := sys._getframe():
+    # start 2 levels up in user code
+    frame: FrameType | None = sys._getframe()
+    if frame:
         frame: FrameType = frame.f_back  # type: ignore
 
     if (
         frame
-        and (
-            pdb
-            and actor is not None
-        )
-        # or shield
+        and pdb
+        and actor is not None
     ):
-        msg: str = _pause_msg
-        if shield:
-            # log.warning(
-            msg = (
-                '\n\n'
-                ' ------ - ------\n'
-                'Debugger invoked with `shield=True` so an extra\n'
-                '`trio.CancelScope.__exit__()` frame is shown..\n'
-                '\n'
-                'Try going up one frame to see your pause point!\n'
-                '\n'
-                ' SORRY we need to fix this!\n'
-                ' ------ - ------\n\n'
-            ) + msg
-
-        # pdbp.set_trace()
-        # TODO: maybe print the actor supervion tree up to the
-        # root here? Bo
-        log.pdb(
-            f'{msg}\n'
-            '|\n'
-            f'|_ {actor.uid}\n'
-        )
+        log.pdb(f"\nAttaching pdb to actor: {actor.uid}\n")
         # no f!#$&* idea, but when we're in async land
         # we need 2x frames up?
-        for i in range(extra_frames_up_when_async):
-            frame: FrameType = frame.f_back
-            log.debug(
-                f'Going up frame {i} -> {frame}\n'
-            )
+        frame = frame.f_back
 
     else:
         pdb, undo_sigint = mk_mpdb()
@ -677,278 +794,18 @@ def _set_trace(
|
||||||
Lock.local_task_in_debug = 'sync'
|
Lock.local_task_in_debug = 'sync'
|
||||||
|
|
||||||
pdb.set_trace(frame=frame)
|
pdb.set_trace(frame=frame)
|
||||||
|
# undo_
|
||||||
|
|
||||||
|
|
||||||
async def _pause(
|
# TODO: allow pausing from sync code, normally by remapping
|
||||||
|
# python's builtin breakpoint() hook to this runtime aware version.
|
||||||
debug_func: Callable = _set_trace,
|
|
||||||
release_lock_signal: trio.Event | None = None,
|
|
||||||
|
|
||||||
# TODO: allow caller to pause despite task cancellation,
|
|
||||||
# exactly the same as wrapping with:
|
|
||||||
# with CancelScope(shield=True):
|
|
||||||
# await pause()
|
|
||||||
# => the REMAINING ISSUE is that the scope's .__exit__() frame
|
|
||||||
# is always show in the debugger on entry.. and there seems to
|
|
||||||
# be no way to override it?..
|
|
||||||
# shield: bool = False,
|
|
||||||
|
|
||||||
shield: bool = False,
|
|
||||||
task_status: TaskStatus[trio.Event] = trio.TASK_STATUS_IGNORED
|
|
||||||
|
|
||||||
) -> None:
|
|
||||||
'''
|
|
||||||
Inner impl for `pause()` to avoid the `trio.CancelScope.__exit__()`
|
|
||||||
stack frame when not shielded (since apparently i can't figure out
|
|
||||||
how to hide it using the normal mechanisms..)
|
|
||||||
|
|
||||||
Hopefully we won't need this in the long run.
|
|
||||||
|
|
||||||
'''
|
|
||||||
__tracebackhide__: bool = True
|
|
||||||
actor = current_actor()
|
|
||||||
pdb, undo_sigint = mk_mpdb()
|
|
||||||
task_name: str = trio.lowlevel.current_task().name
|
|
||||||
|
|
||||||
if (
|
|
||||||
not Lock.local_pdb_complete
|
|
||||||
or Lock.local_pdb_complete.is_set()
|
|
||||||
):
|
|
||||||
Lock.local_pdb_complete = trio.Event()
|
|
||||||
|
|
||||||
debug_func = partial(
|
|
||||||
debug_func,
|
|
||||||
)
|
|
||||||
|
|
||||||
# TODO: need a more robust check for the "root" actor
|
|
||||||
if (
|
|
||||||
not is_root_process()
|
|
||||||
and actor._parent_chan # a connected child
|
|
||||||
):
|
|
||||||
|
|
||||||
if Lock.local_task_in_debug:
|
|
||||||
|
|
||||||
# Recurrence entry case: this task already has the lock and
|
|
||||||
# is likely recurrently entering a breakpoint
|
|
||||||
if Lock.local_task_in_debug == task_name:
|
|
||||||
# noop on recurrent entry case but we want to trigger
|
|
||||||
# a checkpoint to allow other actors error-propagate and
|
|
||||||
# potetially avoid infinite re-entries in some subactor.
|
|
||||||
await trio.lowlevel.checkpoint()
|
|
||||||
return
|
|
||||||
|
|
||||||
# if **this** actor is already in debug mode block here
|
|
||||||
# waiting for the control to be released - this allows
|
|
||||||
# support for recursive entries to `tractor.breakpoint()`
|
|
||||||
log.warning(f"{actor.uid} already has a debug lock, waiting...")
|
|
||||||
|
|
||||||
await Lock.local_pdb_complete.wait()
|
|
||||||
await trio.sleep(0.1)
|
|
||||||
|
|
||||||
# mark local actor as "in debug mode" to avoid recurrent
|
|
||||||
# entries/requests to the root process
|
|
||||||
Lock.local_task_in_debug = task_name
|
|
||||||
|
|
||||||
# this **must** be awaited by the caller and is done using the
|
|
||||||
# root nursery so that the debugger can continue to run without
|
|
||||||
# being restricted by the scope of a new task nursery.
|
|
||||||
|
|
||||||
# TODO: if we want to debug a trio.Cancelled triggered exception
|
|
||||||
# we have to figure out how to avoid having the service nursery
|
|
||||||
# cancel on this task start? I *think* this works below:
|
|
||||||
# ```python
|
|
||||||
# actor._service_n.cancel_scope.shield = shield
|
|
||||||
# ```
|
|
||||||
# but not entirely sure if that's a sane way to implement it?
|
|
||||||
try:
|
|
||||||
with trio.CancelScope(shield=True):
|
|
||||||
await actor._service_n.start(
|
|
||||||
wait_for_parent_stdin_hijack,
|
|
||||||
actor.uid,
|
|
||||||
)
|
|
||||||
Lock.repl = pdb
|
|
||||||
except RuntimeError:
|
|
||||||
Lock.release()
|
|
||||||
|
|
||||||
if actor._cancel_called:
|
|
||||||
# service nursery won't be usable and we
|
|
||||||
# don't want to lock up the root either way since
|
|
||||||
# we're in (the midst of) cancellation.
|
|
||||||
return
|
|
||||||
|
|
||||||
raise
|
|
||||||
|
|
||||||
elif is_root_process():
|
|
||||||
|
|
||||||
# we also wait in the root-parent for any child that
|
|
||||||
# may have the tty locked prior
|
|
||||||
# TODO: wait, what about multiple root tasks acquiring it though?
|
|
||||||
if Lock.global_actor_in_debug == actor.uid:
|
|
||||||
# re-entrant root process already has it: noop.
|
|
||||||
return
|
|
||||||
|
|
||||||
# XXX: since we need to enter pdb synchronously below,
|
|
||||||
# we have to release the lock manually from pdb completion
|
|
||||||
# callbacks. Can't think of a nicer way then this atm.
|
|
||||||
if Lock._debug_lock.locked():
|
|
||||||
log.warning(
|
|
||||||
'Root actor attempting to shield-acquire active tty lock'
|
|
||||||
f' owned by {Lock.global_actor_in_debug}')
|
|
||||||
|
|
||||||
# must shield here to avoid hitting a ``Cancelled`` and
|
|
||||||
# a child getting stuck bc we clobbered the tty
|
|
||||||
with trio.CancelScope(shield=True):
|
|
||||||
await Lock._debug_lock.acquire()
|
|
||||||
else:
|
|
||||||
# may be cancelled
|
|
||||||
await Lock._debug_lock.acquire()
|
|
||||||
|
|
||||||
Lock.global_actor_in_debug = actor.uid
|
|
||||||
Lock.local_task_in_debug = task_name
|
|
||||||
Lock.repl = pdb
|
|
||||||
|
|
||||||
try:
|
|
||||||
# TODO: do we want to support using this **just** for the
|
|
||||||
# locking / common code (prolly to help address #320)?
|
|
||||||
#
|
|
||||||
# if debug_func is None:
|
|
||||||
# assert release_lock_signal, (
|
|
||||||
# 'Must pass `release_lock_signal: trio.Event` if no '
|
|
||||||
# 'trace func provided!'
|
|
||||||
# )
|
|
||||||
# print(f"{actor.uid} ENTERING WAIT")
|
|
||||||
# with trio.CancelScope(shield=True):
|
|
||||||
# await release_lock_signal.wait()
|
|
||||||
|
|
||||||
# else:
|
|
||||||
# block here one (at the appropriate frame *up*) where
|
|
||||||
# ``breakpoint()`` was awaited and begin handling stdio.
|
|
||||||
log.debug('Entering sync world of the `pdb` REPL..')
|
|
||||||
try:
|
|
||||||
debug_func(
|
|
||||||
actor,
|
|
||||||
pdb,
|
|
||||||
extra_frames_up_when_async=2,
|
|
||||||
shield=shield,
|
|
||||||
)
|
|
||||||
except BaseException:
|
|
||||||
log.exception(
|
|
||||||
'Failed to invoke internal `debug_func = '
|
|
||||||
f'{debug_func.func.__name__}`\n'
|
|
||||||
)
|
|
||||||
raise
|
|
||||||
|
|
||||||
except bdb.BdbQuit:
|
|
||||||
Lock.release()
|
|
||||||
raise
|
|
||||||
|
|
||||||
except BaseException:
|
|
||||||
log.exception(
|
|
||||||
'Failed to engage debugger via `_pause()` ??\n'
|
|
||||||
)
|
|
||||||
raise
|
|
||||||
|
|
||||||
# XXX: apparently we can't do this without showing this frame
|
|
||||||
# in the backtrace on first entry to the REPL? Seems like an odd
|
|
||||||
# behaviour that should have been fixed by now. This is also why
|
|
||||||
# we scrapped all the @cm approaches that were tried previously.
|
|
||||||
# finally:
|
|
||||||
# __tracebackhide__ = True
|
|
||||||
# # frame = sys._getframe()
|
|
||||||
# # last_f = frame.f_back
|
|
||||||
# # last_f.f_globals['__tracebackhide__'] = True
|
|
||||||
# # signal.signal = pdbp.hideframe(signal.signal)
|
|
||||||
|
|
||||||
|
|
||||||
async def pause(
|
|
||||||
|
|
||||||
debug_func: Callable = _set_trace,
|
|
||||||
release_lock_signal: trio.Event | None = None,
|
|
||||||
|
|
||||||
# TODO: allow caller to pause despite task cancellation,
|
|
||||||
# exactly the same as wrapping with:
|
|
||||||
# with CancelScope(shield=True):
|
|
||||||
# await pause()
|
|
||||||
# => the REMAINING ISSUE is that the scope's .__exit__() frame
|
|
||||||
# is always show in the debugger on entry.. and there seems to
|
|
||||||
# be no way to override it?..
|
|
||||||
# shield: bool = False,
|
|
||||||
|
|
||||||
shield: bool = False,
|
|
||||||
task_status: TaskStatus[trio.Event] = trio.TASK_STATUS_IGNORED
|
|
||||||
|
|
||||||
) -> None:
|
|
||||||
'''
|
|
||||||
A pause point (more commonly known as a "breakpoint") interrupt
|
|
||||||
instruction for engaging a blocking debugger instance to
|
|
||||||
conduct manual console-based-REPL-interaction from within
|
|
||||||
`tractor`'s async runtime, normally from some single-threaded
|
|
||||||
and currently executing actor-hosted-`trio`-task in some
|
|
||||||
(remote) process.
|
|
||||||
|
|
||||||
NOTE: we use the semantics "pause" since it better encompasses
|
|
||||||
the entirety of the necessary global-runtime-state-mutation any
|
|
||||||
actor-task must access and lock in order to get full isolated
|
|
||||||
control over the process tree's root TTY:
|
|
||||||
https://en.wikipedia.org/wiki/Breakpoint
|
|
||||||
|
|
||||||
'''
|
|
||||||
__tracebackhide__: bool = True
|
|
||||||
|
|
||||||
if shield:
|
|
||||||
# NOTE XXX: even hard coding this inside the `class CancelScope:`
|
|
||||||
# doesn't seem to work for me!?
|
|
||||||
# ^ XXX ^
|
|
||||||
|
|
||||||
# def _exit(self, *args, **kwargs):
|
|
||||||
# __tracebackhide__: bool = True
|
|
||||||
# super().__exit__(*args, **kwargs)
|
|
||||||
|
|
||||||
trio.CancelScope.__enter__.__tracebackhide__ = True
|
|
||||||
trio.CancelScope.__exit__.__tracebackhide__ = True
|
|
||||||
|
|
||||||
# import types
|
|
||||||
# with trio.CancelScope(shield=shield) as cs:
|
|
||||||
# cs.__exit__ = types.MethodType(_exit, cs)
|
|
||||||
# cs.__exit__.__tracebackhide__ = True
|
|
||||||
|
|
||||||
with trio.CancelScope(shield=shield) as cs:
|
|
||||||
# setattr(cs.__exit__.__func__, '__tracebackhide__', True)
|
|
||||||
# setattr(cs.__enter__.__func__, '__tracebackhide__', True)
|
|
||||||
|
|
||||||
# NOTE: so the caller can always cancel even if shielded
|
|
||||||
task_status.started(cs)
|
|
||||||
return await _pause(
|
|
||||||
debug_func=debug_func,
|
|
||||||
release_lock_signal=release_lock_signal,
|
|
||||||
shield=True,
|
|
||||||
task_status=task_status,
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
return await _pause(
|
|
||||||
debug_func=debug_func,
|
|
||||||
release_lock_signal=release_lock_signal,
|
|
||||||
shield=False,
|
|
||||||
task_status=task_status,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# TODO: allow pausing from sync code.
|
|
||||||
# normally by remapping python's builtin breakpoint() hook to this
|
|
||||||
# runtime aware version which takes care of all .
|
|
||||||
def pause_from_sync() -> None:
|
def pause_from_sync() -> None:
|
||||||
print("ENTER SYNC PAUSE")
|
print("ENTER SYNC PAUSE")
|
||||||
actor: tractor.Actor = current_actor(
|
|
||||||
err_on_no_runtime=False,
|
|
||||||
)
|
|
||||||
if actor:
|
|
||||||
try:
|
try:
|
||||||
import greenback
|
import greenback
|
||||||
# __tracebackhide__ = True
|
__tracebackhide__ = True
|
||||||
|
|
||||||
|
|
||||||
|
actor: tractor.Actor = tractor.current_actor()
|
||||||
# task_can_release_tty_lock = trio.Event()
|
# task_can_release_tty_lock = trio.Event()
|
||||||
|
|
||||||
# spawn bg task which will lock out the TTY, we poll
|
# spawn bg task which will lock out the TTY, we poll
|
||||||
|
@@ -961,11 +818,8 @@ def pause_from_sync() -> None:
             # release_lock_signal=task_can_release_tty_lock,
         ))
     )
 
     except ModuleNotFoundError:
         log.warning('NO GREENBACK FOUND')
-    else:
-        log.warning('Not inside actor-runtime')
 
     db, undo_sigint = mk_mpdb()
     Lock.local_task_in_debug = 'sync'
@@ -1000,7 +854,11 @@ def pause_from_sync() -> None:
 # using the "pause" semantics instead since
 # that better covers actually somewhat "pausing the runtime"
 # for this particular paralell task to do debugging B)
-# pp = pause  # short-hand for "pause point"
+pause = partial(
+    _pause,
+    _set_trace,
+)
+pp = pause  # short-hand for "pause point"
 
 
 async def breakpoint(**kwargs):
@@ -1011,18 +869,9 @@ async def breakpoint(**kwargs):
     await pause(**kwargs)
 
 
-_crash_msg: str = (
-    'Attaching to pdb REPL in crashed actor'
-)
-
-
 def _post_mortem(
     actor: tractor.Actor,
     pdb: MultiActorPdb,
-    shield: bool = False,
-
-    # only for compat with `._set_trace()`..
-    extra_frames_up_when_async=0,
 
 ) -> None:
     '''
@@ -1030,28 +879,20 @@ def _post_mortem(
     debugger instance.
 
     '''
-    # TODO: print the actor supervion tree up to the root
-    # here! Bo
-    log.pdb(
-        f'{_crash_msg}\n'
-        '|\n'
-        f'|_ {actor.uid}\n'
-    )
+    log.pdb(f"\nAttaching to pdb in crashed actor: {actor.uid}\n")
 
-    # TODO: only replacing this to add the
-    # `end=''` to the print XD
-    # pdbp.xpm(Pdb=lambda: pdb)
-    info = sys.exc_info()
-    print(traceback.format_exc(), end='')
-    pdbp.post_mortem(
-        t=info[2],
-        Pdb=lambda: pdb,
-    )
+    # TODO: you need ``pdbpp`` master (at least this commit
+    # https://github.com/pdbpp/pdbpp/commit/b757794857f98d53e3ebbe70879663d7d843a6c2)
+    # to fix this and avoid the hang it causes. See issue:
+    # https://github.com/pdbpp/pdbpp/issues/480
+    # TODO: help with a 3.10+ major release if/when it arrives.
+    pdbp.xpm(Pdb=lambda: pdb)
 
 
 post_mortem = partial(
-    pause,
-    debug_func=_post_mortem,
+    _pause,
+    _post_mortem,
 )
 
 
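For reference, the two call styles swapped in the hunk above are roughly equivalent ways to drop into a post-mortem REPL on the active exception; a minimal sketch (the toy exception is only for illustration):

```python
import sys
import pdbp

try:
    1 / 0
except ZeroDivisionError:
    # explicit-traceback form (one side of this hunk):
    #   pdbp.post_mortem(t=sys.exc_info()[2])
    # `xpm()` ("ex post mortem", the other side) pulls the same traceback
    # from `sys.exc_info()` internally:
    pdbp.xpm()
```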
@@ -1092,10 +933,9 @@ async def acquire_debug_lock(
     '''
     Grab root's debug lock on entry, release on exit.
 
-    This helper is for actor's who don't actually need to acquired
-    the debugger but want to wait until the lock is free in the
-    process-tree root such that they don't clobber an ongoing pdb
-    REPL session in some peer or child!
+    This helper is for actor's who don't actually need
+    to acquired the debugger but want to wait until the
+    lock is free in the process-tree root.
 
     '''
     if not debug_mode():
@@ -1116,18 +956,14 @@ async def maybe_wait_for_debugger(
     poll_delay: float = 0.1,
     child_in_debug: bool = False,
 
-    header_msg: str = '',
-
-) -> bool:  # was locked and we polled?
+) -> None:
 
     if (
         not debug_mode()
         and not child_in_debug
     ):
-        return False
-
-    msg: str = header_msg
+        return
 
     if (
         is_root_process()
     ):
@ -1137,147 +973,41 @@ async def maybe_wait_for_debugger(
|
||||||
# will make the pdb repl unusable.
|
# will make the pdb repl unusable.
|
||||||
# Instead try to wait for pdb to be released before
|
# Instead try to wait for pdb to be released before
|
||||||
# tearing down.
|
# tearing down.
|
||||||
in_debug: tuple[str, str]|None = Lock.global_actor_in_debug
|
sub_in_debug: tuple[str, str] | None = None
|
||||||
debug_complete: trio.Event|None = Lock.no_remote_has_tty
|
|
||||||
|
|
||||||
if in_debug == current_actor().uid:
|
for _ in range(poll_steps):
|
||||||
log.debug(
|
|
||||||
msg
|
|
||||||
+
|
|
||||||
'Root already owns the TTY LOCK'
|
|
||||||
)
|
|
||||||
return True
|
|
||||||
|
|
||||||
elif in_debug:
|
if Lock.global_actor_in_debug:
|
||||||
msg += (
|
sub_in_debug = tuple(Lock.global_actor_in_debug)
|
||||||
f'Debug `Lock` in use by subactor: {in_debug}\n'
|
|
||||||
)
|
|
||||||
# TODO: could this make things more deterministic?
|
|
||||||
# wait to see if a sub-actor task will be
|
|
||||||
# scheduled and grab the tty lock on the next
|
|
||||||
# tick?
|
|
||||||
# XXX => but it doesn't seem to work..
|
|
||||||
# await trio.testing.wait_all_tasks_blocked(cushion=0)
|
|
||||||
else:
|
|
||||||
log.debug(
|
|
||||||
msg
|
|
||||||
+
|
|
||||||
'Root immediately acquired debug TTY LOCK'
|
|
||||||
)
|
|
||||||
return False
|
|
||||||
|
|
||||||
for istep in range(poll_steps):
|
log.debug('Root polling for debug')
|
||||||
if (
|
|
||||||
debug_complete
|
|
||||||
and not debug_complete.is_set()
|
|
||||||
and in_debug is not None
|
|
||||||
):
|
|
||||||
log.pdb(
|
|
||||||
msg
|
|
||||||
+
|
|
||||||
'Root is waiting on tty lock to release..\n'
|
|
||||||
)
|
|
||||||
with trio.CancelScope(shield=True):
|
|
||||||
await debug_complete.wait()
|
|
||||||
log.pdb(
|
|
||||||
f'Child subactor released debug lock\n'
|
|
||||||
f'|_{in_debug}\n'
|
|
||||||
)
|
|
||||||
|
|
||||||
# is no subactor locking debugger currently?
|
|
||||||
if (
|
|
||||||
in_debug is None
|
|
||||||
and (
|
|
||||||
debug_complete is None
|
|
||||||
or debug_complete.is_set()
|
|
||||||
)
|
|
||||||
):
|
|
||||||
log.pdb(
|
|
||||||
msg
|
|
||||||
+
|
|
||||||
'Root acquired tty lock!'
|
|
||||||
)
|
|
||||||
break
|
|
||||||
|
|
||||||
else:
|
|
||||||
# TODO: don't need this right?
|
|
||||||
# await trio.lowlevel.checkpoint()
|
|
||||||
|
|
||||||
log.debug(
|
|
||||||
'Root polling for debug:\n'
|
|
||||||
f'poll step: {istep}\n'
|
|
||||||
f'poll delya: {poll_delay}'
|
|
||||||
)
|
|
||||||
with trio.CancelScope(shield=True):
|
with trio.CancelScope(shield=True):
|
||||||
await trio.sleep(poll_delay)
|
await trio.sleep(poll_delay)
|
||||||
continue
|
|
||||||
|
|
||||||
# fallthrough on failure to acquire..
|
# TODO: could this make things more deterministic? wait
|
||||||
# else:
|
# to see if a sub-actor task will be scheduled and grab
|
||||||
# raise RuntimeError(
|
# the tty lock on the next tick?
|
||||||
# msg
|
# XXX: doesn't seem to work
|
||||||
# +
|
# await trio.testing.wait_all_tasks_blocked(cushion=0)
|
||||||
# 'Root actor failed to acquire debug lock?'
|
|
||||||
# )
|
|
||||||
return True
|
|
||||||
|
|
||||||
# else:
|
debug_complete = Lock.no_remote_has_tty
|
||||||
# # TODO: non-root call for #320?
|
if (
|
||||||
# this_uid: tuple[str, str] = current_actor().uid
|
debug_complete
|
||||||
# async with acquire_debug_lock(
|
and sub_in_debug is not None
|
||||||
# subactor_uid=this_uid,
|
and not debug_complete.is_set()
|
||||||
# ):
|
|
||||||
# pass
|
|
||||||
return False
|
|
||||||
|
|
||||||
# TODO: better naming and what additionals?
|
|
||||||
# - [ ] optional runtime plugging?
|
|
||||||
# - [ ] detection for sync vs. async code?
|
|
||||||
# - [ ] specialized REPL entry when in distributed mode?
|
|
||||||
# - [x] allow ignoring kbi Bo
|
|
||||||
@cm
|
|
||||||
def open_crash_handler(
|
|
||||||
catch: set[BaseException] = {
|
|
||||||
Exception,
|
|
||||||
BaseException,
|
|
||||||
},
|
|
||||||
ignore: set[BaseException] = {
|
|
||||||
KeyboardInterrupt,
|
|
||||||
},
|
|
||||||
):
|
):
|
||||||
'''
|
log.pdb(
|
||||||
Generic "post mortem" crash handler using `pdbp` REPL debugger.
|
'Root has errored but pdb is in use by '
|
||||||
|
f'child {sub_in_debug}\n'
|
||||||
|
'Waiting on tty lock to release..'
|
||||||
|
)
|
||||||
|
|
||||||
We expose this as a CLI framework addon to both `click` and
|
await debug_complete.wait()
|
||||||
`typer` users so they can quickly wrap cmd endpoints which get
|
|
||||||
automatically wrapped to use the runtime's `debug_mode: bool`
|
|
||||||
AND `pdbp.pm()` around any code that is PRE-runtime entry
|
|
||||||
- any sync code which runs BEFORE the main call to
|
|
||||||
`trio.run()`.
|
|
||||||
|
|
||||||
'''
|
await trio.sleep(poll_delay)
|
||||||
try:
|
continue
|
||||||
yield
|
else:
|
||||||
except tuple(catch) as err:
|
log.debug(
|
||||||
|
'Root acquired TTY LOCK'
|
||||||
if type(err) not in ignore:
|
)
|
||||||
pdbp.xpm()
|
|
||||||
|
|
||||||
raise
|
|
||||||
|
|
||||||
|
|
||||||
@cm
|
|
||||||
def maybe_open_crash_handler(pdb: bool = False):
|
|
||||||
'''
|
|
||||||
Same as `open_crash_handler()` but with bool input flag
|
|
||||||
to allow conditional handling.
|
|
||||||
|
|
||||||
Normally this is used with CLI endpoints such that if the --pdb
|
|
||||||
flag is passed the pdb REPL is engaed on any crashes B)
|
|
||||||
'''
|
|
||||||
rtctx = nullcontext
|
|
||||||
if pdb:
|
|
||||||
rtctx = open_crash_handler
|
|
||||||
|
|
||||||
with rtctx():
|
|
||||||
yield
|
|
|
@@ -37,7 +37,7 @@ from ._runtime import (
     # Arbiter as Registry,
     async_main,
 )
-from .devx import _debug
+from . import _debug
 from . import _spawn
 from . import _state
 from . import log
@@ -99,7 +99,7 @@ async def open_root_actor(
     # https://github.com/python-trio/trio/issues/1155#issuecomment-742964018
     builtin_bp_handler = sys.breakpointhook
     orig_bp_path: str | None = os.environ.get('PYTHONBREAKPOINT', None)
-    os.environ['PYTHONBREAKPOINT'] = 'tractor.devx._debug.pause_from_sync'
+    os.environ['PYTHONBREAKPOINT'] = 'tractor._debug.pause_from_sync'
 
     # attempt to retreive ``trio``'s sigint handler and stash it
     # on our debugger lock state.
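The `PYTHONBREAKPOINT` assignment above is what routes a plain `breakpoint()` call to tractor's sync-entry hook; a rough sketch of the lookup that CPython's default `sys.breakpointhook` performs (the helper name here is illustrative, not part of this diff):

```python
import importlib
import os


def _resolve_breakpointhook():
    # mirrors how the default sys.breakpointhook honors PYTHONBREAKPOINT:
    # unset -> pdb.set_trace, empty string -> breakpoint() becomes a no-op,
    # otherwise the dotted path is imported and called instead.
    dotted = os.environ.get('PYTHONBREAKPOINT', 'pdb.set_trace')
    if dotted == '':
        return lambda *args, **kwargs: None
    modname, _, funcname = dotted.rpartition('.')
    return getattr(importlib.import_module(modname), funcname)

# with the env var set as in this hunk, `breakpoint()` ends up invoking
# `tractor._debug.pause_from_sync` (or the `devx` path on the other side).
```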
@@ -146,7 +146,7 @@ async def open_root_actor(
 
         # expose internal debug module to every actor allowing
         # for use of ``await tractor.breakpoint()``
-        enable_modules.append('tractor.devx._debug')
+        enable_modules.append('tractor._debug')
 
         # if debug mode get's enabled *at least* use that level of
         # logging for some informative console prompts.
@@ -78,7 +78,7 @@ from ._exceptions import (
     ContextCancelled,
     TransportClosed,
 )
-from .devx import _debug
+from . import _debug
 from ._discovery import get_registry
 from ._portal import Portal
 from . import _state
@@ -197,7 +197,7 @@ class Actor:
         self._parent_main_data = _mp_fixup_main._mp_figure_out_main()
 
         # always include debugging tools module
-        enable_modules.append('tractor.devx._debug')
+        enable_modules.append('tractor._debug')
 
         self.enable_modules: dict[str, str] = {}
         for name in enable_modules:
@@ -34,7 +34,7 @@ from typing import (
 import trio
 from trio import TaskStatus
 
-from .devx._debug import (
+from ._debug import (
     maybe_wait_for_debugger,
     acquire_debug_lock,
 )
@@ -554,14 +554,13 @@ async def trio_proc(
             with trio.move_on_after(0.5):
                 await proc.wait()
 
+            log.pdb(
+                'Delaying subproc reaper while debugger locked..'
+            )
             await maybe_wait_for_debugger(
                 child_in_debug=_runtime_vars.get(
                     '_debug_mode', False
                 ),
-                header_msg=(
-                    'Delaying subproc reaper while debugger locked..\n'
-                ),
-
                 # TODO: need a diff value then default?
                 # poll_steps=9999999,
             )
@@ -28,7 +28,7 @@ import warnings

import trio

-from .devx._debug import maybe_wait_for_debugger
+from ._debug import maybe_wait_for_debugger
from ._state import current_actor, is_main_process
from .log import get_logger, get_loglevel
from ._runtime import Actor

@@ -1,37 +0,0 @@
-# tractor: structured concurrent "actors".
-# Copyright 2018-eternity Tyler Goodlet.
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-"""
-Runtime "developer experience" utils and addons to aid our
-(advanced) users and core devs in building distributed applications
-and working with/on the actor runtime.
-
-"""
-from ._debug import (
-    maybe_wait_for_debugger as maybe_wait_for_debugger,
-    acquire_debug_lock as acquire_debug_lock,
-    breakpoint as breakpoint,
-    pause as pause,
-    pause_from_sync as pause_from_sync,
-    shield_sigint_handler as shield_sigint_handler,
-    MultiActorPdb as MultiActorPdb,
-    open_crash_handler as open_crash_handler,
-    maybe_open_crash_handler as maybe_open_crash_handler,
-    post_mortem as post_mortem,
-)
-from ._stackscope import (
-    enable_stack_on_sig as enable_stack_on_sig,
-)

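With the tractor/devx package __init__ above deleted, the debug helpers it re-exported are reached via tractor._debug on this side of the compare, matching the import hunks earlier in the diff. A rough usage sketch under that assumption (the helper name and its pdb keyword come from this diff's own export list and docstrings; the deliberately failing block is purely illustrative):

    from tractor._debug import maybe_open_crash_handler

    # with pdb=True an uncaught exception inside the block drops into the
    # `pdbp` REPL instead of just printing a traceback
    with maybe_open_crash_handler(pdb=True):
        raise RuntimeError('illustrative crash')
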
@@ -1,84 +0,0 @@
-# tractor: structured concurrent "actors".
-# Copyright eternity Tyler Goodlet.
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-'''
-The fundamental cross process SC abstraction: an inter-actor,
-cancel-scope linked task "context".
-
-A ``Context`` is very similar to the ``trio.Nursery.cancel_scope`` built
-into each ``trio.Nursery`` except it links the lifetimes of memory space
-disjoint, parallel executing tasks in separate actors.
-
-'''
-from signal import (
-    signal,
-    SIGUSR1,
-)
-
-import trio
-
-@trio.lowlevel.disable_ki_protection
-def dump_task_tree() -> None:
-    import stackscope
-    from tractor.log import get_console_log
-
-    tree_str: str = str(
-        stackscope.extract(
-            trio.lowlevel.current_root_task(),
-            recurse_child_tasks=True
-        )
-    )
-    log = get_console_log('cancel')
-    log.pdb(
-        f'Dumping `stackscope` tree:\n\n'
-        f'{tree_str}\n'
-    )
-    # import logging
-    # try:
-    #     with open("/dev/tty", "w") as tty:
-    #         tty.write(tree_str)
-    # except BaseException:
-    #     logging.getLogger(
-    #         "task_tree"
-    #     ).exception("Error printing task tree")
-
-
-def signal_handler(sig: int, frame: object) -> None:
-    import traceback
-    try:
-        trio.lowlevel.current_trio_token(
-        ).run_sync_soon(dump_task_tree)
-    except RuntimeError:
-        # not in async context -- print a normal traceback
-        traceback.print_stack()
-
-
-
-def enable_stack_on_sig(
-    sig: int = SIGUSR1
-) -> None:
-    '''
-    Enable `stackscope` tracing on reception of a signal; by
-    default this is SIGUSR1.
-
-    '''
-    signal(
-        sig,
-        signal_handler,
-    )
-    # NOTE: not the above can be triggered from
-    # a (xonsh) shell using:
-    # kill -SIGUSR1 @$(pgrep -f '<cmd>')

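The module removed above wired a SIGUSR1 handler that dumps the whole trio task tree via stackscope, which is handy when an actor tree wedges. A hedged sketch of how it was meant to be driven on the side of the compare that still ships tractor.devx (the script name and the task body are made up; enable_stack_on_sig() and its SIGUSR1 default come straight from the removed code):

    import trio
    import tractor
    from tractor.devx import enable_stack_on_sig

    async def main() -> None:
        enable_stack_on_sig()  # install the SIGUSR1 -> dump_task_tree handler
        async with tractor.open_nursery():
            await trio.sleep_forever()  # stand-in for real actor work

    if __name__ == '__main__':
        trio.run(main)

    # then, from a shell, ask the running process for its task tree:
    #   kill -SIGUSR1 $(pgrep -f my_app.py)
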
@@ -1,129 +0,0 @@
-# tractor: structured concurrent "actors".
-# Copyright 2018-eternity Tyler Goodlet.
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-"""
-CLI framework extensions for hacking on the actor runtime.
-
-Currently popular frameworks supported are:
-
-- `typer` via the `@callback` API
-
-"""
-from __future__ import annotations
-from typing import (
-    Any,
-    Callable,
-)
-from typing_extensions import Annotated
-
-import typer
-
-
-_runtime_vars: dict[str, Any] = {}
-
-
-def load_runtime_vars(
-    ctx: typer.Context,
-    callback: Callable,
-    pdb: bool = False,  # --pdb
-    ll: Annotated[
-        str,
-        typer.Option(
-            '--loglevel',
-            '-l',
-            help='BigD logging level',
-        ),
-    ] = 'cancel',  # -l info
-):
-    '''
-    Maybe engage crash handling with `pdbp` when code inside
-    a `typer` CLI endpoint cmd raises.
-
-    To use this callback simply take your `app = typer.Typer()` instance
-    and decorate this function with it like so:
-
-    .. code:: python
-
-        from tractor.devx import cli
-
-        app = typer.Typer()
-
-        # manual decoration to hook into `click`'s context system!
-        cli.load_runtime_vars = app.callback(
-            invoke_without_command=True,
-        )
-
-    And then you can use the now augmented `click` CLI context as so,
-
-    .. code:: python
-
-        @app.command(
-            context_settings={
-                "allow_extra_args": True,
-                "ignore_unknown_options": True,
-            }
-        )
-        def my_cli_cmd(
-            ctx: typer.Context,
-        ):
-            rtvars: dict = ctx.runtime_vars
-            pdb: bool = rtvars['pdb']
-
-            with tractor.devx.cli.maybe_open_crash_handler(pdb=pdb):
-                trio.run(
-                    partial(
-                        my_tractor_main_task_func,
-                        debug_mode=pdb,
-                        loglevel=rtvars['ll'],
-                    )
-                )
-
-    which will enable log level and debug mode globally for the entire
-    `tractor` + `trio` runtime thereafter!
-
-    Bo
-
-    '''
-    global _runtime_vars
-    _runtime_vars |= {
-        'pdb': pdb,
-        'll': ll,
-    }
-
-    ctx.runtime_vars: dict[str, Any] = _runtime_vars
-    print(
-        f'`typer` sub-cmd: {ctx.invoked_subcommand}\n'
-        f'`tractor` runtime vars: {_runtime_vars}'
-    )
-
-    # XXX NOTE XXX: hackzone.. if no sub-cmd is specified (the
-    # default if the user just invokes `bigd`) then we simply
-    # invoke the sole `_bigd()` cmd passing in the "parent"
-    # typer.Context directly to that call since we're treating it
-    # as a "non sub-command" or wtv..
-    # TODO: ideally typer would have some kinda built-in way to get
-    # this behaviour without having to construct and manually
-    # invoke our own cmd..
-    if (
-        ctx.invoked_subcommand is None
-        or ctx.invoked_subcommand == callback.__name__
-    ):
-        cmd: typer.core.TyperCommand = typer.core.TyperCommand(
-            name='bigd',
-            callback=callback,
-        )
-        ctx.params = {'ctx': ctx}
-        cmd.invoke(ctx)