Compare commits

...

4 Commits

Author SHA1 Message Date
Tyler Goodlet 72b4dc1461 Provision for infected-`asyncio` debug mode support
It's **almost** there, we're just missing the final translation code that
lets an `asyncio`-side task call
`.devx._debug.wait_for_parent_stdin_hijack()` to do root actor TTY
locking. Then we just need to ensure internals also do the right thing
with `greenback` for equivalent sync `breakpoint()`-style pause
points.

Since I'm deferring this until later, I'm tossing in some xfail tests
into `test_infected_asyncio` with TODOs for the needed implementation as
well as the eventual test org.

By "provision" it means we add:
- `greenback` init block to `_run_asyncio_task()` when debug mode is
  enabled (but which will currently rte when `asyncio` is detected)
  using `.bestow_portal()` around the `asyncio.Task`.
- a call to `_debug.maybe_init_greenback()` in the `run_as_asyncio_guest()`
  guest-mode entry point.
- as part of `._debug.Lock.is_main_trio_thread()` whenever the async-lib
  is not 'trio' error lock the backend name (which is obvi `'asyncio'`
  in this use case).
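
As a rough sketch of the `.bestow_portal()` provision (hypothetical,
standalone `greenback` + `asyncio` usage only, NOT the actual `tractor`
wiring):

import asyncio
import greenback  # `pip install greenback`

def sync_pause() -> None:
    # sync code can `await` thanks to the portal bestowed below
    greenback.await_(asyncio.sleep(0.25))

async def child() -> None:
    await asyncio.sleep(0)  # checkpoint; ensure the portal is active
    sync_pause()

async def main() -> None:
    task: asyncio.Task = asyncio.create_task(child())
    # equip the new task with a portal from "outside", mirroring the
    # `.bestow_portal(task)` call in `_run_asyncio_task()`
    greenback.bestow_portal(task)
    await task

asyncio.run(main())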
2024-03-25 16:09:32 -04:00
Tyler Goodlet 90bfdaf58c Drop extra newline from log msg 2024-03-25 15:03:33 -04:00
Tyler Goodlet 507cd96904 Change all `| None` -> `|None` in `._runtime` 2024-03-25 14:15:36 -04:00
Tyler Goodlet 2588e54867 Add todo-notes for hiding `@acm` frames
In the particular case of the `Portal.open_context().__aexit__()` frame,
due to usage of `contextlib.asynccontextmanager`, we can't easily
monkeypatch in a `__tracebackhide__` setting nor catch-n-reraise around
the block exit without defining our own `.__aexit__()` impl. Thus, it's
probably most sane to do something with an override of
`contextlib._AsyncGeneratorContextManager` or the publicly exposed
`AsyncContextDecorator` (which uses the former internally, right?); a
rough sketch follows below.

Also fix up some old `._invoke` mod paths in comments and just show
`str(eoc)` in the `.open_stream().__aexit__()` terminated-by-EoC log msg
since the `repr()` form won't pretty-print the IPC msg nicely.
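
A minimal sketch of the override idea (hypothetical and untested here;
note `_AsyncGeneratorContextManager` is private stdlib API and its
internals can change across Python versions):

from contextlib import _AsyncGeneratorContextManager
from functools import wraps

class _HiddenFrameAcm(_AsyncGeneratorContextManager):
    async def __aexit__(self, typ, value, tb):
        # hint honoured by `pdbp`/`pytest`-style tb filtering
        __tracebackhide__ = True
        return await super().__aexit__(typ, value, tb)

def hidden_acm(func):
    # drop-in alternative to `@contextlib.asynccontextmanager`
    # whose `.__aexit__()` frame is marked hidden
    @wraps(func)
    def helper(*args, **kwds):
        return _HiddenFrameAcm(func, args, kwds)
    return helper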
2024-03-24 16:49:07 -04:00
7 changed files with 137 additions and 44 deletions

View File

@@ -77,7 +77,9 @@ async def main(
) -> None:
async with tractor.open_nursery() as n:
async with tractor.open_nursery(
# debug_mode=True,
) as n:
p = await n.start_actor(
'aio_daemon',

View File

@@ -601,7 +601,8 @@ def test_echoserver_detailed_mechanics(
pass
else:
pytest.fail(
"stream wasn't stopped after sentinel?!")
'stream not stopped after sentinel ?!'
)
# TODO: the case where this blocks and
# is cancelled by kbi or out of task cancellation
@@ -613,3 +614,37 @@ def test_echoserver_detailed_mechanics(
else:
trio.run(main)
# TODO: debug_mode tests once we get support for `asyncio`!
#
# -[ ] need tests to wrap both scripts:
# - [ ] infected_asyncio_echo_server.py
# - [ ] debugging/asyncio_bp.py
# -[ ] consider moving ^ (some of) these ^ to `test_debugger`?
#
# -[ ] missing impl outstanding includes:
# - [x] for sync pauses we need to ensure we open yet another
# `greenback` portal in the asyncio task
# => completed using `.bestow_portal(task)` inside
# `.to_asyncio._run_asyncio_task()` right?
# -[ ] translation func to get from `asyncio` task calling to
# `._debug.wait_for_parent_stdin_hijack()` which does root
# call to do TTY locking.
#
def test_sync_breakpoint():
'''
Verify we can do sync-func/code breakpointing using the
`breakpoint()` builtin inside infected mode actors.
'''
pytest.xfail('This support is not implemented yet!')
def test_debug_mode_crash_handling():
'''
Verify multi-actor crash handling works with a combo of infected-`asyncio`-mode
and normal `trio` actors despite nested process trees.
'''
pytest.xfail('This support is not implemented yet!')

View File

@@ -351,7 +351,7 @@ class Context:
by the runtime in 2 ways:
- by entering ``Portal.open_context()`` which is the primary
public API for any "caller" task or,
- by the RPC machinery's `._runtime._invoke()` as a `ctx` arg
- by the RPC machinery's `._rpc._invoke()` as a `ctx` arg
to a remotely scheduled "callee" function.
AND is always constructed using the below ``mk_context()``.
@@ -361,10 +361,10 @@ class Context:
`trio.Task`s. Contexts are allocated on each side of any task
RPC-linked msg dialog, i.e. for every request to a remote
actor from a `Portal`. On the "callee" side a context is
always allocated inside ``._runtime._invoke()``.
always allocated inside ``._rpc._invoke()``.
# TODO: more detailed writeup on cancellation, error and
# streaming semantics..
TODO: more detailed writeup on cancellation, error and
streaming semantics..
A context can be cancelled and (possibly eventually restarted) from
either side of the underlying IPC channel, it can also open task
@@ -1206,7 +1206,9 @@ class Context:
# await pause()
log.warning(
'Stream was terminated by EoC\n\n'
f'{repr(eoc)}\n'
# NOTE: won't show the error <Type> but
# does show txt followed by IPC msg.
f'{str(eoc)}\n'
)
finally:
@@ -1303,7 +1305,7 @@ class Context:
# `._cancel_called == True`.
not raise_overrun_from_self
and isinstance(remote_error, RemoteActorError)
and remote_error.msgdata['type_str'] == 'StreamOverrun'
and remote_error.msgdata['boxed_type_str'] == 'StreamOverrun'
and tuple(remote_error.msgdata['sender']) == our_uid
):
# NOTE: we set the local scope error to any "self
@@ -1880,6 +1882,19 @@ class Context:
return False
# TODO: exception tb masking by using a manual
# `.__aexit__()`/.__aenter__()` pair on a type?
# => currently this is one of the few places we can't easily
# mask errors - on the exit side of a `Portal.open_context()`..
# there's 2 ways to approach it:
# - manually write an @acm type as per above
# - use `contextlib.AsyncContextDecorator` to override the default
# impl to suppress traceback frames:
# * https://docs.python.org/3/library/contextlib.html#contextlib.AsyncContextDecorator
# * https://docs.python.org/3/library/contextlib.html#contextlib.ContextDecorator
# - also we could just override directly the underlying
# `contextlib._AsyncGeneratorContextManager`?
@acm
async def open_context_from_portal(
portal: Portal,

View File

@@ -140,16 +140,16 @@ class Actor:
msg_buffer_size: int = 2**6
# nursery placeholders filled in by `async_main()` after fork
_root_n: Nursery | None = None
_service_n: Nursery | None = None
_server_n: Nursery | None = None
_root_n: Nursery|None = None
_service_n: Nursery|None = None
_server_n: Nursery|None = None
# Information about `__main__` from parent
_parent_main_data: dict[str, str]
_parent_chan_cs: CancelScope | None = None
_parent_chan_cs: CancelScope|None = None
# syncs for setup/teardown sequences
_server_down: trio.Event | None = None
_server_down: trio.Event|None = None
# user toggled crash handling (including monkey-patched in
# `trio.open_nursery()` via `.trionics._supervisor` B)
@@ -178,7 +178,7 @@ class Actor:
spawn_method: str|None = None,
# TODO: remove!
arbiter_addr: tuple[str, int] | None = None,
arbiter_addr: tuple[str, int]|None = None,
) -> None:
'''
@@ -193,7 +193,7 @@ class Actor:
)
self._cancel_complete = trio.Event()
self._cancel_called_by_remote: tuple[str, tuple] | None = None
self._cancel_called_by_remote: tuple[str, tuple]|None = None
self._cancel_called: bool = False
# retreive and store parent `__main__` data which
@@ -249,11 +249,11 @@ class Actor:
] = {}
self._listeners: list[trio.abc.Listener] = []
self._parent_chan: Channel | None = None
self._forkserver_info: tuple | None = None
self._parent_chan: Channel|None = None
self._forkserver_info: tuple|None = None
self._actoruid2nursery: dict[
tuple[str, str],
ActorNursery | None,
ActorNursery|None,
] = {} # type: ignore # noqa
# when provided, init the registry addresses property from
@@ -779,7 +779,7 @@ class Actor:
#
# side: str|None = None,
msg_buffer_size: int | None = None,
msg_buffer_size: int|None = None,
allow_overruns: bool = False,
) -> Context:
@@ -844,7 +844,7 @@ class Actor:
kwargs: dict,
# IPC channel config
msg_buffer_size: int | None = None,
msg_buffer_size: int|None = None,
allow_overruns: bool = False,
load_nsf: bool = False,
@@ -918,11 +918,11 @@ class Actor:
async def _from_parent(
self,
parent_addr: tuple[str, int] | None,
parent_addr: tuple[str, int]|None,
) -> tuple[
Channel,
list[tuple[str, int]] | None,
list[tuple[str, int]]|None,
]:
'''
Bootstrap this local actor's runtime config from its parent by
@@ -943,7 +943,7 @@ class Actor:
# Initial handshake: swap names.
await self._do_handshake(chan)
accept_addrs: list[tuple[str, int]] | None = None
accept_addrs: list[tuple[str, int]]|None = None
if self._spawn_method == "trio":
# Receive runtime state from our parent
parent_data: dict[str, Any]
@@ -1007,7 +1007,7 @@ class Actor:
handler_nursery: Nursery,
*,
# (host, port) to bind for channel server
listen_sockaddrs: list[tuple[str, int]] | None = None,
listen_sockaddrs: list[tuple[str, int]]|None = None,
task_status: TaskStatus[Nursery] = trio.TASK_STATUS_IGNORED,
) -> None:
@@ -1464,7 +1464,7 @@ class Actor:
async def async_main(
actor: Actor,
accept_addrs: tuple[str, int] | None = None,
accept_addrs: tuple[str, int]|None = None,
# XXX: currently ``parent_addr`` is only needed for the
# ``multiprocessing`` backend (which pickles state sent to
@@ -1473,7 +1473,7 @@ async def async_main(
# change this to a simple ``is_subactor: bool`` which will
# be False when running as root actor and True when as
# a subactor.
parent_addr: tuple[str, int] | None = None,
parent_addr: tuple[str, int]|None = None,
task_status: TaskStatus[None] = trio.TASK_STATUS_IGNORED,
) -> None:
@@ -1496,7 +1496,7 @@ async def async_main(
try:
# establish primary connection with immediate parent
actor._parent_chan: Channel | None = None
actor._parent_chan: Channel|None = None
if parent_addr is not None:
(
@@ -1795,7 +1795,7 @@ class Arbiter(Actor):
self,
name: str,
) -> tuple[str, int] | None:
) -> tuple[str, int]|None:
for uid, sockaddr in self._registry.items():
if name in uid:

View File

@@ -583,7 +583,7 @@ async def open_nursery(
finally:
msg: str = (
'Actor-nursery exited\n'
f'|_{an}\n\n'
f'|_{an}\n'
)
# shutdown runtime if it was started

View File

@@ -187,13 +187,18 @@ class Lock:
`trio.to_thread.run_sync()`.
'''
return (
is_trio_main = (
# TODO: since this is private, @oremanj says
# we should just copy the impl for now..
trio._util.is_main_thread()
and
sniffio.current_async_library() == 'trio'
(async_lib := sniffio.current_async_library()) == 'trio'
)
if not is_trio_main:
log.warning(
f'Current async-lib detected by `sniffio`: {async_lib}\n'
)
return is_trio_main
# XXX apparently unreliable..see ^
# (
# threading.current_thread()
@@ -1114,6 +1119,14 @@ def pause_from_sync(
'- `async with tractor.open_root_actor()`\n'
)
# NOTE: once supported, remove this AND the one
# inside `._pause()`!
if actor.is_infected_aio():
raise RuntimeError(
'`tractor.pause[_from_sync]()` not yet supported '
'for infected `asyncio` mode!'
)
# raises on not-found by default
greenback: ModuleType = maybe_import_greenback()
mdb: MultiActorPdb = mk_mpdb()
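
The `sniffio`-based detection added to `is_main_trio_thread()` above can
be demo'd standalone; a tiny sketch using just vanilla `trio`/`asyncio`
plus `sniffio` (not `tractor` itself):

import asyncio
import sniffio
import trio

async def which_lib() -> str:
    # names the async framework driving the current task
    return sniffio.current_async_library()

print(trio.run(which_lib))       # -> 'trio'
print(asyncio.run(which_lib()))  # -> 'asyncio'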

View File

@@ -33,10 +33,14 @@ from typing import (
import trio
from outcome import Error
from .log import get_logger
from ._state import current_actor
from ._exceptions import AsyncioCancelled
from .trionics._broadcast import (
from tractor.log import get_logger
from tractor._state import (
current_actor,
debug_mode,
)
from tractor.devx import _debug
from tractor._exceptions import AsyncioCancelled
from tractor.trionics._broadcast import (
broadcast_receiver,
BroadcastReceiver,
)
@@ -64,9 +68,9 @@ class LinkedTaskChannel(trio.abc.Channel):
_trio_exited: bool = False
# set after ``asyncio.create_task()``
_aio_task: asyncio.Task | None = None
_aio_err: BaseException | None = None
_broadcaster: BroadcastReceiver | None = None
_aio_task: asyncio.Task|None = None
_aio_err: BaseException|None = None
_broadcaster: BroadcastReceiver|None = None
async def aclose(self) -> None:
await self._from_aio.aclose()
@@ -158,7 +162,9 @@ def _run_asyncio_task(
'''
__tracebackhide__ = True
if not current_actor().is_infected_aio():
raise RuntimeError("`infect_asyncio` mode is not enabled!?")
raise RuntimeError(
"`infect_asyncio` mode is not enabled!?"
)
# ITC (inter task comms), these channel/queue names are mostly from
# ``asyncio``'s perspective.
@@ -187,7 +193,7 @@ def _run_asyncio_task(
cancel_scope = trio.CancelScope()
aio_task_complete = trio.Event()
aio_err: BaseException | None = None
aio_err: BaseException|None = None
chan = LinkedTaskChannel(
aio_q, # asyncio.Queue
@@ -253,7 +259,7 @@ def _run_asyncio_task(
if not inspect.isawaitable(coro):
raise TypeError(f"No support for invoking {coro}")
task = asyncio.create_task(
task: asyncio.Task = asyncio.create_task(
wait_on_coro_final_result(
to_trio,
coro,
@@ -262,6 +268,18 @@
)
chan._aio_task = task
# XXX TODO XXX get this actually workin.. XD
# maybe setup `greenback` for `asyncio`-side task REPLing
if (
debug_mode()
and
(greenback := _debug.maybe_import_greenback(
force_reload=True,
raise_not_found=False,
))
):
greenback.bestow_portal(task)
def cancel_trio(task: asyncio.Task) -> None:
'''
Cancel the calling ``trio`` task on error.
@@ -269,7 +287,7 @@ def cancel_trio(task: asyncio.Task) -> None:
'''
nonlocal chan
aio_err = chan._aio_err
task_err: BaseException | None = None
task_err: BaseException|None = None
# only to avoid ``asyncio`` complaining about uncaptured
# task exceptions
@@ -349,11 +367,11 @@ async def translate_aio_errors(
'''
trio_task = trio.lowlevel.current_task()
aio_err: BaseException | None = None
aio_err: BaseException|None = None
# TODO: make this a channel method?
def maybe_raise_aio_err(
err: Exception | None = None
err: Exception|None = None
) -> None:
aio_err = chan._aio_err
if (
@@ -531,6 +549,16 @@ def run_as_asyncio_guest(
loop = asyncio.get_running_loop()
trio_done_fut = asyncio.Future()
if debug_mode():
# XXX make it obvi we know this isn't supported yet!
log.error(
'Attempting to enter unsupported `greenback` init '
'from `asyncio` task..'
)
await _debug.maybe_init_greenback(
force_reload=True,
)
def trio_done_callback(main_outcome):
if isinstance(main_outcome, Error):
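
For reference, the guest-mode pattern that `run_as_asyncio_guest()`
builds on; a minimal sketch using only public `trio`/`asyncio` APIs,
mirroring the `trio_done_callback` + `asyncio.Future` wiring above (but
none of `tractor`'s runtime machinery):

import asyncio
import trio

async def trio_main() -> str:
    await trio.sleep(0.1)
    return 'trio-done'

async def aio_main() -> None:
    loop = asyncio.get_running_loop()
    trio_done_fut: asyncio.Future = asyncio.Future()

    def trio_done_callback(main_outcome) -> None:
        # called by `trio` when `trio_main()` completes; `.unwrap()`
        # on the `outcome.Outcome` returns the result or re-raises
        trio_done_fut.set_result(main_outcome)

    trio.lowlevel.start_guest_run(
        trio_main,
        run_sync_soon_threadsafe=loop.call_soon_threadsafe,
        done_callback=trio_done_callback,
    )
    print((await trio_done_fut).unwrap())  # -> 'trio-done'

asyncio.run(aio_main())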