forked from goodboy/tractor

Compare commits


No commits in common. "aio_abandons" and "master" have entirely different histories.

90 changed files with 4574 additions and 23800 deletions

View File

@ -3,8 +3,8 @@
|gh_actions|
|docs|
``tractor`` is a `structured concurrent`_, (optionally
distributed_) multi-processing_ runtime built on trio_.
``tractor`` is a `structured concurrent`_, multi-processing_ runtime
built on trio_.
Fundamentally, ``tractor`` gives you parallelism via
``trio``-"*actors*": independent Python processes (aka
@ -17,20 +17,11 @@ protocol" constructed on top of multiple Pythons each running a ``trio``
scheduled runtime - a call to ``trio.run()``.
We believe the system adheres to the `3 axioms`_ of an "`actor model`_"
but likely **does not** look like what **you** probably *think* an "actor
model" looks like, and that's **intentional**.
but likely *does not* look like what *you* probably think an "actor
model" looks like, and that's *intentional*.
Where do i start!?
------------------
The first step to grok ``tractor`` is to get an intermediate
knowledge of ``trio`` and **structured concurrency** B)
Some great places to start are,
- the seminal `blog post`_
- obviously the `trio docs`_
- wikipedia's nascent SC_ page
- the fancy diagrams @ libdill-docs_
The first step to grok ``tractor`` is to get the basics of ``trio`` down.
A great place to start is the `trio docs`_ and this `blog post`_.
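A hedged, minimal sketch of what that first step buys you (the function and
actor names here are illustrative; the ``open_nursery()``/``run_in_actor()``
calls mirror the examples further down in this diff)::

    import trio
    import tractor


    async def greet(msg: str) -> str:
        # this runs in a separate "actor", i.e. a spawned subprocess
        return f'hello {msg} from a subactor!'


    async def main():
        async with tractor.open_nursery() as an:
            # spawn a subactor, run `greet()` inside it, and get back
            # a portal for retrieving the result
            portal = await an.run_in_actor(
                greet,
                msg='trio',
            )
            print(await portal.result())


    if __name__ == '__main__':
        trio.run(main)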
Features
@ -602,7 +593,6 @@ matrix seems too hip, we're also mostly all in the the `trio gitter
channel`_!
.. _structured concurrent: https://trio.discourse.group/t/concise-definition-of-structured-concurrency/228
.. _distributed: https://en.wikipedia.org/wiki/Distributed_computing
.. _multi-processing: https://en.wikipedia.org/wiki/Multiprocessing
.. _trio: https://github.com/python-trio/trio
.. _nurseries: https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/#nurseries-a-structured-replacement-for-go-statements
@ -621,9 +611,8 @@ channel`_!
.. _trio docs: https://trio.readthedocs.io/en/latest/
.. _blog post: https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/
.. _structured concurrency: https://en.wikipedia.org/wiki/Structured_concurrency
.. _SC: https://en.wikipedia.org/wiki/Structured_concurrency
.. _libdill-docs: https://sustrik.github.io/libdill/structured-concurrency.html
.. _structured chadcurrency: https://en.wikipedia.org/wiki/Structured_concurrency
.. _structured concurrency: https://en.wikipedia.org/wiki/Structured_concurrency
.. _unrequirements: https://en.wikipedia.org/wiki/Actor_model#Direct_communication_and_asynchrony
.. _async generators: https://www.python.org/dev/peps/pep-0525/
.. _trio-parallel: https://github.com/richardsheridan/trio-parallel

View File

@ -6,59 +6,53 @@ been an outage) and we want to ensure that despite being in debug mode
actor tree will eventually be cancelled without leaving any zombies.
'''
from contextlib import asynccontextmanager as acm
from functools import partial
import trio
from tractor import (
open_nursery,
context,
Context,
ContextCancelled,
MsgStream,
_testing,
)
import trio
import pytest
async def break_ipc_then_error(
async def break_channel_silently_then_error(
stream: MsgStream,
break_ipc_with: str|None = None,
pre_close: bool = False,
):
await _testing.break_ipc(
stream=stream,
method=break_ipc_with,
pre_close=pre_close,
)
async for msg in stream:
await stream.send(msg)
# XXX: close the channel right after an error is raised
# purposely breaking the IPC transport to make sure the parent
# doesn't get stuck in debug or hang on the connection join.
# this more or less simulates an infinite msg-receive hang on
# the other end.
await stream._ctx.chan.send(None)
assert 0
async def iter_ipc_stream(
async def close_stream_and_error(
stream: MsgStream,
break_ipc_with: str|None = None,
pre_close: bool = False,
):
async for msg in stream:
await stream.send(msg)
# wipe out channel right before raising
await stream._ctx.chan.send(None)
await stream.aclose()
assert 0
@context
async def recv_and_spawn_net_killers(
ctx: Context,
break_ipc_after: bool | int = False,
pre_close: bool = False,
) -> None:
'''
Receive stream msgs and spawn some IPC killers mid-stream.
'''
broke_ipc: bool = False
await ctx.started()
async with (
ctx.open_stream() as stream,
@ -66,58 +60,27 @@ async def recv_and_spawn_net_killers(
):
async for i in stream:
print(f'child echoing {i}')
if not broke_ipc:
await stream.send(i)
else:
await trio.sleep(0.01)
if (
break_ipc_after
and
i >= break_ipc_after
and i > break_ipc_after
):
broke_ipc = True
n.start_soon(
iter_ipc_stream,
stream,
)
n.start_soon(
partial(
break_ipc_then_error,
stream=stream,
pre_close=pre_close,
)
)
@acm
async def stuff_hangin_ctlc(timeout: float = 1) -> None:
with trio.move_on_after(timeout) as cs:
yield timeout
if cs.cancelled_caught:
# pretend to be a user seeing no streaming action
# thinking it's a hang, and then hitting ctl-c..
print(
f"i'm a user on the PARENT side and thingz hangin "
f'after timeout={timeout} ???\n\n'
'MASHING CTlR-C..!?\n'
)
raise KeyboardInterrupt
'#################################\n'
'Simulating child-side IPC BREAK!\n'
'#################################'
n.start_soon(break_channel_silently_then_error, stream)
n.start_soon(close_stream_and_error, stream)
async def main(
debug_mode: bool = False,
start_method: str = 'trio',
loglevel: str = 'cancel',
# by default we break the parent IPC first (if configured to break
# at all), but this can be changed so the child does first (even if
# both are set to break).
break_parent_ipc_after: int | bool = False,
break_child_ipc_after: int | bool = False,
pre_close: bool = False,
) -> None:
@ -128,128 +91,59 @@ async def main(
# NOTE: even debugger is used we shouldn't get
# a hang since it never engages due to broken IPC
debug_mode=debug_mode,
loglevel=loglevel,
loglevel='warning',
) as an,
):
sub_name: str = 'chitty_hijo'
portal = await an.start_actor(
sub_name,
'chitty_hijo',
enable_modules=[__name__],
)
async with (
stuff_hangin_ctlc(timeout=2) as timeout,
_testing.expect_ctxc(
yay=(
break_parent_ipc_after
or break_child_ipc_after
),
# TODO: we CAN'T remove this right?
# since we need the ctxc to bubble up from either
# the stream API after the `None` msg is sent
# (which actually implicitly cancels all remote
# tasks in the hijo) or from simluated
# KBI-mash-from-user
# or should we expect that a KBI triggers the ctxc
# and KBI in an eg?
reraise=True,
),
portal.open_context(
async with portal.open_context(
recv_and_spawn_net_killers,
break_ipc_after=break_child_ipc_after,
pre_close=pre_close,
) as (ctx, sent),
):
rx_eoc: bool = False
ipc_break_sent: bool = False
) as (ctx, sent):
async with ctx.open_stream() as stream:
for i in range(1000):
if (
break_parent_ipc_after
and
i > break_parent_ipc_after
and
not ipc_break_sent
and i > break_parent_ipc_after
):
print(
'#################################\n'
'Simulating PARENT-side IPC BREAK!\n'
'#################################\n'
'Simulating parent-side IPC BREAK!\n'
'#################################'
)
# TODO: other methods? see break func above.
# await stream._ctx.chan.send(None)
# await stream._ctx.chan.transport.stream.send_eof()
await stream._ctx.chan.transport.stream.aclose()
ipc_break_sent = True
await stream._ctx.chan.send(None)
# it actually breaks right here in the
# mp_spawn/forkserver backends and thus the
# zombie reaper never even kicks in?
try:
# mp_spawn/forkserver backends and thus the zombie
# reaper never even kicks in?
print(f'parent sending {i}')
await stream.send(i)
except ContextCancelled as ctxc:
print(
'parent received ctxc on `stream.send()`\n'
f'{ctxc}\n'
)
assert 'root' in ctxc.canceller
assert sub_name in ctx.canceller
# TODO: is this needed or no?
raise
with trio.move_on_after(2) as cs:
except trio.ClosedResourceError:
# NOTE: don't send if we already broke the
# connection to avoid raising a closed-error
# such that we drop through to the ctl-c
# mashing by user.
await trio.sleep(0.01)
# timeout: int = 1
# with trio.move_on_after(timeout) as cs:
async with stuff_hangin_ctlc() as timeout:
print(
f'PARENT `stream.receive()` with timeout={timeout}\n'
)
# NOTE: in the parent side IPC failure case this
# will raise an ``EndOfChannel`` after the child
# is killed and sends a stop msg back to it's
# caller/this-parent.
try:
rx = await stream.receive()
print(
"I'm a happy PARENT user and echoed to me is\n"
f'{rx}\n'
)
except trio.EndOfChannel:
rx_eoc: bool = True
print('MsgStream got EoC for PARENT')
raise
print(f"I'm a happy user and echoed to me is {rx}")
if cs.cancelled_caught:
# pretend to be a user seeing no streaming action
# thinking it's a hang, and then hitting ctl-c..
print("YOO i'm a user anddd thingz hangin..")
print(
'Streaming finished and we got Eoc.\n'
'Canceling `.open_context()` in root with\n'
'CTlR-C..'
"YOO i'm mad send side dun but thingz hangin..\n"
'MASHING CTlR-C Ctl-c..'
)
if rx_eoc:
assert stream.closed
try:
await stream.send(i)
pytest.fail('stream not closed?')
except (
trio.ClosedResourceError,
trio.EndOfChannel,
) as send_err:
if rx_eoc:
assert send_err is stream._eoc
else:
assert send_err is stream._closed
raise KeyboardInterrupt
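
The example above exercises ``tractor``'s context/streaming protocol under
deliberately broken IPC. For orientation, here is a minimal sketch of that
protocol in isolation, using only calls already visible in this diff
(``@tractor.context``, ``ctx.started()``, ``ctx.open_stream()``,
``Portal.open_context()``); the handler and actor names are illustrative::

    import trio
    import tractor


    @tractor.context
    async def echo_forever(
        ctx: tractor.Context,
    ) -> None:
        # handshake: unblocks the parent's `open_context()` entry
        await ctx.started()
        async with ctx.open_stream() as stream:
            # echo every msg back until the stream is closed/cancelled
            async for msg in stream:
                await stream.send(msg)


    async def main():
        async with tractor.open_nursery() as an:
            portal = await an.start_actor(
                'echoer',
                enable_modules=[__name__],
            )
            async with portal.open_context(
                echo_forever,
            ) as (ctx, first):
                async with ctx.open_stream() as stream:
                    await stream.send('ping')
                    print(await stream.receive())

                # request cancellation of the remote task before
                # leaving the context block (same pattern as above)
                await ctx.cancel()

            await portal.cancel_actor()


    if __name__ == '__main__':
        trio.run(main)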

View File

@ -1,122 +0,0 @@
import asyncio
import trio
import tractor
from tractor import (
to_asyncio,
Portal,
)
async def aio_sleep_forever():
await asyncio.sleep(float('inf'))
async def bp_then_error(
to_trio: trio.MemorySendChannel,
from_trio: asyncio.Queue,
raise_after_bp: bool = True,
) -> None:
# sync with ``trio``-side (caller) task
to_trio.send_nowait('start')
# NOTE: what happens here inside the hook needs some refinement..
# => seems like it's still `._debug._set_trace()` but
# we set `Lock.local_task_in_debug = 'sync'`, we probably want
# some further, at least, meta-data about the task/actoq in debug
# in terms of making it clear it's asyncio mucking about.
breakpoint()
# short checkpoint / delay
await asyncio.sleep(0.5)
if raise_after_bp:
raise ValueError('blah')
# TODO: test case with this so that it gets cancelled?
else:
# XXX NOTE: this is required in order to get the SIGINT-ignored
# hang case documented in the module script section!
await aio_sleep_forever()
@tractor.context
async def trio_ctx(
ctx: tractor.Context,
bp_before_started: bool = True,
):
# this will block until the ``asyncio`` task sends a "first"
# message, see first line in above func.
async with (
to_asyncio.open_channel_from(
bp_then_error,
raise_after_bp=not bp_before_started,
) as (first, chan),
trio.open_nursery() as n,
):
assert first == 'start'
if bp_before_started:
await tractor.breakpoint()
await ctx.started(first)
n.start_soon(
to_asyncio.run_task,
aio_sleep_forever,
)
await trio.sleep_forever()
async def main(
bps_all_over: bool = True,
) -> None:
async with tractor.open_nursery(
debug_mode=True,
maybe_enable_greenback=True,
# loglevel='devx',
) as n:
ptl: Portal = await n.start_actor(
'aio_daemon',
enable_modules=[__name__],
infect_asyncio=True,
debug_mode=True,
# loglevel='cancel',
)
async with ptl.open_context(
trio_ctx,
bp_before_started=bps_all_over,
) as (ctx, first):
assert first == 'start'
if bps_all_over:
await tractor.breakpoint()
# await trio.sleep_forever()
await ctx.cancel()
assert 0
# TODO: case where we cancel from trio-side while asyncio task
# has debugger lock?
# await ptl.cancel_actor()
if __name__ == '__main__':
# works fine B)
trio.run(main)
# will hang and ignores SIGINT !!
# NOTE: you'll need to send a SIGQUIT (via ctl-\) to kill it
# manually..
# trio.run(main, True)

View File

@ -1,9 +0,0 @@
'''
Reproduce a bug where enabling debug mode for a sub-actor actually causes
a hang on teardown...
'''
import asyncio
import trio
import tractor

View File

@ -4,15 +4,9 @@ import trio
async def breakpoint_forever():
"Indefinitely re-enter debugger in child actor."
try:
while True:
yield 'yo'
await tractor.breakpoint()
except BaseException:
tractor.log.get_console_log().exception(
'Cancelled while trying to enter pause point!'
)
raise
async def name_error():
@ -25,8 +19,7 @@ async def main():
"""
async with tractor.open_nursery(
debug_mode=True,
# loglevel='cancel',
# loglevel='devx',
loglevel='error',
) as n:
p0 = await n.start_actor('bp_forever', enable_modules=[__name__])
@ -39,7 +32,7 @@ async def main():
try:
await p1.run(name_error)
except tractor.RemoteActorError as rae:
assert rae.boxed_type is NameError
assert rae.type is NameError
async for i in stream:

View File

@ -45,7 +45,6 @@ async def spawn_until(depth=0):
)
# TODO: notes on the new boxed-relayed errors through proxy actors
async def main():
"""The main ``tractor`` routine.

View File

@ -38,7 +38,6 @@ async def main():
"""
async with tractor.open_nursery(
debug_mode=True,
# loglevel='runtime',
) as n:
# Spawn both actors, don't bother with collecting results

View File

@ -23,6 +23,5 @@ async def main():
n.start_soon(debug_actor.run, die)
n.start_soon(crash_boi.run, die)
if __name__ == '__main__':
trio.run(main)

View File

@ -1,56 +0,0 @@
import trio
import tractor
@tractor.context
async def name_error(
ctx: tractor.Context,
):
'''
Raise a `NameError`, catch it and enter `.post_mortem()`, then
expect the `._rpc._invoke()` crash handler to also engage.
'''
try:
getattr(doggypants) # noqa (on purpose)
except NameError:
await tractor.post_mortem()
raise
async def main():
'''
Test 3 `PdbREPL` entries:
- one in the child due to manual `.post_mortem()`,
- another in the child due to runtime RPC crash handling.
- final one here in parent from the RAE.
'''
# XXX NOTE: ideally the REPL arrives at this frame in the parent
# ONE UP FROM the inner ctx block below!
async with tractor.open_nursery(
debug_mode=True,
# loglevel='cancel',
) as an:
p: tractor.Portal = await an.start_actor(
'child',
enable_modules=[__name__],
)
# XXX should raise `RemoteActorError[NameError]`
# AND be the active frame when REPL enters!
try:
async with p.open_context(name_error) as (ctx, first):
assert first
except tractor.RemoteActorError as rae:
assert rae.boxed_type is NameError
# manually handle in root's parent task
await tractor.post_mortem()
raise
else:
raise RuntimeError('IPC ctx should have remote errored!?')
if __name__ == '__main__':
trio.run(main)

View File

@ -2,13 +2,10 @@ import trio
import tractor
async def main(
registry_addrs: tuple[str, int]|None = None
):
async def main():
async with tractor.open_root_actor(
debug_mode=True,
# loglevel='runtime',
):
while True:
await tractor.breakpoint()

View File

@ -1,81 +0,0 @@
'''
Verify we can dump a `stackscope` tree on a hang.
'''
import os
import signal
import trio
import tractor
@tractor.context
async def start_n_shield_hang(
ctx: tractor.Context,
):
# actor: tractor.Actor = tractor.current_actor()
# sync to parent-side task
await ctx.started(os.getpid())
print('Entering shield sleep..')
with trio.CancelScope(shield=True):
await trio.sleep_forever() # in subactor
# XXX NOTE ^^^ since this shields, we expect
# the zombie reaper (aka T800) to engage on
# SIGINT from the user and eventually hard-kill
# this subprocess!
async def main(
from_test: bool = False,
) -> None:
async with (
tractor.open_nursery(
debug_mode=True,
enable_stack_on_sig=True,
# maybe_enable_greenback=False,
loglevel='devx',
) as an,
):
ptl: tractor.Portal = await an.start_actor(
'hanger',
enable_modules=[__name__],
debug_mode=True,
)
async with ptl.open_context(
start_n_shield_hang,
) as (ctx, cpid):
_, proc, _ = an._children[ptl.chan.uid]
assert cpid == proc.pid
print(
'Yo my child hanging..?\n'
'Sending SIGUSR1 to see a tree-trace!\n'
)
# XXX simulate the wrapping test's "user actions"
# (i.e. if a human didn't run this manually but wants to
# know what they should do to reproduce test behaviour)
if from_test:
os.kill(
cpid,
signal.SIGUSR1,
)
# simulate user cancelling program
await trio.sleep(0.5)
os.kill(
os.getpid(),
signal.SIGINT,
)
else:
# actually let user send the ctl-c
await trio.sleep_forever() # in root
if __name__ == '__main__':
trio.run(main)

View File

@ -1,88 +0,0 @@
import trio
import tractor
async def cancellable_pause_loop(
task_status: trio.TaskStatus[trio.CancelScope] = trio.TASK_STATUS_IGNORED
):
with trio.CancelScope() as cs:
task_status.started(cs)
for _ in range(3):
try:
# ON first entry, there is no level triggered
# cancellation yet, so this cp does a parent task
# ctx-switch so that this scope raises for the NEXT
# checkpoint we hit.
await trio.lowlevel.checkpoint()
await tractor.pause()
cs.cancel()
# parent should have called `cs.cancel()` by now
await trio.lowlevel.checkpoint()
except trio.Cancelled:
print('INSIDE SHIELDED PAUSE')
await tractor.pause(shield=True)
else:
# should raise it again, bubbling up to parent
print('BUBBLING trio.Cancelled to parent task-nursery')
await trio.lowlevel.checkpoint()
async def pm_on_cancelled():
async with trio.open_nursery() as tn:
tn.cancel_scope.cancel()
try:
await trio.sleep_forever()
except trio.Cancelled:
# should also raise `Cancelled` since
# we didn't pass `shield=True`.
try:
await tractor.post_mortem(hide_tb=False)
except trio.Cancelled as taskc:
# should enter just fine, in fact it should
# be debugging the internals of the previous
# sin-shield call above Bo
await tractor.post_mortem(
hide_tb=False,
shield=True,
)
raise taskc
else:
raise RuntimeError('Dint cancel as expected!?')
async def cancelled_before_pause(
):
'''
Verify that using a shielded pause works despite surrounding
cancellation called state in the calling task.
'''
async with trio.open_nursery() as tn:
cs: trio.CancelScope = await tn.start(cancellable_pause_loop)
await trio.sleep(0.1)
assert cs.cancelled_caught
await pm_on_cancelled()
async def main():
async with tractor.open_nursery(
debug_mode=True,
) as n:
portal: tractor.Portal = await n.run_in_actor(
cancelled_before_pause,
)
await portal.result()
# ensure the same works in the root actor!
await pm_on_cancelled()
if __name__ == '__main__':
trio.run(main)

View File

@ -3,20 +3,17 @@ import tractor
async def breakpoint_forever():
'''
Indefinitely re-enter debugger in child actor.
'''
"""Indefinitely re-enter debugger in child actor.
"""
while True:
await trio.sleep(0.1)
await tractor.pause()
await tractor.breakpoint()
async def main():
async with tractor.open_nursery(
debug_mode=True,
loglevel='cancel',
) as n:
portal = await n.run_in_actor(

View File

@ -3,26 +3,16 @@ import tractor
async def name_error():
getattr(doggypants) # noqa (on purpose)
getattr(doggypants)
async def main():
async with tractor.open_nursery(
debug_mode=True,
# loglevel='transport',
) as an:
) as n:
# TODO: ideally the REPL arrives at this frame in the parent,
# ABOVE the @api_frame of `Portal.run_in_actor()` (which
# should eventually not even be a portal method ... XD)
# await tractor.pause()
p: tractor.Portal = await an.run_in_actor(name_error)
# with this style, should raise on this line
await p.result()
# with this alt style should raise at `open_nusery()`
# return await p.result()
portal = await n.run_in_actor(name_error)
await portal.result()
if __name__ == '__main__':

View File

@ -1,169 +0,0 @@
from functools import partial
import time
import trio
import tractor
# TODO: only import these when not running from test harness?
# can we detect `pexpect` usage maybe?
# from tractor.devx._debug import (
# get_lock,
# get_debug_req,
# )
def sync_pause(
use_builtin: bool = False,
error: bool = False,
hide_tb: bool = True,
pre_sleep: float|None = None,
):
if pre_sleep:
time.sleep(pre_sleep)
if use_builtin:
breakpoint(hide_tb=hide_tb)
else:
# TODO: maybe for testing some kind of cm style interface
# where the `._set_trace()` call doesn't happen until block
# exit?
# assert get_lock().ctx_in_debug is None
# assert get_debug_req().repl is None
tractor.pause_from_sync()
# assert get_debug_req().repl is None
if error:
raise RuntimeError('yoyo sync code error')
@tractor.context
async def start_n_sync_pause(
ctx: tractor.Context,
):
actor: tractor.Actor = tractor.current_actor()
# sync to parent-side task
await ctx.started()
print(f'Entering `sync_pause()` in subactor: {actor.uid}\n')
sync_pause()
print(f'Exited `sync_pause()` in subactor: {actor.uid}\n')
async def main() -> None:
async with (
tractor.open_nursery(
debug_mode=True,
maybe_enable_greenback=True,
enable_stack_on_sig=True,
# loglevel='warning',
# loglevel='devx',
) as an,
trio.open_nursery() as tn,
):
# just from root task
sync_pause()
p: tractor.Portal = await an.start_actor(
'subactor',
enable_modules=[__name__],
# infect_asyncio=True,
debug_mode=True,
)
# TODO: 3 sub-actor usage cases:
# -[x] via a `.open_context()`
# -[ ] via a `.run_in_actor()` call
# -[ ] via a `.run()`
# -[ ] via a `.to_thread.run_sync()` in subactor
async with p.open_context(
start_n_sync_pause,
) as (ctx, first):
assert first is None
# TODO: handle bg-thread-in-root-actor special cases!
#
# there are a couple very subtle situations possible here
# and they are likely to become more important as cpython
# moves to support no-GIL.
#
# Cases:
# 1. root-actor bg-threads that call `.pause_from_sync()`
# whilst an in-tree subactor also is using ` .pause()`.
# |_ since the root-actor bg thread can not
# `Lock._debug_lock.acquire_nowait()` without running
# a `trio.Task`, AND because the
# `PdbREPL.set_continue()` is called from that
# bg-thread, we can not `._debug_lock.release()`
# either!
# |_ this results in no actor-tree `Lock` being used
# on behalf of the bg-thread and thus the subactor's
# task and the thread trying to to use stdio
# simultaneously which results in the classic TTY
# clobbering!
#
# 2. mutiple sync-bg-threads that call
# `.pause_from_sync()` where one is scheduled via
# `Nursery.start_soon(to_thread.run_sync)` in a bg
# task.
#
# Due to the GIL, the threads never truly try to step
# through the REPL simultaneously, BUT their `logging`
# and traceback outputs are interleaved since the GIL
# (seemingly) on every REPL-input from the user
# switches threads..
#
# Soo, the context switching semantics of the GIL
# result in a very confusing and messy interaction UX
# since eval and (tb) print output is NOT synced to
# each REPL-cycle (like we normally make it via
# a `.set_continue()` callback triggering the
# `Lock.release()`). Ideally we can solve this
# usability issue NOW because this will of course be
# that much more important when eventually there is no
# GIL!
# XXX should cause double REPL entry and thus TTY
# clobbering due to case 1. above!
tn.start_soon(
partial(
trio.to_thread.run_sync,
partial(
sync_pause,
use_builtin=False,
# pre_sleep=0.5,
),
abandon_on_cancel=True,
thread_name='start_soon_root_bg_thread',
)
)
await tractor.pause()
# XXX should cause double REPL entry and thus TTY
# clobbering due to case 2. above!
await trio.to_thread.run_sync(
partial(
sync_pause,
# NOTE this already works fine since in the new
# thread the `breakpoint()` built-in is never
# overloaded, thus NO locking is used, HOWEVER
# the case 2. from above still exists!
use_builtin=True,
),
# TODO: with this `False` we can hang!??!
# abandon_on_cancel=False,
abandon_on_cancel=True,
thread_name='inline_root_bg_thread',
)
await ctx.cancel()
# TODO: case where we cancel from trio-side while asyncio task
# has debugger lock?
await p.cancel_actor()
if __name__ == '__main__':
trio.run(main)

View File

@ -1,11 +1,6 @@
import time
import trio
import tractor
from tractor import (
ActorNursery,
MsgStream,
Portal,
)
# this is the first 2 actors, streamer_1 and streamer_2
@ -17,18 +12,14 @@ async def stream_data(seed):
# this is the third actor; the aggregator
async def aggregate(seed):
'''
Ensure that the two streams we receive match but only stream
"""Ensure that the two streams we receive match but only stream
a single set of values to the parent.
'''
an: ActorNursery
async with tractor.open_nursery() as an:
portals: list[Portal] = []
"""
async with tractor.open_nursery() as nursery:
portals = []
for i in range(1, 3):
# fork/spawn call
portal = await an.start_actor(
# fork point
portal = await nursery.start_actor(
name=f'streamer_{i}',
enable_modules=[__name__],
)
@ -52,11 +43,7 @@ async def aggregate(seed):
async with trio.open_nursery() as n:
for portal in portals:
n.start_soon(
push_to_chan,
portal,
send_chan.clone(),
)
n.start_soon(push_to_chan, portal, send_chan.clone())
# close this local task's reference to send side
await send_chan.aclose()
@ -73,36 +60,26 @@ async def aggregate(seed):
print("FINISHED ITERATING in aggregator")
await an.cancel()
await nursery.cancel()
print("WAITING on `ActorNursery` to finish")
print("AGGREGATOR COMPLETE!")
async def main() -> list[int]:
'''
This is the "root" actor's main task's entrypoint.
By default (and if not otherwise specified) that root process
also acts as a "registry actor" / "registrar" on the localhost
for the purposes of multi-actor "service discovery".
'''
# yes, a nursery which spawns `trio`-"actors" B)
an: ActorNursery
# this is the main actor and *arbiter*
async def main():
# a nursery which spawns "actors"
async with tractor.open_nursery(
loglevel='cancel',
debug_mode=True,
) as an:
arbiter_addr=('127.0.0.1', 1616)
) as nursery:
seed = int(1e3)
pre_start = time.time()
portal: Portal = await an.start_actor(
portal = await nursery.start_actor(
name='aggregator',
enable_modules=[__name__],
)
stream: MsgStream
async with portal.open_stream_from(
aggregate,
seed=seed,
@ -111,12 +88,11 @@ async def main() -> list[int]:
start = time.time()
# the portal call returns exactly what you'd expect
# as if the remote "aggregate" function was called locally
result_stream: list[int] = []
result_stream = []
async for value in stream:
result_stream.append(value)
cancelled: bool = await portal.cancel_actor()
assert cancelled
await portal.cancel_actor()
print(f"STREAM TIME = {time.time() - start}")
print(f"STREAM + SPAWN TIME = {time.time() - pre_start}")

View File

@ -8,10 +8,7 @@ This uses no extra threads, fancy semaphores or futures; all we need
is ``tractor``'s channels.
"""
from contextlib import (
asynccontextmanager as acm,
aclosing,
)
from contextlib import asynccontextmanager
from typing import Callable
import itertools
import math
@ -19,6 +16,7 @@ import time
import tractor
import trio
from async_generator import aclosing
PRIMES = [
@ -46,7 +44,7 @@ async def is_prime(n):
return True
@acm
@asynccontextmanager
async def worker_pool(workers=4):
"""Though it's a trivial special case for ``tractor``, the well
known "worker pool" seems to be the defacto "but, I want this

View File

@ -13,7 +13,7 @@ async def simple_rpc(
'''
# signal to parent that we're up much like
# ``trio.TaskStatus.started()``
# ``trio_typing.TaskStatus.started()``
await ctx.started(data + 1)
async with ctx.open_stream() as stream:

View File

@ -9,7 +9,7 @@ async def main(service_name):
async with tractor.open_nursery() as an:
await an.start_actor(service_name)
async with tractor.get_registry('127.0.0.1', 1616) as portal:
async with tractor.get_arbiter('127.0.0.1', 1616) as portal:
print(f"Arbiter is listening on {portal.channel}")
async with tractor.wait_for_actor(service_name) as sockaddr:

View File

@ -1,72 +1,3 @@
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
# ------ - ------
[tool.poetry]
name = "tractor"
version = "0.1.0a6dev0"
description='structured concurrent `trio`-"actors"'
authors = ["Tyler Goodlet <goodboy_foss@protonmail.com>"]
license = "AGPlv3"
readme = "docs/README.rst"
# TODO: do we need this xontrib loader at all given pep420
# and xonsh's xontrib global-autoload-via-setuptools?
# https://xon.sh/tutorial_xontrib.html#authoring-xontribs
packages = [
{include = 'tractor' },
# {include = 'tractor.experimental' },
# {include = 'tractor.trionics' },
# {include = 'tractor.msg' },
# {include = 'tractor.devx' },
]
# ------ - ------
[tool.poetry.dependencies]
python = "^3.11"
# trio runtime related
# proper range spec:
# https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/#id5
trio='^0.24'
tricycle = "^0.4.1"
trio-typing = "^0.10.0"
msgspec='^0.18.5' # interchange
wrapt = "^1.16.0" # decorators
colorlog = "^6.8.2" # logging
# built-in multi-actor `pdb` REPL
pdbp = "^1.5.0"
# TODO: distributed transport using
# linux kernel networking
# 'pyroute2
# ------ - ------
[tool.poetry.group.dev]
optional = false
[tool.poetry.group.dev.dependencies]
# testing
pytest = "^8.2.0"
pexpect = "^4.9.0"
# .devx tooling
greenback = "^1.2.1"
stackscope = "^0.2.2"
# (light) xonsh usage/integration
xontrib-vox = "^0.0.1"
prompt-toolkit = "^3.0.43"
xonsh-vox-tabcomplete = "^0.5"
# ------ - ------
[tool.towncrier]
package = "tractor"
filename = "NEWS.rst"
@ -95,47 +26,3 @@ all_bullets = true
directory = "trivial"
name = "Trivial/Internal Changes"
showcontent = true
# ------ - ------
[tool.pytest.ini_options]
minversion = '6.0'
testpaths = [
'tests'
]
addopts = [
# TODO: figure out why this isn't working..
'--rootdir=./tests',
'--import-mode=importlib',
# don't show frickin captured logs AGAIN in the report..
'--show-capture=no',
]
log_cli = false
# TODO: maybe some of these layout choices?
# https://docs.pytest.org/en/8.0.x/explanation/goodpractices.html#choosing-a-test-layout-import-rules
# pythonpath = "src"
# ------ - ------
[project]
keywords = [
'trio',
'async',
'concurrency',
'structured concurrency',
'actor model',
'distributed',
'multiprocessing'
]
classifiers = [
"Development Status :: 3 - Alpha",
"Operating System :: POSIX :: Linux",
"Framework :: Trio",
"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.11",
"Topic :: System :: Distributed Computing",
]

View File

@ -6,4 +6,3 @@ mypy
trio_typing
pexpect
towncrier
numpy

View File

@ -26,7 +26,7 @@ with open('docs/README.rst', encoding='utf-8') as f:
setup(
name="tractor",
version='0.1.0a6dev0', # alpha zone
description='structured concurrent `trio`-"actors"',
description='structured concurrrent `trio`-"actors"',
long_description=readme,
license='AGPLv3',
author='Tyler Goodlet',
@ -36,44 +36,41 @@ setup(
platforms=['linux', 'windows'],
packages=[
'tractor',
'tractor.experimental', # wacky ideas
'tractor.trionics', # trio extensions
'tractor.msg', # lowlevel data types
'tractor.devx', # "dev-experience"
'tractor.experimental',
'tractor.trionics',
],
install_requires=[
# trio related
# proper range spec:
# https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/#id5
'trio >= 0.24',
# 'async_generator', # in stdlib mostly!
# 'trio_typing', # trio==0.23.0 has type hints!
# 'exceptiongroup', # in stdlib as of 3.11!
'trio >= 0.22',
'async_generator',
'trio_typing',
'exceptiongroup',
# tooling
'stackscope',
'tricycle',
'trio_typing',
'colorlog',
'wrapt',
# IPC serialization
'msgspec>=0.18.5',
'msgspec',
# debug mode REPL
'pdbp',
# TODO: distributed transport using
# linux kernel networking
# 'pyroute2',
# pip ref docs on these specs:
# https://pip.pypa.io/en/stable/reference/requirement-specifiers/#examples
# and pep:
# https://peps.python.org/pep-0440/#version-specifiers
# windows deps workaround for ``pdbpp``
# https://github.com/pdbpp/pdbpp/issues/498
# https://github.com/pdbpp/fancycompleter/issues/37
'pyreadline3 ; platform_system == "Windows"',
],
tests_require=['pytest'],
python_requires=">=3.10",

View File

@ -7,19 +7,94 @@ import os
import random
import signal
import platform
import pathlib
import time
import inspect
from functools import partial, wraps
import pytest
import trio
import tractor
from tractor._testing import (
examples_dir as examples_dir,
tractor_test as tractor_test,
expect_ctxc as expect_ctxc,
)
# TODO: include wtv plugin(s) we build in `._testing.pytest`?
pytest_plugins = ['pytester']
def tractor_test(fn):
"""
Use:
@tractor_test
async def test_whatever():
await ...
If fixtures:
- ``arb_addr`` (a socket addr tuple where arbiter is listening)
- ``loglevel`` (logging level passed to tractor internals)
- ``start_method`` (subprocess spawning backend)
are defined in the `pytest` fixture space they will be automatically
injected to tests declaring these funcargs.
"""
@wraps(fn)
def wrapper(
*args,
loglevel=None,
arb_addr=None,
start_method=None,
**kwargs
):
# __tracebackhide__ = True
if 'arb_addr' in inspect.signature(fn).parameters:
# injects test suite fixture value to test as well
# as `run()`
kwargs['arb_addr'] = arb_addr
if 'loglevel' in inspect.signature(fn).parameters:
# allows test suites to define a 'loglevel' fixture
# that activates the internal logging
kwargs['loglevel'] = loglevel
if start_method is None:
if platform.system() == "Windows":
start_method = 'trio'
if 'start_method' in inspect.signature(fn).parameters:
# set of subprocess spawning backends
kwargs['start_method'] = start_method
if kwargs:
# use explicit root actor start
async def _main():
async with tractor.open_root_actor(
# **kwargs,
arbiter_addr=arb_addr,
loglevel=loglevel,
start_method=start_method,
# TODO: only enable when pytest is passed --pdb
# debug_mode=True,
):
await fn(*args, **kwargs)
main = _main
else:
# use implicit root actor start
main = partial(fn, *args, **kwargs)
return trio.run(main)
return wrapper
_arb_addr = '127.0.0.1', random.randint(1000, 9999)
# Sending signal.SIGINT on subprocess fails on windows. Use CTRL_* alternatives
if platform.system() == 'Windows':
_KILL_SIGNAL = signal.CTRL_BREAK_EVENT
@ -39,45 +114,41 @@ no_windows = pytest.mark.skipif(
)
def repodir() -> pathlib.Path:
'''
Return the abspath to the repo directory.
'''
# 2 parents up to step up through tests/<repo_dir>
return pathlib.Path(__file__).parent.parent.absolute()
def examples_dir() -> pathlib.Path:
'''
Return the abspath to the examples directory as `pathlib.Path`.
'''
return repodir() / 'examples'
def pytest_addoption(parser):
parser.addoption(
"--ll",
action="store",
dest='loglevel',
"--ll", action="store", dest='loglevel',
default='ERROR', help="logging level to set when testing"
)
parser.addoption(
"--spawn-backend",
action="store",
dest='spawn_backend',
"--spawn-backend", action="store", dest='spawn_backend',
default='trio',
help="Processing spawning backend to use for test run",
)
parser.addoption(
"--tpdb", "--debug-mode",
action="store_true",
dest='tractor_debug_mode',
# default=False,
help=(
'Enable a flag that can be used by tests to to set the '
'`debug_mode: bool` for engaging the internal '
'multi-proc debugger sys.'
),
)
def pytest_configure(config):
backend = config.option.spawn_backend
tractor._spawn.try_set_start_method(backend)
@pytest.fixture(scope='session')
def debug_mode(request):
return request.config.option.tractor_debug_mode
@pytest.fixture(scope='session', autouse=True)
def loglevel(request):
orig = tractor.log._default_loglevel
@ -97,35 +168,14 @@ _ci_env: bool = os.environ.get('CI', False)
@pytest.fixture(scope='session')
def ci_env() -> bool:
'''
Detect CI envoirment.
'''
"""Detect CI envoirment.
"""
return _ci_env
# TODO: also move this to `._testing` for now?
# -[ ] possibly generalize and re-use for multi-tree spawning
# along with the new stuff for multi-addrs in distribute_dis
# branch?
#
# choose randomly at import time
_reg_addr: tuple[str, int] = (
'127.0.0.1',
random.randint(1000, 9999),
)
@pytest.fixture(scope='session')
def reg_addr() -> tuple[str, int]:
# globally override the runtime to the per-test-session-dynamic
# addr so that all tests never conflict with any other actor
# tree using the default.
from tractor import _root
_root._default_lo_addrs = [_reg_addr]
return _reg_addr
def arb_addr():
return _arb_addr
def pytest_generate_tests(metafunc):
@ -162,40 +212,34 @@ def sig_prog(proc, sig):
assert ret
# TODO: factor into @cm and move to `._testing`?
@pytest.fixture
def daemon(
loglevel: str,
testdir,
reg_addr: tuple[str, int],
arb_addr: tuple[str, int],
):
'''
Run a daemon root actor as a separate actor-process tree and
"remote registrar" for discovery-protocol related tests.
Run a daemon actor as a "remote arbiter".
'''
if loglevel in ('trace', 'debug'):
# XXX: too much logging will lock up the subproc (smh)
loglevel: str = 'info'
# too much logging will lock up the subproc (smh)
loglevel = 'info'
code: str = (
"import tractor; "
"tractor.run_daemon([], registry_addrs={reg_addrs}, loglevel={ll})"
).format(
reg_addrs=str([reg_addr]),
ll="'{}'".format(loglevel) if loglevel else None,
)
cmd: list[str] = [
sys.executable,
'-c', code,
cmdargs = [
sys.executable, '-c',
"import tractor; tractor.run_daemon([], registry_addr={}, loglevel={})"
.format(
arb_addr,
"'{}'".format(loglevel) if loglevel else None)
]
kwargs = {}
kwargs = dict()
if platform.system() == 'Windows':
# without this, tests hang on windows forever
kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
proc = testdir.popen(
cmd,
cmdargs,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**kwargs,

View File

@ -1,168 +0,0 @@
'''
`tractor.devx.*` tooling sub-pkg test space.
'''
from typing import (
Callable,
)
import pytest
from pexpect.exceptions import (
TIMEOUT,
)
from pexpect.spawnbase import SpawnBase
from tractor._testing import (
mk_cmd,
)
@pytest.fixture
def spawn(
start_method,
testdir: pytest.Testdir,
reg_addr: tuple[str, int],
) -> Callable[[str], None]:
'''
Use the `pexpect` module shipped via `testdir.spawn()` to
run an `./examples/..` script by name.
'''
if start_method != 'trio':
pytest.skip(
'`pexpect` based tests only supported on `trio` backend'
)
def _spawn(
cmd: str,
**mkcmd_kwargs,
):
return testdir.spawn(
cmd=mk_cmd(
cmd,
**mkcmd_kwargs,
),
expect_timeout=3,
)
# such that test-dep can pass input script name.
return _spawn
@pytest.fixture(
params=[False, True],
ids='ctl-c={}'.format,
)
def ctlc(
request,
ci_env: bool,
) -> bool:
use_ctlc = request.param
node = request.node
markers = node.own_markers
for mark in markers:
if mark.name == 'has_nested_actors':
pytest.skip(
f'Test {node} has nested actors and fails with Ctrl-C.\n'
f'The test can sometimes run fine locally but until'
' we solve' 'this issue this CI test will be xfail:\n'
'https://github.com/goodboy/tractor/issues/320'
)
if use_ctlc:
# XXX: disable pygments highlighting for auto-tests
# since some envs (like actions CI) will struggle
# the the added color-char encoding..
from tractor.devx._debug import TractorConfig
TractorConfig.use_pygements = False
yield use_ctlc
def expect(
child,
# normally a `pdb` prompt by default
patt: str,
**kwargs,
) -> None:
'''
Expect wrapper that prints last seen console
data before failing.
'''
try:
child.expect(
patt,
**kwargs,
)
except TIMEOUT:
before = str(child.before.decode())
print(before)
raise
def in_prompt_msg(
child: SpawnBase,
parts: list[str],
pause_on_false: bool = False,
err_on_false: bool = False,
print_prompt_on_false: bool = True,
) -> bool:
'''
Predicate check if (the prompt's) std-streams output has all
`str`-parts in it.
Can be used in test asserts for bulk matching expected
log/REPL output for a given `pdb` interact point.
'''
__tracebackhide__: bool = False
before: str = str(child.before.decode())
for part in parts:
if part not in before:
if pause_on_false:
import pdbp
pdbp.set_trace()
if print_prompt_on_false:
print(before)
if err_on_false:
raise ValueError(
f'Could not find pattern in `before` output?\n'
f'part: {part!r}\n'
)
return False
return True
# TODO: todo support terminal color-chars stripping so we can match
# against call stack frame output from the the 'll' command the like!
# -[ ] SO answer for stipping ANSI codes: https://stackoverflow.com/a/14693789
def assert_before(
child: SpawnBase,
patts: list[str],
**kwargs,
) -> None:
__tracebackhide__: bool = False
assert in_prompt_msg(
child=child,
parts=patts,
# since this is an "assert" helper ;)
err_on_false=True,
**kwargs
)

View File

@ -1,120 +0,0 @@
'''
That "native" runtime-hackin toolset better be dang useful!
Verify the funtion of a variety of "developer-experience" tools we
offer from the `.devx` sub-pkg:
- use of the lovely `stackscope` for dumping actor `trio`-task trees
during operation and hangs.
TODO:
- demonstration of `CallerInfo` call stack frame filtering such that
for logging and REPL purposes a user sees exactly the layers needed
when debugging a problem inside the stack vs. in their app.
'''
import os
import signal
from .conftest import (
expect,
assert_before,
# in_prompt_msg,
)
def test_shield_pause(
spawn,
):
'''
Verify the `tractor.pause()/.post_mortem()` API works inside an
already cancelled `trio.CancelScope` and that you can step to the
next checkpoint wherein the cancelled will get raised.
'''
child = spawn(
'shield_hang_in_sub'
)
expect(
child,
'Yo my child hanging..?',
)
assert_before(
child,
[
'Entering shield sleep..',
'Enabling trace-trees on `SIGUSR1` since `stackscope` is installed @',
]
)
print(
'Sending SIGUSR1 to see a tree-trace!',
)
os.kill(
child.pid,
signal.SIGUSR1,
)
expect(
child,
# end-of-tree delimiter
"------ \('root', ",
)
assert_before(
child,
[
'Trying to dump `stackscope` tree..',
'Dumping `stackscope` tree for actor',
"('root'", # uid line
# parent block point (non-shielded)
'await trio.sleep_forever() # in root',
]
)
# expect(
# child,
# # relay to the sub should be reported
# 'Relaying `SIGUSR1`[10] to sub-actor',
# )
expect(
child,
# end-of-tree delimiter
"------ \('hanger', ",
)
assert_before(
child,
[
# relay to the sub should be reported
'Relaying `SIGUSR1`[10] to sub-actor',
"('hanger'", # uid line
# hanger LOC where it's shield-halted
'await trio.sleep_forever() # in subactor',
]
)
# breakpoint()
# simulate the user sending a ctl-c to the hanging program.
# this should result in the terminator kicking in since
# the sub is shield blocking and can't respond to SIGINT.
os.kill(
child.pid,
signal.SIGINT,
)
expect(
child,
'Shutting down actor runtime',
timeout=6,
)
assert_before(
child,
[
'raise KeyboardInterrupt',
# 'Shutting down actor runtime',
'#T-800 deployed to collect zombie B0',
"'--uid', \"('hanger',",
]
)

View File

@ -3,30 +3,22 @@ Sketchy network blackoutz, ugly byzantine gens, puedes eschuchar la
cancelacion?..
'''
import itertools
from functools import partial
from types import ModuleType
import pytest
from _pytest.pathlib import import_path
import trio
import tractor
from tractor._testing import (
from conftest import (
examples_dir,
break_ipc,
)
@pytest.mark.parametrize(
'pre_aclose_msgstream',
[
False,
True,
],
ids=[
'no_msgstream_aclose',
'pre_aclose_msgstream',
],
'debug_mode',
[False, True],
ids=['no_debug_mode', 'debug_mode'],
)
@pytest.mark.parametrize(
'ipc_break',
@ -71,10 +63,8 @@ from tractor._testing import (
)
def test_ipc_channel_break_during_stream(
debug_mode: bool,
loglevel: str,
spawn_backend: str,
ipc_break: dict | None,
pre_aclose_msgstream: bool,
):
'''
Ensure we can have an IPC channel break its connection during
@ -91,152 +81,72 @@ def test_ipc_channel_break_during_stream(
# non-`trio` spawners should never hit the hang condition that
# requires the user to do ctl-c to cancel the actor tree.
# expect_final_exc = trio.ClosedResourceError
expect_final_exc = tractor.TransportClosed
expect_final_exc = trio.ClosedResourceError
mod: ModuleType = import_path(
examples_dir() / 'advanced_faults'
/ 'ipc_failure_during_stream.py',
mod = import_path(
examples_dir() / 'advanced_faults' / 'ipc_failure_during_stream.py',
root=examples_dir(),
consider_namespace_packages=False,
)
# by def we expect KBI from user after a simulated "hang
# period" wherein the user eventually hits ctl-c to kill the
# root-actor tree.
expect_final_exc: BaseException = KeyboardInterrupt
if (
# only expect EoC if trans is broken on the child side,
ipc_break['break_child_ipc_after'] is not False
# AND we tell the child to call `MsgStream.aclose()`.
and pre_aclose_msgstream
):
# expect_final_exc = trio.EndOfChannel
# ^XXX NOPE! XXX^ since now `.open_stream()` absorbs this
# gracefully!
expect_final_exc = KeyboardInterrupt
# NOTE when ONLY the child breaks or it breaks BEFORE the
# parent we expect the parent to get a closed resource error
# on the next `MsgStream.receive()` and then fail out and
# cancel the child from there.
#
# ONLY CHILD breaks
# when ONLY the child breaks we expect the parent to get a closed
# resource error on the next `MsgStream.receive()` and then fail out
# and cancel the child from there.
if (
# only child breaks
(
ipc_break['break_child_ipc_after']
and
ipc_break['break_parent_ipc_after'] is False
):
# NOTE: we DO NOT expect this any more since
# the child side's channel will be broken silently
# and nothing on the parent side will indicate this!
# expect_final_exc = trio.ClosedResourceError
and ipc_break['break_parent_ipc_after'] is False
)
# NOTE: child will send a 'stop' msg before it breaks
# the transport channel BUT, that will be absorbed by the
# `ctx.open_stream()` block and thus the `.open_context()`
# should hang, after which the test script simulates
# a user sending ctl-c by raising a KBI.
if pre_aclose_msgstream:
expect_final_exc = KeyboardInterrupt
# XXX OLD XXX
# if child calls `MsgStream.aclose()` then expect EoC.
# ^ XXX not any more ^ since eoc is always absorbed
# gracefully and NOT bubbled to the `.open_context()`
# block!
# expect_final_exc = trio.EndOfChannel
# BOTH but, CHILD breaks FIRST
elif (
# both break but, parent breaks first
or (
ipc_break['break_child_ipc_after'] is not False
and (
ipc_break['break_parent_ipc_after']
> ipc_break['break_child_ipc_after']
)
):
if pre_aclose_msgstream:
expect_final_exc = KeyboardInterrupt
)
# NOTE when the parent IPC side dies (even if the child does as well
# but the child fails BEFORE the parent) we always expect the
# IPC layer to raise a closed-resource, NEVER do we expect
# a stop msg since the parent-side ctx apis will error out
# IMMEDIATELY before the child ever sends any 'stop' msg.
#
# ONLY PARENT breaks
):
expect_final_exc = trio.ClosedResourceError
# when the parent IPC side dies (even if the child's does as well
# but the child fails BEFORE the parent) we expect the channel to be
# sent a stop msg from the child at some point which will signal the
# parent that the stream has been terminated.
# NOTE: when the parent breaks "after" the child you get this same
# case as well, the child breaks the IPC channel with a stop msg
# before any closure takes place.
elif (
# only parent breaks
(
ipc_break['break_parent_ipc_after']
and
ipc_break['break_child_ipc_after'] is False
):
# expect_final_exc = trio.ClosedResourceError
expect_final_exc = tractor.TransportClosed
and ipc_break['break_child_ipc_after'] is False
)
# BOTH but, PARENT breaks FIRST
elif (
# both break but, child breaks first
or (
ipc_break['break_parent_ipc_after'] is not False
and (
ipc_break['break_child_ipc_after']
>
ipc_break['break_parent_ipc_after']
> ipc_break['break_parent_ipc_after']
)
)
):
# expect_final_exc = trio.ClosedResourceError
expect_final_exc = tractor.TransportClosed
expect_final_exc = trio.EndOfChannel
with pytest.raises(
expected_exception=(
expect_final_exc,
ExceptionGroup,
),
) as excinfo:
try:
with pytest.raises(expect_final_exc):
trio.run(
partial(
mod.main,
debug_mode=debug_mode,
start_method=spawn_backend,
loglevel=loglevel,
pre_close=pre_aclose_msgstream,
**ipc_break,
)
)
except KeyboardInterrupt as _kbi:
kbi = _kbi
if expect_final_exc is not KeyboardInterrupt:
pytest.fail(
'Rxed unexpected KBI !?\n'
f'{repr(kbi)}'
)
raise
except tractor.TransportClosed as _tc:
tc = _tc
if expect_final_exc is KeyboardInterrupt:
pytest.fail(
'Unexpected transport failure !?\n'
f'{repr(tc)}'
)
cause: Exception = tc.__cause__
assert (
type(cause) is trio.ClosedResourceError
and
cause.args[0] == 'another task closed this fd'
)
raise
# get raw instance from pytest wrapper
value = excinfo.value
if isinstance(value, ExceptionGroup):
value = next(
itertools.dropwhile(
lambda exc: not isinstance(exc, expect_final_exc),
value.exceptions,
)
)
assert value
@tractor.context
@ -245,15 +155,9 @@ async def break_ipc_after_started(
) -> None:
await ctx.started()
async with ctx.open_stream() as stream:
# TODO: make a test which verifies the error
# for this, i.e. raises a `MsgTypeError`
# await ctx.chan.send(None)
await break_ipc(
stream=stream,
pre_close=True,
)
await stream.aclose()
await trio.sleep(0.2)
await ctx.chan.send(None)
print('child broke IPC and terminating')
@ -265,7 +169,6 @@ def test_stream_closed_right_after_ipc_break_and_zombie_lord_engages():
'''
async def main():
with trio.fail_after(3):
async with tractor.open_nursery() as n:
portal = await n.start_actor(
'ipc_breaker',
@ -283,10 +186,7 @@ def test_stream_closed_right_after_ipc_break_and_zombie_lord_engages():
print('parent waiting on context')
print(
'parent exited context\n'
'parent raising KBI..\n'
)
print('parent exited context')
raise KeyboardInterrupt
with pytest.raises(KeyboardInterrupt):

View File

@ -6,7 +6,6 @@ from collections import Counter
import itertools
import platform
import pytest
import trio
import tractor
@ -144,16 +143,8 @@ def test_dynamic_pub_sub():
try:
trio.run(main)
except (
trio.TooSlowError,
ExceptionGroup,
) as err:
if isinstance(err, ExceptionGroup):
for suberr in err.exceptions:
if isinstance(suberr, trio.TooSlowError):
break
else:
pytest.fail('Never got a `TooSlowError` ?')
except trio.TooSlowError:
pass
@tractor.context
@ -307,69 +298,44 @@ async def inf_streamer(
async with (
ctx.open_stream() as stream,
trio.open_nursery() as tn,
trio.open_nursery() as n,
):
async def close_stream_on_sentinel():
async def bail_on_sentinel():
async for msg in stream:
if msg == 'done':
print(
'streamer RXed "done" sentinel msg!\n'
'CLOSING `MsgStream`!'
)
await stream.aclose()
else:
print(f'streamer received {msg}')
else:
print('streamer exited recv loop')
# start termination detector
tn.start_soon(close_stream_on_sentinel)
n.start_soon(bail_on_sentinel)
cap: int = 10000 # so that we don't spin forever when bug..
for val in range(cap):
for val in itertools.count():
try:
print(f'streamer sending {val}')
await stream.send(val)
if val > cap:
raise RuntimeError(
'Streamer never cancelled by setinel?'
)
await trio.sleep(0.001)
# close out the stream gracefully
except trio.ClosedResourceError:
print('transport closed on streamer side!')
assert stream.closed
# close out the stream gracefully
break
else:
raise RuntimeError(
'Streamer not cancelled before finished sending?'
)
print('streamer exited .open_streamer() block')
print('terminating streamer')
def test_local_task_fanout_from_stream(
debug_mode: bool,
):
def test_local_task_fanout_from_stream():
'''
Single stream with multiple local consumer tasks using the
``MsgStream.subscribe()` api.
Ensure all tasks receive all values after stream completes
sending.
Ensure all tasks receive all values after stream completes sending.
'''
consumers: int = 22
consumers = 22
async def main():
counts = Counter()
async with tractor.open_nursery(
debug_mode=debug_mode,
) as tn:
p: tractor.Portal = await tn.start_actor(
async with tractor.open_nursery() as tn:
p = await tn.start_actor(
'inf_streamer',
enable_modules=[__name__],
)
@ -377,6 +343,7 @@ def test_local_task_fanout_from_stream(
p.open_context(inf_streamer) as (ctx, _),
ctx.open_stream() as stream,
):
async def pull_and_count(name: str):
# name = trio.lowlevel.current_task().name
async with stream.subscribe() as recver:
@ -385,7 +352,7 @@ def test_local_task_fanout_from_stream(
tractor.trionics.BroadcastReceiver
)
async for val in recver:
print(f'bx {name} rx: {val}')
# print(f'{name}: {val}')
counts[name] += 1
print(f'{name} bcaster ended')
@ -395,14 +362,10 @@ def test_local_task_fanout_from_stream(
with trio.fail_after(3):
async with trio.open_nursery() as nurse:
for i in range(consumers):
nurse.start_soon(
pull_and_count,
i,
)
nurse.start_soon(pull_and_count, i)
# delay to let bcast consumers pull msgs
await trio.sleep(0.5)
print('terminating nursery of bcast rxer consumers!')
print('\nterminating')
await stream.send('done')
print('closed stream connection')

View File

@ -8,13 +8,15 @@ import platform
import time
from itertools import repeat
from exceptiongroup import (
BaseExceptionGroup,
ExceptionGroup,
)
import pytest
import trio
import tractor
from tractor._testing import (
tractor_test,
)
from conftest import no_windows
from conftest import tractor_test, no_windows
def is_win():
@ -45,19 +47,17 @@ async def do_nuthin():
],
ids=['no_args', 'unexpected_args'],
)
def test_remote_error(reg_addr, args_err):
'''
Verify an error raised in a subactor that is propagated
def test_remote_error(arb_addr, args_err):
"""Verify an error raised in a subactor that is propagated
to the parent nursery, contains the underlying boxed builtin
error type info and causes cancellation and reraising all the
way up the stack.
'''
"""
args, errtype = args_err
async def main():
async with tractor.open_nursery(
registry_addrs=[reg_addr],
arbiter_addr=arb_addr,
) as nursery:
# on a remote type error caused by bad input args
@ -65,9 +65,7 @@ def test_remote_error(reg_addr, args_err):
# an exception group outside the nursery since the error
# here and the far end task error are one in the same?
portal = await nursery.run_in_actor(
assert_err,
name='errorer',
**args
assert_err, name='errorer', **args
)
# get result(s) from main task
@ -77,7 +75,7 @@ def test_remote_error(reg_addr, args_err):
# of this actor nursery.
await portal.result()
except tractor.RemoteActorError as err:
assert err.boxed_type == errtype
assert err.type == errtype
print("Look Maa that actor failed hard, hehh")
raise
@ -86,33 +84,20 @@ def test_remote_error(reg_addr, args_err):
with pytest.raises(tractor.RemoteActorError) as excinfo:
trio.run(main)
assert excinfo.value.boxed_type == errtype
assert excinfo.value.type == errtype
else:
# the root task will also error on the `Portal.result()`
# call so we expect an error from there AND the child.
# |_ tho seems like on new `trio` this doesn't always
# happen?
with pytest.raises((
BaseExceptionGroup,
tractor.RemoteActorError,
)) as excinfo:
# the root task will also error on the `.result()` call
# so we expect an error from there AND the child.
with pytest.raises(BaseExceptionGroup) as excinfo:
trio.run(main)
# ensure boxed errors are `errtype`
err: BaseException = excinfo.value
if isinstance(err, BaseExceptionGroup):
suberrs: list[BaseException] = err.exceptions
else:
suberrs: list[BaseException] = [err]
for exc in suberrs:
assert exc.boxed_type == errtype
# ensure boxed errors
for exc in excinfo.value.exceptions:
assert exc.type == errtype
def test_multierror(
reg_addr: tuple[str, int],
):
def test_multierror(arb_addr):
'''
Verify we raise a ``BaseExceptionGroup`` out of a nursery where
more then one actor errors.
@ -120,7 +105,7 @@ def test_multierror(
'''
async def main():
async with tractor.open_nursery(
registry_addrs=[reg_addr],
arbiter_addr=arb_addr,
) as nursery:
await nursery.run_in_actor(assert_err, name='errorer1')
@ -130,7 +115,7 @@ def test_multierror(
try:
await portal2.result()
except tractor.RemoteActorError as err:
assert err.boxed_type == AssertionError
assert err.type == AssertionError
print("Look Maa that first actor failed hard, hehh")
raise
@ -145,14 +130,14 @@ def test_multierror(
@pytest.mark.parametrize(
'num_subactors', range(25, 26),
)
def test_multierror_fast_nursery(reg_addr, start_method, num_subactors, delay):
def test_multierror_fast_nursery(arb_addr, start_method, num_subactors, delay):
"""Verify we raise a ``BaseExceptionGroup`` out of a nursery where
more then one actor errors and also with a delay before failure
to test failure during an ongoing spawning.
"""
async def main():
async with tractor.open_nursery(
registry_addrs=[reg_addr],
arbiter_addr=arb_addr,
) as nursery:
for i in range(num_subactors):
@ -182,7 +167,7 @@ def test_multierror_fast_nursery(reg_addr, start_method, num_subactors, delay):
for exc in exceptions:
assert isinstance(exc, tractor.RemoteActorError)
assert exc.boxed_type == AssertionError
assert exc.type == AssertionError
async def do_nothing():
@ -190,20 +175,15 @@ async def do_nothing():
@pytest.mark.parametrize('mechanism', ['nursery_cancel', KeyboardInterrupt])
def test_cancel_single_subactor(reg_addr, mechanism):
'''
Ensure a ``ActorNursery.start_actor()`` spawned subactor
def test_cancel_single_subactor(arb_addr, mechanism):
"""Ensure a ``ActorNursery.start_actor()`` spawned subactor
cancels when the nursery is cancelled.
'''
"""
async def spawn_actor():
'''
Spawn an actor that blocks indefinitely then cancel via
either `ActorNursery.cancel()` or an exception raise.
'''
"""Spawn an actor that blocks indefinitely.
"""
async with tractor.open_nursery(
registry_addrs=[reg_addr],
arbiter_addr=arb_addr,
) as nursery:
portal = await nursery.start_actor(
@ -323,7 +303,7 @@ async def test_some_cancels_all(num_actors_and_errs, start_method, loglevel):
await portal.run(func, **kwargs)
except tractor.RemoteActorError as err:
assert err.boxed_type == err_type
assert err.type == err_type
# we only expect this first error to propagate
# (all other daemons are cancelled before they
# can be scheduled)
@ -342,11 +322,11 @@ async def test_some_cancels_all(num_actors_and_errs, start_method, loglevel):
assert len(err.exceptions) == num_actors
for exc in err.exceptions:
if isinstance(exc, tractor.RemoteActorError):
assert exc.boxed_type == err_type
assert exc.type == err_type
else:
assert isinstance(exc, trio.Cancelled)
elif isinstance(err, tractor.RemoteActorError):
assert err.boxed_type == err_type
assert err.type == err_type
assert n.cancelled is True
assert not n._children
@ -425,7 +405,7 @@ async def test_nested_multierrors(loglevel, start_method):
elif isinstance(subexc, tractor.RemoteActorError):
# on windows it seems we can't exactly be sure wtf
# will happen..
assert subexc.boxed_type in (
assert subexc.type in (
tractor.RemoteActorError,
trio.Cancelled,
BaseExceptionGroup,
@ -435,7 +415,7 @@ async def test_nested_multierrors(loglevel, start_method):
for subsub in subexc.exceptions:
if subsub in (tractor.RemoteActorError,):
subsub = subsub.boxed_type
subsub = subsub.type
assert type(subsub) in (
trio.Cancelled,
@ -450,16 +430,16 @@ async def test_nested_multierrors(loglevel, start_method):
# we get back the (sent) cancel signal instead
if is_win():
if isinstance(subexc, tractor.RemoteActorError):
assert subexc.boxed_type in (
assert subexc.type in (
BaseExceptionGroup,
tractor.RemoteActorError
)
else:
assert isinstance(subexc, BaseExceptionGroup)
else:
assert subexc.boxed_type is ExceptionGroup
assert subexc.type is ExceptionGroup
else:
assert subexc.boxed_type in (
assert subexc.type in (
tractor.RemoteActorError,
trio.Cancelled
)
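# Hedged, trio-only sketch (not part of this diff) of the error-grouping
# behaviour the cancellation tests above assert on: two failing tasks in
# one nursery surface as a single `BaseExceptionGroup` whose
# `.exceptions` holds both errors.
import trio
import pytest

async def _boom(msg: str):
    raise AssertionError(msg)

async def _demo_multierror():
    async with trio.open_nursery() as tn:
        tn.start_soon(_boom, 'first')
        tn.start_soon(_boom, 'second')

with pytest.raises(BaseExceptionGroup) as excinfo:
    trio.run(_demo_multierror)

assert all(
    isinstance(exc, AssertionError)
    for exc in excinfo.value.exceptions
)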

View File

@ -1,917 +0,0 @@
'''
Low-level functional audits for our
"capability based messaging"-spec feats.
B~)
'''
import typing
from typing import (
Any,
Type,
Union,
)
from msgspec import (
structs,
msgpack,
Struct,
ValidationError,
)
import pytest
import tractor
from tractor import (
_state,
MsgTypeError,
Context,
)
from tractor.msg import (
_codec,
_ctxvar_MsgCodec,
NamespacePath,
MsgCodec,
mk_codec,
apply_codec,
current_codec,
)
from tractor.msg.types import (
_payload_msgs,
log,
PayloadMsg,
Started,
mk_msg_spec,
)
import trio
def mk_custom_codec(
pld_spec: Union[Type]|Any,
add_hooks: bool,
) -> MsgCodec:
'''
Create custom `msgpack` enc/dec-hooks and set a `Decoder`
which only loads `pld_spec` (like `NamespacePath`) types.
'''
uid: tuple[str, str] = tractor.current_actor().uid
# XXX NOTE XXX: despite defining `NamespacePath` as a type
# field on our `PayloadMsg.pld`, we still need an enc/dec_hook() pair
# to cast to/from that type on the wire. See the docs:
# https://jcristharif.com/msgspec/extending.html#mapping-to-from-native-types
def enc_nsp(obj: Any) -> Any:
print(f'{uid} ENC HOOK')
match obj:
case NamespacePath():
print(
f'{uid}: `NamespacePath`-Only ENCODE?\n'
f'obj-> `{obj}`: {type(obj)}\n'
)
# if type(obj) != NamespacePath:
# breakpoint()
return str(obj)
print(
f'{uid}\n'
'CUSTOM ENCODE\n'
f'obj-arg-> `{obj}`: {type(obj)}\n'
)
logmsg: str = (
f'{uid}\n'
'FAILED ENCODE\n'
f'obj-> `{obj}: {type(obj)}`\n'
)
raise NotImplementedError(logmsg)
def dec_nsp(
obj_type: Type,
obj: Any,
) -> Any:
print(
f'{uid}\n'
'CUSTOM DECODE\n'
f'type-arg-> {obj_type}\n'
f'obj-arg-> `{obj}`: {type(obj)}\n'
)
nsp = None
if (
obj_type is NamespacePath
and isinstance(obj, str)
and ':' in obj
):
nsp = NamespacePath(obj)
# TODO: we could build a generic handler using
# JUST matching the obj_type part?
# nsp = obj_type(obj)
if nsp:
print(f'Returning NSP instance: {nsp}')
return nsp
logmsg: str = (
f'{uid}\n'
'FAILED DECODE\n'
f'type-> {obj_type}\n'
f'obj-arg-> `{obj}`: {type(obj)}\n\n'
f'current codec:\n'
f'{current_codec()}\n'
)
# TODO: figure out the ignore subsys for this!
# -[ ] option whether to defense-relay back the msg
# inside an `Invalid`/`Ignore`
# -[ ] how to make this handling pluggable such that a
# `Channel`/`MsgTransport` can intercept and process
# back msgs either via exception handling or some other
# signal?
log.warning(logmsg)
# NOTE: this delivers the invalid
# value up to `msgspec`'s decoding
# machinery for error raising.
return obj
# raise NotImplementedError(logmsg)
nsp_codec: MsgCodec = mk_codec(
ipc_pld_spec=pld_spec,
# NOTE XXX: the encode hook MUST be used no matter what since
# our `NamespacePath` is neither a natively supported type nor
# a `msgspec.Struct` subtype - so `msgspec` has no way to know
# how to encode it unless we provide the custom hook.
#
# AGAIN that is, regardless of whether we spec an
# `Any`-decoded-pld the enc has no knowledge (by default)
# how to enc `NamespacePath` (nsp), so we add a custom
# hook to do that ALWAYS.
enc_hook=enc_nsp if add_hooks else None,
# XXX NOTE: pretty sure this is mutex with the `type=` to
# `Decoder`? so it won't work in tandem with the
# `ipc_pld_spec` passed above?
dec_hook=dec_nsp if add_hooks else None,
)
return nsp_codec
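# Hedged, msgspec-only sketch (not from this diff) of the enc/dec-hook
# extension pattern `mk_custom_codec()` relies on above: a type `msgspec`
# doesn't natively know is sent as `str` on the wire and re-hydrated by
# the decode hook, per
# https://jcristharif.com/msgspec/extending.html#mapping-to-from-native-types
from msgspec import msgpack as _mp

class _Point:
    def __init__(self, x: int, y: int):
        self.x, self.y = x, y

def _enc_point(obj):
    # called for any type the encoder can't natively serialize
    if isinstance(obj, _Point):
        return f'{obj.x},{obj.y}'
    raise NotImplementedError(f'cannot encode {obj!r}')

def _dec_point(obj_type, obj):
    # called with the expected type and the raw wire value
    if obj_type is _Point and isinstance(obj, str):
        x, y = obj.split(',')
        return _Point(int(x), int(y))
    return obj

_enc = _mp.Encoder(enc_hook=_enc_point)
_dec = _mp.Decoder(type=_Point, dec_hook=_dec_point)
_pt = _dec.decode(_enc.encode(_Point(1, 2)))
assert (_pt.x, _pt.y) == (1, 2)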
def chk_codec_applied(
expect_codec: MsgCodec,
enter_value: MsgCodec|None = None,
) -> MsgCodec:
'''
A bunch of sanity checks ensuring that the IPC channel's
context-vars are set to the expected codec and that our
ctx-var wrapper APIs match the same.
'''
# TODO: play with tricycle again, bc this is supposed to work
# the way we want?
#
# TreeVar
# task: trio.Task = trio.lowlevel.current_task()
# curr_codec = _ctxvar_MsgCodec.get_in(task)
# ContextVar
# task_ctx: Context = task.context
# assert _ctxvar_MsgCodec in task_ctx
# curr_codec: MsgCodec = task.context[_ctxvar_MsgCodec]
# NOTE: currently we use this!
# RunVar
curr_codec: MsgCodec = current_codec()
last_read_codec = _ctxvar_MsgCodec.get()
# assert curr_codec is last_read_codec
assert (
(same_codec := expect_codec) is
# returned from `mk_codec()`
# yielded value from `apply_codec()`
# read from current task's `contextvars.Context`
curr_codec is
last_read_codec
# the default `msgspec` settings
is not _codec._def_msgspec_codec
is not _codec._def_tractor_codec
)
if enter_value:
assert enter_value is same_codec
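# Hedged, stdlib-only sketch (not from this diff) of the `contextvars`
# override pattern these checks audit: a codec set inside a scope is only
# visible there and reverts to the default once the token is reset.
import contextvars as _cv
from contextlib import contextmanager as _cm

_codec_var = _cv.ContextVar('codec', default='default-codec')

@_cm
def _apply(codec: str):
    token = _codec_var.set(codec)
    try:
        yield _codec_var.get()
    finally:
        _codec_var.reset(token)

assert _codec_var.get() == 'default-codec'
with _apply('nsp-codec') as _cur:
    assert _cur == _codec_var.get() == 'nsp-codec'
assert _codec_var.get() == 'default-codec'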
def iter_maybe_sends(
send_items: dict[Union[Type], Any] | list[tuple],
ipc_pld_spec: Union[Type] | Any,
add_codec_hooks: bool,
codec: MsgCodec|None = None,
) -> tuple[Any, bool]:
if isinstance(send_items, dict):
send_items = send_items.items()
for (
send_type_spec,
send_value,
) in send_items:
expect_roundtrip: bool = False
# values-to-typespec sanity
send_type = type(send_value)
assert send_type == send_type_spec or (
(subtypes := getattr(send_type_spec, '__args__', None))
and send_type in subtypes
)
spec_subtypes: set[Union[Type]] = (
getattr(
ipc_pld_spec,
'__args__',
{ipc_pld_spec,},
)
)
send_in_spec: bool = (
send_type == ipc_pld_spec
or (
ipc_pld_spec != Any
and # presume `Union` of types
send_type in spec_subtypes
)
or (
ipc_pld_spec == Any
and
send_type != NamespacePath
)
)
expect_roundtrip = (
send_in_spec
# any spec should support all other
# builtin py values that we send
# except our custom nsp type which
# we should be able to send as long
# as we provide the custom codec hooks.
or (
ipc_pld_spec == Any
and
send_type == NamespacePath
and
add_codec_hooks
)
)
if codec is not None:
# XXX FIRST XXX ensure roundtripping works
# before touching any IPC primitives/APIs.
wire_bytes: bytes = codec.encode(
Started(
cid='blahblah',
pld=send_value,
)
)
# NOTE: demonstrates the decoder loading
# via our native SCIPP msg-spec
# (structured-conc-inter-proc-protocol)
# implemented as per,
try:
msg: Started = codec.decode(wire_bytes)
if not expect_roundtrip:
pytest.fail(
f'NOT-EXPECTED able to roundtrip value given spec:\n'
f'ipc_pld_spec -> {ipc_pld_spec}\n'
f'value -> {send_value}: {send_type}\n'
)
pld = msg.pld
assert pld == send_value
except ValidationError:
if expect_roundtrip:
pytest.fail(
f'EXPECTED to roundtrip value given spec:\n'
f'ipc_pld_spec -> {ipc_pld_spec}\n'
f'value -> {send_value}: {send_type}\n'
)
yield (
str(send_type),
send_value,
expect_roundtrip,
)
def dec_type_union(
type_names: list[str],
) -> Type:
'''
Look up types by name, compile into a list and then create and
return a `typing.Union` from the full set.
'''
import importlib
types: list[Type] = []
for type_name in type_names:
for mod in [
typing,
importlib.import_module(__name__),
]:
if type_ref := getattr(
mod,
type_name,
False,
):
types.append(type_ref)
# special case handling only..
# ipc_pld_spec: Union[Type] = eval(
# pld_spec_str,
# {}, # globals
# {'typing': typing}, # locals
# )
return Union[*types]
def enc_type_union(
union_or_type: Union[Type]|Type,
) -> list[str]:
'''
Encode a type-union or single type to a list of type-name-strings
ready for IPC interchange.
'''
type_strs: list[str] = []
for typ in getattr(
union_or_type,
'__args__',
{union_or_type,},
):
type_strs.append(typ.__qualname__)
return type_strs
@tractor.context
async def send_back_values(
ctx: Context,
expect_debug: bool,
pld_spec_type_strs: list[str],
add_hooks: bool,
started_msg_bytes: bytes,
expect_ipc_send: dict[str, tuple[Any, bool]],
) -> None:
'''
Set up a custom codec to load instances of `NamespacePath`
and ensure we can round trip a func ref with our parent.
'''
uid: tuple = tractor.current_actor().uid
# debug mode sanity check (prolly superfluous but, meh)
assert expect_debug == _state.debug_mode()
# init state in sub-actor should be default
chk_codec_applied(
expect_codec=_codec._def_tractor_codec,
)
# load pld spec from input str
ipc_pld_spec = dec_type_union(
pld_spec_type_strs,
)
pld_spec_str = str(ipc_pld_spec)
# same as on parent side config.
nsp_codec: MsgCodec = mk_custom_codec(
pld_spec=ipc_pld_spec,
add_hooks=add_hooks,
)
with (
apply_codec(nsp_codec) as codec,
):
chk_codec_applied(
expect_codec=nsp_codec,
enter_value=codec,
)
print(
f'{uid}: attempting `Started`-bytes DECODE..\n'
)
try:
msg: Started = nsp_codec.decode(started_msg_bytes)
expected_pld_spec_str: str = msg.pld
assert pld_spec_str == expected_pld_spec_str
# TODO: maybe we should add our own wrapper error so as to
# be interchange-lib agnostic?
# -[ ] the error type is wtv is raised from the hook so we
# could also require a type-class of errors for
# indicating whether the hook-failure can be handled by
# a nasty-dialog-unprot sub-sys?
except ValidationError:
# NOTE: only in the `Any` spec case do we expect this to
# work since otherwise no spec covers a plain-ol'
# `.pld: str`
if pld_spec_str == 'Any':
raise
else:
print(
f'{uid}: (correctly) unable to DECODE `Started`-bytes\n'
f'{started_msg_bytes}\n'
)
iter_send_val_items = iter(expect_ipc_send.values())
sent: list[Any] = []
for send_value, expect_send in iter_send_val_items:
try:
print(
f'{uid}: attempting to `.started({send_value})`\n'
f'=> expect_send: {expect_send}\n'
f'SINCE, ipc_pld_spec: {ipc_pld_spec}\n'
f'AND, codec: {codec}\n'
)
await ctx.started(send_value)
sent.append(send_value)
if not expect_send:
# XXX NOTE XXX THIS WON'T WORK WITHOUT SPECIAL
# `str` handling! or special debug mode IPC
# msgs!
await tractor.pause()
raise RuntimeError(
f'NOT-EXPECTED able to roundtrip value given spec:\n'
f'ipc_pld_spec -> {ipc_pld_spec}\n'
f'value -> {send_value}: {type(send_value)}\n'
)
break # move on to streaming block..
except tractor.MsgTypeError:
await tractor.pause()
if expect_send:
raise RuntimeError(
f'EXPECTED to `.started()` value given spec:\n'
f'ipc_pld_spec -> {ipc_pld_spec}\n'
f'value -> {send_value}: {type(send_value)}\n'
)
async with ctx.open_stream() as ipc:
print(
f'{uid}: Entering streaming block to send remaining values..'
)
for send_value, expect_send in iter_send_val_items:
send_type: Type = type(send_value)
print(
'------ - ------\n'
f'{uid}: SENDING NEXT VALUE\n'
f'ipc_pld_spec: {ipc_pld_spec}\n'
f'expect_send: {expect_send}\n'
f'val: {send_value}\n'
'------ - ------\n'
)
try:
await ipc.send(send_value)
print(f'***\n{uid}-CHILD sent {send_value!r}\n***\n')
sent.append(send_value)
# NOTE: should only raise above on
# `.started()` or a `Return`
# if not expect_send:
# raise RuntimeError(
# f'NOT-EXPECTED able to roundtrip value given spec:\n'
# f'ipc_pld_spec -> {ipc_pld_spec}\n'
# f'value -> {send_value}: {send_type}\n'
# )
except ValidationError:
print(f'{uid} FAILED TO SEND {send_value}!')
# await tractor.pause()
if expect_send:
raise RuntimeError(
f'EXPECTED to roundtrip value given spec:\n'
f'ipc_pld_spec -> {ipc_pld_spec}\n'
f'value -> {send_value}: {send_type}\n'
)
# continue
else:
print(
f'{uid}: finished sending all values\n'
'Should be exiting stream block!\n'
)
print(f'{uid}: exited streaming block!')
# TODO: this won't be true bc in streaming phase we DO NOT
# msgspec check outbound msgs!
# -[ ] once we implement the receiver side `InvalidMsg`
# then we can expect it here?
# assert (
# len(sent)
# ==
# len([val
# for val, expect in
# expect_ipc_send.values()
# if expect is True])
# )
def ex_func(*args):
print(f'ex_func({args})')
@pytest.mark.parametrize(
'ipc_pld_spec',
[
Any,
NamespacePath,
NamespacePath|None, # the "maybe" spec Bo
],
ids=[
'any_type',
'nsp_type',
'maybe_nsp_type',
]
)
@pytest.mark.parametrize(
'add_codec_hooks',
[
True,
False,
],
ids=['use_codec_hooks', 'no_codec_hooks'],
)
def test_codec_hooks_mod(
debug_mode: bool,
ipc_pld_spec: Union[Type]|Any,
# send_value: None|str|NamespacePath,
add_codec_hooks: bool,
):
'''
Audit the `.msg.MsgCodec` override API details given our impl
uses `contextvars` to accomplish per-`trio`-task codec
application around an inter-proc-task-comms context.
'''
async def main():
nsp = NamespacePath.from_ref(ex_func)
send_items: dict[Union, Any] = {
Union[None]: None,
Union[NamespacePath]: nsp,
Union[str]: str(nsp),
}
# init default state for actor
chk_codec_applied(
expect_codec=_codec._def_tractor_codec,
)
async with tractor.open_nursery(
debug_mode=debug_mode,
) as an:
p: tractor.Portal = await an.start_actor(
'sub',
enable_modules=[__name__],
)
# TODO: 2 cases:
# - codec not modified -> decode nsp as `str`
# - codec modified with hooks -> decode nsp as
# `NamespacePath`
nsp_codec: MsgCodec = mk_custom_codec(
pld_spec=ipc_pld_spec,
add_hooks=add_codec_hooks,
)
with apply_codec(nsp_codec) as codec:
chk_codec_applied(
expect_codec=nsp_codec,
enter_value=codec,
)
expect_ipc_send: dict[str, tuple[Any, bool]] = {}
report: str = (
'Parent report on send values with\n'
f'ipc_pld_spec: {ipc_pld_spec}\n'
' ------ - ------\n'
)
for val_type_str, val, expect_send in iter_maybe_sends(
send_items,
ipc_pld_spec,
add_codec_hooks=add_codec_hooks,
):
report += (
f'send_value: {val}: {type(val)} '
f'=> expect_send: {expect_send}\n'
)
expect_ipc_send[val_type_str] = (val, expect_send)
print(
report +
' ------ - ------\n'
)
assert len(expect_ipc_send) == len(send_items)
# now try over real IPC with the subactor
# expect_ipc_rountrip: bool = True
expected_started = Started(
cid='cid',
pld=str(ipc_pld_spec),
)
# build list of values we expect to receive from
# the subactor.
expect_to_send: list[Any] = [
val
for val, expect_send in expect_ipc_send.values()
if expect_send
]
pld_spec_type_strs: list[str] = enc_type_union(ipc_pld_spec)
# XXX should raise an mte (`MsgTypeError`)
# when `add_codec_hooks == False` bc the input
# `expect_ipc_send` kwarg has a nsp which can't be
# serialized!
#
# TODO: can we ensure this happens from the
# `Return`-side (aka the sub) as well?
if not add_codec_hooks:
try:
async with p.open_context(
send_back_values,
expect_debug=debug_mode,
pld_spec_type_strs=pld_spec_type_strs,
add_hooks=add_codec_hooks,
started_msg_bytes=nsp_codec.encode(expected_started),
# XXX NOTE bc we send a `NamespacePath` in this kwarg
expect_ipc_send=expect_ipc_send,
) as (ctx, first):
pytest.fail('ctx should fail to open without custom enc_hook!?')
# this test passes bc we can go no further!
except MsgTypeError:
# teardown nursery
await p.cancel_actor()
return
# TODO: send the original nsp here and
# test with `limit_msg_spec()` above?
# await tractor.pause()
print('PARENT opening IPC ctx!\n')
async with (
# XXX should raise an mte (`MsgTypeError`)
# when `add_codec_hooks == False`..
p.open_context(
send_back_values,
expect_debug=debug_mode,
pld_spec_type_strs=pld_spec_type_strs,
add_hooks=add_codec_hooks,
started_msg_bytes=nsp_codec.encode(expected_started),
expect_ipc_send=expect_ipc_send,
) as (ctx, first),
ctx.open_stream() as ipc,
):
# ensure codec is still applied across
# `tractor.Context` + its embedded nursery.
chk_codec_applied(
expect_codec=nsp_codec,
enter_value=codec,
)
print(
'root: ENTERING CONTEXT BLOCK\n'
f'type(first): {type(first)}\n'
f'first: {first}\n'
)
expect_to_send.remove(first)
# TODO: explicit values we expect depending on
# codec config!
# assert first == first_val
# assert first == f'{__name__}:ex_func'
async for next_sent in ipc:
print(
'Parent: child sent next value\n'
f'{next_sent}: {type(next_sent)}\n'
)
if expect_to_send:
expect_to_send.remove(next_sent)
else:
print('PARENT should terminate stream loop + block!')
# all sent values should have arrived!
assert not expect_to_send
await p.cancel_actor()
trio.run(main)
def chk_pld_type(
payload_spec: Type[Struct]|Any,
pld: Any,
expect_roundtrip: bool|None = None,
) -> bool:
pld_val_type: Type = type(pld)
# TODO: verify that the overridden subtypes
# DO NOT have modified type-annots from original!
# 'Start', .pld: FuncSpec
# 'StartAck', .pld: IpcCtxSpec
# 'Stop', .pld: UNSET
# 'Error', .pld: ErrorData
codec: MsgCodec = mk_codec(
# NOTE: this ONLY accepts `PayloadMsg.pld` fields of a specified
# type union.
ipc_pld_spec=payload_spec,
)
# make a one-off dec to compare with our `MsgCodec` instance
# which does the below `mk_msg_spec()` call internally
ipc_msg_spec: Union[Type[Struct]]
msg_types: list[PayloadMsg[payload_spec]]
(
ipc_msg_spec,
msg_types,
) = mk_msg_spec(
payload_type_union=payload_spec,
)
_enc = msgpack.Encoder()
_dec = msgpack.Decoder(
type=ipc_msg_spec or Any, # like `PayloadMsg[Any]`
)
assert (
payload_spec
==
codec.pld_spec
)
# assert codec.dec == dec
#
# ^-XXX-^ not sure why these aren't "equal" but when cast
# to `str` they seem to match ?? .. kk
assert (
str(ipc_msg_spec)
==
str(codec.msg_spec)
==
str(_dec.type)
==
str(codec.dec.type)
)
# verify the boxed-type for all variable payload-type msgs.
if not msg_types:
breakpoint()
roundtrip: bool|None = None
pld_spec_msg_names: list[str] = [
td.__name__ for td in _payload_msgs
]
for typedef in msg_types:
skip_runtime_msg: bool = typedef.__name__ not in pld_spec_msg_names
if skip_runtime_msg:
continue
pld_field = structs.fields(typedef)[1]
assert pld_field.type is payload_spec # TODO-^ does this need to work to get all subtypes to adhere?
kwargs: dict[str, Any] = {
'cid': '666',
'pld': pld,
}
enc_msg: PayloadMsg = typedef(**kwargs)
_wire_bytes: bytes = _enc.encode(enc_msg)
wire_bytes: bytes = codec.enc.encode(enc_msg)
assert _wire_bytes == wire_bytes
ve: ValidationError|None = None
try:
dec_msg = codec.dec.decode(wire_bytes)
_dec_msg = _dec.decode(wire_bytes)
# decoded msg and thus payload should be exactly same!
assert (roundtrip := (
_dec_msg
==
dec_msg
==
enc_msg
))
if (
expect_roundtrip is not None
and expect_roundtrip != roundtrip
):
breakpoint()
assert (
pld
==
dec_msg.pld
==
enc_msg.pld
)
# assert (roundtrip := (_dec_msg == enc_msg))
except ValidationError as _ve:
ve = _ve
roundtrip: bool = False
if pld_val_type is payload_spec:
raise ValueError(
'Got `ValidationError` despite type-var match!?\n'
f'pld_val_type: {pld_val_type}\n'
f'payload_type: {payload_spec}\n'
) from ve
else:
# ow we good cuz the pld spec mismatched.
print(
'Got expected `ValidationError` since,\n'
f'{pld_val_type} is not {payload_spec}\n'
)
else:
if (
payload_spec is not Any
and
pld_val_type is not payload_spec
):
raise ValueError(
'DID NOT `ValidationError` despite expected type match!?\n'
f'pld_val_type: {pld_val_type}\n'
f'payload_type: {payload_spec}\n'
)
# full code decode should always be attempted!
if roundtrip is None:
breakpoint()
return roundtrip
def test_limit_msgspec():
async def main():
async with tractor.open_root_actor(
debug_mode=True
):
# ensure we can round-trip a boxing `PayloadMsg`
assert chk_pld_type(
payload_spec=Any,
pld=None,
expect_roundtrip=True,
)
# verify that a mis-typed payload value won't decode
assert not chk_pld_type(
payload_spec=int,
pld='doggy',
)
# parametrize the boxed `.pld` type as a custom-struct
# and ensure that parametrization propagates
# to all payload-msg-spec-able subtypes!
class CustomPayload(Struct):
name: str
value: Any
assert not chk_pld_type(
payload_spec=CustomPayload,
pld='doggy',
)
assert chk_pld_type(
payload_spec=CustomPayload,
pld=CustomPayload(name='doggy', value='urmom')
)
# yah, we can `.pause_from_sync()` now!
# breakpoint()
trio.run(main)

View File

@ -6,15 +6,14 @@ sub-sub-actor daemons.
'''
from typing import Optional
import asyncio
from contextlib import (
asynccontextmanager as acm,
aclosing,
)
from contextlib import asynccontextmanager as acm
import pytest
import trio
from trio_typing import TaskStatus
import tractor
from tractor import RemoteActorError
from async_generator import aclosing
async def aio_streamer(
@ -142,7 +141,7 @@ async def open_actor_local_nursery(
)
def test_actor_managed_trio_nursery_task_error_cancels_aio(
asyncio_mode: bool,
reg_addr: tuple,
arb_addr
):
'''
Verify that a ``trio`` nursery created and managed in a child actor
@ -171,4 +170,4 @@ def test_actor_managed_trio_nursery_task_error_cancels_aio(
# verify boxed error
err = excinfo.value
assert err.boxed_type is NameError
assert isinstance(err.type(), NameError)

View File

@ -5,7 +5,9 @@ import trio
import tractor
from tractor import open_actor_cluster
from tractor.trionics import gather_contexts
from tractor._testing import tractor_test
from conftest import tractor_test
MESSAGE = 'tractoring at full speed'
@ -47,7 +49,7 @@ async def worker(
await ctx.started()
async with ctx.open_stream(
allow_overruns=True,
backpressure=True,
) as stream:
# TODO: this with the below assert causes a hang bug?

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -9,24 +9,25 @@ import itertools
import pytest
import tractor
from tractor._testing import tractor_test
import trio
from conftest import tractor_test
@tractor_test
async def test_reg_then_unreg(reg_addr):
async def test_reg_then_unreg(arb_addr):
actor = tractor.current_actor()
assert actor.is_arbiter
assert len(actor._registry) == 1 # only self is registered
async with tractor.open_nursery(
registry_addrs=[reg_addr],
arbiter_addr=arb_addr,
) as n:
portal = await n.start_actor('actor', enable_modules=[__name__])
uid = portal.channel.uid
async with tractor.get_registry(*reg_addr) as aportal:
async with tractor.get_arbiter(*arb_addr) as aportal:
# this local actor should be the arbiter
assert actor is aportal.actor
@ -52,27 +53,15 @@ async def hi():
return the_line.format(tractor.current_actor().name)
async def say_hello(
other_actor: str,
reg_addr: tuple[str, int],
):
async def say_hello(other_actor):
await trio.sleep(1) # wait for other actor to spawn
async with tractor.find_actor(
other_actor,
registry_addrs=[reg_addr],
) as portal:
async with tractor.find_actor(other_actor) as portal:
assert portal is not None
return await portal.run(__name__, 'hi')
async def say_hello_use_wait(
other_actor: str,
reg_addr: tuple[str, int],
):
async with tractor.wait_for_actor(
other_actor,
registry_addr=reg_addr,
) as portal:
async def say_hello_use_wait(other_actor):
async with tractor.wait_for_actor(other_actor) as portal:
assert portal is not None
result = await portal.run(__name__, 'hi')
return result
@ -80,29 +69,21 @@ async def say_hello_use_wait(
@tractor_test
@pytest.mark.parametrize('func', [say_hello, say_hello_use_wait])
async def test_trynamic_trio(
func,
start_method,
reg_addr,
):
'''
Root actor acting as the "director" and running one-shot-task-actors
for the directed subs.
'''
async def test_trynamic_trio(func, start_method, arb_addr):
"""Main tractor entry point, the "master" process (for now
acts as the "director").
"""
async with tractor.open_nursery() as n:
print("Alright... Action!")
donny = await n.run_in_actor(
func,
other_actor='gretchen',
reg_addr=reg_addr,
name='donny',
)
gretchen = await n.run_in_actor(
func,
other_actor='donny',
reg_addr=reg_addr,
name='gretchen',
)
print(await gretchen.result())
@ -150,7 +131,7 @@ async def unpack_reg(actor_or_portal):
async def spawn_and_check_registry(
reg_addr: tuple,
arb_addr: tuple,
use_signal: bool,
remote_arbiter: bool = False,
with_streaming: bool = False,
@ -158,9 +139,9 @@ async def spawn_and_check_registry(
) -> None:
async with tractor.open_root_actor(
registry_addrs=[reg_addr],
arbiter_addr=arb_addr,
):
async with tractor.get_registry(*reg_addr) as portal:
async with tractor.get_arbiter(*arb_addr) as portal:
# runtime needs to be up to call this
actor = tractor.current_actor()
@ -232,19 +213,17 @@ async def spawn_and_check_registry(
def test_subactors_unregister_on_cancel(
start_method,
use_signal,
reg_addr,
arb_addr,
with_streaming,
):
'''
Verify that cancelling a nursery results in all subactors
"""Verify that cancelling a nursery results in all subactors
deregistering themselves with the arbiter.
'''
"""
with pytest.raises(KeyboardInterrupt):
trio.run(
partial(
spawn_and_check_registry,
reg_addr,
arb_addr,
use_signal,
remote_arbiter=False,
with_streaming=with_streaming,
@ -258,7 +237,7 @@ def test_subactors_unregister_on_cancel_remote_daemon(
daemon,
start_method,
use_signal,
reg_addr,
arb_addr,
with_streaming,
):
"""Verify that cancelling a nursery results in all subactors
@ -269,7 +248,7 @@ def test_subactors_unregister_on_cancel_remote_daemon(
trio.run(
partial(
spawn_and_check_registry,
reg_addr,
arb_addr,
use_signal,
remote_arbiter=True,
with_streaming=with_streaming,
@ -283,7 +262,7 @@ async def streamer(agen):
async def close_chans_before_nursery(
reg_addr: tuple,
arb_addr: tuple,
use_signal: bool,
remote_arbiter: bool = False,
) -> None:
@ -296,9 +275,9 @@ async def close_chans_before_nursery(
entries_at_end = 1
async with tractor.open_root_actor(
registry_addrs=[reg_addr],
arbiter_addr=arb_addr,
):
async with tractor.get_registry(*reg_addr) as aportal:
async with tractor.get_arbiter(*arb_addr) as aportal:
try:
get_reg = partial(unpack_reg, aportal)
@ -350,7 +329,7 @@ async def close_chans_before_nursery(
def test_close_channel_explicit(
start_method,
use_signal,
reg_addr,
arb_addr,
):
"""Verify that closing a stream explicitly and killing the actor's
"root nursery" **before** the containing nursery tears down also
@ -360,7 +339,7 @@ def test_close_channel_explicit(
trio.run(
partial(
close_chans_before_nursery,
reg_addr,
arb_addr,
use_signal,
remote_arbiter=False,
),
@ -372,7 +351,7 @@ def test_close_channel_explicit_remote_arbiter(
daemon,
start_method,
use_signal,
reg_addr,
arb_addr,
):
"""Verify that closing a stream explicitly and killing the actor's
"root nursery" **before** the containing nursery tears down also
@ -382,7 +361,7 @@ def test_close_channel_explicit_remote_arbiter(
trio.run(
partial(
close_chans_before_nursery,
reg_addr,
arb_addr,
use_signal,
remote_arbiter=True,
),

View File

@ -11,7 +11,8 @@ import platform
import shutil
import pytest
from tractor._testing import (
from conftest import (
examples_dir,
)
@ -19,8 +20,8 @@ from tractor._testing import (
@pytest.fixture
def run_example_in_subproc(
loglevel: str,
testdir: pytest.Testdir,
reg_addr: tuple[str, int],
testdir,
arb_addr: tuple[str, int],
):
@contextmanager

View File

@ -2,33 +2,21 @@
The hipster way to force SC onto the stdlib's "async": 'infection mode'.
'''
from typing import Optional, Iterable, Union
import asyncio
import builtins
from contextlib import ExitStack
import itertools
import importlib
import os
from pathlib import Path
import signal
from typing import (
Callable,
Iterable,
Union,
)
from exceptiongroup import BaseExceptionGroup
import pytest
import trio
import tractor
from tractor import (
current_actor,
Actor,
to_asyncio,
RemoteActorError,
ContextCancelled,
_state,
)
from tractor.trionics import BroadcastReceiver
from tractor._testing import expect_ctxc
async def sleep_and_err(
@ -36,8 +24,8 @@ async def sleep_and_err(
# just signature placeholders for compat with
# ``to_asyncio.open_channel_from()``
to_trio: trio.MemorySendChannel|None = None,
from_trio: asyncio.Queue|None = None,
to_trio: Optional[trio.MemorySendChannel] = None,
from_trio: Optional[asyncio.Queue] = None,
):
if to_trio:
@ -47,7 +35,7 @@ async def sleep_and_err(
assert 0
async def aio_sleep_forever():
async def sleep_forever():
await asyncio.sleep(float('inf'))
@ -55,10 +43,10 @@ async def trio_cancels_single_aio_task():
# spawn an ``asyncio`` task to run a func and return result
with trio.move_on_after(.2):
await tractor.to_asyncio.run_task(aio_sleep_forever)
await tractor.to_asyncio.run_task(sleep_forever)
def test_trio_cancels_aio_on_actor_side(reg_addr):
def test_trio_cancels_aio_on_actor_side(arb_addr):
'''
Spawn an infected actor that is cancelled by the ``trio`` side
task using std cancel scope apis.
@ -66,7 +54,7 @@ def test_trio_cancels_aio_on_actor_side(reg_addr):
'''
async def main():
async with tractor.open_nursery(
registry_addrs=[reg_addr]
arbiter_addr=arb_addr
) as n:
await n.run_in_actor(
trio_cancels_single_aio_task,
@ -77,22 +65,14 @@ def test_trio_cancels_aio_on_actor_side(reg_addr):
async def asyncio_actor(
target: str,
expect_err: Exception|None = None
expect_err: Optional[Exception] = None
) -> None:
# ensure internal runtime state is consistent
actor: Actor = tractor.current_actor()
assert (
actor.is_infected_aio()
and
actor._infected_aio
and
_state._runtime_vars['_is_infected_aio']
)
target: Callable = globals()[target]
assert tractor.current_actor().is_infected_aio()
target = globals()[target]
if '.' in expect_err:
modpath, _, name = expect_err.rpartition('.')
@ -113,7 +93,7 @@ async def asyncio_actor(
raise
def test_aio_simple_error(reg_addr):
def test_aio_simple_error(arb_addr):
'''
Verify a simple remote asyncio error propagates back through trio
to the parent actor.
@ -122,7 +102,7 @@ def test_aio_simple_error(reg_addr):
'''
async def main():
async with tractor.open_nursery(
registry_addrs=[reg_addr]
arbiter_addr=arb_addr
) as n:
await n.run_in_actor(
asyncio_actor,
@ -131,26 +111,15 @@ def test_aio_simple_error(reg_addr):
infect_asyncio=True,
)
with pytest.raises(
expected_exception=(RemoteActorError, ExceptionGroup),
) as excinfo:
with pytest.raises(RemoteActorError) as excinfo:
trio.run(main)
err = excinfo.value
# might get multiple `trio.Cancelled`s as well inside an inception
if isinstance(err, ExceptionGroup):
err = next(itertools.dropwhile(
lambda exc: not isinstance(exc, tractor.RemoteActorError),
err.exceptions
))
assert err
assert isinstance(err, RemoteActorError)
assert err.boxed_type is AssertionError
assert err.type == AssertionError
def test_tractor_cancels_aio(reg_addr):
def test_tractor_cancels_aio(arb_addr):
'''
Verify we can cancel a spawned asyncio task gracefully.
@ -159,7 +128,7 @@ def test_tractor_cancels_aio(reg_addr):
async with tractor.open_nursery() as n:
portal = await n.run_in_actor(
asyncio_actor,
target='aio_sleep_forever',
target='sleep_forever',
expect_err='trio.Cancelled',
infect_asyncio=True,
)
@ -169,7 +138,7 @@ def test_tractor_cancels_aio(reg_addr):
trio.run(main)
def test_trio_cancels_aio(reg_addr):
def test_trio_cancels_aio(arb_addr):
'''
Much like the above test with ``tractor.Portal.cancel_actor()``
except we just use a standard ``trio`` cancellation api.
@ -183,7 +152,7 @@ def test_trio_cancels_aio(reg_addr):
async with tractor.open_nursery() as n:
await n.run_in_actor(
asyncio_actor,
target='aio_sleep_forever',
target='sleep_forever',
expect_err='trio.Cancelled',
infect_asyncio=True,
)
@ -214,18 +183,17 @@ async def trio_ctx(
# spawn another asyncio task for the cuck of it.
n.start_soon(
tractor.to_asyncio.run_task,
aio_sleep_forever,
sleep_forever,
)
await trio.sleep_forever()
@pytest.mark.parametrize(
'parent_cancels',
['context', 'actor', False],
'parent_cancels', [False, True],
ids='parent_actor_cancels_child={}'.format
)
def test_context_spawns_aio_task_that_errors(
reg_addr,
arb_addr,
parent_cancels: bool,
):
'''
@ -245,53 +213,26 @@ def test_context_spawns_aio_task_that_errors(
# debug_mode=True,
loglevel='cancel',
)
async with (
expect_ctxc(
yay=parent_cancels == 'actor',
),
p.open_context(
async with p.open_context(
trio_ctx,
) as (ctx, first),
):
) as (ctx, first):
assert first == 'start'
if parent_cancels == 'actor':
if parent_cancels:
await p.cancel_actor()
elif parent_cancels == 'context':
await ctx.cancel()
else:
await trio.sleep_forever()
async with expect_ctxc(
yay=parent_cancels == 'actor',
):
await ctx.result()
if parent_cancels == 'context':
# to tear down the sub-actor
await p.cancel_actor()
return ctx.outcome
if parent_cancels:
# bc the parent made the cancel request,
# the error is not raised locally but instead
# the context is exited silently
res = trio.run(main)
assert isinstance(res, ContextCancelled)
assert 'root' in res.canceller[0]
else:
expect = RemoteActorError
with pytest.raises(expect) as excinfo:
with pytest.raises(RemoteActorError) as excinfo:
trio.run(main)
err = excinfo.value
assert isinstance(err, expect)
assert err.boxed_type is AssertionError
assert isinstance(err, RemoteActorError)
if parent_cancels:
assert err.type == trio.Cancelled
else:
assert err.type == AssertionError
async def aio_cancel():
@ -300,53 +241,29 @@ async def aio_cancel():
'''
await asyncio.sleep(0.5)
task = asyncio.current_task()
# cancel and enter sleep
task = asyncio.current_task()
task.cancel()
await aio_sleep_forever()
await sleep_forever()
def test_aio_cancelled_from_aio_causes_trio_cancelled(reg_addr):
'''
When the `asyncio.Task` cancels itself the `trio` side should
also cancel, tear down, and relay the cancellation cross-process
to the caller (parent).
def test_aio_cancelled_from_aio_causes_trio_cancelled(arb_addr):
'''
async def main():
an: tractor.ActorNursery
async with tractor.open_nursery() as an:
p: tractor.Portal = await an.run_in_actor(
async with tractor.open_nursery() as n:
await n.run_in_actor(
asyncio_actor,
target='aio_cancel',
expect_err='tractor.to_asyncio.AsyncioCancelled',
infect_asyncio=True,
)
# NOTE: normally the `an.__aexit__()` waits on the
# portal's result but we do it explicitly here
# to avoid indent levels.
with trio.fail_after(1):
await p.wait_for_result()
with pytest.raises(
expected_exception=(RemoteActorError, ExceptionGroup),
) as excinfo:
with pytest.raises(RemoteActorError) as excinfo:
trio.run(main)
# might get multiple `trio.Cancelled`s as well inside an inception
err: RemoteActorError|ExceptionGroup = excinfo.value
if isinstance(err, ExceptionGroup):
err = next(itertools.dropwhile(
lambda exc: not isinstance(exc, tractor.RemoteActorError),
err.exceptions
))
assert err
# relayed boxed error should be our `trio`-task's
# cancel-signal-proxy-equivalent of `asyncio.CancelledError`.
assert err.boxed_type == to_asyncio.AsyncioCancelled
# ensure boxed error is correct
assert excinfo.value.type == to_asyncio.AsyncioCancelled
# TODO: verify open_channel_from will fail on this..
@ -387,6 +304,7 @@ async def push_from_aio_task(
async def stream_from_aio(
exit_early: bool = False,
raise_err: bool = False,
aio_raise_err: bool = False,
@ -467,7 +385,7 @@ async def stream_from_aio(
'fan_out', [False, True],
ids='fan_out_w_chan_subscribe={}'.format
)
def test_basic_interloop_channel_stream(reg_addr, fan_out):
def test_basic_interloop_channel_stream(arb_addr, fan_out):
async def main():
async with tractor.open_nursery() as n:
portal = await n.run_in_actor(
@ -475,14 +393,13 @@ def test_basic_interloop_channel_stream(reg_addr, fan_out):
infect_asyncio=True,
fan_out=fan_out,
)
# should raise RAE directly
await portal.result()
trio.run(main)
# TODO: parametrize the above test and avoid the duplication here?
def test_trio_error_cancels_intertask_chan(reg_addr):
def test_trio_error_cancels_intertask_chan(arb_addr):
async def main():
async with tractor.open_nursery() as n:
portal = await n.run_in_actor(
@ -493,14 +410,15 @@ def test_trio_error_cancels_intertask_chan(reg_addr):
# should trigger remote actor error
await portal.result()
with pytest.raises(RemoteActorError) as excinfo:
with pytest.raises(BaseExceptionGroup) as excinfo:
trio.run(main)
# ensure boxed error type
assert excinfo.value.boxed_type is Exception
# ensure boxed errors
for exc in excinfo.value.exceptions:
assert exc.type == Exception
def test_trio_closes_early_and_channel_exits(reg_addr):
def test_trio_closes_early_and_channel_exits(arb_addr):
async def main():
async with tractor.open_nursery() as n:
portal = await n.run_in_actor(
@ -508,14 +426,14 @@ def test_trio_closes_early_and_channel_exits(reg_addr):
exit_early=True,
infect_asyncio=True,
)
# should raise RAE directly
# should trigger remote actor error
await portal.result()
# should be a quiet exit on a simple channel exit
trio.run(main)
def test_aio_errors_and_channel_propagates_and_closes(reg_addr):
def test_aio_errors_and_channel_propagates_and_closes(arb_addr):
async def main():
async with tractor.open_nursery() as n:
portal = await n.run_in_actor(
@ -523,17 +441,15 @@ def test_aio_errors_and_channel_propagates_and_closes(reg_addr):
aio_raise_err=True,
infect_asyncio=True,
)
# should trigger RAE directly, not an eg.
# should trigger remote actor error
await portal.result()
with pytest.raises(
# NOTE: bc we directly wait on `Portal.result()` instead
# of capturing it inside the `ActorNursery` machinery.
expected_exception=RemoteActorError,
) as excinfo:
with pytest.raises(BaseExceptionGroup) as excinfo:
trio.run(main)
assert excinfo.value.boxed_type is Exception
# ensure boxed errors
for exc in excinfo.value.exceptions:
assert exc.type == Exception
@tractor.context
@ -594,7 +510,7 @@ async def trio_to_aio_echo_server(
ids='raise_error={}'.format,
)
def test_echoserver_detailed_mechanics(
reg_addr,
arb_addr,
raise_error_mid_stream,
):
@ -634,8 +550,7 @@ def test_echoserver_detailed_mechanics(
pass
else:
pytest.fail(
'stream not stopped after sentinel ?!'
)
"stream wasn't stopped after sentinel?!")
# TODO: the case where this blocks and
# is cancelled by kbi or out of task cancellation
@ -647,273 +562,3 @@ def test_echoserver_detailed_mechanics(
else:
trio.run(main)
@tractor.context
async def manage_file(
ctx: tractor.Context,
tmp_path_str: str,
send_sigint_to: str,
trio_side_is_shielded: bool = True,
bg_aio_task: bool = False,
):
'''
Start an `asyncio` task that just sleeps after registering a context
with `Actor.lifetime_stack`. Trigger a SIGINT to kill the actor tree
and ensure the stack is closed in the infected mode child.
To verify the teardown state just write a tmpfile to the `testdir`
and delete it on actor close.
'''
tmp_path: Path = Path(tmp_path_str)
tmp_file: Path = tmp_path / f'{" ".join(ctx._actor.uid)}.file'
# create the tmp file and tell the parent where it's at
assert not tmp_file.is_file()
tmp_file.touch()
stack: ExitStack = current_actor().lifetime_stack
stack.callback(tmp_file.unlink)
await ctx.started((
str(tmp_file),
os.getpid(),
))
# expect to be cancelled from here!
try:
# NOTE: turns out you don't even need to sched an aio task
# since the original issue, even though seemingly was due to
# the guest-run being abandoned + a `._debug.pause()` inside
# `._runtime._async_main()` (which was originally trying to
# debug the `.lifetime_stack` not closing), IS NOT actually
# the core issue?
#
# further notes:
#
# - `trio` only issues the " RuntimeWarning: Trio guest run
# got abandoned without properly finishing... weird stuff
# might happen" IFF you DO run a asyncio task here, BUT
# - the original issue of the `.lifetime_stack` not closing
# will still happen even if you don't run an `asyncio` task
# here even though the "abandon" messgage won't be shown..
#
# => ????? honestly i'm lost but it seems to be some issue
# with `asyncio` and SIGINT..
#
# honestly, this REALLY reminds me why i haven't used
# `asyncio` by choice in years.. XD
#
async with trio.open_nursery() as tn:
if bg_aio_task:
tn.start_soon(
tractor.to_asyncio.run_task,
aio_sleep_forever,
)
# XXX don't-need/doesn't-make-a-diff right
# since we're already doing it from parent?
# if send_sigint_to == 'child':
# os.kill(
# os.getpid(),
# signal.SIGINT,
# )
# XXX spend a half sec doing shielded checkpointing to
# ensure that despite the `trio`-side task ignoring the
# SIGINT, the `asyncio` side won't abandon the guest-run!
if trio_side_is_shielded:
with trio.CancelScope(shield=True):
for i in range(5):
await trio.sleep(0.1)
await trio.sleep_forever()
# signalled manually at the OS level (aka KBI) by the parent actor.
except KeyboardInterrupt:
print('child raised KBI..')
assert tmp_file.exists()
raise
raise RuntimeError('shoulda received a KBI?')
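# Hedged, stdlib-only sketch (not from this diff) of the lifetime-stack
# cleanup pattern `manage_file()` relies on above: a callback registered
# on an exit stack removes the tmp file when the stack closes, mirroring
# how `Actor.lifetime_stack` tears down resources on actor exit.
from contextlib import ExitStack as _ExitStack
from pathlib import Path as _Path
import tempfile as _tempfile

with _ExitStack() as _stack:
    _tmp = _Path(_tempfile.mkdtemp()) / 'demo.file'
    _tmp.touch()
    _stack.callback(_tmp.unlink)  # runs when the stack closes, even on error
    assert _tmp.exists()

assert not _tmp.exists()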
@pytest.mark.parametrize(
'trio_side_is_shielded',
[
False,
True,
],
ids=[
'trio_side_no_shielding',
'trio_side_does_shielded_work',
],
)
@pytest.mark.parametrize(
'send_sigint_to',
[
'child',
'parent',
],
ids='send_SIGINT_to={}'.format,
)
@pytest.mark.parametrize(
'bg_aio_task',
[
False,
# NOTE: (and see notes in `manage_file()` above as well) if
# we FOR SURE SPAWN AN AIO TASK in the child it seems the
# "silent-abandon" case (as is described in detail in
# `to_asyncio.run_as_asyncio_guest()`) does not happen and
# `asyncio`'s loop will at least abandon the `trio` side
# loudly? .. prolly the state-spot to start looking for
# a soln that results in NO ABANDONMENT.. XD
True,
],
ids=[
'bg_aio_task',
'just_trio_sleep',
],
)
@pytest.mark.parametrize(
'wait_for_ctx',
[
False,
True,
],
ids=[
'raise_KBI_in_rent',
'wait_for_ctx',
],
)
def test_sigint_closes_lifetime_stack(
tmp_path: Path,
wait_for_ctx: bool,
bg_aio_task: bool,
trio_side_is_shielded: bool,
debug_mode: bool,
send_sigint_to: str,
):
'''
Ensure that an infected child can use the `Actor.lifetime_stack`
to make a file on boot and it's automatically cleaned up by the
actor-lifetime-linked exit stack closure.
'''
async def main():
try:
an: tractor.ActorNursery
async with tractor.open_nursery(
debug_mode=debug_mode,
) as an:
p: tractor.Portal = await an.start_actor(
'file_mngr',
enable_modules=[__name__],
infect_asyncio=True,
)
async with p.open_context(
manage_file,
tmp_path_str=str(tmp_path),
send_sigint_to=send_sigint_to,
bg_aio_task=bg_aio_task,
trio_side_is_shielded=trio_side_is_shielded,
) as (ctx, first):
path_str, cpid = first
tmp_file: Path = Path(path_str)
assert tmp_file.exists()
# XXX originally to simulate what (hopefully)
# the below now triggers.. had to manually
# trigger a SIGINT from a ctl-c in the root.
# await trio.sleep_forever()
# XXX NOTE XXX signal infected-`asyncio` child to
# OS-cancel with SIGINT; this should trigger the
# bad `asyncio` cancel behaviour that can cause
# a guest-run abandon as was seen causing
# shm-buffer leaks in `piker`'s live quote stream
# subsys!
#
await trio.sleep(.2)
pid: int = (
cpid if send_sigint_to == 'child'
else os.getpid()
)
os.kill(
pid,
signal.SIGINT,
)
# XXX CASE 1: without the bug fixed, in
# the non-KBI-raised-in-parent case, this
# timeout should trigger!
if wait_for_ctx:
print('waiting for ctx outcome in parent..')
try:
with trio.fail_after(1):
await ctx.wait_for_result()
except tractor.ContextCancelled as ctxc:
assert ctxc.canceller == ctx.chan.uid
raise
# XXX CASE 2: this seems to be the source of the
# original issue which exhibited BEFORE we put
# a `Actor.cancel_soon()` inside
# `run_as_asyncio_guest()`..
else:
raise KeyboardInterrupt
pytest.fail('should have raised some kinda error?!?')
except (
KeyboardInterrupt,
ContextCancelled,
):
# XXX CASE 2: without the bug fixed, in the
# KBI-raised-in-parent case, the actor teardown should
# never get run (silently abandoned by `asyncio`..) and
# thus the file should leak!
assert not tmp_file.exists()
assert ctx.maybe_error
trio.run(main)
# TODO: debug_mode tests once we get support for `asyncio`!
#
# -[ ] need tests to wrap both scripts:
# - [ ] infected_asyncio_echo_server.py
# - [ ] debugging/asyncio_bp.py
# -[ ] consider moving ^ (some of) these ^ to `test_debugger`?
#
# -[ ] missing impl outstanding includes:
# - [x] for sync pauses we need to ensure we open yet another
# `greenback` portal in the asyncio task
# => completed using `.bestow_portal(task)` inside
# `.to_asyncio._run_asyncio_task()` right?
# -[ ] translation func to get from `asyncio` task calling to
# `._debug.wait_for_parent_stdin_hijack()` which does root
# call to do TTY locking.
#
def test_sync_breakpoint():
'''
Verify we can do sync-func/code breakpointing using the
`breakpoint()` builtin inside infected mode actors.
'''
pytest.xfail('This support is not implemented yet!')
def test_debug_mode_crash_handling():
'''
Verify multi-actor crash handling works with a combo of infected-`asyncio`-mode
and normal `trio` actors despite nested process trees.
'''
pytest.xfail('This support is not implemented yet!')

File diff suppressed because it is too large

View File

@ -9,7 +9,7 @@ import trio
import tractor
import pytest
from tractor._testing import tractor_test
from conftest import tractor_test
def test_must_define_ctx():
@ -38,13 +38,10 @@ async def async_gen_stream(sequence):
assert cs.cancelled_caught
# TODO: deprecated; either remove entirely
# or re-impl in terms of a `MsgStream` one-sided
# wrapper, but at least remove `Portal.open_stream_from()`
@tractor.stream
async def context_stream(
ctx: tractor.Context,
sequence: list[int],
sequence
):
for i in sequence:
await ctx.send_yield(i)
@ -58,7 +55,7 @@ async def context_stream(
async def stream_from_single_subactor(
reg_addr,
arb_addr,
start_method,
stream_func,
):
@ -67,7 +64,7 @@ async def stream_from_single_subactor(
# only one per host address, spawns an actor if None
async with tractor.open_nursery(
registry_addrs=[reg_addr],
arbiter_addr=arb_addr,
start_method=start_method,
) as nursery:
@ -118,13 +115,13 @@ async def stream_from_single_subactor(
@pytest.mark.parametrize(
'stream_func', [async_gen_stream, context_stream]
)
def test_stream_from_single_subactor(reg_addr, start_method, stream_func):
def test_stream_from_single_subactor(arb_addr, start_method, stream_func):
"""Verify streaming from a spawned async generator.
"""
trio.run(
partial(
stream_from_single_subactor,
reg_addr,
arb_addr,
start_method,
stream_func=stream_func,
),
@ -228,14 +225,14 @@ async def a_quadruple_example():
return result_stream
async def cancel_after(wait, reg_addr):
async with tractor.open_root_actor(registry_addrs=[reg_addr]):
async def cancel_after(wait, arb_addr):
async with tractor.open_root_actor(arbiter_addr=arb_addr):
with trio.move_on_after(wait):
return await a_quadruple_example()
@pytest.fixture(scope='module')
def time_quad_ex(reg_addr, ci_env, spawn_backend):
def time_quad_ex(arb_addr, ci_env, spawn_backend):
if spawn_backend == 'mp':
"""no idea but the mp *nix runs are flaking out here often...
"""
@ -243,7 +240,7 @@ def time_quad_ex(reg_addr, ci_env, spawn_backend):
timeout = 7 if platform.system() in ('Windows', 'Darwin') else 4
start = time.time()
results = trio.run(cancel_after, timeout, reg_addr)
results = trio.run(cancel_after, timeout, arb_addr)
diff = time.time() - start
assert results
return results, diff
@ -263,14 +260,14 @@ def test_a_quadruple_example(time_quad_ex, ci_env, spawn_backend):
list(map(lambda i: i/10, range(3, 9)))
)
def test_not_fast_enough_quad(
reg_addr, time_quad_ex, cancel_delay, ci_env, spawn_backend
arb_addr, time_quad_ex, cancel_delay, ci_env, spawn_backend
):
"""Verify we can cancel midway through the quad example and all actors
cancel gracefully.
"""
results, diff = time_quad_ex
delay = max(diff - cancel_delay, 0)
results = trio.run(cancel_after, delay, reg_addr)
results = trio.run(cancel_after, delay, arb_addr)
system = platform.system()
if system in ('Windows', 'Darwin') and results is not None:
# In CI environments it seems later runs are quicker than the first
@ -283,7 +280,7 @@ def test_not_fast_enough_quad(
@tractor_test
async def test_respawn_consumer_task(
reg_addr,
arb_addr,
spawn_backend,
loglevel,
):

View File

@ -7,7 +7,7 @@ import pytest
import trio
import tractor
from tractor._testing import tractor_test
from conftest import tractor_test
@pytest.mark.trio
@ -24,7 +24,7 @@ async def test_no_runtime():
@tractor_test
async def test_self_is_registered(reg_addr):
async def test_self_is_registered(arb_addr):
"Verify waiting on the arbiter to register itself using the standard api."
actor = tractor.current_actor()
assert actor.is_arbiter
@ -34,20 +34,20 @@ async def test_self_is_registered(reg_addr):
@tractor_test
async def test_self_is_registered_localportal(reg_addr):
async def test_self_is_registered_localportal(arb_addr):
"Verify waiting on the arbiter to register itself using a local portal."
actor = tractor.current_actor()
assert actor.is_arbiter
async with tractor.get_registry(*reg_addr) as portal:
async with tractor.get_arbiter(*arb_addr) as portal:
assert isinstance(portal, tractor._portal.LocalPortal)
with trio.fail_after(0.2):
sockaddr = await portal.run_from_ns(
'self', 'wait_for_actor', name='root')
assert sockaddr[0] == reg_addr
assert sockaddr[0] == arb_addr
def test_local_actor_async_func(reg_addr):
def test_local_actor_async_func(arb_addr):
"""Verify a simple async function in-process.
"""
nums = []
@ -55,7 +55,7 @@ def test_local_actor_async_func(reg_addr):
async def print_loop():
async with tractor.open_root_actor(
registry_addrs=[reg_addr],
arbiter_addr=arb_addr,
):
# arbiter is started in-proc if dne
assert tractor.current_actor().is_arbiter

View File

@ -7,10 +7,8 @@ import time
import pytest
import trio
import tractor
from tractor._testing import (
tractor_test,
)
from conftest import (
tractor_test,
sig_prog,
_INT_SIGNAL,
_INT_RETURN_CODE,
@ -30,9 +28,9 @@ def test_abort_on_sigint(daemon):
@tractor_test
async def test_cancel_remote_arbiter(daemon, reg_addr):
async def test_cancel_remote_arbiter(daemon, arb_addr):
assert not tractor.current_actor().is_arbiter
async with tractor.get_registry(*reg_addr) as portal:
async with tractor.get_arbiter(*arb_addr) as portal:
await portal.cancel_actor()
time.sleep(0.1)
@ -41,16 +39,16 @@ async def test_cancel_remote_arbiter(daemon, reg_addr):
# no arbiter socket should exist
with pytest.raises(OSError):
async with tractor.get_registry(*reg_addr) as portal:
async with tractor.get_arbiter(*arb_addr) as portal:
pass
def test_register_duplicate_name(daemon, reg_addr):
def test_register_duplicate_name(daemon, arb_addr):
async def main():
async with tractor.open_nursery(
registry_addrs=[reg_addr],
arbiter_addr=arb_addr,
) as n:
assert not tractor.current_actor().is_arbiter

View File

@ -1,364 +0,0 @@
'''
Audit sub-sys APIs from `.msg._ops`
mostly for ensuring correct `contextvars`
related settings around IPC contexts.
'''
from contextlib import (
asynccontextmanager as acm,
)
from msgspec import (
Struct,
)
import pytest
import trio
import tractor
from tractor import (
Context,
MsgTypeError,
current_ipc_ctx,
Portal,
)
from tractor.msg import (
_ops as msgops,
Return,
)
from tractor.msg import (
_codec,
)
from tractor.msg.types import (
log,
)
class PldMsg(
Struct,
# TODO: with multiple structs in-spec we need to tag them!
# -[ ] offer a built-in `PldMsg` type to inherit from which takes
# care of these details?
#
# https://jcristharif.com/msgspec/structs.html#tagged-unions
# tag=True,
# tag_field='msg_type',
):
field: str
maybe_msg_spec = PldMsg|None
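# Hedged, msgspec-only sketch (not from this diff) of the tagged-union
# approach the TODO above references, re-using the `Struct` import above:
# tagging each struct lets one decoder discriminate between multiple
# payload types sharing a single spec.
from msgspec import msgpack as _mp

class _Ping(Struct, tag=True):
    seq: int

class _Pong(Struct, tag=True):
    seq: int

_dec = _mp.Decoder(_Ping | _Pong)
_enc = _mp.Encoder()
_msg = _dec.decode(_enc.encode(_Pong(seq=1)))
assert isinstance(_msg, _Pong) and _msg.seq == 1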
@acm
async def maybe_expect_raises(
raises: BaseException|None = None,
ensure_in_message: list[str]|None = None,
post_mortem: bool = False,
timeout: int = 3,
) -> None:
'''
Async wrapper for ensuring errors propagate from the inner scope.
'''
if tractor._state.debug_mode():
timeout += 999
with trio.fail_after(timeout):
try:
yield
except BaseException as _inner_err:
inner_err = _inner_err
# wasn't-expected to error..
if raises is None:
raise
else:
assert type(inner_err) is raises
# maybe check for error txt content
if ensure_in_message:
part: str
err_repr: str = repr(inner_err)
for part in ensure_in_message:
for i, arg in enumerate(inner_err.args):
if part in err_repr:
break
# if part never matches an arg, then we're
# missing a match.
else:
raise ValueError(
'Failed to find error message content?\n\n'
f'expected: {ensure_in_message!r}\n'
f'part: {part!r}\n\n'
f'{inner_err.args}'
)
if post_mortem:
await tractor.post_mortem()
else:
if raises:
raise RuntimeError(
f'Expected a {raises.__name__!r} to be raised?'
)
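# Hedged usage sketch (not from this diff) for the helper above: the
# wrapped block must raise the expected error type containing the
# expected message fragment, otherwise the helper itself errors.
async def _demo_expect_raises():
    async with maybe_expect_raises(
        raises=ValueError,
        ensure_in_message=['bad input'],
    ):
        raise ValueError('bad input from demo')

trio.run(_demo_expect_raises)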
@tractor.context(
pld_spec=maybe_msg_spec,
)
async def child(
ctx: Context,
started_value: int|PldMsg|None,
return_value: str|None,
validate_pld_spec: bool,
raise_on_started_mte: bool = True,
) -> None:
'''
Call ``Context.started()`` more than once (an error).
'''
expect_started_mte: bool = started_value == 10
# sanity check that child RPC context is the current one
curr_ctx: Context = current_ipc_ctx()
assert ctx is curr_ctx
rx: msgops.PldRx = ctx._pld_rx
curr_pldec: _codec.MsgDec = rx.pld_dec
ctx_meta: dict = getattr(
child,
'_tractor_context_meta',
None,
)
if ctx_meta:
assert (
ctx_meta['pld_spec']
is curr_pldec.spec
is curr_pldec.pld_spec
)
# 2 cases: handle send-side and recv-only validation
# - when `raise_on_started_mte == True`, validate on send
# - else, parent-recv-side only validation
mte: MsgTypeError|None = None
try:
await ctx.started(
value=started_value,
validate_pld_spec=validate_pld_spec,
)
except MsgTypeError as _mte:
mte = _mte
log.exception('`started()` raised an MTE!\n')
if not expect_started_mte:
raise RuntimeError(
'Child-ctx-task SHOULD NOT HAVE raised an MTE for\n\n'
f'{started_value!r}\n'
)
boxed_div: str = '------ - ------'
assert boxed_div not in mte._message
assert boxed_div not in mte.tb_str
assert boxed_div not in repr(mte)
assert boxed_div not in str(mte)
mte_repr: str = repr(mte)
for line in mte.message.splitlines():
assert line in mte_repr
# since this is a *local error* there should be no
# boxed traceback content!
assert not mte.tb_str
# propagate to parent?
if raise_on_started_mte:
raise
# no-send-side-error fallthrough
if (
validate_pld_spec
and
expect_started_mte
):
raise RuntimeError(
'Child-ctx-task SHOULD HAVE raised an MTE for\n\n'
f'{started_value!r}\n'
)
assert (
not expect_started_mte
or
not validate_pld_spec
)
# if wait_for_parent_to_cancel:
# ...
#
# ^-TODO-^ logic for diff validation policies on each side:
#
# -[ ] ensure that if we don't validate on the send
# side, that we are eventually error-cancelled by our
# parent due to the bad `Started` payload!
# -[ ] the boxed error should be srced from the parent's
# runtime NOT ours!
# -[ ] we should still error on bad `return_value`s
# despite the parent not yet error-cancelling us?
# |_ how do we want the parent side to look in that
# case?
# -[ ] maybe the equiv of "during handling of the
# above error another occurred" for the case where
# the parent sends a MTE to this child and while
# waiting for the child to terminate it gets back
# the MTE for this case?
#
# XXX should always fail on recv side since we can't
# really do much else beside terminate and relay the
# msg-type-error from this RPC task ;)
return return_value
@pytest.mark.parametrize(
'return_value',
[
'yo',
None,
],
ids=[
'return[invalid-"yo"]',
'return[valid-None]',
],
)
@pytest.mark.parametrize(
'started_value',
[
10,
PldMsg(field='yo'),
],
ids=[
'Started[invalid-10]',
'Started[valid-PldMsg]',
],
)
@pytest.mark.parametrize(
'pld_check_started_value',
[
True,
False,
],
ids=[
'check-started-pld',
'no-started-pld-validate',
],
)
def test_basic_payload_spec(
debug_mode: bool,
loglevel: str,
return_value: str|None,
started_value: int|PldMsg,
pld_check_started_value: bool,
):
'''
Validate the most basic `PldRx` msg-type-spec semantics around
an IPC `Context` endpoint start, started-sync, and final return
value depending on set payload types and the currently applied
pld-spec.
'''
invalid_return: bool = return_value == 'yo'
invalid_started: bool = started_value == 10
async def main():
async with tractor.open_nursery(
debug_mode=debug_mode,
loglevel=loglevel,
) as an:
p: Portal = await an.start_actor(
'child',
enable_modules=[__name__],
)
# since not opened yet.
assert current_ipc_ctx() is None
if invalid_started:
msg_type_str: str = 'Started'
bad_value: int = 10
elif invalid_return:
msg_type_str: str = 'Return'
bad_value: str = 'yo'
else:
# XXX but should never be used below then..
msg_type_str: str = ''
bad_value: str = ''
maybe_mte: MsgTypeError|None = None
should_raise: Exception|None = (
MsgTypeError if (
invalid_return
or
invalid_started
) else None
)
async with (
maybe_expect_raises(
raises=should_raise,
ensure_in_message=[
f"invalid `{msg_type_str}` msg payload",
f'{bad_value}',
f'has type {type(bad_value)!r}',
'not match type-spec',
f'`{msg_type_str}.pld: PldMsg|NoneType`',
],
# only for debug
# post_mortem=True,
),
p.open_context(
child,
return_value=return_value,
started_value=started_value,
validate_pld_spec=pld_check_started_value,
) as (ctx, first),
):
# now opened with 'child' sub
assert current_ipc_ctx() is ctx
assert type(first) is PldMsg
assert first.field == 'yo'
try:
res: None|PldMsg = await ctx.result(hide_tb=False)
assert res is None
except MsgTypeError as mte:
maybe_mte = mte
if not invalid_return:
raise
# expected this invalid `Return.pld` so audit
# the error state + meta-data
assert mte.expected_msg_type is Return
assert mte.cid == ctx.cid
mte_repr: str = repr(mte)
for line in mte.message.splitlines():
assert line in mte_repr
assert mte.tb_str
# await tractor.pause(shield=True)
# verify expected remote mte deats
assert ctx._local_error is None
assert (
mte is
ctx._remote_error is
ctx.maybe_error is
ctx.outcome
)
if should_raise is None:
assert maybe_mte is None
await p.cancel_actor()
trio.run(main)
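
For orientation, the flow exercised above is just a parent opening an IPC context against a child endpoint and validating the ``Started`` and ``Return`` payloads against the applied pld-spec. A stripped-down sketch without any custom spec (so payloads round-trip as plain builtins; under the ``PldMsg|None`` spec used above a non-conforming payload would instead raise ``MsgTypeError``)::

    import trio
    import tractor

    @tractor.context
    async def basic_child(
        ctx: tractor.Context,
    ) -> None:
        # sync the opener side with a payload value
        await ctx.started({'field': 'yo'})

    async def main():
        async with tractor.open_nursery() as an:
            portal = await an.start_actor(
                'child',
                enable_modules=[__name__],
            )
            async with portal.open_context(basic_child) as (ctx, first):
                assert first == {'field': 'yo'}
                # final result delivered on child-task exit
                assert await ctx.result() is None

            await portal.cancel_actor()

    trio.run(main)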

View File

@ -5,7 +5,8 @@ import pytest
import trio
import tractor
from tractor.experimental import msgpub
from tractor._testing import tractor_test
from conftest import tractor_test
def test_type_checks():
@ -159,7 +160,7 @@ async def test_required_args(callwith_expecterror):
)
def test_multi_actor_subs_arbiter_pub(
loglevel,
reg_addr,
arb_addr,
pub_actor,
):
"""Try out the neato @pub decorator system.
@ -169,7 +170,7 @@ def test_multi_actor_subs_arbiter_pub(
async def main():
async with tractor.open_nursery(
registry_addrs=[reg_addr],
arbiter_addr=arb_addr,
enable_modules=[__name__],
) as n:
@ -254,12 +255,12 @@ def test_multi_actor_subs_arbiter_pub(
def test_single_subactor_pub_multitask_subs(
loglevel,
reg_addr,
arb_addr,
):
async def main():
async with tractor.open_nursery(
registry_addrs=[reg_addr],
arbiter_addr=arb_addr,
enable_modules=[__name__],
) as n:

View File

@ -34,6 +34,7 @@ def test_resource_only_entered_once(key_on):
global _resource
_resource = 0
kwargs = {}
key = None
if key_on == 'key_value':
key = 'some_common_key'
@ -138,7 +139,7 @@ def test_open_local_sub_to_stream():
N local tasks using ``trionics.maybe_open_context():``.
'''
timeout: float = 3.6 if platform.system() != "Windows" else 10
timeout = 3 if platform.system() != "Windows" else 10
async def main():

View File

@ -1,8 +1,6 @@
'''
RPC (or maybe better labelled as "RTS: remote task scheduling"?)
related API and error checks.
'''
"""
RPC related
"""
import itertools
import pytest
@ -15,19 +13,9 @@ async def sleep_back_actor(
func_name,
func_defined,
exposed_mods,
*,
reg_addr: tuple,
):
if actor_name:
async with tractor.find_actor(
actor_name,
# NOTE: must be set manually since
# the subactor doesn't have the reg_addr
# fixture code run in it!
# TODO: maybe we should just set this once in the
# _state mod and derive to all children?
registry_addrs=[reg_addr],
) as portal:
async with tractor.find_actor(actor_name) as portal:
try:
await portal.run(__name__, func_name)
except tractor.RemoteActorError as err:
@ -36,7 +24,7 @@ async def sleep_back_actor(
if not exposed_mods:
expect = tractor.ModuleNotExposed
assert err.boxed_type is expect
assert err.type is expect
raise
else:
await trio.sleep(float('inf'))
@ -54,25 +42,14 @@ async def short_sleep():
(['tmp_mod'], 'import doggy', ModuleNotFoundError),
(['tmp_mod'], '4doggy', SyntaxError),
],
ids=[
'no_mods',
'this_mod',
'this_mod_bad_func',
'fail_to_import',
'fail_on_syntax',
],
ids=['no_mods', 'this_mod', 'this_mod_bad_func', 'fail_to_import',
'fail_on_syntax'],
)
def test_rpc_errors(
reg_addr,
to_call,
testdir,
):
'''
Test errors when making various RPC requests to an actor
def test_rpc_errors(arb_addr, to_call, testdir):
"""Test errors when making various RPC requests to an actor
that either doesn't have the requested module exposed or doesn't define
the named function.
'''
"""
exposed_mods, funcname, inside_err = to_call
subactor_exposed_mods = []
func_defined = globals().get(funcname, False)
@ -100,13 +77,8 @@ def test_rpc_errors(
# spawn a subactor which calls us back
async with tractor.open_nursery(
registry_addrs=[reg_addr],
arbiter_addr=arb_addr,
enable_modules=exposed_mods.copy(),
# NOTE: will halt test in REPL if uncommented, so only
# do that if actually debugging subactor but keep it
# disabled for the test.
# debug_mode=True,
) as n:
actor = tractor.current_actor()
@ -123,7 +95,6 @@ def test_rpc_errors(
exposed_mods=exposed_mods,
func_defined=True if func_defined else False,
enable_modules=subactor_exposed_mods,
reg_addr=reg_addr,
)
def run():
@ -134,20 +105,18 @@ def test_rpc_errors(
run()
else:
# underlying errors aren't propagated upwards (yet)
with pytest.raises(
expected_exception=(remote_err, ExceptionGroup),
) as err:
with pytest.raises(remote_err) as err:
run()
# get raw instance from pytest wrapper
value = err.value
# might get multiple `trio.Cancelled`s as well inside an inception
if isinstance(value, ExceptionGroup):
if isinstance(value, trio.MultiError):
value = next(itertools.dropwhile(
lambda exc: not isinstance(exc, tractor.RemoteActorError),
value.exceptions
))
if getattr(value, 'type', None):
assert value.boxed_type is inside_err
assert value.type is inside_err
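
Since a remote failure may surface wrapped in an ``ExceptionGroup`` (the 3.11+ builtin or the ``exceptiongroup`` backport), callers often need the inner-error unwrap shown above; a standalone sketch of that pattern (hypothetical helper name, assumes at least one boxed ``RemoteActorError`` is present in the group)::

    import itertools
    import tractor

    def first_remote_error(exc: BaseException) -> BaseException:
        # drill into a group for the first boxed remote error,
        # otherwise hand back the bare exception as-is
        if isinstance(exc, ExceptionGroup):
            return next(itertools.dropwhile(
                lambda e: not isinstance(e, tractor.RemoteActorError),
                exc.exceptions,
            ))
        return exc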

View File

@ -8,7 +8,7 @@ import pytest
import trio
import tractor
from tractor._testing import tractor_test
from conftest import tractor_test
_file_path: str = ''
@ -64,8 +64,7 @@ async def test_lifetime_stack_wipes_tmpfile(
except (
tractor.RemoteActorError,
# tractor.BaseExceptionGroup,
BaseExceptionGroup,
tractor.BaseExceptionGroup,
):
pass

View File

@ -1,167 +0,0 @@
"""
Shared mem primitives and APIs.
"""
import uuid
# import numpy
import pytest
import trio
import tractor
from tractor._shm import (
open_shm_list,
attach_shm_list,
)
@tractor.context
async def child_attach_shml_alot(
ctx: tractor.Context,
shm_key: str,
) -> None:
await ctx.started(shm_key)
# now try to attach a boatload of times in a loop..
for _ in range(1000):
shml = attach_shm_list(
key=shm_key,
readonly=False,
)
assert shml.shm.name == shm_key
await trio.sleep(0.001)
def test_child_attaches_alot():
async def main():
async with tractor.open_nursery() as an:
# allocate writeable list in parent
key = f'shml_{uuid.uuid4()}'
shml = open_shm_list(
key=key,
)
portal = await an.start_actor(
'shm_attacher',
enable_modules=[__name__],
)
async with (
portal.open_context(
child_attach_shml_alot,
shm_key=shml.key,
) as (ctx, start_val),
):
assert start_val == key
await ctx.result()
await portal.cancel_actor()
trio.run(main)
@tractor.context
async def child_read_shm_list(
ctx: tractor.Context,
shm_key: str,
use_str: bool,
frame_size: int,
) -> None:
# attach in child
shml = attach_shm_list(
key=shm_key,
# dtype=str if use_str else float,
)
await ctx.started(shml.key)
async with ctx.open_stream() as stream:
async for i in stream:
print(f'(child): reading shm list index: {i}')
if use_str:
expect = str(float(i))
else:
expect = float(i)
if frame_size == 1:
val = shml[i]
assert expect == val
print(f'(child): reading value: {val}')
else:
frame = shml[i - frame_size:i]
print(f'(child): reading frame: {frame}')
@pytest.mark.parametrize(
'use_str',
[False, True],
ids=lambda i: f'use_str_values={i}',
)
@pytest.mark.parametrize(
'frame_size',
[1, 2**6, 2**10],
ids=lambda i: f'frame_size={i}',
)
def test_parent_writer_child_reader(
use_str: bool,
frame_size: int,
):
async def main():
async with tractor.open_nursery(
# debug_mode=True,
) as an:
portal = await an.start_actor(
'shm_reader',
enable_modules=[__name__],
debug_mode=True,
)
# allocate writeable list in parent
key = 'shm_list'
seq_size = int(2 * 2 ** 10)
shml = open_shm_list(
key=key,
size=seq_size,
dtype=str if use_str else float,
readonly=False,
)
async with (
portal.open_context(
child_read_shm_list,
shm_key=key,
use_str=use_str,
frame_size=frame_size,
) as (ctx, sent),
ctx.open_stream() as stream,
):
assert sent == key
for i in range(seq_size):
val = float(i)
if use_str:
val = str(val)
# print(f'(parent): writing {val}')
shml[i] = val
# only on frame fills do we
# signal to the child that a frame's
# worth is ready.
if (i % frame_size) == 0:
print(f'(parent): signalling frame full on {val}')
await stream.send(i)
else:
print(f'(parent): signalling final frame on {val}')
await stream.send(i)
await portal.cancel_actor()
trio.run(main)
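
The above boils down to an allocate-in-one-actor / attach-by-key-in-another pattern; a same-process sketch using only the calls exercised in these tests (``tractor._shm`` is an internal module, so treat this purely as an illustration)::

    import trio
    import tractor
    from tractor._shm import (
        open_shm_list,
        attach_shm_list,
    )

    async def main():
        async with tractor.open_root_actor():
            # writer side: allocate a shared, float-typed list
            shml = open_shm_list(
                key='demo_shml',
                size=16,
                dtype=float,
                readonly=False,
            )
            shml[0] = 0.5

            # reader side (normally a separate actor): attach by key
            view = attach_shm_list(key='demo_shml')
            assert view[0] == 0.5

    trio.run(main)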

View File

@ -2,15 +2,13 @@
Spawning basics
"""
from typing import (
Any,
)
from typing import Optional
import pytest
import trio
import tractor
from tractor._testing import tractor_test
from conftest import tractor_test
data_to_pass_down = {'doggy': 10, 'kitty': 4}
@ -18,21 +16,24 @@ data_to_pass_down = {'doggy': 10, 'kitty': 4}
async def spawn(
is_arbiter: bool,
data: dict,
reg_addr: tuple[str, int],
arb_addr: tuple[str, int],
):
namespaces = [__name__]
await trio.sleep(0.1)
async with tractor.open_root_actor(
arbiter_addr=reg_addr,
arbiter_addr=arb_addr,
):
actor = tractor.current_actor()
assert actor.is_arbiter == is_arbiter
data = data_to_pass_down
if actor.is_arbiter:
async with tractor.open_nursery() as nursery:
async with tractor.open_nursery(
) as nursery:
# forks here
portal = await nursery.run_in_actor(
@ -40,7 +41,7 @@ async def spawn(
is_arbiter=False,
name='sub-actor',
data=data,
reg_addr=reg_addr,
arb_addr=arb_addr,
enable_modules=namespaces,
)
@ -54,14 +55,12 @@ async def spawn(
return 10
def test_local_arbiter_subactor_global_state(
reg_addr,
):
def test_local_arbiter_subactor_global_state(arb_addr):
result = trio.run(
spawn,
True,
data_to_pass_down,
reg_addr,
arb_addr,
)
assert result == 10
@ -95,9 +94,7 @@ async def test_movie_theatre_convo(start_method):
await portal.cancel_actor()
async def cellar_door(
return_value: str|None,
):
async def cellar_door(return_value: Optional[str]):
return return_value
@ -107,18 +104,16 @@ async def cellar_door(
)
@tractor_test
async def test_most_beautiful_word(
start_method: str,
return_value: Any,
debug_mode: bool,
start_method,
return_value
):
'''
The main ``tractor`` routine.
'''
with trio.fail_after(1):
async with tractor.open_nursery(
debug_mode=debug_mode,
) as n:
async with tractor.open_nursery() as n:
portal = await n.run_in_actor(
cellar_door,
return_value=return_value,
@ -145,7 +140,7 @@ async def check_loglevel(level):
def test_loglevel_propagated_to_subactor(
start_method,
capfd,
reg_addr,
arb_addr,
):
if start_method == 'mp_forkserver':
pytest.skip(
@ -157,7 +152,7 @@ def test_loglevel_propagated_to_subactor(
async with tractor.open_nursery(
name='arbiter',
start_method=start_method,
arbiter_addr=reg_addr,
arbiter_addr=arb_addr,
) as tn:
await tn.run_in_actor(

View File

@ -66,13 +66,13 @@ async def ensure_sequence(
async def open_sequence_streamer(
sequence: list[int],
reg_addr: tuple[str, int],
arb_addr: tuple[str, int],
start_method: str,
) -> tractor.MsgStream:
async with tractor.open_nursery(
arbiter_addr=reg_addr,
arbiter_addr=arb_addr,
start_method=start_method,
) as tn:
@ -86,14 +86,14 @@ async def open_sequence_streamer(
) as (ctx, first):
assert first is None
async with ctx.open_stream(allow_overruns=True) as stream:
async with ctx.open_stream(backpressure=True) as stream:
yield stream
await portal.cancel_actor()
def test_stream_fan_out_to_local_subscriptions(
reg_addr,
arb_addr,
start_method,
):
@ -103,7 +103,7 @@ def test_stream_fan_out_to_local_subscriptions(
async with open_sequence_streamer(
sequence,
reg_addr,
arb_addr,
start_method,
) as stream:
@ -138,7 +138,7 @@ def test_stream_fan_out_to_local_subscriptions(
]
)
def test_consumer_and_parent_maybe_lag(
reg_addr,
arb_addr,
start_method,
task_delays,
):
@ -150,7 +150,7 @@ def test_consumer_and_parent_maybe_lag(
async with open_sequence_streamer(
sequence,
reg_addr,
arb_addr,
start_method,
) as stream:
@ -211,7 +211,7 @@ def test_consumer_and_parent_maybe_lag(
def test_faster_task_to_recv_is_cancelled_by_slower(
reg_addr,
arb_addr,
start_method,
):
'''
@ -225,7 +225,7 @@ def test_faster_task_to_recv_is_cancelled_by_slower(
async with open_sequence_streamer(
sequence,
reg_addr,
arb_addr,
start_method,
) as stream:
@ -302,7 +302,7 @@ def test_subscribe_errors_after_close():
def test_ensure_slow_consumers_lag_out(
reg_addr,
arb_addr,
start_method,
):
'''This is a pure local task test; no tractor
@ -413,8 +413,8 @@ def test_ensure_slow_consumers_lag_out(
seq = brx._state.subs[brx.key]
assert seq == len(brx._state.queue) - 1
# all no_overruns entries in the underlying
# channel should have been copied into the bcaster
# all backpressured entries in the underlying
# channel should have been copied into the caster
# queue trailing-window
async for i in rx:
print(f'bped: {i}')

View File

@ -5,7 +5,7 @@ want to see changed.
'''
import pytest
import trio
from trio import TaskStatus
from trio_typing import TaskStatus
@pytest.mark.parametrize(

View File

@ -15,53 +15,72 @@
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
tractor: structured concurrent ``trio``-"actors".
tractor: structured concurrent "actors".
"""
from ._clustering import (
open_actor_cluster as open_actor_cluster,
)
from ._context import (
Context as Context, # the type
context as context, # a func-decorator
)
from exceptiongroup import BaseExceptionGroup
from ._clustering import open_actor_cluster
from ._ipc import Channel
from ._streaming import (
MsgStream as MsgStream,
stream as stream,
Context,
MsgStream,
stream,
context,
)
from ._discovery import (
get_registry as get_registry,
find_actor as find_actor,
wait_for_actor as wait_for_actor,
query_actor as query_actor,
)
from ._supervise import (
open_nursery as open_nursery,
ActorNursery as ActorNursery,
get_arbiter,
find_actor,
wait_for_actor,
query_actor,
)
from ._supervise import open_nursery
from ._state import (
current_actor as current_actor,
is_root_process as is_root_process,
current_ipc_ctx as current_ipc_ctx,
current_actor,
is_root_process,
)
from ._exceptions import (
ContextCancelled as ContextCancelled,
ModuleNotExposed as ModuleNotExposed,
MsgTypeError as MsgTypeError,
RemoteActorError as RemoteActorError,
TransportClosed as TransportClosed,
RemoteActorError,
ModuleNotExposed,
ContextCancelled,
)
from .devx import (
breakpoint as breakpoint,
pause as pause,
pause_from_sync as pause_from_sync,
post_mortem as post_mortem,
from ._debug import (
breakpoint,
post_mortem,
)
from . import msg as msg
from . import msg
from ._root import (
run_daemon as run_daemon,
open_root_actor as open_root_actor,
run_daemon,
open_root_actor,
)
from ._ipc import Channel as Channel
from ._portal import Portal as Portal
from ._runtime import Actor as Actor
from ._portal import Portal
from ._runtime import Actor
__all__ = [
'Actor',
'Channel',
'Context',
'ContextCancelled',
'ModuleNotExposed',
'MsgStream',
'BaseExceptionGroup',
'Portal',
'RemoteActorError',
'breakpoint',
'context',
'current_actor',
'find_actor',
'get_arbiter',
'is_root_process',
'msg',
'open_actor_cluster',
'open_nursery',
'open_root_actor',
'post_mortem',
'query_actor',
'run_daemon',
'stream',
'to_asyncio',
'wait_for_actor',
]
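
The list above is the package's entire public surface; most programs only ever touch a handful of these names. A compact sketch using nothing beyond the re-exports shown (the worker does no real work here)::

    import trio
    import tractor

    async def main():
        async with tractor.open_nursery() as an:
            portal: tractor.Portal = await an.start_actor(
                'worker',
                enable_modules=[__name__],
            )
            # ... drive the child via `portal.run()` / `portal.open_context()` ...
            await portal.cancel_actor()

    trio.run(main)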

View File

@ -18,6 +18,8 @@
This is the "bootloader" for actors started using the native trio backend.
"""
import sys
import trio
import argparse
from ast import literal_eval
@ -35,8 +37,9 @@ def parse_ipaddr(arg):
return (str(host), int(port))
from ._entry import _trio_main
if __name__ == "__main__":
__tracebackhide__: bool = True
parser = argparse.ArgumentParser()
parser.add_argument("--uid", type=parse_uid)

File diff suppressed because it is too large

tractor/_debug.py (new file: 922 lines, mode 100644)
View File

@ -0,0 +1,922 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Multi-core debugging for da peeps!
"""
from __future__ import annotations
import bdb
import os
import sys
import signal
from functools import (
partial,
cached_property,
)
from contextlib import asynccontextmanager as acm
from typing import (
Any,
Optional,
Callable,
AsyncIterator,
AsyncGenerator,
)
from types import FrameType
import pdbp
import tractor
import trio
from trio_typing import TaskStatus
from .log import get_logger
from ._discovery import get_root
from ._state import (
is_root_process,
debug_mode,
)
from ._exceptions import (
is_multi_cancelled,
ContextCancelled,
)
from ._ipc import Channel
log = get_logger(__name__)
__all__ = ['breakpoint', 'post_mortem']
class Lock:
'''
Actor global debug lock state.
Mostly to avoid a lot of ``global`` declarations for now XD.
'''
repl: MultiActorPdb | None = None
# placeholder for function to set a ``trio.Event`` on debugger exit
# pdb_release_hook: Optional[Callable] = None
_trio_handler: Callable[
[int, Optional[FrameType]], Any
] | int | None = None
# actor-wide variable pointing to current task name using debugger
local_task_in_debug: str | None = None
# NOTE: set by the current task waiting on the root tty lock from
# the CALLER side of the `lock_tty_for_child()` context entry-call
# and must be cancelled if this actor is cancelled via IPC
# request-message otherwise deadlocks with the parent actor may
# ensue
_debugger_request_cs: Optional[trio.CancelScope] = None
# NOTE: set only in the root actor for the **local** root spawned task
# which has acquired the lock (i.e. this is on the callee side of
# the `lock_tty_for_child()` context entry).
_root_local_task_cs_in_debug: Optional[trio.CancelScope] = None
# actor tree-wide actor uid that supposedly has the tty lock
global_actor_in_debug: Optional[tuple[str, str]] = None
local_pdb_complete: Optional[trio.Event] = None
no_remote_has_tty: Optional[trio.Event] = None
# lock in root actor preventing multi-access to local tty
_debug_lock: trio.StrictFIFOLock = trio.StrictFIFOLock()
_orig_sigint_handler: Optional[Callable] = None
_blocked: set[tuple[str, str]] = set()
@classmethod
def shield_sigint(cls):
cls._orig_sigint_handler = signal.signal(
signal.SIGINT,
shield_sigint_handler,
)
@classmethod
def unshield_sigint(cls):
# always restore ``trio``'s sigint handler. see notes below in
# the pdb factory about the nightmare that is that code swapping
# out the handler when the repl activates...
signal.signal(signal.SIGINT, cls._trio_handler)
cls._orig_sigint_handler = None
@classmethod
def release(cls):
try:
cls._debug_lock.release()
except RuntimeError:
# uhhh makes no sense but been seeing the non-owner
# release error even though this is definitely the task
# that locked?
owner = cls._debug_lock.statistics().owner
if owner:
raise
# actor-local state, irrelevant for non-root.
cls.global_actor_in_debug = None
cls.local_task_in_debug = None
try:
# sometimes the ``trio`` runtime might already be terminated in
# which case this call will raise.
if cls.local_pdb_complete is not None:
cls.local_pdb_complete.set()
finally:
# restore original sigint handler
cls.unshield_sigint()
cls.repl = None
class TractorConfig(pdbp.DefaultConfig):
'''
Custom ``pdbp`` goodness :surfer:
'''
use_pygments: bool = True
sticky_by_default: bool = False
enable_hidden_frames: bool = False
# much thanks @mdmintz for the hot tip!
# fixes line spacing issue when resizing terminal B)
truncate_long_lines: bool = False
class MultiActorPdb(pdbp.Pdb):
'''
Add teardown hooks to the regular ``pdbp.Pdb``.
'''
# override the pdbp config with our coolio one
DefaultConfig = TractorConfig
# def preloop(self):
# print('IN PRELOOP')
# super().preloop()
# TODO: figure out how to disallow recursive .set_trace() entry
# since that'll cause deadlock for us.
def set_continue(self):
try:
super().set_continue()
finally:
Lock.release()
def set_quit(self):
try:
super().set_quit()
finally:
Lock.release()
# XXX NOTE: we only override this because apparently the stdlib pdb
# bois likes to touch the SIGINT handler as much as i like to touch
# my d$%&.
def _cmdloop(self):
self.cmdloop()
@cached_property
def shname(self) -> str | None:
'''
Attempt to return the login shell name with a special check for
the infamous `xonsh` since it seems to have some issues much
different from std shells when it comes to flushing the prompt?
'''
# SUPER HACKY and only really works if `xonsh` is not used
# before spawning further sub-shells..
shpath = os.getenv('SHELL', None)
if shpath:
if (
os.getenv('XONSH_LOGIN', default=False)
or 'xonsh' in shpath
):
return 'xonsh'
return os.path.basename(shpath)
return None
@acm
async def _acquire_debug_lock_from_root_task(
uid: tuple[str, str]
) -> AsyncIterator[trio.StrictFIFOLock]:
'''
Acquire a root-actor local FIFO lock which tracks mutex access of
the process tree's global debugger breakpoint.
This lock avoids tty clobbering (by preventing multiple processes
reading from stdstreams) and ensures multi-actor, sequential access
to the ``pdb`` repl.
'''
task_name = trio.lowlevel.current_task().name
log.runtime(
f"Attempting to acquire TTY lock, remote task: {task_name}:{uid}"
)
we_acquired = False
try:
log.runtime(
f"entering lock checkpoint, remote task: {task_name}:{uid}"
)
we_acquired = True
# NOTE: if the surrounding cancel scope from the
# `lock_tty_for_child()` caller is cancelled, this line should
# unblock and NOT leave us in some kind of
# a "child-locked-TTY-but-child-is-uncontactable-over-IPC"
# condition.
await Lock._debug_lock.acquire()
if Lock.no_remote_has_tty is None:
# mark the tty lock as being in use so that the runtime
# can try to avoid clobbering any connection from a child
# that's currently relying on it.
Lock.no_remote_has_tty = trio.Event()
Lock.global_actor_in_debug = uid
log.runtime(f"TTY lock acquired, remote task: {task_name}:{uid}")
# NOTE: critical section: this yield is unshielded!
# IF we received a cancel during the shielded lock entry of some
# next-in-queue requesting task, then the resumption here will
# result in that ``trio.Cancelled`` being raised to our caller
# (likely from ``lock_tty_for_child()`` below)! In
# this case the ``finally:`` below should trigger and the
# surrounding caller side context should cancel normally
# relaying back to the caller.
yield Lock._debug_lock
finally:
if (
we_acquired
and Lock._debug_lock.locked()
):
Lock._debug_lock.release()
# IFF there are no more requesting tasks queued up, fire the
# "tty-unlocked" event thereby alerting any monitors of the lock that
# we are now back in the "tty unlocked" state. This is basically
# an edge-triggered signal around an empty queue of sub-actor
# tasks that may have tried to acquire the lock.
stats = Lock._debug_lock.statistics()
if (
not stats.owner
):
log.runtime(f"No more tasks waiting on tty lock! says {uid}")
if Lock.no_remote_has_tty is not None:
Lock.no_remote_has_tty.set()
Lock.no_remote_has_tty = None
Lock.global_actor_in_debug = None
log.runtime(
f"TTY lock released, remote task: {task_name}:{uid}"
)
@tractor.context
async def lock_tty_for_child(
ctx: tractor.Context,
subactor_uid: tuple[str, str]
) -> str:
'''
Lock the TTY in the root process of an actor tree in a new
inter-actor-context-task such that the ``pdbp`` debugger console
can be mutex-allocated to the calling sub-actor for REPL control
without interference by other processes / threads.
NOTE: this task must be invoked in the root process of the actor
tree. It is meant to be invoked as an rpc-task and should be
highly reliable at releasing the mutex upon completion!
'''
task_name = trio.lowlevel.current_task().name
if tuple(subactor_uid) in Lock._blocked:
log.warning(
f'Actor {subactor_uid} is blocked from acquiring debug lock\n'
f"remote task: {task_name}:{subactor_uid}"
)
ctx._enter_debugger_on_cancel = False
await ctx.cancel(f'Debug lock blocked for {subactor_uid}')
return 'pdb_lock_blocked'
# TODO: when we get to true remote debugging
# this will deliver stdin data?
log.debug(
"Attempting to acquire TTY lock\n"
f"remote task: {task_name}:{subactor_uid}"
)
log.debug(f"Actor {subactor_uid} is WAITING on stdin hijack lock")
Lock.shield_sigint()
try:
with (
trio.CancelScope(shield=True) as debug_lock_cs,
):
Lock._root_local_task_cs_in_debug = debug_lock_cs
async with _acquire_debug_lock_from_root_task(subactor_uid):
# indicate to child that we've locked stdio
await ctx.started('Locked')
log.debug(
f"Actor {subactor_uid} acquired stdin hijack lock"
)
# wait for unlock pdb by child
async with ctx.open_stream() as stream:
assert await stream.receive() == 'pdb_unlock'
return "pdb_unlock_complete"
finally:
Lock._root_local_task_cs_in_debug = None
Lock.unshield_sigint()
async def wait_for_parent_stdin_hijack(
actor_uid: tuple[str, str],
task_status: TaskStatus[trio.CancelScope] = trio.TASK_STATUS_IGNORED
):
'''
Connect to the root actor via a ``Context`` and invoke a task which
locks a root-local TTY lock: ``lock_tty_for_child()``; this func
should be called in a new task from a child actor **and never the
root**.
This function is used by any sub-actor to acquire mutex access to
the ``pdb`` REPL and thus the root's TTY for interactive debugging
(see below inside ``_breakpoint()``). It can be used to ensure that
an intermediate nursery-owning actor does not clobber its children
if they are in debug (see below inside
``maybe_wait_for_debugger()``).
'''
with trio.CancelScope(shield=True) as cs:
Lock._debugger_request_cs = cs
try:
async with get_root() as portal:
# this syncs to child's ``Context.started()`` call.
async with portal.open_context(
tractor._debug.lock_tty_for_child,
subactor_uid=actor_uid,
) as (ctx, val):
log.debug('locked context')
assert val == 'Locked'
async with ctx.open_stream() as stream:
# unblock local caller
try:
assert Lock.local_pdb_complete
task_status.started(cs)
await Lock.local_pdb_complete.wait()
finally:
# TODO: shielding currently can cause hangs...
# with trio.CancelScope(shield=True):
await stream.send('pdb_unlock')
# sync with callee termination
assert await ctx.result() == "pdb_unlock_complete"
log.debug('exiting child side locking task context')
except ContextCancelled:
log.warning('Root actor cancelled debug lock')
raise
finally:
Lock.local_task_in_debug = None
log.debug('Exiting debugger from child')
def mk_mpdb() -> tuple[MultiActorPdb, Callable]:
pdb = MultiActorPdb()
# signal.signal = pdbp.hideframe(signal.signal)
Lock.shield_sigint()
# XXX: These are the important flags mentioned in
# https://github.com/python-trio/trio/issues/1155
# which resolve the traceback spews to console.
pdb.allow_kbdint = True
pdb.nosigint = True
return pdb, Lock.unshield_sigint
async def _breakpoint(
debug_func,
# TODO:
# shield: bool = False
) -> None:
'''
Breakpoint entry for engaging debugger instance sync-interaction,
from async code, executing in actor runtime (task).
'''
__tracebackhide__ = True
actor = tractor.current_actor()
pdb, undo_sigint = mk_mpdb()
task_name = trio.lowlevel.current_task().name
# TODO: is it possible to debug a trio.Cancelled except block?
# right now it seems like we can kinda do with by shielding
# around ``tractor.breakpoint()`` but not if we move the shielded
# scope here???
# with trio.CancelScope(shield=shield):
# await trio.lowlevel.checkpoint()
if (
not Lock.local_pdb_complete
or Lock.local_pdb_complete.is_set()
):
Lock.local_pdb_complete = trio.Event()
# TODO: need a more robust check for the "root" actor
if (
not is_root_process()
and actor._parent_chan # a connected child
):
if Lock.local_task_in_debug:
# Recurrence entry case: this task already has the lock and
# is likely recurrently entering a breakpoint
if Lock.local_task_in_debug == task_name:
# noop on recurrent entry case but we want to trigger
# a checkpoint to allow other actors to error-propagate and
# potentially avoid infinite re-entries in some subactor.
await trio.lowlevel.checkpoint()
return
# if **this** actor is already in debug mode block here
# waiting for the control to be released - this allows
# support for recursive entries to `tractor.breakpoint()`
log.warning(f"{actor.uid} already has a debug lock, waiting...")
await Lock.local_pdb_complete.wait()
await trio.sleep(0.1)
# mark local actor as "in debug mode" to avoid recurrent
# entries/requests to the root process
Lock.local_task_in_debug = task_name
# this **must** be awaited by the caller and is done using the
# root nursery so that the debugger can continue to run without
# being restricted by the scope of a new task nursery.
# TODO: if we want to debug a trio.Cancelled triggered exception
# we have to figure out how to avoid having the service nursery
# cancel on this task start? I *think* this works below:
# ```python
# actor._service_n.cancel_scope.shield = shield
# ```
# but not entirely sure if that's a sane way to implement it?
try:
with trio.CancelScope(shield=True):
await actor._service_n.start(
wait_for_parent_stdin_hijack,
actor.uid,
)
Lock.repl = pdb
except RuntimeError:
Lock.release()
if actor._cancel_called:
# service nursery won't be usable and we
# don't want to lock up the root either way since
# we're in (the midst of) cancellation.
return
raise
elif is_root_process():
# we also wait in the root-parent for any child that
# may have the tty locked prior
# TODO: wait, what about multiple root tasks acquiring it though?
if Lock.global_actor_in_debug == actor.uid:
# re-entrant root process already has it: noop.
return
# XXX: since we need to enter pdb synchronously below,
# we have to release the lock manually from pdb completion
# callbacks. Can't think of a nicer way than this atm.
if Lock._debug_lock.locked():
log.warning(
'Root actor attempting to shield-acquire active tty lock'
f' owned by {Lock.global_actor_in_debug}')
# must shield here to avoid hitting a ``Cancelled`` and
# a child getting stuck bc we clobbered the tty
with trio.CancelScope(shield=True):
await Lock._debug_lock.acquire()
else:
# may be cancelled
await Lock._debug_lock.acquire()
Lock.global_actor_in_debug = actor.uid
Lock.local_task_in_debug = task_name
Lock.repl = pdb
try:
# block here once (at the appropriate frame *up*) where
# ``breakpoint()`` was awaited and begin handling stdio.
log.debug("Entering the synchronous world of pdb")
debug_func(actor, pdb)
except bdb.BdbQuit:
Lock.release()
raise
# XXX: apparently we can't do this without showing this frame
# in the backtrace on first entry to the REPL? Seems like an odd
# behaviour that should have been fixed by now. This is also why
# we scrapped all the @cm approaches that were tried previously.
# finally:
# __tracebackhide__ = True
# # frame = sys._getframe()
# # last_f = frame.f_back
# # last_f.f_globals['__tracebackhide__'] = True
# # signal.signal = pdbp.hideframe(signal.signal)
def shield_sigint_handler(
signum: int,
frame: 'frame', # type: ignore # noqa
# pdb_obj: Optional[MultiActorPdb] = None,
*args,
) -> None:
'''
Specialized, debugger-aware SIGINT handler.
In children we always ignore it to avoid deadlocks since cancellation
should always be managed by the parent supervising actor. The root
is always cancelled on ctrl-c.
'''
__tracebackhide__ = True
uid_in_debug = Lock.global_actor_in_debug
actor = tractor.current_actor()
# print(f'{actor.uid} in HANDLER with ')
def do_cancel():
# If we haven't tried to cancel the runtime then do that instead
# of raising a KBI (which may non-gracefully destroy
# a ``trio.run()``).
if not actor._cancel_called:
actor.cancel_soon()
# If the runtime is already cancelled it likely means the user
# hit ctrl-c again because teardown didn't fully take place in
# which case we do the "hard" raising of a local KBI.
else:
raise KeyboardInterrupt
any_connected = False
if uid_in_debug is not None:
# try to see if the supposed (sub)actor in debug still
# has an active connection to *this* actor, and if not
# it's likely they aren't using the TTY lock / debugger
# and we should propagate SIGINT normally.
chans = actor._peers.get(tuple(uid_in_debug))
if chans:
any_connected = any(chan.connected() for chan in chans)
if not any_connected:
log.warning(
'A global actor reported to be in debug '
'but no connection exists for this child:\n'
f'{uid_in_debug}\n'
'Allowing SIGINT propagation..'
)
return do_cancel()
# only set in the actor actually running the REPL
pdb_obj = Lock.repl
# root actor branch that reports whether or not a child
# has locked debugger.
if (
is_root_process()
and uid_in_debug is not None
# XXX: only if there is an existing connection to the
# (sub-)actor in debug do we ignore SIGINT in this
# parent! Otherwise we may hang waiting for an actor
# which has already terminated to unlock.
and any_connected
):
# we are root and some actor is in debug mode
# if uid_in_debug is not None:
if pdb_obj:
name = uid_in_debug[0]
if name != 'root':
log.pdb(
f"Ignoring SIGINT, child in debug mode: `{uid_in_debug}`"
)
else:
log.pdb(
"Ignoring SIGINT while in debug mode"
)
elif (
is_root_process()
):
if pdb_obj:
log.pdb(
"Ignoring SIGINT since debug mode is enabled"
)
if (
Lock._root_local_task_cs_in_debug
and not Lock._root_local_task_cs_in_debug.cancel_called
):
Lock._root_local_task_cs_in_debug.cancel()
# revert back to ``trio`` handler asap!
Lock.unshield_sigint()
# child actor that has locked the debugger
elif not is_root_process():
chan: Channel = actor._parent_chan
if not chan or not chan.connected():
log.warning(
'A global actor reported to be in debug '
'but no connection exists for its parent:\n'
f'{uid_in_debug}\n'
'Allowing SIGINT propagation..'
)
return do_cancel()
task = Lock.local_task_in_debug
if (
task
and pdb_obj
):
log.pdb(
f"Ignoring SIGINT while task in debug mode: `{task}`"
)
# TODO: how to handle the case of an intermediary-child actor
# that **is not** marked in debug mode? See outstanding issue:
# https://github.com/goodboy/tractor/issues/320
# elif debug_mode():
else: # XXX: shouldn't ever get here?
print("WTFWTFWTF")
raise KeyboardInterrupt
# NOTE: currently (at least on ``fancycompleter`` 0.9.2)
# it looks to be that the last command that was run (eg. ll)
# will be repeated by default.
# maybe redraw/print last REPL output to console since
# we want to alert the user that more input is expected since
# nothing has been done due to ignoring sigint.
if (
pdb_obj # only when this actor has a REPL engaged
):
# XXX: yah, mega hack, but how else do we catch this madness XD
if pdb_obj.shname == 'xonsh':
pdb_obj.stdout.write(pdb_obj.prompt)
pdb_obj.stdout.flush()
# TODO: make this work like sticky mode where if there is output
# detected as written to the tty we redraw this part underneath
# and erase the past draw of this same bit above?
# pdb_obj.sticky = True
# pdb_obj._print_if_sticky()
# also see these links for an approach from ``ptk``:
# https://github.com/goodboy/tractor/issues/130#issuecomment-663752040
# https://github.com/prompt-toolkit/python-prompt-toolkit/blob/c2c6af8a0308f9e5d7c0e28cb8a02963fe0ce07a/prompt_toolkit/patch_stdout.py
# XXX LEGACY: lol, see ``pdbpp`` issue:
# https://github.com/pdbpp/pdbpp/issues/496
def _set_trace(
actor: tractor.Actor | None = None,
pdb: MultiActorPdb | None = None,
):
__tracebackhide__ = True
actor = actor or tractor.current_actor()
# start 2 levels up in user code
frame: Optional[FrameType] = sys._getframe()
if frame:
frame = frame.f_back # type: ignore
if (
frame
and pdb
and actor is not None
):
log.pdb(f"\nAttaching pdb to actor: {actor.uid}\n")
# no f!#$&* idea, but when we're in async land
# we need 2x frames up?
frame = frame.f_back
else:
pdb, undo_sigint = mk_mpdb()
# we entered the global ``breakpoint()`` built-in from sync
# code?
Lock.local_task_in_debug = 'sync'
pdb.set_trace(frame=frame)
breakpoint = partial(
_breakpoint,
_set_trace,
)
def _post_mortem(
actor: tractor.Actor,
pdb: MultiActorPdb,
) -> None:
'''
Enter the ``pdbpp`` post mortem entrypoint using our custom
debugger instance.
'''
log.pdb(f"\nAttaching to pdb in crashed actor: {actor.uid}\n")
# TODO: you need ``pdbpp`` master (at least this commit
# https://github.com/pdbpp/pdbpp/commit/b757794857f98d53e3ebbe70879663d7d843a6c2)
# to fix this and avoid the hang it causes. See issue:
# https://github.com/pdbpp/pdbpp/issues/480
# TODO: help with a 3.10+ major release if/when it arrives.
pdbp.xpm(Pdb=lambda: pdb)
post_mortem = partial(
_breakpoint,
_post_mortem,
)
async def _maybe_enter_pm(err):
if (
debug_mode()
# NOTE: don't enter debug mode recursively after quitting pdb
# Iow, don't re-enter the repl if the `quit` command was issued
# by the user.
and not isinstance(err, bdb.BdbQuit)
# XXX: if the error is the likely result of runtime-wide
# cancellation, we don't want to enter the debugger since
# there's races between when the parent actor has killed all
# comms and when the child tries to contact said parent to
# acquire the tty lock.
# Really we just want to mostly avoid catching KBIs here so there
# might be a simpler check we can do?
and not is_multi_cancelled(err)
):
log.debug("Actor crashed, entering debug mode")
try:
await post_mortem()
finally:
Lock.release()
return True
else:
return False
@acm
async def acquire_debug_lock(
subactor_uid: tuple[str, str],
) -> AsyncGenerator[None, tuple]:
'''
Grab root's debug lock on entry, release on exit.
This helper is for actors which don't actually need
to acquire the debugger but want to wait until the
lock is free in the process-tree root.
'''
if not debug_mode():
yield None
return
async with trio.open_nursery() as n:
cs = await n.start(
wait_for_parent_stdin_hijack,
subactor_uid,
)
yield None
cs.cancel()
async def maybe_wait_for_debugger(
poll_steps: int = 2,
poll_delay: float = 0.1,
child_in_debug: bool = False,
) -> None:
if (
not debug_mode()
and not child_in_debug
):
return
if (
is_root_process()
):
# If we error in the root but the debugger is
# engaged we don't want to prematurely kill (and
# thus clobber access to) the local tty since it
# will make the pdb repl unusable.
# Instead try to wait for pdb to be released before
# tearing down.
sub_in_debug = None
for _ in range(poll_steps):
if Lock.global_actor_in_debug:
sub_in_debug = tuple(Lock.global_actor_in_debug)
log.debug('Root polling for debug')
with trio.CancelScope(shield=True):
await trio.sleep(poll_delay)
# TODO: could this make things more deterministic? wait
# to see if a sub-actor task will be scheduled and grab
# the tty lock on the next tick?
# XXX: doesn't seem to work
# await trio.testing.wait_all_tasks_blocked(cushion=0)
debug_complete = Lock.no_remote_has_tty
if (
(debug_complete and
not debug_complete.is_set())
):
log.debug(
'Root has errored but pdb is in use by '
f'child {sub_in_debug}\n'
'Waiting on tty lock to release..')
await debug_complete.wait()
await trio.sleep(poll_delay)
continue
else:
log.debug(
'Root acquired TTY LOCK'
)
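
As used throughout the test suite, the two entry points into all of the above are ``tractor.breakpoint()`` from task code and the crash-time post-mortem path engaged by ``debug_mode=True``. A child-side usage sketch (error handling elided, names illustrative)::

    import trio
    import tractor

    async def pause_me():
        await trio.sleep(0.1)
        # pauses *this* child task; the root-actor TTY-lock machinery
        # above serializes REPL access across the whole actor tree
        await tractor.breakpoint()

    async def main():
        async with tractor.open_nursery(
            debug_mode=True,  # also enables crash-time post-mortem in children
        ) as an:
            await an.run_in_actor(
                pause_me,
                enable_modules=[__name__],
            )

    trio.run(main)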

View File

@ -15,71 +15,50 @@
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Discovery (protocols) API for automatic addressing and location
management of (service) actors.
Actor discovery API.
"""
from __future__ import annotations
from typing import (
Optional,
Union,
AsyncGenerator,
AsyncContextManager,
TYPE_CHECKING,
)
from contextlib import asynccontextmanager as acm
from tractor.log import get_logger
from .trionics import gather_contexts
from ._ipc import _connect_chan, Channel
from ._portal import (
Portal,
open_portal,
LocalPortal,
)
from ._state import (
current_actor,
_runtime_vars,
)
if TYPE_CHECKING:
from ._runtime import Actor
log = get_logger(__name__)
from ._state import current_actor, _runtime_vars
@acm
async def get_registry(
async def get_arbiter(
host: str,
port: int,
) -> AsyncGenerator[
Portal | LocalPortal | None,
None,
]:
) -> AsyncGenerator[Union[Portal, LocalPortal], None]:
'''Return a portal instance connected to a local or remote
arbiter.
'''
Return a portal instance connected to a local or remote
registry-service actor; if a connection already exists re-use it
(presumably to call a `.register_actor()` registry runtime RPC
ep).
actor = current_actor()
'''
actor: Actor = current_actor()
if actor.is_registrar:
if not actor:
raise RuntimeError("No actor instance has been defined yet?")
if actor.is_arbiter:
# we're already the arbiter
# (likely a re-entrant call from the arbiter actor)
yield LocalPortal(
actor,
Channel((host, port))
)
yield LocalPortal(actor, Channel((host, port)))
else:
# TODO: try to look pre-existing connection from
# `Actor._peers` and use it instead?
async with (
_connect_chan(host, port) as chan,
open_portal(chan) as regstr_ptl,
):
yield regstr_ptl
async with _connect_chan(host, port) as chan:
async with open_portal(chan) as arb_portal:
yield arb_portal
@acm
@ -87,104 +66,62 @@ async def get_root(
**kwargs,
) -> AsyncGenerator[Portal, None]:
# TODO: rename mailbox to `_root_maddr` when we finally
# add and impl libp2p multi-addrs?
host, port = _runtime_vars['_root_mailbox']
assert host is not None
async with (
_connect_chan(host, port) as chan,
open_portal(chan, **kwargs) as portal,
):
async with _connect_chan(host, port) as chan:
async with open_portal(chan, **kwargs) as portal:
yield portal
def get_peer_by_name(
name: str,
# uuid: str|None = None,
) -> list[Channel]|None: # at least 1
'''
Scan for an existing connection (set) to a named actor
and return any channels from `Actor._peers`.
This is an optimization method over querying the registrar for
the same info.
'''
actor: Actor = current_actor()
to_scan: dict[tuple, list[Channel]] = actor._peers.copy()
pchan: Channel|None = actor._parent_chan
if pchan:
to_scan[pchan.uid].append(pchan)
for aid, chans in to_scan.items():
_, peer_name = aid
if name == peer_name:
if not chans:
log.warning(
f'No IPC chans for matching peer {peer_name}\n'
)
continue
return chans
return None
@acm
async def query_actor(
name: str,
regaddr: tuple[str, int]|None = None,
arbiter_sockaddr: Optional[tuple[str, int]] = None,
) -> AsyncGenerator[
tuple[str, int]|None,
None,
]:
) -> AsyncGenerator[tuple[str, int], None]:
'''
Lookup a transport address (by actor name) via querying a registrar
listening @ `regaddr`.
Simple address lookup for a given actor name.
Returns the transport protocol (socket) address or `None` if no
entry under that name exists.
Returns the (socket) address or ``None``.
'''
actor: Actor = current_actor()
if (
name == 'registrar'
and actor.is_registrar
):
raise RuntimeError(
'The current actor IS the registry!?'
)
actor = current_actor()
async with get_arbiter(
*arbiter_sockaddr or actor._arb_addr
) as arb_portal:
maybe_peers: list[Channel]|None = get_peer_by_name(name)
if maybe_peers:
yield maybe_peers[0].raddr
return
reg_portal: Portal
regaddr: tuple[str, int] = regaddr or actor.reg_addrs[0]
async with get_registry(*regaddr) as reg_portal:
# TODO: return portals to all available actors - for now
# just the last one that registered
sockaddr: tuple[str, int] = await reg_portal.run_from_ns(
sockaddr = await arb_portal.run_from_ns(
'self',
'find_actor',
name=name,
)
yield sockaddr
# TODO: return portals to all available actors - for now just
# the last one that registered
if name == 'arbiter' and actor.is_arbiter:
raise RuntimeError("The current actor is the arbiter")
yield sockaddr if sockaddr else None
@acm
async def maybe_open_portal(
addr: tuple[str, int],
async def find_actor(
name: str,
):
arbiter_sockaddr: tuple[str, int] | None = None
) -> AsyncGenerator[Optional[Portal], None]:
'''
Ask the arbiter to find actor(s) by name.
Returns a connected portal to the last registered matching actor
known to the arbiter.
'''
async with query_actor(
name=name,
regaddr=addr,
arbiter_sockaddr=arbiter_sockaddr,
) as sockaddr:
pass
if sockaddr:
async with _connect_chan(*sockaddr) as chan:
@ -194,121 +131,26 @@ async def maybe_open_portal(
yield None
@acm
async def find_actor(
name: str,
registry_addrs: list[tuple[str, int]]|None = None,
only_first: bool = True,
raise_on_none: bool = False,
) -> AsyncGenerator[
Portal | list[Portal] | None,
None,
]:
'''
Ask the arbiter to find actor(s) by name.
Returns a connected portal to the last registered matching actor
known to the arbiter.
'''
# optimization path, use any pre-existing peer channel
maybe_peers: list[Channel]|None = get_peer_by_name(name)
if maybe_peers and only_first:
async with open_portal(maybe_peers[0]) as peer_portal:
yield peer_portal
return
if not registry_addrs:
# XXX NOTE: make sure to dynamically read the value on
# every call since something may change it globally (eg.
# like in our discovery test suite)!
from . import _root
registry_addrs = (
_runtime_vars['_registry_addrs']
or
_root._default_lo_addrs
)
maybe_portals: list[
AsyncContextManager[tuple[str, int]]
] = list(
maybe_open_portal(
addr=addr,
name=name,
)
for addr in registry_addrs
)
portals: list[Portal]
async with gather_contexts(
mngrs=maybe_portals,
) as portals:
# log.runtime(
# 'Gathered portals:\n'
# f'{portals}'
# )
# NOTE: `gather_contexts()` will return a
# `tuple[None, None, ..., None]` if no contact
# can be made with any regstrar at any of the
# N provided addrs!
if not any(portals):
if raise_on_none:
raise RuntimeError(
f'No actor "{name}" found registered @ {registry_addrs}'
)
yield None
return
portals: list[Portal] = list(portals)
if only_first:
yield portals[0]
else:
# TODO: currently this may return multiple portals
# given there are multi-homed or multiple registrars..
# SO, we probably need de-duplication logic?
yield portals
@acm
async def wait_for_actor(
name: str,
registry_addr: tuple[str, int] | None = None,
arbiter_sockaddr: tuple[str, int] | None = None
) -> AsyncGenerator[Portal, None]:
'''
Wait on at least one peer actor to register `name` with the
registrar, yield a `Portal` to the first registree.
"""Wait on an actor to register with the arbiter.
'''
actor: Actor = current_actor()
A portal to the first registered actor is returned.
"""
actor = current_actor()
# optimization path, use any pre-existing peer channel
maybe_peers: list[Channel]|None = get_peer_by_name(name)
if maybe_peers:
async with open_portal(maybe_peers[0]) as peer_portal:
yield peer_portal
return
regaddr: tuple[str, int] = (
registry_addr
or
actor.reg_addrs[0]
)
# TODO: use `.trionics.gather_contexts()` like
# above in `find_actor()` as well?
reg_portal: Portal
async with get_registry(*regaddr) as reg_portal:
sockaddrs = await reg_portal.run_from_ns(
async with get_arbiter(
*arbiter_sockaddr or actor._arb_addr,
) as arb_portal:
sockaddrs = await arb_portal.run_from_ns(
'self',
'wait_for_actor',
name=name,
)
# get latest registered addr by default?
# TODO: offer multi-portal yields in multi-homed case?
sockaddr: tuple[str, int] = sockaddrs[-1]
sockaddr = sockaddrs[-1]
async with _connect_chan(*sockaddr) as chan:
async with open_portal(chan) as portal:
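
Both lookup paths ultimately yield a connected ``Portal``; a usage sketch from inside a running root actor (assumes a peer actor has registered under the name 'service')::

    import trio
    import tractor

    async def main():
        async with tractor.open_root_actor():
            # block until an actor named 'service' has registered
            async with tractor.wait_for_actor('service') as portal:
                assert portal is not None

            # non-blocking variant: yields a portal or `None` if not found
            async with tractor.find_actor('service') as maybe_portal:
                if maybe_portal is None:
                    print('no "service" actor registered')

    trio.run(main)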

View File

@ -20,9 +20,6 @@ Sub-process entry points.
"""
from __future__ import annotations
from functools import partial
import multiprocessing as mp
import os
import textwrap
from typing import (
Any,
TYPE_CHECKING,
@ -35,7 +32,6 @@ from .log import (
get_logger,
)
from . import _state
from .devx import _debug
from .to_asyncio import run_as_asyncio_guest
from ._runtime import (
async_main,
@ -51,8 +47,8 @@ log = get_logger(__name__)
def _mp_main(
actor: Actor,
accept_addrs: list[tuple[str, int]],
actor: Actor, # type: ignore
accept_addr: tuple[str, int],
forkserver_info: tuple[Any, Any, Any, Any, Any],
start_method: SpawnMethodKey,
parent_addr: tuple[str, int] | None = None,
@ -60,31 +56,29 @@ def _mp_main(
) -> None:
'''
The routine called *after fork* which invokes a fresh `trio.run()`
The routine called *after fork* which invokes a fresh ``trio.run``
'''
actor._forkserver_info = forkserver_info
from ._spawn import try_set_start_method
spawn_ctx: mp.context.BaseContext = try_set_start_method(start_method)
assert spawn_ctx
spawn_ctx = try_set_start_method(start_method)
if actor.loglevel is not None:
log.info(
f'Setting loglevel for {actor.uid} to {actor.loglevel}'
)
f"Setting loglevel for {actor.uid} to {actor.loglevel}")
get_console_log(actor.loglevel)
# TODO: use scops headers like for `trio` below!
# (well after we libify it maybe..)
assert spawn_ctx
log.info(
f'Started new {spawn_ctx.current_process()} for {actor.uid}'
# f"parent_addr is {parent_addr}"
)
_state._current_actor: Actor = actor
f"Started new {spawn_ctx.current_process()} for {actor.uid}")
_state._current_actor = actor
log.debug(f"parent_addr is {parent_addr}")
trio_main = partial(
async_main,
actor=actor,
accept_addrs=accept_addrs,
actor,
accept_addr,
parent_addr=parent_addr
)
try:
@ -97,114 +91,12 @@ def _mp_main(
pass # handle it the same way trio does?
finally:
log.info(
f'`mp`-subactor {actor.uid} exited'
)
# TODO: move this func to some kinda `.devx._conc_lang.py` eventually
# as we work out our multi-domain state-flow-syntax!
def nest_from_op(
input_op: str,
#
# ?TODO? an idea for a syntax to the state of concurrent systems
# as a "3-domain" (execution, scope, storage) model and using
# a minimal ascii/utf-8 operator-set.
#
# try not to take any of this seriously yet XD
#
# > is a "play operator" indicating (CPU bound)
# exec/work/ops required at the "lowest level computing"
#
# execution primititves (tasks, threads, actors..) denote their
# lifetime with '(' and ')' since parentheses normally are used
# in many langs to denote function calls.
#
# starting = (
# >( opening/starting; beginning of the thread-of-exec (toe?)
# (> opened/started, (finished spawning toe)
# |_<Task: blah blah..> repr of toe, in py these look like <objs>
#
# >) closing/exiting/stopping,
# )> closed/exited/stopped,
# |_<Task: blah blah..>
# [OR <), )< ?? ]
#
# ending = )
# >c) cancelling to close/exit
# c)> cancelled (caused close), OR?
# |_<Actor: ..>
# OR maybe "<c)" which better indicates the cancel being
# "delivered/returned" / returned" to LHS?
#
# >x) erroring to eventually exit
# x)> errored and terminated
# |_<Actor: ...>
#
# scopes: supers/nurseries, IPC-ctxs, sessions, perms, etc.
# >{ opening
# {> opened
# }> closed
# >} closing
#
# storage: like queues, shm-buffers, files, etc..
# >[ opening
# [> opened
# |_<FileObj: ..>
#
# >] closing
# ]> closed
# IPC ops: channels, transports, msging
# => req msg
# <= resp msg
# <=> 2-way streaming (of msgs)
# <- recv 1 msg
# -> send 1 msg
#
# TODO: still not sure on R/L-HS approach..?
# =>( send-req to exec start (task, actor, thread..)
# (<= recv-req to ^
#
# (<= recv-req ^
# <=( recv-resp opened remote exec primitive
# <=) recv-resp closed
#
# )<=c req to stop due to cancel
# c=>) req to stop due to cancel
#
# =>{ recv-req to open
# <={ send-status that it closed
tree_str: str,
# NOTE: so move back-from-the-left of the `input_op` by
# this amount.
back_from_op: int = 0,
) -> str:
'''
Depth-increment the input (presumably hierarchy/supervision)
input "tree string" below the provided `input_op` execution
operator, injecting a `"\n|_{input_op}\n"` and indenting the
`tree_str` to nest content aligned with the op's last char.
'''
return (
f'{input_op}\n'
+
textwrap.indent(
tree_str,
prefix=(
len(input_op)
-
(back_from_op + 1)
) * ' ',
)
)
log.info(f"Actor {actor.uid} terminated")
def _trio_main(
actor: Actor,
actor: Actor, # type: ignore
*,
parent_addr: tuple[str, int] | None = None,
infect_asyncio: bool = False,
@ -214,73 +106,33 @@ def _trio_main(
Entry point for a `trio_run_in_process` subactor.
'''
_debug.hide_runtime_frames()
log.info(f"Started new trio process for {actor.uid}")
if actor.loglevel is not None:
log.info(
f"Setting loglevel for {actor.uid} to {actor.loglevel}")
get_console_log(actor.loglevel)
log.info(
f"Started {actor.uid}")
_state._current_actor = actor
log.debug(f"parent_addr is {parent_addr}")
trio_main = partial(
async_main,
actor,
parent_addr=parent_addr
)
if actor.loglevel is not None:
get_console_log(actor.loglevel)
actor_info: str = (
f'|_{actor}\n'
f' uid: {actor.uid}\n'
f' pid: {os.getpid()}\n'
f' parent_addr: {parent_addr}\n'
f' loglevel: {actor.loglevel}\n'
)
log.info(
'Starting new `trio` subactor:\n'
+
nest_from_op(
input_op='>(', # see syntax ideas above
tree_str=actor_info,
back_from_op=1,
)
)
logmeth = log.info
exit_status: str = (
'Subactor exited\n'
+
nest_from_op(
input_op=')>', # like a "closed-to-play"-icon from super perspective
tree_str=actor_info,
back_from_op=1,
)
)
try:
if infect_asyncio:
actor._infected_aio = True
run_as_asyncio_guest(trio_main)
else:
trio.run(trio_main)
except KeyboardInterrupt:
logmeth = log.cancel
exit_status: str = (
'Actor received KBI (aka an OS-cancel)\n'
+
nest_from_op(
input_op='c)>', # closed due to cancel (see above)
tree_str=actor_info,
)
)
except BaseException as err:
logmeth = log.error
exit_status: str = (
'Main actor task exited due to crash?\n'
+
nest_from_op(
input_op='x)>', # closed by error
tree_str=actor_info,
)
)
# NOTE since we raise a tb will already be shown on the
# console, thus we do NOT use `.exception()` above.
raise err
log.warning(f"Actor {actor.uid} received KBI")
finally:
logmeth(exit_status)
log.info(f"Actor {actor.uid} terminated")

File diff suppressed because it is too large

View File

@ -19,64 +19,38 @@ Inter-process comms abstractions
"""
from __future__ import annotations
import platform
import struct
import typing
from collections.abc import (
AsyncGenerator,
AsyncIterator,
)
from contextlib import (
asynccontextmanager as acm,
contextmanager as cm,
)
import platform
from pprint import pformat
import struct
import typing
from typing import (
Any,
Callable,
runtime_checkable,
Optional,
Protocol,
Type,
TypeVar,
)
import msgspec
from tricycle import BufferedReceiveStream
import msgspec
import trio
from async_generator import asynccontextmanager
from tractor.log import get_logger
from tractor._exceptions import (
MsgTypeError,
pack_from_raise,
TransportClosed,
_mk_send_mte,
_mk_recv_mte,
)
from tractor.msg import (
_ctxvar_MsgCodec,
# _codec, XXX see `self._codec` sanity/debug checks
MsgCodec,
types as msgtypes,
pretty_struct,
)
from .log import get_logger
from ._exceptions import TransportClosed
log = get_logger(__name__)
_is_windows = platform.system() == 'Windows'
log = get_logger(__name__)
def get_stream_addrs(
stream: trio.SocketStream
) -> tuple[
tuple[str, int], # local
tuple[str, int], # remote
]:
'''
Return the `trio` streaming transport prot's socket-addrs for
both the local and remote sides as a pair.
'''
# rn, should both be IP sockets
def get_stream_addrs(stream: trio.SocketStream) -> tuple:
# should both be IP sockets
lsockname = stream.socket.getsockname()
rsockname = stream.socket.getpeername()
return (
@ -85,22 +59,16 @@ def get_stream_addrs(
)
# from tractor.msg.types import MsgType
# ?TODO? this should be our `Union[*msgtypes.__spec__]` alias now right..?
# => BLEH, except can't bc prots must inherit typevar or param-spec
# vars..
MsgType = TypeVar('MsgType')
MsgType = TypeVar("MsgType")
# TODO: consider using a generic def and indexing with our eventual
# msg definition/types?
# - https://docs.python.org/3/library/typing.html#typing.Protocol
# - https://jcristharif.com/msgspec/usage.html#structs
# TODO: break up this mod into a subpkg so we can start adding new
# backends and move this type stuff into a dedicated file.. Bo
#
@runtime_checkable
class MsgTransport(Protocol[MsgType]):
#
# ^-TODO-^ consider using a generic def and indexing with our
# eventual msg definition/types?
# - https://docs.python.org/3/library/typing.html#typing.Protocol
stream: trio.SocketStream
drained: list[MsgType]
@ -135,37 +103,20 @@ class MsgTransport(Protocol[MsgType]):
...
# TODO: typing oddity.. not sure why we have to inherit here, but it
# seems to be an issue with `get_msg_transport()` returning
# a `Type[Protocol]`; probably should make a `mypy` issue?
# TODO: not sure why we have to inherit here, but it seems to be an
# issue with ``get_msg_transport()`` returning a ``Type[Protocol]``;
# probably should make a `mypy` issue?
class MsgpackTCPStream(MsgTransport):
'''
A ``trio.SocketStream`` delivering ``msgpack`` formatted data
using the ``msgspec`` codec lib.
'''
layer_key: int = 4
name_key: str = 'tcp'
# TODO: better naming for this?
# -[ ] check how libp2p does naming for such things?
codec_key: str = 'msgpack'
def __init__(
self,
stream: trio.SocketStream,
prefix_size: int = 4,
# XXX optionally provided codec pair for `msgspec`:
# https://jcristharif.com/msgspec/extending.html#mapping-to-from-native-types
#
# TODO: define this as a `Codec` struct which can be
# overriden dynamically by the application/runtime?
codec: tuple[
Callable[[Any], Any]|None, # coder
Callable[[type, Any], Any]|None, # decoder
]|None = None,
) -> None:
self.stream = stream
@ -175,44 +126,30 @@ class MsgpackTCPStream(MsgTransport):
self._laddr, self._raddr = get_stream_addrs(stream)
# create read loop instance
self._aiter_pkts = self._iter_packets()
self._agen = self._iter_packets()
self._send_lock = trio.StrictFIFOLock()
# public i guess?
self.drained: list[dict] = []
self.recv_stream = BufferedReceiveStream(
transport_stream=stream
)
self.recv_stream = BufferedReceiveStream(transport_stream=stream)
self.prefix_size = prefix_size
# allow for custom IPC msg interchange format
# dynamic override Bo
self._task = trio.lowlevel.current_task()
# XXX for ctxvar debug only!
# self._codec: MsgCodec = (
# codec
# or
# _codec._ctxvar_MsgCodec.get()
# )
# TODO: struct aware messaging coders
self.encode = msgspec.msgpack.Encoder().encode
self.decode = msgspec.msgpack.Decoder().decode # dict[str, Any])
async def _iter_packets(self) -> AsyncGenerator[dict, None]:
'''
Yield `bytes`-blob decoded packets from the underlying TCP
stream using the current task's `MsgCodec`.
This is a streaming routine implemented as an async generator
func (which was the original design, but could be changed?)
and is allocated by a `.__call__()` inside `.__init__()` where
it is assigned to the `._aiter_pkts` attr.
'''Yield packets from the underlying stream.
'''
import msgspec # noqa
decodes_failed: int = 0
while True:
try:
header: bytes = await self.recv_stream.receive_exactly(4)
header = await self.recv_stream.receive_exactly(4)
except (
ValueError,
ConnectionResetError,
@ -221,122 +158,25 @@ class MsgpackTCPStream(MsgTransport):
# seem to be getting racy failures here on
# arbiter/registry name subs..
trio.BrokenResourceError,
) as trans_err:
loglevel = 'transport'
match trans_err:
# case (
# ConnectionResetError()
# ):
# loglevel = 'transport'
# peer actor (graceful??) TCP EOF but `tricycle`
# seems to raise a 0-bytes-read?
case ValueError() if (
'unclean EOF' in trans_err.args[0]
):
pass
# peer actor (task) prolly shutdown quickly due
# to cancellation
case trio.BrokenResourceError() if (
'Connection reset by peer' in trans_err.args[0]
):
pass
# unless the disconnect condition falls under "a
# normal operation breakage" we usually console warn
# about it.
case _:
loglevel: str = 'warning'
raise TransportClosed(
message=(
f'IPC transport already closed by peer\n'
f'x)> {type(trans_err)}\n'
f' |_{self}\n'
),
loglevel=loglevel,
) from trans_err
# XXX definitely can happen if transport is closed
# manually by another `trio.lowlevel.Task` in the
# same actor; we use this in some simulated fault
# testing for ex, but generally should never happen
# under normal operation!
#
# NOTE: as such we always re-raise this error from the
# RPC msg loop!
except trio.ClosedResourceError as closure_err:
raise TransportClosed(
message=(
f'IPC transport already manually closed locally?\n'
f'x)> {type(closure_err)} \n'
f' |_{self}\n'
),
loglevel='error',
raise_on_report=(
closure_err.args[0] == 'another task closed this fd'
or
closure_err.args[0] in ['another task closed this fd']
),
) from closure_err
# graceful TCP EOF disconnect
if header == b'':
raise TransportClosed(
message=(
f'IPC transport already gracefully closed\n'
f')>\n'
f'|_{self}\n'
),
loglevel='transport',
# cause=??? # handy or no?
f'transport {self} was already closed prior to read'
)
if header == b'':
raise TransportClosed(
f'transport {self} was already closed prior to read'
)
size: int
size, = struct.unpack("<I", header)
log.transport(f'received header {size}') # type: ignore
msg_bytes: bytes = await self.recv_stream.receive_exactly(size)
msg_bytes = await self.recv_stream.receive_exactly(size)
log.transport(f"received {msg_bytes}") # type: ignore
try:
# NOTE: lookup the `trio.Task.context`'s var for
# the current `MsgCodec`.
codec: MsgCodec = _ctxvar_MsgCodec.get()
# XXX for ctxvar debug only!
# if self._codec.pld_spec != codec.pld_spec:
# assert (
# task := trio.lowlevel.current_task()
# ) is not self._task
# self._task = task
# self._codec = codec
# log.runtime(
# f'Using new codec in {self}.recv()\n'
# f'codec: {self._codec}\n\n'
# f'msg_bytes: {msg_bytes}\n'
# )
yield codec.decode(msg_bytes)
# XXX NOTE: since the below error derives from
# `DecodeError` we need to catch it specially
# and always raise such that spec violations
# are never allowed to be caught silently!
except msgspec.ValidationError as verr:
msgtyperr: MsgTypeError = _mk_recv_mte(
msg=msg_bytes,
codec=codec,
src_validation_error=verr,
)
# XXX deliver up to `Channel.recv()` where
# a re-raise and `Error`-pack can inject the far
# end actor `.uid`.
yield msgtyperr
yield self.decode(msg_bytes)
except (
msgspec.DecodeError,
UnicodeDecodeError,
@ -350,91 +190,25 @@ class MsgpackTCPStream(MsgTransport):
except UnicodeDecodeError:
msg_str = msg_bytes
log.exception(
'Failed to decode msg?\n'
f'{codec}\n\n'
'Rxed bytes from wire:\n\n'
f'{msg_str!r}\n'
log.error(
'`msgspec` failed to decode!?\n'
'dumping bytes:\n'
f'{msg_str!r}'
)
decodes_failed += 1
else:
raise
async def send(
self,
msg: msgtypes.MsgType,
strict_types: bool = True,
hide_tb: bool = False,
) -> None:
'''
Send a msgpack encoded py-object-blob-as-msg over TCP.
If `strict_types == True` then a `MsgTypeError` will be raised on any
invalid msg type
'''
__tracebackhide__: bool = hide_tb
# XXX see `trio._sync.AsyncContextManagerMixin` for details
# on the `.acquire()`/`.release()` sequencing..
async def send(self, msg: Any) -> None:
async with self._send_lock:
# NOTE: lookup the `trio.Task.context`'s var for
# the current `MsgCodec`.
codec: MsgCodec = _ctxvar_MsgCodec.get()
# XXX for ctxvar debug only!
# if self._codec.pld_spec != codec.pld_spec:
# self._codec = codec
# log.runtime(
# f'Using new codec in {self}.send()\n'
# f'codec: {self._codec}\n\n'
# f'msg: {msg}\n'
# )
if type(msg) not in msgtypes.__msg_types__:
if strict_types:
raise _mk_send_mte(
msg,
codec=codec,
)
else:
log.warning(
'Sending non-`Msg`-spec msg?\n\n'
f'{msg}\n'
)
try:
bytes_data: bytes = codec.encode(msg)
except TypeError as _err:
typerr = _err
msgtyperr: MsgTypeError = _mk_send_mte(
msg,
codec=codec,
message=(
f'IPC-msg-spec violation in\n\n'
f'{pretty_struct.Struct.pformat(msg)}'
),
src_type_error=typerr,
)
raise msgtyperr from typerr
bytes_data: bytes = self.encode(msg)
# supposedly the fastest approach, per
# https://stackoverflow.com/a/54027962
size: bytes = struct.pack("<I", len(bytes_data))
return await self.stream.send_all(size + bytes_data)
# ?TODO? does it help ever to dynamically show this
# frame?
# try:
# <the-above_code>
# except BaseException as _err:
# err = _err
# if not isinstance(err, MsgTypeError):
# __tracebackhide__: bool = False
# raise
return await self.stream.send_all(size + bytes_data)
@property
def laddr(self) -> tuple[str, int]:
@ -445,7 +219,7 @@ class MsgpackTCPStream(MsgTransport):
return self._raddr
async def recv(self) -> Any:
return await self._aiter_pkts.asend(None)
return await self._agen.asend(None)
async def drain(self) -> AsyncIterator[dict]:
'''
@ -462,7 +236,7 @@ class MsgpackTCPStream(MsgTransport):
yield msg
def __aiter__(self):
return self._aiter_pkts
return self._agen
def connected(self) -> bool:
return self.stream.socket.fileno() != -1
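# XXX a minimal, self-contained sketch of the length-prefixed framing
# used by `MsgpackTCPStream` above: a 4-byte little-endian size header
# followed by the `msgspec`-encoded payload. The helper below is
# hypothetical and only for illustration, not part of the runtime API.
def _example_frame_roundtrip() -> None:
    import struct
    import msgspec

    payload: bytes = msgspec.msgpack.Encoder().encode({'cmd': 'ping'})
    frame: bytes = struct.pack("<I", len(payload)) + payload

    # a receiver reads the 4-byte header first, then exactly `size` bytes
    size, = struct.unpack("<I", frame[:4])
    assert msgspec.msgpack.Decoder().decode(frame[4:4 + size]) == {'cmd': 'ping'}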
@ -493,7 +267,7 @@ class Channel:
def __init__(
self,
destaddr: tuple[str, int]|None,
destaddr: Optional[tuple[str, int]],
msg_transport_type_key: tuple[str, str] = ('msgpack', 'tcp'),
@ -511,31 +285,18 @@ class Channel:
# Either created in ``.connect()`` or passed in by
# user in ``.from_stream()``.
self._stream: trio.SocketStream|None = None
self._transport: MsgTransport|None = None
self._stream: Optional[trio.SocketStream] = None
self.msgstream: Optional[MsgTransport] = None
# set after handshake - always uid of far end
self.uid: tuple[str, str]|None = None
self.uid: Optional[tuple[str, str]] = None
self._aiter_msgs = self._iter_msgs()
self._exc: Exception|None = None # set if far end actor errors
self._agen = self._aiter_recv()
self._exc: Optional[Exception] = None # set if far end actor errors
self._closed: bool = False
# flag set by ``Portal.cancel_actor()`` indicating remote
# (possibly peer) cancellation of the far end actor
# runtime.
self._cancel_called: bool = False
@property
def msgstream(self) -> MsgTransport:
log.info(
'`Channel.msgstream` is an old name, use `._transport`'
)
return self._transport
@property
def transport(self) -> MsgTransport:
return self._transport
# flag set on ``Portal.cancel_actor()`` indicating
# remote (peer) cancellation of the far end actor runtime.
self._cancel_called: bool = False # set on ``Portal.cancel_actor()``
@classmethod
def from_stream(
@ -546,78 +307,37 @@ class Channel:
) -> Channel:
src, dst = get_stream_addrs(stream)
chan = Channel(
destaddr=dst,
**kwargs,
)
chan = Channel(destaddr=dst, **kwargs)
# set immediately here from provided instance
chan._stream: trio.SocketStream = stream
chan._stream = stream
chan.set_msg_transport(stream)
return chan
def set_msg_transport(
self,
stream: trio.SocketStream,
type_key: tuple[str, str]|None = None,
# XXX optionally provided codec pair for `msgspec`:
# https://jcristharif.com/msgspec/extending.html#mapping-to-from-native-types
codec: MsgCodec|None = None,
type_key: Optional[tuple[str, str]] = None,
) -> MsgTransport:
type_key = (
type_key
or
self._transport_key
)
# get transport type, then
self._transport = get_msg_transport(
type_key
# instantiate an instance of the msg-transport
)(
stream,
codec=codec,
)
return self._transport
type_key = type_key or self._transport_key
self.msgstream = get_msg_transport(type_key)(stream)
return self.msgstream
@cm
def apply_codec(
self,
codec: MsgCodec,
) -> None:
'''
Temporarily override the underlying IPC msg codec for
dynamic enforcement of messaging schema.
'''
orig: MsgCodec = self._transport.codec
try:
self._transport.codec = codec
yield
finally:
self._transport.codec = orig
# TODO: do a .src/.dst: str for maddrs?
def __repr__(self) -> str:
if not self._transport:
return '<Channel with inactive transport?>'
if self.msgstream:
return repr(
self._transport.stream.socket._sock
).replace( # type: ignore
"socket.socket",
"Channel",
)
self.msgstream.stream.socket._sock).replace( # type: ignore
"socket.socket", "Channel")
return object.__repr__(self)
@property
def laddr(self) -> tuple[str, int]|None:
return self._transport.laddr if self._transport else None
def laddr(self) -> Optional[tuple[str, int]]:
return self.msgstream.laddr if self.msgstream else None
@property
def raddr(self) -> tuple[str, int]|None:
return self._transport.raddr if self._transport else None
def raddr(self) -> Optional[tuple[str, int]]:
return self.msgstream.raddr if self.msgstream else None
async def connect(
self,
@ -636,62 +356,26 @@ class Channel:
*destaddr,
**kwargs
)
transport = self.set_msg_transport(stream)
msgstream = self.set_msg_transport(stream)
log.transport(
f'Opened channel[{type(transport)}]: {self.laddr} -> {self.raddr}'
f'Opened channel[{type(msgstream)}]: {self.laddr} -> {self.raddr}'
)
return transport
return msgstream
# TODO: something like,
# `pdbp.hideframe_on(errors=[MsgTypeError])`
# instead of the `try/except` hack we have rn..
# seems like a pretty useful thing to have in general
# along with being able to filter certain stack frame(s / sets)
# possibly based on the current log-level?
async def send(
self,
payload: Any,
async def send(self, item: Any) -> None:
hide_tb: bool = False,
log.transport(f"send `{item}`") # type: ignore
assert self.msgstream
) -> None:
'''
Send a coded msg-blob over the transport.
'''
__tracebackhide__: bool = hide_tb
try:
log.transport(
'=> send IPC msg:\n\n'
f'{pformat(payload)}\n'
)
# assert self._transport # but why typing?
await self._transport.send(
payload,
hide_tb=hide_tb,
)
except BaseException as _err:
err = _err # bind for introspection
if not isinstance(_err, MsgTypeError):
# assert err
__tracebackhide__: bool = False
else:
assert err.cid
raise
await self.msgstream.send(item)
async def recv(self) -> Any:
assert self._transport
return await self._transport.recv()
assert self.msgstream
return await self.msgstream.recv()
# TODO: auto-reconnect features like 0mq/nanomsg?
# -[ ] implement it manually with nods to SC prot
# possibly on multiple transport backends?
# -> seems like that might be re-inventing scalability
# prots tho no?
# try:
# return await self._transport.recv()
# return await self.msgstream.recv()
# except trio.BrokenResourceError:
# if self._autorecon:
# await self._reconnect()
@ -704,8 +388,8 @@ class Channel:
f'Closing channel to {self.uid} '
f'{self.laddr} -> {self.raddr}'
)
assert self._transport
await self._transport.stream.aclose()
assert self.msgstream
await self.msgstream.stream.aclose()
self._closed = True
async def __aenter__(self):
@ -716,11 +400,8 @@ class Channel:
await self.aclose(*args)
def __aiter__(self):
return self._aiter_msgs
return self._agen
# ?TODO? run any reconnection sequence?
# -[ ] prolly should be impl-ed as deco-API?
#
# async def _reconnect(self) -> None:
# """Handle connection failures by polling until a reconnect can be
# established.
@ -738,6 +419,7 @@ class Channel:
# else:
# log.transport("Stream connection re-established!")
# # TODO: run any reconnection sequence
# # on_recon = self._recon_seq
# # if on_recon:
# # await on_recon(self)
@ -751,42 +433,23 @@ class Channel:
# " for re-establishment")
# await trio.sleep(1)
async def _iter_msgs(
async def _aiter_recv(
self
) -> AsyncGenerator[Any, None]:
'''
Yield `MsgType` IPC msgs decoded and delivered from
an underlying `MsgTransport` protocol.
This is a streaming routine also implemented as an async-gen
func (same as `MsgTransport._iter_pkts()`) which gets allocated by
a `.__call__()` inside `.__init__()` where it is assigned to
the `._aiter_msgs` attr.
Async iterate items from underlying stream.
'''
assert self._transport
assert self.msgstream
while True:
try:
async for msg in self._transport:
match msg:
# NOTE: if transport/interchange delivers
# a type error, we pack it with the far
# end peer `Actor.uid` and relay the
# `Error`-msg upward to the `._rpc` stack
# for normal RAE handling.
case MsgTypeError():
yield pack_from_raise(
local_err=msg,
cid=msg.cid,
# XXX we pack it here bc lower
# layers have no notion of an
# actor-id ;)
src_uid=self.uid,
)
case _:
yield msg
async for item in self.msgstream:
yield item
# sent = yield item
# if sent is not None:
# # optimization, passing None through all the
# # time is pointless
# await self.msgstream.send(sent)
except trio.BrokenResourceError:
# if not self._autorecon:
@ -799,14 +462,12 @@ class Channel:
# continue
def connected(self) -> bool:
return self._transport.connected() if self._transport else False
return self.msgstream.connected() if self.msgstream else False
@acm
@asynccontextmanager
async def _connect_chan(
host: str,
port: int
host: str, port: int
) -> typing.AsyncGenerator[Channel, None]:
'''
Create and connect a channel with disconnect on context manager
@ -816,5 +477,4 @@ async def _connect_chan(
chan = Channel((host, port))
await chan.connect()
yield chan
with trio.CancelScope(shield=True):
await chan.aclose()
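# XXX a minimal usage sketch; assumes some peer actor is already
# listening at the (hypothetical) host/port pair below:
async def _example_connect_chan() -> None:
    async with _connect_chan('127.0.0.1', 1616) as chan:
        assert chan.connected()
        # msgs can now be exchanged via `chan.send()`/`chan.recv()`
        # (subject to the active `MsgCodec`'s spec).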


@ -1,151 +0,0 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Multiaddress parser and utils according to the spec(s) defined by
`libp2p` and used in dependent projects such as `ipfs`:
- https://docs.libp2p.io/concepts/fundamentals/addressing/
- https://github.com/libp2p/specs/blob/master/addressing/README.md
'''
from typing import Iterator
from bidict import bidict
# TODO: see if we can leverage libp2p ecosys projects instead of
# rolling our own (parser) impls of the above addressing specs:
# - https://github.com/libp2p/py-libp2p
# - https://docs.libp2p.io/concepts/nat/circuit-relay/#relay-addresses
# prots: bidict[int, str] = bidict({
prots: bidict[int, str] = {
'ipv4': 3,
'ipv6': 3,
'wg': 3,
'tcp': 4,
'udp': 4,
# TODO: support the next-gen shite Bo
# 'quic': 4,
# 'ssh': 7, # via rsyscall bootstrapping
}
prot_params: dict[str, tuple[str]] = {
'ipv4': ('addr',),
'ipv6': ('addr',),
'wg': ('addr', 'port', 'pubkey'),
'tcp': ('port',),
'udp': ('port',),
# 'quic': ('port',),
# 'ssh': ('port',),
}
def iter_prot_layers(
multiaddr: str,
) -> Iterator[
tuple[
int,
list[str]
]
]:
'''
Unpack a libp2p style "multiaddress" into multiple "segments"
for each "layer" of the protocoll stack (in OSI terms).
'''
tokens: list[str] = multiaddr.split('/')
root, tokens = tokens[0], tokens[1:]
assert not root # there is a root '/' on LHS
itokens = iter(tokens)
prot: str | None = None
params: list[str] = []
for token in itokens:
# every prot path should start with a known
# key-str.
if token in prots:
if prot is None:
prot: str = token
else:
yield prot, params
prot = token
params = []
elif token not in prots:
params.append(token)
else:
yield prot, params
def parse_maddr(
multiaddr: str,
) -> dict[str, str | int | dict]:
'''
Parse a libp2p style "multiaddress" into its distinct protocol
segments where each segment is of the form:
`../<protocol>/<param0>/<param1>/../<paramN>`
and is loaded into an (order preserving) `layers: dict[str,
dict[str, Any]]` which holds each protocol-layer-segment of the
original `str` path as a separate entry according to its approx
OSI "layer number".
Any `paramN` in the path must be distinctly defined by a str-token in the
(module global) `prot_params` table.
For example, for wireguard, which requires an address, port number and public key,
the protocol params are specified as the entry:
'wg': ('addr', 'port', 'pubkey'),
and are thus parsed from a maddr in that order:
`'/wg/1.1.1.1/51820/<pubkey>'`
'''
layers: dict[str, str | int | dict] = {}
for (
prot_key,
params,
) in iter_prot_layers(multiaddr):
layer: int = prots[prot_key] # OSI layer used for sorting
ep: dict[str, int | str] = {'layer': layer}
layers[prot_key] = ep
# TODO; validation and resolving of names:
# - each param via a validator provided as part of the
# prot_params def? (also see `"port"` case below..)
# - do a resolv step that will check addrs against
# any loaded network.resolv: dict[str, str]
rparams: list = list(reversed(params))
for key in prot_params[prot_key]:
val: str | int = rparams.pop()
# TODO: UGHH, dunno what we should do for validation
# here, put it in the params spec somehow?
if key == 'port':
val = int(val)
ep[key] = val
return layers
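# XXX a small usage sketch; the multiaddr below is hypothetical but the
# expected output follows directly from the parsing rules above:
def _example_parse_maddr() -> None:
    layers = parse_maddr('/ipv4/127.0.0.1/tcp/1616')
    assert layers == {
        'ipv4': {'layer': 3, 'addr': '127.0.0.1'},
        'tcp': {'layer': 4, 'port': 1616},
    }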


@ -15,70 +15,73 @@
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Memory "portal" contruct.
"Memory portals" are both an API and set of IPC wrapping primitives
for managing structured concurrency "cancel-scope linked" tasks
running in disparate virtual memory domains - at least in different
OS processes, possibly on different (hardware) hosts.
Memory boundary "Portals": an API for structured
concurrency linked tasks running in disparate memory domains.
'''
from __future__ import annotations
from contextlib import asynccontextmanager as acm
import importlib
import inspect
from typing import (
Any,
Callable,
AsyncGenerator,
TYPE_CHECKING,
Any, Optional,
Callable, AsyncGenerator,
Type,
)
from functools import partial
from dataclasses import dataclass
from pprint import pformat
import warnings
import trio
from async_generator import asynccontextmanager
from .trionics import maybe_open_nursery
from ._state import (
current_actor,
)
from ._state import current_actor
from ._ipc import Channel
from .log import get_logger
from .msg import (
# Error,
PayloadMsg,
NamespacePath,
Return,
)
from .msg import NamespacePath
from ._exceptions import (
# unpack_error,
unpack_error,
NoResult,
)
from ._context import (
Context,
open_context_from_portal,
ContextCancelled,
)
from ._streaming import (
Context,
MsgStream,
)
if TYPE_CHECKING:
from ._runtime import Actor
log = get_logger(__name__)
def _unwrap_msg(
msg: dict[str, Any],
channel: Channel
) -> Any:
__tracebackhide__ = True
try:
return msg['return']
except KeyError:
# internal error should never get here
assert msg.get('cid'), "Received internal error at portal?"
raise unpack_error(msg, channel) from None
class MessagingError(Exception):
'Some kind of unexpected SC messaging dialog issue'
class Portal:
'''
A 'portal' to a memory-domain-separated `Actor`.
A 'portal' to a(n) (remote) ``Actor``.
A portal is "opened" (and eventually closed) by one side of an
inter-actor communication context. The side which opens the portal
is equivalent to a "caller" in function parlance and usually is
either the called actor's parent (in process tree hierarchy terms)
or a client interested in scheduling work to be done remotely in a
process which has a separate (virtual) memory domain.
far process.
The portal api allows the "caller" actor to invoke remote routines
and receive results through an underlying ``tractor.Channel`` as
@ -88,45 +91,22 @@ class Portal:
like having a "portal" between the seperate actor memory spaces.
'''
# global timeout for remote cancel requests sent to
# connected (peer) actors.
cancel_timeout: float = 0.5
# the timeout for a remote cancel request sent to
# a(n) (peer) actor.
cancel_timeout = 0.5
def __init__(
self,
channel: Channel,
) -> None:
self._chan: Channel = channel
def __init__(self, channel: Channel) -> None:
self.channel = channel
# during the portal's lifetime
self._final_result_pld: Any|None = None
self._final_result_msg: PayloadMsg|None = None
self._result_msg: Optional[dict] = None
# When set to a ``Context`` (when _submit_for_result is called)
# it is expected that ``result()`` will be awaited at some
# point.
self._expect_result_ctx: Context|None = None
self._expect_result: Optional[Context] = None
self._streams: set[MsgStream] = set()
self.actor: Actor = current_actor()
self.actor = current_actor()
@property
def chan(self) -> Channel:
return self._chan
@property
def channel(self) -> Channel:
'''
Proxy to legacy attr name..
Consider the shorter `Portal.chan` instead of `.channel` ;)
'''
log.debug(
'Consider the shorter `Portal.chan` instead of `.channel` ;)'
)
return self.chan
# TODO: factor this out into a `.highlevel` API-wrapper that uses
# a single `.open_context()` call underneath.
async def _submit_for_result(
self,
ns: str,
@ -134,34 +114,32 @@ class Portal:
**kwargs
) -> None:
if self._expect_result_ctx is not None:
raise RuntimeError(
'A pending main result has already been submitted'
)
assert self._expect_result is None, \
"A pending main result has already been submitted"
self._expect_result_ctx: Context = await self.actor.start_remote_task(
self._expect_result = await self.actor.start_remote_task(
self.channel,
nsf=NamespacePath(f'{ns}:{func}'),
kwargs=kwargs,
portal=self,
ns,
func,
kwargs
)
# TODO: we should deprecate this API right? since if we remove
# `.run_in_actor()` (and instead move it to a `.highlevel`
# wrapper api (around a single `.open_context()` call) we don't
# really have any notion of a "main" remote task any more?
#
# @api_frame
async def wait_for_result(
async def _return_once(
self,
hide_tb: bool = True,
) -> Any:
ctx: Context,
) -> dict[str, Any]:
assert ctx._remote_func_type == 'asyncfunc' # single response
msg = await ctx._recv_chan.receive()
return msg
async def result(self) -> Any:
'''
Return the final result delivered by a `Return`-msg from the
remote peer actor's "main" task's `return` statement.
Return the result(s) from the remote actor's "main" task.
'''
__tracebackhide__: bool = hide_tb
# __tracebackhide__ = True
# Check for non-rpc errors slapped on the
# channel for which we always raise
exc = self.channel._exc
@ -169,7 +147,7 @@ class Portal:
raise exc
# not expecting a "main" result
if self._expect_result_ctx is None:
if self._expect_result is None:
log.warning(
f"Portal for {self.channel.uid} not expecting a final"
" result?\nresult() should only be called if subactor"
@ -177,41 +155,14 @@ class Portal:
return NoResult
# expecting a "main" result
assert self._expect_result_ctx
assert self._expect_result
if self._final_result_msg is None:
try:
(
self._final_result_msg,
self._final_result_pld,
) = await self._expect_result_ctx._pld_rx.recv_msg_w_pld(
ipc=self._expect_result_ctx,
expect_msg=Return,
if self._result_msg is None:
self._result_msg = await self._return_once(
self._expect_result
)
except BaseException as err:
# TODO: wrap this into `@api_frame` optionally with
# some kinda filtering mechanism like log levels?
__tracebackhide__: bool = False
raise err
return self._final_result_pld
# TODO: factor this out into a `.highlevel` API-wrapper that uses
# a single `.open_context()` call underneath.
async def result(
self,
*args,
**kwargs,
) -> Any|Exception:
typname: str = type(self).__name__
log.warning(
f'`{typname}.result()` is DEPRECATED!\n'
f'Use `{typname}.wait_for_result()` instead!\n'
)
return await self.wait_for_result(
*args,
**kwargs,
)
return _unwrap_msg(self._result_msg, self.channel)
async def _cancel_streams(self):
# terminate all locally running async generator
@ -242,60 +193,30 @@ class Portal:
) -> bool:
'''
Cancel the actor runtime (and thus process) on the far
end of this portal.
**NOTE** THIS CANCELS THE ENTIRE RUNTIME AND THE
SUBPROCESS, it DOES NOT just cancel the remote task. If you
want to have a handle to cancel a remote ``trio.Task``, look
at `.open_context()` and the definition of
`._context.Context.cancel()` which CAN be used for this
purpose.
Cancel the actor on the other end of this portal.
'''
__runtimeframe__: int = 1 # noqa
chan: Channel = self.channel
if not chan.connected():
log.runtime(
'This channel is already closed, skipping cancel request..'
)
if not self.channel.connected():
log.cancel("This channel is already closed can't cancel")
return False
reminfo: str = (
f'c)=> {self.channel.uid}\n'
f' |_{chan}\n'
)
log.cancel(
f'Requesting actor-runtime cancel for peer\n\n'
f'{reminfo}'
)
f"Sending actor cancel request to {self.channel.uid} on "
f"{self.channel}")
self.channel._cancel_called = True
# XXX the one spot we set it?
self.channel._cancel_called: bool = True
try:
# send cancel cmd - might not get response
# XXX: sure would be nice to make this work with
# a proper shield
with trio.move_on_after(
timeout
or
self.cancel_timeout
) as cs:
cs.shield: bool = True
await self.run_from_ns(
'self',
'cancel',
)
# XXX: sure would be nice to make this work with a proper shield
with trio.move_on_after(timeout or self.cancel_timeout) as cs:
cs.shield = True
await self.run_from_ns('self', 'cancel')
return True
if cs.cancelled_caught:
# may timeout and we never get an ack (obvi racy)
# but that doesn't mean it wasn't cancelled.
log.debug(
'May have failed to cancel peer?\n'
f'{reminfo}'
)
log.cancel(f"May have failed to cancel {self.channel.uid}")
# if we get here some weird cancellation case happened
return False
@ -304,15 +225,11 @@ class Portal:
trio.ClosedResourceError,
trio.BrokenResourceError,
):
log.debug(
'IPC chan for actor already closed or broken?\n\n'
f'{self.channel.uid}\n'
f' |_{self.channel}\n'
)
log.cancel(
f"{self.channel} for {self.channel.uid} was already "
"closed or broken?")
return False
# TODO: do we still need this for low level `Actor`-runtime
# method calls or can we also remove it?
async def run_from_ns(
self,
namespace_path: str,
@ -331,33 +248,25 @@ class Portal:
A special namespace `self` can be used to invoke `Actor`
instance methods in the remote runtime. Currently this
should only ever be used for `Actor` (method) runtime
internals!
should only be used for ``tractor`` runtime
internals.
'''
__runtimeframe__: int = 1 # noqa
nsf = NamespacePath(
f'{namespace_path}:{function_name}'
)
ctx: Context = await self.actor.start_remote_task(
chan=self.channel,
nsf=nsf,
kwargs=kwargs,
portal=self,
)
return await ctx._pld_rx.recv_pld(
ipc=ctx,
expect_msg=Return,
ctx = await self.actor.start_remote_task(
self.channel,
namespace_path,
function_name,
kwargs,
)
ctx._portal = self
msg = await self._return_once(ctx)
return _unwrap_msg(msg, self.channel)
# TODO: factor this out into a `.highlevel` API-wrapper that uses
# a single `.open_context()` call underneath.
async def run(
self,
func: str,
fn_name: str|None = None,
fn_name: Optional[str] = None,
**kwargs
) -> Any:
'''
Submit a remote function to be scheduled and run by actor, in
@ -367,8 +276,6 @@ class Portal:
remote rpc task or a local async generator instance.
'''
__runtimeframe__: int = 1 # noqa
if isinstance(func, str):
warnings.warn(
"`Portal.run(namespace: str, funcname: str)` is now"
@ -378,9 +285,8 @@ class Portal:
DeprecationWarning,
stacklevel=2,
)
fn_mod_path: str = func
fn_mod_path = func
assert isinstance(fn_name, str)
nsf = NamespacePath(f'{fn_mod_path}:{fn_name}')
else: # function reference was passed directly
if (
@ -393,36 +299,27 @@ class Portal:
raise TypeError(
f'{func} must be a non-streaming async function!')
nsf = NamespacePath.from_ref(func)
fn_mod_path, fn_name = NamespacePath.from_ref(func).to_tuple()
ctx = await self.actor.start_remote_task(
self.channel,
nsf=nsf,
kwargs=kwargs,
portal=self,
fn_mod_path,
fn_name,
kwargs,
)
return await ctx._pld_rx.recv_pld(
ipc=ctx,
expect_msg=Return,
ctx._portal = self
return _unwrap_msg(
await self._return_once(ctx),
self.channel,
)
# TODO: factor this out into a `.highlevel` API-wrapper that uses
# a single `.open_context()` call underneath.
@acm
@asynccontextmanager
async def open_stream_from(
self,
async_gen_func: Callable, # typing: ignore
**kwargs,
) -> AsyncGenerator[MsgStream, None]:
'''
Legacy one-way streaming API.
TODO: re-impl on top of `Portal.open_context()` + an async gen
around `Context.open_stream()`.
'''
__runtimeframe__: int = 1 # noqa
if not inspect.isasyncgenfunction(async_gen_func):
if not (
@ -432,12 +329,15 @@ class Portal:
raise TypeError(
f'{async_gen_func} must be an async generator function!')
ctx: Context = await self.actor.start_remote_task(
fn_mod_path, fn_name = NamespacePath.from_ref(
async_gen_func).to_tuple()
ctx = await self.actor.start_remote_task(
self.channel,
nsf=NamespacePath.from_ref(async_gen_func),
kwargs=kwargs,
portal=self,
fn_mod_path,
fn_name,
kwargs
)
ctx._portal = self
# ensure receive-only stream entrypoint
assert ctx._remote_func_type == 'asyncgen'
@ -445,14 +345,13 @@ class Portal:
try:
# deliver receive only stream
async with MsgStream(
ctx=ctx,
rx_chan=ctx._rx_chan,
) as stream:
self._streams.add(stream)
ctx._stream = stream
yield stream
ctx, ctx._recv_chan,
) as rchan:
self._streams.add(rchan)
yield rchan
finally:
# cancel the far end task on consumer close
# NOTE: this is a special case since we assume that if using
# this ``.open_stream_from()`` api, the stream is one a one
@ -471,14 +370,177 @@ class Portal:
# XXX: should this always be done?
# await recv_chan.aclose()
self._streams.remove(stream)
self._streams.remove(rchan)
# NOTE: impl is found in the `._context` mod to make
# reading/grokking the details simpler code-org-wise. This
# method does not have to be used over that `@acm` module func
# directly, it is for convenience and from the original API
# design.
open_context = open_context_from_portal
@asynccontextmanager
async def open_context(
self,
func: Callable,
**kwargs,
) -> AsyncGenerator[tuple[Context, Any], None]:
'''
Open an inter-actor task context.
This is a synchronous API which allows for deterministic
setup/teardown of a remote task. The yielded ``Context`` further
allows for opening bidirectional streams, explicit cancellation
and synchronized final result collection. See ``tractor.Context``.
'''
# conduct target func method structural checks
if not inspect.iscoroutinefunction(func) and (
getattr(func, '_tractor_contex_function', False)
):
raise TypeError(
f'{func} must be an async generator function!')
fn_mod_path, fn_name = NamespacePath.from_ref(func).to_tuple()
ctx = await self.actor.start_remote_task(
self.channel,
fn_mod_path,
fn_name,
kwargs
)
assert ctx._remote_func_type == 'context'
msg = await ctx._recv_chan.receive()
try:
# the "first" value here is delivered by the callee's
# ``Context.started()`` call.
first = msg['started']
ctx._started_called = True
except KeyError:
assert msg.get('cid'), ("Received internal error at context?")
if msg.get('error'):
# raise kerr from unpack_error(msg, self.channel)
raise unpack_error(msg, self.channel) from None
else:
raise MessagingError(
f'Context for {ctx.cid} was expecting a `started` message'
f' but received a non-error msg:\n{pformat(msg)}'
)
_err: Optional[BaseException] = None
ctx._portal = self
uid = self.channel.uid
cid = ctx.cid
etype: Optional[Type[BaseException]] = None
# deliver context instance and .started() msg value in open tuple.
try:
async with trio.open_nursery() as scope_nursery:
ctx._scope_nursery = scope_nursery
# do we need this?
# await trio.lowlevel.checkpoint()
yield ctx, first
except ContextCancelled as err:
_err = err
if not ctx._cancel_called:
# context was cancelled at the far end but was
# not part of this end requesting that cancel
# so raise for the local task to respond and handle.
raise
# if the context was cancelled by client code
# then we don't need to raise since user code
# is expecting this and the block should exit.
else:
log.debug(f'Context {ctx} cancelled gracefully')
except (
BaseException,
# more specifically, we need to handle these but not
# sure it's worth being pedantic:
# Exception,
# trio.Cancelled,
# KeyboardInterrupt,
) as err:
etype = type(err)
# the context cancels itself on any cancel
# causing error.
if ctx.chan.connected():
log.cancel(
'Context cancelled for task, sending cancel request..\n'
f'task:{cid}\n'
f'actor:{uid}'
)
await ctx.cancel()
else:
log.warning(
'IPC connection for context is broken?\n'
f'task:{cid}\n'
f'actor:{uid}'
)
raise
finally:
# in the case where a runtime nursery (due to internal bug)
# or a remote actor transmits an error we want to be
# sure we get the error from the underlying feeder mem chan.
# if it's not raised here it *should* be raised from the
# msg loop nursery right?
if ctx.chan.connected():
log.info(
'Waiting on final context-task result for\n'
f'task: {cid}\n'
f'actor: {uid}'
)
result = await ctx.result()
log.runtime(
f'Context {fn_name} returned '
f'value from callee `{result}`'
)
# though it should be impossible for any tasks
# operating *in* this scope to have survived
# we tear down the runtime feeder chan last
# to avoid premature stream clobbers.
if ctx._recv_chan is not None:
# should we encapsulate this in the context api?
await ctx._recv_chan.aclose()
if etype:
if ctx._cancel_called:
log.cancel(
f'Context {fn_name} cancelled by caller with\n{etype}'
)
elif _err is not None:
log.cancel(
f'Context for task cancelled by callee with {etype}\n'
f'target: `{fn_name}`\n'
f'task:{cid}\n'
f'actor:{uid}'
)
# XXX: (MEGA IMPORTANT) if this is a root opened process we
# wait for any immediate child in debug before popping the
# context from the runtime msg loop otherwise inside
# ``Actor._push_result()`` the msg will be discarded and in
# the case where that msg is global debugger unlock (via
# a "stop" msg for a stream), this can result in a deadlock
# where the root is waiting on the lock to clear but the
# child has already cleared it and clobbered IPC.
from ._debug import maybe_wait_for_debugger
await maybe_wait_for_debugger()
# remove the context from runtime tracking
self.actor._contexts.pop(
(self.channel.uid, ctx.cid),
None,
)
@dataclass
@ -493,12 +555,7 @@ class LocalPortal:
actor: 'Actor' # type: ignore # noqa
channel: Channel
async def run_from_ns(
self,
ns: str,
func_name: str,
**kwargs,
) -> Any:
async def run_from_ns(self, ns: str, func_name: str, **kwargs) -> Any:
'''
Run a requested local function from a namespace path and
return it's result.
@ -509,11 +566,11 @@ class LocalPortal:
return await func(**kwargs)
@acm
@asynccontextmanager
async def open_portal(
channel: Channel,
tn: trio.Nursery|None = None,
nursery: Optional[trio.Nursery] = None,
start_msg_loop: bool = True,
shield: bool = False,
@ -521,19 +578,15 @@ async def open_portal(
'''
Open a ``Portal`` through the provided ``channel``.
Spawns a background task to handle RPC processing, normally
done by the actor-runtime implicitly via a call to
`._rpc.process_messages()`, just after connection establishment.
Spawns a background task to handle message processing (normally
done by the actor-runtime implicitly).
'''
actor = current_actor()
assert actor
was_connected: bool = False
was_connected = False
async with maybe_open_nursery(
tn,
shield=shield,
) as tn:
async with maybe_open_nursery(nursery, shield=shield) as nursery:
if not channel.connected():
await channel.connect()
@ -542,10 +595,10 @@ async def open_portal(
if channel.uid is None:
await actor._do_handshake(channel)
msg_loop_cs: trio.CancelScope|None = None
msg_loop_cs: Optional[trio.CancelScope] = None
if start_msg_loop:
from ._runtime import process_messages
msg_loop_cs = await tn.start(
msg_loop_cs = await nursery.start(
partial(
process_messages,
actor,
@ -562,10 +615,12 @@ async def open_portal(
await portal.aclose()
if was_connected:
await channel.aclose()
# gracefully signal remote channel-msg loop
await channel.send(None)
# await channel.aclose()
# cancel background msg loop task
if msg_loop_cs is not None:
if msg_loop_cs:
msg_loop_cs.cancel()
tn.cancel_scope.cancel()
nursery.cancel_scope.cancel()
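# XXX a minimal usage sketch; assumes this runs *inside* an already
# initialized actor runtime and that a peer actor is listening at the
# (hypothetical) address below - the namespace path passed to `.run()`
# is a placeholder as well:
async def _example_open_portal() -> None:
    from ._ipc import _connect_chan

    async with _connect_chan('127.0.0.1', 1616) as chan:
        async with open_portal(chan) as portal:
            # schedule + await a remote async function by namespace path
            result = await portal.run('some_pkg.some_mod', 'some_func')
            return result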


@ -18,28 +18,26 @@
Root actor runtime ignition(s).
'''
from contextlib import asynccontextmanager as acm
from contextlib import asynccontextmanager
from functools import partial
import importlib
import inspect
import logging
import os
import signal
import sys
from typing import Callable
import os
import typing
import warnings
from exceptiongroup import BaseExceptionGroup
import trio
from ._runtime import (
Actor,
Arbiter,
# TODO: rename and make a non-actor subtype?
# Arbiter as Registry,
async_main,
)
from .devx import _debug
from . import _debug
from . import _spawn
from . import _state
from . import log
@ -48,28 +46,22 @@ from ._exceptions import is_multi_cancelled
# set at startup and after forks
_default_host: str = '127.0.0.1'
_default_port: int = 1616
# default registry always on localhost
_default_lo_addrs: list[tuple[str, int]] = [(
_default_host,
_default_port,
)]
_default_arbiter_host: str = '127.0.0.1'
_default_arbiter_port: int = 1616
logger = log.get_logger('tractor')
@acm
@asynccontextmanager
async def open_root_actor(
*,
# defaults are above
registry_addrs: list[tuple[str, int]]|None = None,
arbiter_addr: tuple[str, int] | None = None,
# defaults are above
arbiter_addr: tuple[str, int]|None = None,
registry_addr: tuple[str, int] | None = None,
name: str | None = 'root',
@ -80,8 +72,6 @@ async def open_root_actor(
# enables the multi-process debugger support
debug_mode: bool = False,
maybe_enable_greenback: bool = True, # `.pause_from_sync()/breakpoint()` support
enable_stack_on_sig: bool = False,
# internal logging
loglevel: str | None = None,
@ -89,79 +79,27 @@ async def open_root_actor(
enable_modules: list | None = None,
rpc_module_paths: list | None = None,
# NOTE: allow caller to ensure that only one registry exists
# and that this call creates it.
ensure_registry: bool = False,
hide_tb: bool = True,
) -> Actor:
) -> typing.Any:
'''
Runtime init entry point for ``tractor``.
'''
__tracebackhide__: bool = hide_tb
_debug.hide_runtime_frames()
# TODO: stick this in a `@cm` defined in `devx._debug`?
#
# Override the global debugger hook to make it play nice with
# ``trio``, see much discussion in:
# https://github.com/python-trio/trio/issues/1155#issuecomment-742964018
builtin_bp_handler: Callable = sys.breakpointhook
orig_bp_path: str|None = os.environ.get(
'PYTHONBREAKPOINT',
None,
)
if (
debug_mode
and maybe_enable_greenback
and (
maybe_mod := await _debug.maybe_init_greenback(
raise_not_found=False,
)
)
):
logger.info(
f'Found `greenback` installed @ {maybe_mod}\n'
'Enabling `tractor.pause_from_sync()` support!\n'
)
os.environ['PYTHONBREAKPOINT'] = (
'tractor.devx._debug._sync_pause_from_builtin'
)
_state._runtime_vars['use_greenback'] = True
else:
# TODO: disable `breakpoint()` by default (without
# `greenback`) since it will break any multi-actor
# usage by a clobbered TTY's stdstreams!
def block_bps(*args, **kwargs):
raise RuntimeError(
'Trying to use `breakpoint()` eh?\n\n'
'Welp, `tractor` blocks `breakpoint()` built-in calls by default!\n'
'If you need to use it please install `greenback` and set '
'`debug_mode=True` when opening the runtime '
'(either via `.open_nursery()` or `open_root_actor()`)\n'
)
sys.breakpointhook = block_bps
# lol ok,
# https://docs.python.org/3/library/sys.html#sys.breakpointhook
os.environ['PYTHONBREAKPOINT'] = "0"
builtin_bp_handler = sys.breakpointhook
orig_bp_path: str | None = os.environ.get('PYTHONBREAKPOINT', None)
os.environ['PYTHONBREAKPOINT'] = 'tractor._debug._set_trace'
# attempt to retrieve ``trio``'s sigint handler and stash it
# on our debugger lock state.
_debug.DebugStatus._trio_handler = signal.getsignal(signal.SIGINT)
_debug.Lock._trio_handler = signal.getsignal(signal.SIGINT)
# mark top most level process as root actor
_state._runtime_vars['_is_root'] = True
# caps based rpc list
enable_modules = (
enable_modules
or
[]
)
enable_modules = enable_modules or []
if rpc_module_paths:
warnings.warn(
@ -177,34 +115,29 @@ async def open_root_actor(
if arbiter_addr is not None:
warnings.warn(
'`arbiter_addr` is now deprecated\n'
'Use `registry_addrs: list[tuple]` instead..',
'`arbiter_addr` is now deprecated and has been renamed to'
'`registry_addr`.\nUse that instead..',
DeprecationWarning,
stacklevel=2,
)
registry_addrs = [arbiter_addr]
registry_addrs: list[tuple[str, int]] = (
registry_addrs
or
_default_lo_addrs
registry_addr = (host, port) = (
registry_addr
or arbiter_addr
or (
_default_arbiter_host,
_default_arbiter_port,
)
)
assert registry_addrs
loglevel = (
loglevel
or log._default_loglevel
).upper()
loglevel = (loglevel or log._default_loglevel).upper()
if (
debug_mode
and _spawn._spawn_method == 'trio'
):
if debug_mode and _spawn._spawn_method == 'trio':
_state._runtime_vars['_debug_mode'] = True
# expose internal debug module to every actor allowing for
# use of ``await tractor.pause()``
enable_modules.append('tractor.devx._debug')
# expose internal debug module to every actor allowing
# for use of ``await tractor.breakpoint()``
enable_modules.append('tractor._debug')
# if debug mode get's enabled *at least* use that level of
# logging for some informative console prompts.
@ -217,178 +150,97 @@ async def open_root_actor(
):
loglevel = 'PDB'
elif debug_mode:
raise RuntimeError(
"Debug mode is only supported for the `trio` backend!"
)
assert loglevel
_log = log.get_console_log(loglevel)
assert _log
log.get_console_log(loglevel)
# TODO: factor this into `.devx._stackscope`!!
if (
debug_mode
and
enable_stack_on_sig
):
from .devx._stackscope import enable_stack_on_sig
enable_stack_on_sig()
# closed into below ping task-func
ponged_addrs: list[tuple[str, int]] = []
async def ping_tpt_socket(
addr: tuple[str, int],
timeout: float = 1,
) -> None:
'''
Attempt temporary connection to see if a registry is
listening at the requested address by a transport layer
ping.
If a connection can't be made quickly we assume no
server is listening at that addr.
'''
try:
# TODO: this connect-and-bail forces us to have to
# carefully rewrap TCP 104-connection-reset errors as
# EOF so as to avoid propagating cancel-causing errors
# to the channel-msg loop machinery. Likely it would
# be better to eventually have a "discovery" protocol
# with basic handshake instead?
with trio.move_on_after(timeout):
async with _connect_chan(*addr):
ponged_addrs.append(addr)
# make a temporary connection to see if an arbiter exists,
# if one can't be made quickly we assume none exists.
arbiter_found = False
# TODO: this connect-and-bail forces us to have to carefully
# rewrap TCP 104-connection-reset errors as EOF so as to avoid
# propagating cancel-causing errors to the channel-msg loop
# machinery. Likely it would be better to eventually have
# a "discovery" protocol with basic handshake instead.
with trio.move_on_after(1):
async with _connect_chan(host, port):
arbiter_found = True
except OSError:
# TODO: make this a "discovery" log level?
logger.info(
f'No actor registry found @ {addr}\n'
)
logger.warning(f"No actor registry found @ {host}:{port}")
async with trio.open_nursery() as tn:
for addr in registry_addrs:
tn.start_soon(
ping_tpt_socket,
tuple(addr), # TODO: just drop this requirement?
)
trans_bind_addrs: list[tuple[str, int]] = []
# Create a new local root-actor instance which IS NOT THE
# REGISTRAR
if ponged_addrs:
if ensure_registry:
raise RuntimeError(
f'Failed to open `{name}`@{ponged_addrs}: '
'registry socket(s) already bound'
)
# create a local actor and start up its main routine/task
if arbiter_found:
# we were able to connect to an arbiter
logger.info(
f'Registry(s) seem(s) to exist @ {ponged_addrs}'
)
logger.info(f"Arbiter seems to exist @ {host}:{port}")
actor = Actor(
name=name or 'anonymous',
registry_addrs=ponged_addrs,
name or 'anonymous',
arbiter_addr=registry_addr,
loglevel=loglevel,
enable_modules=enable_modules,
)
# DO NOT use the registry_addrs as the transport server
# addrs for this new non-registar, root-actor.
for host, port in ponged_addrs:
# NOTE: zero triggers dynamic OS port allocation
trans_bind_addrs.append((host, 0))
host, port = (host, 0)
# Start this local actor as the "registrar", aka a regular
# actor who manages the local registry of "mailboxes" of
# other process-tree-local sub-actors.
else:
# start this local actor as the arbiter (aka a regular actor who
# manages the local registry of "mailboxes")
# NOTE that if the current actor IS THE REGISTAR, the
# following init steps are taken:
# - the transport layer server is bound to each (host, port)
# pair defined in provided registry_addrs, or the default.
trans_bind_addrs = registry_addrs
# - it is normally desirable for any registrar to stay up
# indefinitely until either all registered (child/sub)
# actors are terminated (via SC supervision) or,
# a re-election process has taken place.
# NOTE: all of ^ which is not implemented yet - see:
# https://github.com/goodboy/tractor/issues/216
# https://github.com/goodboy/tractor/pull/348
# https://github.com/goodboy/tractor/issues/296
# Note that if the current actor is the arbiter it is desirable
# for it to stay up indefinitely until a re-election process has
# taken place - which is not implemented yet, FYI.
actor = Arbiter(
name or 'registrar',
registry_addrs=registry_addrs,
name or 'arbiter',
arbiter_addr=registry_addr,
loglevel=loglevel,
enable_modules=enable_modules,
)
# Start up main task set via core actor-runtime nurseries.
try:
# assign process-local actor
_state._current_actor = actor
# start local channel-server and fake the portal API
# NOTE: this won't block since we provide the nursery
ml_addrs_str: str = '\n'.join(
f'@{addr}' for addr in trans_bind_addrs
)
logger.info(
f'Starting local {actor.uid} on the following transport addrs:\n'
f'{ml_addrs_str}'
)
logger.info(f"Starting local {actor} @ {host}:{port}")
# start the actor runtime in a new task
async with trio.open_nursery() as nursery:
# ``_runtime.async_main()`` creates an internal nursery
# and blocks here until any underlying actor(-process)
# tree has terminated thereby conducting so called
# "end-to-end" structured concurrency throughout an
# entire hierarchical python sub-process set; all
# "actor runtime" primitives are SC-compat and thus all
# transitively spawned actors/processes must be as
# well.
# ``_runtime.async_main()`` creates an internal nursery and
# thus blocks here until the entire underlying actor tree has
# terminated thereby conducting structured concurrency.
await nursery.start(
partial(
async_main,
actor,
accept_addrs=trans_bind_addrs,
accept_addr=(host, port),
parent_addr=None
)
)
try:
yield actor
except (
Exception,
BaseExceptionGroup,
) as err:
# XXX NOTE XXX see equiv note inside
# `._runtime.Actor._stream_handler()` where in the
# non-root or root-that-opened-this-manually case we
# wait for the local actor-nursery to exit before
# exiting the transport channel handler.
entered: bool = await _debug._maybe_enter_pm(
err,
api_frame=inspect.currentframe(),
)
if (
not entered
and
not is_multi_cancelled(err)
):
logger.exception('Root actor crashed\n')
# ALWAYS re-raise any error bubbled up from the
# runtime!
entered = await _debug._maybe_enter_pm(err)
if not entered and not is_multi_cancelled(err):
logger.exception("Root actor crashed:")
# always re-raise
raise
finally:
@ -401,29 +253,18 @@ async def open_root_actor(
# for an in nurseries:
# tempn.start_soon(an.exited.wait)
logger.info(
'Closing down root actor'
)
await actor.cancel(None) # self cancel
logger.cancel("Shutting down root actor")
await actor.cancel()
finally:
_state._current_actor = None
_state._last_actor_terminated = actor
# restore built-in `breakpoint()` hook state
if (
debug_mode
and
maybe_enable_greenback
):
if builtin_bp_handler is not None:
# restore breakpoint hook state
sys.breakpointhook = builtin_bp_handler
if orig_bp_path is not None:
os.environ['PYTHONBREAKPOINT'] = orig_bp_path
else:
# clear env back to having no entry
os.environ.pop('PYTHONBREAKPOINT', None)
os.environ.pop('PYTHONBREAKPOINT')
logger.runtime("Root actor terminated")
@ -433,7 +274,10 @@ def run_daemon(
# runtime kwargs
name: str | None = 'root',
registry_addrs: list[tuple[str, int]] = _default_lo_addrs,
registry_addr: tuple[str, int] = (
_default_arbiter_host,
_default_arbiter_port,
),
start_method: str | None = None,
debug_mode: bool = False,
@ -457,7 +301,7 @@ def run_daemon(
async def _main():
async with open_root_actor(
registry_addrs=registry_addrs,
registry_addr=registry_addr,
name=name,
start_method=start_method,
debug_mode=debug_mode,

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,833 +0,0 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
SC friendly shared memory management geared at real-time
processing.
Support for ``numpy`` compatible array-buffers is provided but is
considered optional within the context of this runtime-library.
"""
from __future__ import annotations
from sys import byteorder
import time
from typing import Optional
from multiprocessing import shared_memory as shm
from multiprocessing.shared_memory import (
SharedMemory,
ShareableList,
)
from msgspec import Struct
import tractor
from .log import get_logger
_USE_POSIX = getattr(shm, '_USE_POSIX', False)
if _USE_POSIX:
from _posixshmem import shm_unlink
try:
import numpy as np
from numpy.lib import recfunctions as rfn
# import nptyping
except ImportError:
pass
log = get_logger(__name__)
def disable_mantracker():
'''
Disable all ``multiprocessing`` "resource tracking" machinery since
it's an absolute multi-threaded mess of non-SC madness.
'''
from multiprocessing import resource_tracker as mantracker
# Tell the "resource tracker" thing to fuck off.
class ManTracker(mantracker.ResourceTracker):
def register(self, name, rtype):
pass
def unregister(self, name, rtype):
pass
def ensure_running(self):
pass
# "know your land and know your prey"
# https://www.dailymotion.com/video/x6ozzco
mantracker._resource_tracker = ManTracker()
mantracker.register = mantracker._resource_tracker.register
mantracker.ensure_running = mantracker._resource_tracker.ensure_running
mantracker.unregister = mantracker._resource_tracker.unregister
mantracker.getfd = mantracker._resource_tracker.getfd
disable_mantracker()
class SharedInt:
'''
Wrapper around a single entry shared memory array which
holds an ``int`` value used as an index counter.
'''
def __init__(
self,
shm: SharedMemory,
) -> None:
self._shm = shm
@property
def value(self) -> int:
return int.from_bytes(self._shm.buf, byteorder)
@value.setter
def value(self, value) -> None:
self._shm.buf[:] = value.to_bytes(self._shm.size, byteorder)
def destroy(self) -> None:
if _USE_POSIX:
# We manually unlink to bypass all the "resource tracker"
# nonsense meant for non-SC systems.
name = self._shm.name
try:
shm_unlink(name)
except FileNotFoundError:
# might be a teardown race here?
log.warning(f'Shm for {name} already unlinked?')
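# XXX a minimal sketch of the index-counter round-trip; the segment
# below is allocated purely for illustration:
def _example_shared_int() -> None:
    seg = SharedMemory(create=True, size=4)
    counter = SharedInt(shm=seg)
    counter.value = 42
    assert counter.value == 42
    counter.destroy()  # manual unlink (POSIX), bypassing the resource tracker
    seg.close()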
class NDToken(Struct, frozen=True):
'''
Internal representation of a shared memory ``numpy`` array "token"
which can be used to key and load a system (OS) wide shm entry
and correctly read the array by type signature.
This type is msg safe.
'''
shm_name: str # this serves as a "key" value
shm_first_index_name: str
shm_last_index_name: str
dtype_descr: tuple
size: int # in struct-array index / row terms
# TODO: use nptyping here on dtypes
@property
def dtype(self) -> list[tuple[str, str, tuple[int, ...]]]:
return np.dtype(
list(
map(tuple, self.dtype_descr)
)
).descr
def as_msg(self):
return self.to_dict()
@classmethod
def from_msg(cls, msg: dict) -> NDToken:
if isinstance(msg, NDToken):
return msg
# TODO: native struct decoding
# return _token_dec.decode(msg)
msg['dtype_descr'] = tuple(map(tuple, msg['dtype_descr']))
return NDToken(**msg)
# _token_dec = msgspec.msgpack.Decoder(NDToken)
# TODO: this api?
# _known_tokens = tractor.ActorVar('_shm_tokens', {})
# _known_tokens = tractor.ContextStack('_known_tokens', )
# _known_tokens = trio.RunVar('shms', {})
# TODO: this should maybe be provided via
# a `.trionics.maybe_open_context()` wrapper factory?
# process-local store of keys to tokens
_known_tokens: dict[str, NDToken] = {}
def get_shm_token(key: str) -> NDToken | None:
'''
Convenience func to check if a token
for the provided key is known by this process.
Returns either the ``numpy`` token or a string for a shared list.
'''
return _known_tokens.get(key)
def _make_token(
key: str,
size: int,
dtype: np.dtype,
) -> NDToken:
'''
Create a serializable token that can be used
to access a shared array.
'''
return NDToken(
shm_name=key,
shm_first_index_name=key + "_first",
shm_last_index_name=key + "_last",
dtype_descr=tuple(np.dtype(dtype).descr),
size=size,
)
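# XXX a small sketch of the token fields generated above; assumes
# `numpy` is installed and the key name is hypothetical:
def _example_make_token() -> None:
    token = _make_token(key='ohlc', size=1024, dtype=np.float64)
    assert token.shm_name == 'ohlc'
    assert token.shm_first_index_name == 'ohlc_first'
    assert token.shm_last_index_name == 'ohlc_last'
    assert token.size == 1024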
class ShmArray:
'''
A shared memory ``numpy.ndarray`` API.
An underlying shared memory buffer is allocated based on
a user specified ``numpy.ndarray``. This fixed size array
can be read and written to by pushing data onto either the "front"
or the "back" of a set index range. The indexes for the "first" and
"last" entries are themselves stored in shared memory (accessed via
``SharedInt`` interfaces) such that multiple processes can
interact with the same array using a synchronized index.
'''
def __init__(
self,
shmarr: np.ndarray,
first: SharedInt,
last: SharedInt,
shm: SharedMemory,
# readonly: bool = True,
) -> None:
self._array = shmarr
# indexes for first and last indices corresponding
# to fille data
self._first = first
self._last = last
self._len = len(shmarr)
self._shm = shm
self._post_init: bool = False
# pushing data does not write the index (aka primary key)
self._write_fields: list[str] | None = None
dtype = shmarr.dtype
if dtype.fields:
self._write_fields = list(shmarr.dtype.fields.keys())[1:]
# TODO: ringbuf api?
@property
def _token(self) -> NDToken:
return NDToken(
shm_name=self._shm.name,
shm_first_index_name=self._first._shm.name,
shm_last_index_name=self._last._shm.name,
dtype_descr=tuple(self._array.dtype.descr),
size=self._len,
)
@property
def token(self) -> dict:
"""Shared memory token that can be serialized and used by
another process to attach to this array.
"""
return self._token.as_msg()
@property
def index(self) -> int:
return self._last.value % self._len
@property
def array(self) -> np.ndarray:
'''
Return an up-to-date ``np.ndarray`` view of the
so-far-written data to the underlying shm buffer.
'''
a = self._array[self._first.value:self._last.value]
# first, last = self._first.value, self._last.value
# a = self._array[first:last]
# TODO: eventually comment this once we've not seen it in the
# wild in a long time..
# XXX: race where first/last indexes cause a reader
# to load an empty array..
if len(a) == 0 and self._post_init:
raise RuntimeError('Empty array race condition hit!?')
# breakpoint()
return a
def ustruct(
self,
fields: Optional[list[str]] = None,
# type that all field values will be cast to
# in the returned view.
common_dtype: np.dtype = float,
) -> np.ndarray:
array = self._array
if fields:
selection = array[fields]
# fcount = len(fields)
else:
selection = array
# fcount = len(array.dtype.fields)
# XXX: manual ``.view()`` attempt that also doesn't work.
# uview = selection.view(
# dtype='<f16',
# ).reshape(-1, 4, order='A')
# assert len(selection) == len(uview)
u = rfn.structured_to_unstructured(
selection,
# dtype=float,
copy=True,
)
# unstruct = np.ndarray(u.shape, dtype=a.dtype, buffer=shm.buf)
# array[:] = a[:]
return u
# return ShmArray(
# shmarr=u,
# first=self._first,
# last=self._last,
# shm=self._shm
# )
def last(
self,
length: int = 1,
) -> np.ndarray:
'''
Return the last ``length``'s worth of ("row") entries from the
array.
'''
return self.array[-length:]
def push(
self,
data: np.ndarray,
field_map: Optional[dict[str, str]] = None,
prepend: bool = False,
update_first: bool = True,
start: int | None = None,
) -> int:
'''
Ring buffer like "push" to append data
into the buffer and return updated "last" index.
NB: no actual ring logic yet to give a "loop around" on overflow
condition, lel.
'''
length = len(data)
if prepend:
index = (start or self._first.value) - length
if index < 0:
raise ValueError(
f'Array size of {self._len} was overrun during prepend.\n'
f'You have passed {abs(index)} too many datums.'
)
else:
index = start if start is not None else self._last.value
end = index + length
if field_map:
src_names, dst_names = zip(*field_map.items())
else:
dst_names = src_names = self._write_fields
try:
self._array[
list(dst_names)
][index:end] = data[list(src_names)][:]
# NOTE: there was a race here between updating
# the first and last indices and when the next reader
# tries to access ``.array`` (which due to the index
# overlap will be empty). Pretty sure we've fixed it now
# but leaving this here as a reminder.
if (
prepend
and update_first
and length
):
assert index < self._first.value
if (
index < self._first.value
and update_first
):
assert prepend, 'prepend=True not passed but index decreased?'
self._first.value = index
elif not prepend:
self._last.value = end
self._post_init = True
return end
except ValueError as err:
if field_map:
raise
# should raise if diff detected
self.diff_err_fields(data)
raise err
def diff_err_fields(
self,
data: np.ndarray,
) -> None:
# reraise with any field discrepancy
our_fields, their_fields = (
set(self._array.dtype.fields),
set(data.dtype.fields),
)
only_in_ours = our_fields - their_fields
only_in_theirs = their_fields - our_fields
if only_in_ours:
raise TypeError(
f"Input array is missing field(s): {only_in_ours}"
)
elif only_in_theirs:
raise TypeError(
f"Input array has unknown field(s): {only_in_theirs}"
)
# TODO: support "silent" prepends that don't update ._first.value?
def prepend(
self,
data: np.ndarray,
) -> int:
end = self.push(data, prepend=True)
assert end
def close(self) -> None:
self._first._shm.close()
self._last._shm.close()
self._shm.close()
def destroy(self) -> None:
if _USE_POSIX:
# We manually unlink to bypass all the "resource tracker"
# nonsense meant for non-SC systems.
shm_unlink(self._shm.name)
self._first.destroy()
self._last.destroy()
def flush(self) -> None:
# TODO: flush to storage backend like markestore?
...
def open_shm_ndarray(
size: int,
key: str | None = None,
dtype: np.dtype | None = None,
append_start_index: int | None = None,
readonly: bool = False,
) -> ShmArray:
'''
Open a shared memory ``numpy`` array using the standard library.
This call unlinks (aka permanently destroys) the buffer on teardown
and thus should be used from the parent-most accessor (process).
'''
# create new shared mem segment for which we
# have write permission
a = np.zeros(size, dtype=dtype)
a['index'] = np.arange(len(a))
shm = SharedMemory(
name=key,
create=True,
size=a.nbytes
)
array = np.ndarray(
a.shape,
dtype=a.dtype,
buffer=shm.buf
)
array[:] = a[:]
array.setflags(write=int(not readonly))
token = _make_token(
key=key,
size=size,
dtype=dtype,
)
# create single entry arrays for storing the first and last indices
first = SharedInt(
shm=SharedMemory(
name=token.shm_first_index_name,
create=True,
size=4, # std int
)
)
last = SharedInt(
shm=SharedMemory(
name=token.shm_last_index_name,
create=True,
size=4, # std int
)
)
# Start the "real-time" append-updated (or "pushed-to") section
# after some start index: ``append_start_index``. This allows appending
# from a start point in the array which isn't the 0 index and looks
# something like,
# -------------------------
# | | i
# _________________________
# <-------------> <------->
# history real-time
#
# Once fully "prepended", the history section will leave the
# ``ShmArray._start.value: int = 0`` and the yet-to-be written
# real-time section will start at ``ShmArray.index: int``.
# this sets the index to nearly 2/3rds into the length of
# the buffer leaving at least a "days worth of second samples"
# for the real-time section.
if append_start_index is None:
append_start_index = round(size * 0.616)
last.value = first.value = append_start_index
shmarr = ShmArray(
array,
first,
last,
shm,
)
assert shmarr._token == token
_known_tokens[key] = shmarr.token
# "unlink" created shm on process teardown by
# pushing teardown calls onto actor context stack
stack = tractor.current_actor().lifetime_stack
stack.callback(shmarr.close)
stack.callback(shmarr.destroy)
return shmarr
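# A minimal allocator-side usage sketch (illustrative key/field/func names);
# assumes a `tractor` runtime is active so the teardown callbacks registered
# above can fire, and that the dtype includes an 'index' field which
# `open_shm_ndarray()` seeds on allocation.
def _example_alloc_and_push() -> ShmArray:
    dtype = np.dtype([('index', 'i8'), ('value', 'f8')])
    shma: ShmArray = open_shm_ndarray(
        key='demo_buffer',
        size=1_024,
        dtype=dtype,
        append_start_index=0,
    )
    # append a block of rows; `.push()` returns the updated "last" index
    rows = np.zeros(16, dtype=dtype)
    rows['value'] = np.arange(16)
    last: int = shma.push(rows)
    assert last == 16
    return shma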
def attach_shm_ndarray(
token: tuple[str, str, tuple[str, str]],
readonly: bool = True,
) -> ShmArray:
'''
Attach to an existing shared memory array previously
created by another process using ``open_shm_ndarray()``.
No new shared mem is allocated but wrapper types for read/write
access are constructed.
'''
token = NDToken.from_msg(token)
key = token.shm_name
if key in _known_tokens:
assert NDToken.from_msg(_known_tokens[key]) == token, "WTF"
# XXX: ugh, looks like due to the ``shm_open()`` C api we can't
# actually place files in a subdir, see discussion here:
# https://stackoverflow.com/a/11103289
# attach to array buffer and view as per dtype
_err: Optional[Exception] = None
for _ in range(3):
try:
shm = SharedMemory(
name=key,
create=False,
)
break
except OSError as oserr:
_err = oserr
time.sleep(0.1)
else:
if _err:
raise _err
shmarr = np.ndarray(
(token.size,),
dtype=token.dtype,
buffer=shm.buf
)
shmarr.setflags(write=int(not readonly))
first = SharedInt(
shm=SharedMemory(
name=token.shm_first_index_name,
create=False,
size=4, # std int
),
)
last = SharedInt(
shm=SharedMemory(
name=token.shm_last_index_name,
create=False,
size=4, # std int
),
)
# make sure we can read
first.value
sha = ShmArray(
shmarr,
first,
last,
shm,
)
# read test
sha.array
# Stash key -> token knowledge for future queries
# via `maybe_open_shm_ndarray()` but only after we know
# we can attach.
if key not in _known_tokens:
_known_tokens[key] = token
# "close" attached shm on actor teardown
tractor.current_actor().lifetime_stack.callback(sha.close)
return sha
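# A minimal attach-side usage sketch: given the allocator's serialized token
# (e.g. an `NDToken` msg received over IPC as a `dict`), map in the same
# buffer read-only; the func/param names here are illustrative.
def _example_attach(token_msg: dict) -> np.ndarray:
    shma: ShmArray = attach_shm_ndarray(
        token=token_msg,
        readonly=True,
    )
    # view of the most recent rows the writer has pushed (may be empty)
    return shma.last(10)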
def maybe_open_shm_ndarray(
key: str, # unique identifier for segment
size: int,
dtype: np.dtype | None = None,
append_start_index: int = 0,
readonly: bool = True,
) -> tuple[ShmArray, bool]:
'''
Attempt to attach to a shared memory block using a "key" lookup
to registered blocks in the users overall "system" registry
(presumes you don't have the block's explicit token).
This function is meant to solve the problem of discovering whether
a shared array token has been allocated or discovered by the actor
running in **this** process. Systems where multiple actors may seek
to access a common block can use this function to attempt to acquire
a token as discovered by the actors who have previously stored
a "key" -> ``NDToken`` map in an actor local (aka python global)
variable.
If you know the explicit ``NDToken`` for your memory segment instead
use ``attach_shm_ndarray()``.
'''
try:
# see if we already know this key
token = _known_tokens[key]
return (
attach_shm_ndarray(
token=token,
readonly=readonly,
),
False, # not newly opened
)
except KeyError:
log.warning(f"Could not find {key} in shms cache")
if dtype:
token = _make_token(
key,
size=size,
dtype=dtype,
)
else:
try:
return (
attach_shm_ndarray(
token=token,
readonly=readonly,
),
False,
)
except FileNotFoundError:
log.warning(f"Could not attach to shm with token {token}")
# This actor does not know about memory
# associated with the provided "key".
# Attempt to open a block and expect
# to fail if a block has been allocated
# on the OS by someone else.
return (
open_shm_ndarray(
key=key,
size=size,
dtype=dtype,
append_start_index=append_start_index,
readonly=readonly,
),
True,
)
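# A key-based discovery sketch (illustrative names): attach if this actor
# already knows a token for `key`, otherwise allocate a fresh segment and
# become the writer; the returned bool reports which case occurred.
def _example_maybe_open() -> tuple[ShmArray, bool]:
    shma, newly_opened = maybe_open_shm_ndarray(
        key='demo_buffer',
        size=1_024,
        dtype=np.dtype([('index', 'i8'), ('value', 'f8')]),
        readonly=False,
    )
    # only the (newly) allocating task should `.push()` new rows
    return shma, newly_opened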
class ShmList(ShareableList):
'''
Carbon copy of ``.shared_memory.ShareableList`` with a few
enhancements:
- readonly mode via instance var flag `._readonly: bool`
- ``.__getitem__()`` accepts ``slice`` inputs
- exposes the underlying buffer "name" as a ``.key: str``
'''
def __init__(
self,
sequence: list | None = None,
*,
name: str | None = None,
readonly: bool = True
) -> None:
self._readonly = readonly
self._key = name
return super().__init__(
sequence=sequence,
name=name,
)
@property
def key(self) -> str:
return self._key
@property
def readonly(self) -> bool:
return self._readonly
def __setitem__(
self,
position,
value,
) -> None:
# mimic the ``numpy`` error
if self._readonly:
raise ValueError('assignment destination is read-only')
return super().__setitem__(position, value)
def __getitem__(
self,
indexish,
) -> list:
# NOTE: this is a non-writeable view (copy?) of the buffer
# in a new list instance.
if isinstance(indexish, slice):
return list(self)[indexish]
return super().__getitem__(indexish)
# TODO: should we offer a `.array` and `.push()` equivalent
# to the `ShmArray`?
# currently we have the following limitations:
# - can't write slices of input using traditional slice-assign
# syntax due to the ``ShareableList.__setitem__()`` implementation.
# - ``list(shmlist)`` returns a non-mutable copy instead of
# a writeable view which would be handier numpy-style ops.
def open_shm_list(
key: str,
sequence: list | None = None,
size: int = int(2 ** 10),
dtype: float | int | bool | str | bytes | None = float,
readonly: bool = True,
) -> ShmList:
if sequence is None:
default = {
float: 0.,
int: 0,
bool: True,
str: 'doggy',
None: None,
}[dtype]
sequence = [default] * size
shml = ShmList(
sequence=sequence,
name=key,
readonly=readonly,
)
# "close" attached shm on actor teardown
try:
actor = tractor.current_actor()
actor.lifetime_stack.callback(shml.shm.close)
actor.lifetime_stack.callback(shml.shm.unlink)
except RuntimeError:
log.warning('tractor runtime not active, skipping teardown steps')
return shml
def attach_shm_list(
key: str,
readonly: bool = False,
) -> ShmList:
return ShmList(
name=key,
readonly=readonly,
)
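# A minimal `ShmList` round-trip sketch (illustrative key/values): one task
# opens by key with write access, a peer attaches read-only to the same key.
def _example_shm_list() -> None:
    shml = open_shm_list(
        key='demo_list',
        size=8,
        dtype=int,
        readonly=False,
    )
    shml[0] = 42  # allowed since `readonly=False`
    view = attach_shm_list(key='demo_list', readonly=True)
    assert view[0] == 42
    assert view[:2] == [42, 0]  # slice reads return a plain (copied) list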


@ -19,7 +19,6 @@ Machinery for actor process spawning using multiple backends.
"""
from __future__ import annotations
import multiprocessing as mp
import sys
import platform
from typing import (
@ -31,32 +30,30 @@ from typing import (
TYPE_CHECKING,
)
from exceptiongroup import BaseExceptionGroup
import trio
from trio import TaskStatus
from trio_typing import TaskStatus
from tractor.devx import (
from ._debug import (
maybe_wait_for_debugger,
acquire_debug_lock,
)
from tractor._state import (
from ._state import (
current_actor,
is_main_process,
is_root_process,
debug_mode,
_runtime_vars,
)
from tractor.log import get_logger
from tractor._portal import Portal
from tractor._runtime import Actor
from tractor._entry import _mp_main
from tractor._exceptions import ActorFailure
from tractor.msg.types import (
SpawnSpec,
)
from .log import get_logger
from ._portal import Portal
from ._runtime import Actor
from ._entry import _mp_main
from ._exceptions import ActorFailure
if TYPE_CHECKING:
from ._supervise import ActorNursery
import multiprocessing as mp
ProcessType = TypeVar('ProcessType', mp.Process, trio.Process)
log = get_logger('tractor')
@ -73,6 +70,7 @@ _spawn_method: SpawnMethodKey = 'trio'
if platform.system() == 'Windows':
import multiprocessing as mp
_ctx = mp.get_context("spawn")
async def proc_waiter(proc: mp.Process) -> None:
@ -143,13 +141,11 @@ async def exhaust_portal(
'''
__tracebackhide__ = True
try:
log.debug(
f'Waiting on final result from {actor.uid}'
)
log.debug(f"Waiting on final result from {actor.uid}")
# XXX: streams should never be reaped here since they should
# always be established and shutdown using a context manager api
final: Any = await portal.wait_for_result()
final = await portal.result()
except (
Exception,
@ -157,23 +153,13 @@ async def exhaust_portal(
) as err:
# we reraise in the parent task via a ``BaseExceptionGroup``
return err
except trio.Cancelled as err:
# lol, of course we need this too ;P
# TODO: merge with above?
log.warning(
'Cancelled portal result waiter task:\n'
f'uid: {portal.channel.uid}\n'
f'error: {err}\n'
)
log.warning(f"Cancelled result waiter for {portal.actor.uid}")
return err
else:
log.debug(
f'Returning final result from portal:\n'
f'uid: {portal.channel.uid}\n'
f'result: {final}\n'
)
log.debug(f"Returning final result: {final}")
return final
@ -185,75 +171,41 @@ async def cancel_on_completion(
) -> None:
'''
Cancel actor gracefully once its "main" portal's
Cancel actor gracefully once its "main" portal's
result arrives.
Should only be called for actors spawned via the
`Portal.run_in_actor()` API.
=> and really this API will be deprecated and should be
re-implemented as a `.hilevel.one_shot_task_nursery()`..)
Should only be called for actors spawned with `run_in_actor()`.
'''
# if this call errors we store the exception for later
# in ``errors`` which will be reraised inside
# an exception group and we still send out a cancel request
result: Any|Exception = await exhaust_portal(
portal,
actor,
)
result = await exhaust_portal(portal, actor)
if isinstance(result, Exception):
errors[actor.uid]: Exception = result
log.cancel(
'Cancelling subactor runtime due to error:\n\n'
f'Portal.cancel_actor() => {portal.channel.uid}\n\n'
f'error: {result}\n'
errors[actor.uid] = result
log.warning(
f"Cancelling {portal.channel.uid} after error {result}"
)
else:
log.runtime(
'Cancelling subactor gracefully:\n\n'
f'Portal.cancel_actor() => {portal.channel.uid}\n\n'
f'result: {result}\n'
)
f"Cancelling {portal.channel.uid} gracefully "
f"after result {result}")
# cancel the process now that we have a final result
await portal.cancel_actor()
async def hard_kill(
async def do_hard_kill(
proc: trio.Process,
terminate_after: int = 1.6,
# NOTE: for mucking with `.pause()`-ing inside the runtime
# whilst also hacking on it XD
# terminate_after: int = 99999,
terminate_after: int = 3,
) -> None:
'''
Un-gracefully terminate an OS level `trio.Process` after timeout.
Used in 2 main cases:
- "unknown remote runtime state": a hanging/stalled actor that
isn't responding after sending a (graceful) runtime cancel
request via an IPC msg.
- "cancelled during spawn": a process who's actor runtime was
cancelled before full startup completed (such that
cancel-request-handling machinery was never fully
initialized) and thus a "cancel request msg" is never going
to be handled.
'''
log.cancel(
'Terminating sub-proc\n'
f'>x)\n'
f' |_{proc}\n'
)
# NOTE: this timeout used to do nothing since we were shielding
# the ``.wait()`` inside ``new_proc()`` which will pretty much
# never release until the process exits, now it acts as
# a hard-kill time ultimatum.
log.debug(f"Terminating {proc}")
with trio.move_on_after(terminate_after) as cs:
# NOTE: code below was copied verbatim from the now deprecated
@ -264,9 +216,6 @@ async def hard_kill(
# and wait for it to exit. If cancelled, kills the process and
# waits for it to finish exiting before propagating the
# cancellation.
#
# This code was originally triggred by ``proc.__aexit__()``
# but now must be called manually.
with trio.CancelScope(shield=True):
if proc.stdin is not None:
await proc.stdin.aclose()
@ -282,25 +231,16 @@ async def hard_kill(
with trio.CancelScope(shield=True):
await proc.wait()
# XXX NOTE XXX: zombie squad dispatch:
# (should ideally never, but) If we do get here it means
# graceful termination of a process failed and we need to
# resort to OS level signalling to interrupt and cancel the
# (presumably stalled or hung) actor. Since we never allow
# zombies (as a feature) we ask the OS to send in the
# removal squad as the last resort.
if cs.cancelled_caught:
# TODO: toss in the skynet-logo face as ascii art?
log.critical(
# 'Well, the #ZOMBIE_LORD_IS_HERE# to collect\n'
'#T-800 deployed to collect zombie B0\n'
f'>x)\n'
f' |_{proc}\n'
)
# XXX: should pretty much never get here unless we have
# to move the bits from ``proc.__aexit__()`` out and
# into here.
log.critical(f"#ZOMBIE_LORD_IS_HERE: {proc}")
proc.kill()
async def soft_kill(
async def soft_wait(
proc: ProcessType,
wait_func: Callable[
[ProcessType],
@ -309,40 +249,15 @@ async def soft_kill(
portal: Portal,
) -> None:
'''
Wait for proc termination but **don't yet** teardown
std-streams since it will clobber any ongoing pdb REPL
session.
This is our "soft"/graceful, and thus itself also cancellable,
join/reap on an actor-runtime-in-process shutdown; it is
**not** the same as a "hard kill" via an OS signal (for that
see `.hard_kill()`).
'''
uid: tuple[str, str] = portal.channel.uid
# Wait for proc termination but **don't yet** call
# ``trio.Process.__aexit__()`` (it tears down stdio
# which will kill any waiting remote pdb trace).
# This is a "soft" (cancellable) join/reap.
uid = portal.channel.uid
try:
log.cancel(
'Soft killing sub-actor via portal request\n'
f'c)> {portal.chan.uid}\n'
f' |_{proc}\n'
)
# wait on sub-proc to signal termination
log.cancel(f'Soft waiting on actor:\n{uid}')
await wait_func(proc)
except trio.Cancelled:
with trio.CancelScope(shield=True):
await maybe_wait_for_debugger(
child_in_debug=_runtime_vars.get(
'_debug_mode', False
),
header_msg=(
'Delaying `soft_kill()` subproc reaper while debugger locked..\n'
),
# TODO: need a diff value then default?
# poll_steps=9999999,
)
# if cancelled during a soft wait, cancel the child
# actor before entering the hard reap sequence
# below. This means we try to do a graceful teardown
@ -353,29 +268,22 @@ async def soft_kill(
async def cancel_on_proc_deth():
'''
"Cancel-the-cancel" request: if we detect that the
underlying sub-process exited prior to
a `Portal.cancel_actor()` call completing.
Cancel the actor cancel request if we detect that
the process terminated.
'''
await wait_func(proc)
n.cancel_scope.cancel()
# start a task to wait on the termination of the
# process by itself waiting on a (caller provided) wait
# function which should unblock when the target process
# has terminated.
n.start_soon(cancel_on_proc_deth)
# send the actor-runtime a cancel request.
await portal.cancel_actor()
if proc.poll() is None: # type: ignore
log.warning(
'Subactor still alive after cancel request?\n\n'
f'uid: {uid}\n'
f'|_{proc}\n'
'Actor still alive after cancel request:\n'
f'{uid}'
)
n.cancel_scope.cancel()
raise
@ -387,7 +295,7 @@ async def new_proc(
errors: dict[tuple[str, str], Exception],
# passed through to actor main
bind_addrs: list[tuple[str, int]],
bind_addr: tuple[str, int],
parent_addr: tuple[str, int],
_runtime_vars: dict[str, Any], # serialized and sent to _child
@ -399,7 +307,7 @@ async def new_proc(
) -> None:
# lookup backend spawning target
target: Callable = _methods[_spawn_method]
target = _methods[_spawn_method]
# mark the new actor with the global spawn method
subactor._spawn_method = _spawn_method
@ -409,7 +317,7 @@ async def new_proc(
actor_nursery,
subactor,
errors,
bind_addrs,
bind_addr,
parent_addr,
_runtime_vars, # run time vars
infect_asyncio=infect_asyncio,
@ -424,7 +332,7 @@ async def trio_proc(
errors: dict[tuple[str, str], Exception],
# passed through to actor main
bind_addrs: list[tuple[str, int]],
bind_addr: tuple[str, int],
parent_addr: tuple[str, int],
_runtime_vars: dict[str, Any], # serialized and sent to _child
*,
@ -470,18 +378,16 @@ async def trio_proc(
proc: trio.Process | None = None
try:
try:
proc: trio.Process = await trio.lowlevel.open_process(spawn_cmd)
log.runtime(
'Started new child\n'
f'|_{proc}\n'
)
# TODO: needs ``trio_typing`` patch?
proc = await trio.lowlevel.open_process(spawn_cmd)
log.runtime(f"Started {proc}")
# wait for actor to spawn and connect back to us
# channel should have handshake completed by the
# local actor by the time we get a ref to it
event, chan = await actor_nursery._actor.wait_for_peer(
subactor.uid
)
subactor.uid)
except trio.Cancelled:
cancelled_during_spawn = True
@ -510,20 +416,18 @@ async def trio_proc(
portal,
)
# send a "spawning specification" which configures the
# initial runtime state of the child.
await chan.send(
SpawnSpec(
_parent_main_data=subactor._parent_main_data,
enable_modules=subactor.enable_modules,
reg_addrs=subactor.reg_addrs,
bind_addrs=bind_addrs,
_runtime_vars=_runtime_vars,
)
)
# send additional init params
await chan.send({
"_parent_main_data": subactor._parent_main_data,
"enable_modules": subactor.enable_modules,
"_arb_addr": subactor._arb_addr,
"bind_host": bind_addr[0],
"bind_port": bind_addr[1],
"_runtime_vars": _runtime_vars,
})
# track subactor in current nursery
curr_actor: Actor = current_actor()
curr_actor = current_actor()
curr_actor._actoruid2nursery[subactor.uid] = actor_nursery
# resume caller at next checkpoint now that child is up
@ -545,7 +449,7 @@ async def trio_proc(
# This is a "soft" (cancellable) join/reap which
# will remote cancel the actor on a ``trio.Cancelled``
# condition.
await soft_kill(
await soft_wait(
proc,
trio.Process.wait,
portal
@ -553,11 +457,9 @@ async def trio_proc(
# cancel result waiter that may have been spawned in
# tandem if not done already
log.cancel(
'Cancelling portal result reaper task\n'
f'>c)\n'
f' |_{subactor.uid}\n'
)
log.warning(
"Cancelling existing result waiter task for "
f"{subactor.uid}")
nursery.cancel_scope.cancel()
finally:
@ -565,13 +467,9 @@ async def trio_proc(
# allowed! Do this **after** cancellation/teardown to avoid
# killing the process too early.
if proc:
log.cancel(
f'Hard reap sequence starting for subactor\n'
f'>x)\n'
f' |_{subactor}@{subactor.uid}\n'
)
log.cancel(f'Hard reap sequence starting for {subactor.uid}')
with trio.CancelScope(shield=True):
# don't clobber an ongoing pdb
if cancelled_during_spawn:
# Try again to avoid TTY clobbering.
@ -579,17 +477,7 @@ async def trio_proc(
with trio.move_on_after(0.5):
await proc.wait()
await maybe_wait_for_debugger(
child_in_debug=_runtime_vars.get(
'_debug_mode', False
),
header_msg=(
'Delaying subproc reaper while debugger locked..\n'
),
# TODO: need a diff value then default?
# poll_steps=9999999,
)
if is_root_process():
# TODO: solve the following issue where we need
# to do a similar wait like this but in an
# "intermediary" parent actor that itself isn't
@ -597,22 +485,14 @@ async def trio_proc(
# to hold off on relaying SIGINT until that child
# is complete.
# https://github.com/goodboy/tractor/issues/320
# -[ ] we need to handle non-root parent-actors specially
# by somehow determining if a child is in debug and then
# avoiding cancel/kill of said child by this
# (intermediary) parent until such a time as the root says
# the pdb lock is released and we are good to tear down
# (our children)..
#
# -[ ] so maybe something like this where we try to
# acquire the lock and get notified of who has it,
# check that uid against our known children?
# this_uid: tuple[str, str] = current_actor().uid
# await acquire_debug_lock(this_uid)
await maybe_wait_for_debugger(
child_in_debug=_runtime_vars.get(
'_debug_mode', False),
)
if proc.poll() is None:
log.cancel(f"Attempting to hard kill {proc}")
await hard_kill(proc)
await do_hard_kill(proc)
log.debug(f"Joined {proc}")
else:
@ -630,7 +510,7 @@ async def mp_proc(
subactor: Actor,
errors: dict[tuple[str, str], Exception],
# passed through to actor main
bind_addrs: list[tuple[str, int]],
bind_addr: tuple[str, int],
parent_addr: tuple[str, int],
_runtime_vars: dict[str, Any], # serialized and sent to _child
*,
@ -688,7 +568,7 @@ async def mp_proc(
target=_mp_main,
args=(
subactor,
bind_addrs,
bind_addr,
fs_info,
_spawn_method,
parent_addr,
@ -756,7 +636,7 @@ async def mp_proc(
# This is a "soft" (cancellable) join/reap which
# will remote cancel the actor on a ``trio.Cancelled``
# condition.
await soft_kill(
await soft_wait(
proc,
proc_waiter,
portal


@ -18,127 +18,46 @@
Per process state
"""
from __future__ import annotations
from contextvars import (
ContextVar,
)
from typing import (
Optional,
Any,
TYPE_CHECKING,
)
from trio.lowlevel import current_task
import trio
if TYPE_CHECKING:
from ._runtime import Actor
from ._context import Context
from ._exceptions import NoRuntime
_current_actor: Actor|None = None # type: ignore # noqa
_last_actor_terminated: Actor|None = None
# TODO: mk this a `msgspec.Struct`!
_current_actor: Optional['Actor'] = None # type: ignore # noqa
_runtime_vars: dict[str, Any] = {
'_debug_mode': False,
'_is_root': False,
'_root_mailbox': (None, None),
'_registry_addrs': [],
'_is_infected_aio': False,
# for `tractor.pause_from_sync()` & `breakpoint()` support
'use_greenback': False,
'_root_mailbox': (None, None)
}
def last_actor() -> Actor|None:
'''
Try to return last active `Actor` singleton
for this process.
For the case where the runtime already exited but someone is asking
about the "last" actor probably to get its `.uid: tuple`.
'''
return _last_actor_terminated
def current_actor(
err_on_no_runtime: bool = True,
) -> Actor:
'''
Get the process-local actor instance.
'''
if (
err_on_no_runtime
and
_current_actor is None
):
msg: str = 'No local actor has been initialized yet?\n'
from ._exceptions import NoRuntime
if last := last_actor():
msg += (
f'Apparently the last active actor was\n'
f'|_{last}\n'
f'|_{last.uid}\n'
)
# no actor runtime has (as of yet) ever been started for
# this process.
else:
msg += (
# 'No last actor found?\n'
'\nDid you forget to call one of,\n'
'- `tractor.open_root_actor()`\n'
'- `tractor.open_nursery()`\n'
)
raise NoRuntime(msg)
def current_actor(err_on_no_runtime: bool = True) -> 'Actor': # type: ignore # noqa
"""Get the process-local actor instance.
"""
if _current_actor is None and err_on_no_runtime:
raise NoRuntime("No local actor has been initialized yet")
return _current_actor
def is_main_process() -> bool:
'''
Bool determining if this actor is running in the top-most process.
'''
"""Bool determining if this actor is running in the top-most process.
"""
import multiprocessing as mp
return mp.current_process().name == 'MainProcess'
def debug_mode() -> bool:
'''
Bool determining if "debug mode" is on which enables
"""Bool determining if "debug mode" is on which enables
remote subactor pdb entry on crashes.
'''
"""
return bool(_runtime_vars['_debug_mode'])
def is_root_process() -> bool:
return _runtime_vars['_is_root']
_ctxvar_Context: ContextVar[Context] = ContextVar(
'ipc_context',
default=None,
)
def current_ipc_ctx(
error_on_not_set: bool = False,
) -> Context|None:
ctx: Context = _ctxvar_Context.get()
if (
not ctx
and error_on_not_set
):
from ._exceptions import InternalError
raise InternalError(
'No IPC context has been allocated for this task yet?\n'
f'|_{current_task()}\n'
)
return ctx

File diff suppressed because it is too large


@ -21,22 +21,22 @@
from contextlib import asynccontextmanager as acm
from functools import partial
import inspect
from pprint import pformat
from typing import TYPE_CHECKING
from typing import (
Optional,
TYPE_CHECKING,
)
import typing
import warnings
from exceptiongroup import BaseExceptionGroup
import trio
from .devx._debug import maybe_wait_for_debugger
from ._debug import maybe_wait_for_debugger
from ._state import current_actor, is_main_process
from .log import get_logger, get_loglevel
from ._runtime import Actor
from ._portal import Portal
from ._exceptions import (
is_multi_cancelled,
ContextCancelled,
)
from ._exceptions import is_multi_cancelled
from ._root import open_root_actor
from . import _state
from . import _spawn
@ -80,85 +80,54 @@ class ActorNursery:
'''
def __init__(
self,
# TODO: maybe def these as fields of a struct looking type?
actor: Actor,
ria_nursery: trio.Nursery,
da_nursery: trio.Nursery,
errors: dict[tuple[str, str], BaseException],
) -> None:
# self.supervisor = supervisor # TODO
self._actor: Actor = actor
# TODO: rename to `._tn` for our conventional "task-nursery"
self._ria_nursery = ria_nursery
self._da_nursery = da_nursery
self._children: dict[
tuple[str, str],
tuple[
Actor,
trio.Process | mp.Process,
Portal | None,
Optional[Portal],
]
] = {}
# portals spawned with ``run_in_actor()`` are
# cancelled when their "main" result arrives
self._cancel_after_result_on_exit: set = set()
self.cancelled: bool = False
self._join_procs = trio.Event()
self._at_least_one_child_in_debug: bool = False
self.errors = errors
self._scope_error: BaseException|None = None
self.exited = trio.Event()
# NOTE: when no explicit call is made to
# `.open_root_actor()` by application code,
# `.open_nursery()` will implicitly call it to start the
# actor-tree runtime. In this case we mark ourselves as
# such so that runtime components can be aware for logging
# and syncing purposes to any actor opened nurseries.
self._implicit_runtime_started: bool = False
# TODO: remove the `.run_in_actor()` API and thus this 2ndary
# nursery when that API get's moved outside this primitive!
self._ria_nursery = ria_nursery
# portals spawned with ``run_in_actor()`` are
# cancelled when their "main" result arrives
self._cancel_after_result_on_exit: set = set()
async def start_actor(
self,
name: str,
*,
bind_addrs: list[tuple[str, int]] = [_default_bind_addr],
bind_addr: tuple[str, int] = _default_bind_addr,
rpc_module_paths: list[str] | None = None,
enable_modules: list[str] | None = None,
loglevel: str | None = None, # set log level per subactor
debug_mode: bool|None = None,
infect_asyncio: bool = False,
# TODO: ideally we can rm this once we no longer have
# a `._ria_nursery` since the dependent APIs have been
# removed!
nursery: trio.Nursery | None = None,
debug_mode: Optional[bool] | None = None,
infect_asyncio: bool = False,
) -> Portal:
'''
Start a (daemon) actor: an process that has no designated
"main task" besides the runtime.
'''
__runtimeframe__: int = 1 # noqa
loglevel: str = (
loglevel
or self._actor.loglevel
or get_loglevel()
)
loglevel = loglevel or self._actor.loglevel or get_loglevel()
# configure and pass runtime state
_rtv = _state._runtime_vars.copy()
_rtv['_is_root'] = False
_rtv['_is_infected_aio'] = infect_asyncio
# allow setting debug policy per actor
if debug_mode is not None:
@ -181,16 +150,14 @@ class ActorNursery:
# modules allowed to invoked funcs from
enable_modules=enable_modules,
loglevel=loglevel,
# verbatim relay this actor's registrar addresses
registry_addrs=current_actor().reg_addrs,
arbiter_addr=current_actor()._arb_addr,
)
parent_addr = self._actor.accept_addr
assert parent_addr
# start a task to spawn a process
# blocks until process has been started and a portal setup
nursery: trio.Nursery = nursery or self._da_nursery
nursery = nursery or self._da_nursery
# XXX: the type ignore is actually due to a `mypy` bug
return await nursery.start( # type: ignore
@ -200,29 +167,21 @@ class ActorNursery:
self,
subactor,
self.errors,
bind_addrs,
bind_addr,
parent_addr,
_rtv, # run time vars
infect_asyncio=infect_asyncio,
)
)
# TODO: DEPRECATE THIS:
# -[ ] impl instead as a hilevel wrapper on
# top of a `@context` style invocation.
# |_ dynamic @context decoration on child side
# |_ implicit `Portal.open_context() as (ctx, first):`
# and `return first` on parent side.
# |_ mention how it's similar to `trio-parallel` API?
# -[ ] use @api_frame on the wrapper
async def run_in_actor(
self,
fn: typing.Callable,
*,
name: str | None = None,
bind_addrs: tuple[str, int] = [_default_bind_addr],
name: Optional[str] = None,
bind_addr: tuple[str, int] = _default_bind_addr,
rpc_module_paths: list[str] | None = None,
enable_modules: list[str] | None = None,
loglevel: str | None = None, # set log level per subactor
@ -231,28 +190,25 @@ class ActorNursery:
**kwargs, # explicit args to ``fn``
) -> Portal:
'''
Spawn a new actor, run a lone task, then terminate the actor and
"""Spawn a new actor, run a lone task, then terminate the actor and
return its result.
Actors spawned using this method are kept alive at nursery teardown
until the task spawned by executing ``fn`` completes at which point
the actor is terminated.
'''
__runtimeframe__: int = 1 # noqa
mod_path: str = fn.__module__
"""
mod_path = fn.__module__
if name is None:
# use the explicit function name if not provided
name = fn.__name__
portal: Portal = await self.start_actor(
portal = await self.start_actor(
name,
enable_modules=[mod_path] + (
enable_modules or rpc_module_paths or []
),
bind_addrs=bind_addrs,
bind_addr=bind_addr,
loglevel=loglevel,
# use the run_in_actor nursery
nursery=self._ria_nursery,
@ -276,42 +232,21 @@ class ActorNursery:
)
return portal
# @api_frame
async def cancel(
self,
hard_kill: bool = False,
async def cancel(self, hard_kill: bool = False) -> None:
"""Cancel this nursery by instructing each subactor to cancel
itself and wait for all subactors to terminate.
) -> None:
'''
Cancel this actor-nursery by instructing each subactor's
runtime to cancel and wait for all underlying sub-processes
to terminate.
If `hard_kill` is set then kill the processes directly using
the spawning-backend's API/OS-machinery without any attempt
at (graceful) `trio`-style cancellation using our
`Actor.cancel()`.
'''
__runtimeframe__: int = 1 # noqa
If ``hard_kill`` is set to ``True`` then kill the processes
directly without any far end graceful ``trio`` cancellation.
"""
self.cancelled = True
# TODO: impl a repr for spawn more compact
# then `._children`..
children: dict = self._children
child_count: int = len(children)
msg: str = f'Cancelling actor nursery with {child_count} children\n'
log.cancel(f"Cancelling nursery in {self._actor.uid}")
with trio.move_on_after(3) as cs:
async with trio.open_nursery() as tn:
subactor: Actor
proc: trio.Process
portal: Portal
for (
subactor,
proc,
portal,
) in children.values():
async with trio.open_nursery() as nursery:
for subactor, proc, portal in self._children.values():
# TODO: are we ever even going to use this or
# is the spawning backend responsible for such
@ -323,13 +258,12 @@ class ActorNursery:
if portal is None: # actor hasn't fully spawned yet
event = self._actor._peer_connected[subactor.uid]
log.warning(
f"{subactor.uid} never 't finished spawning?"
)
f"{subactor.uid} wasn't finished spawning?")
await event.wait()
# channel/portal should now be up
_, _, portal = children[subactor.uid]
_, _, portal = self._children[subactor.uid]
# XXX should be impossible to get here
# unless method was called from within
@ -346,24 +280,14 @@ class ActorNursery:
# spawn cancel tasks for each sub-actor
assert portal
if portal.channel.connected():
tn.start_soon(portal.cancel_actor)
nursery.start_soon(portal.cancel_actor)
log.cancel(msg)
# if we cancelled the cancel (we hung cancelling remote actors)
# then hard kill all sub-processes
if cs.cancelled_caught:
log.error(
f'Failed to cancel {self}?\n'
'Hard killing underlying subprocess tree!\n'
)
subactor: Actor
proc: trio.Process
portal: Portal
for (
subactor,
proc,
portal,
) in children.values():
f"Failed to cancel {self}\nHard killing process tree!")
for subactor, proc, portal in self._children.values():
log.warning(f"Hard killing process {proc}")
proc.terminate()
@ -374,15 +298,11 @@ class ActorNursery:
@acm
async def _open_and_supervise_one_cancels_all_nursery(
actor: Actor,
tb_hide: bool = False,
) -> typing.AsyncGenerator[ActorNursery, None]:
# normally don't need to show user by default
__tracebackhide__: bool = tb_hide
outer_err: BaseException|None = None
inner_err: BaseException|None = None
# TODO: yay or nay?
__tracebackhide__ = True
# the collection of errors retrieved from spawned sub-actors
errors: dict[tuple[str, str], BaseException] = {}
@ -392,7 +312,7 @@ async def _open_and_supervise_one_cancels_all_nursery(
# handling errors that are generated by the inner nursery in
# a supervisor strategy **before** blocking indefinitely to wait for
# actors spawned in "daemon mode" (aka started using
# `ActorNursery.start_actor()`).
# ``ActorNursery.start_actor()``).
# errors from this daemon actor nursery bubble up to caller
async with trio.open_nursery() as da_nursery:
@ -407,7 +327,7 @@ async def _open_and_supervise_one_cancels_all_nursery(
# the above "daemon actor" nursery will be notified.
async with trio.open_nursery() as ria_nursery:
an = ActorNursery(
anursery = ActorNursery(
actor,
ria_nursery,
da_nursery,
@ -416,19 +336,18 @@ async def _open_and_supervise_one_cancels_all_nursery(
try:
# spawning of actors happens in the caller's scope
# after we yield upwards
yield an
yield anursery
# When we didn't error in the caller's scope,
# signal all process-monitor-tasks to conduct
# the "hard join phase".
log.runtime(
'Waiting on subactors to complete:\n'
f'{pformat(an._children)}\n'
f"Waiting on subactors {anursery._children} "
"to complete"
)
an._join_procs.set()
anursery._join_procs.set()
except BaseException as _inner_err:
inner_err = _inner_err
except BaseException as inner_err:
errors[actor.uid] = inner_err
# If we error in the root but the debugger is
@ -438,60 +357,37 @@ async def _open_and_supervise_one_cancels_all_nursery(
# Instead try to wait for pdb to be released before
# tearing down.
await maybe_wait_for_debugger(
child_in_debug=an._at_least_one_child_in_debug
child_in_debug=anursery._at_least_one_child_in_debug
)
# if the caller's scope errored then we activate our
# one-cancels-all supervisor strategy (don't
# worry more are coming).
an._join_procs.set()
anursery._join_procs.set()
# XXX NOTE XXX: hypothetically an error could
# be raised and then a cancel signal shows up
# XXX: hypothetically an error could be
# raised and then a cancel signal shows up
# slightly after in which case the `else:`
# block here might not complete? For now,
# shield both.
with trio.CancelScope(shield=True):
etype: type = type(inner_err)
etype = type(inner_err)
if etype in (
trio.Cancelled,
KeyboardInterrupt,
KeyboardInterrupt
) or (
is_multi_cancelled(inner_err)
):
log.cancel(
f'Actor-nursery cancelled by {etype}\n\n'
f'{current_actor().uid}\n'
f' |_{an}\n\n'
# TODO: show tb str?
# f'{tb_str}'
)
elif etype in {
ContextCancelled,
}:
log.cancel(
'Actor-nursery caught remote cancellation\n\n'
f'{inner_err.tb_str}'
)
f"Nursery for {current_actor().uid} "
f"was cancelled with {etype}")
else:
log.exception(
'Nursery errored with:\n'
# TODO: same thing as in
# `._invoke()` to compute how to
# place this div-line in the
# middle of the above msg
# content..
# -[ ] prolly helper-func it too
# in our `.log` module..
# '------ - ------'
)
f"Nursery for {current_actor().uid} "
f"errored with")
# cancel all subactors
await an.cancel()
await anursery.cancel()
# ria_nursery scope end
@ -506,30 +402,24 @@ async def _open_and_supervise_one_cancels_all_nursery(
Exception,
BaseExceptionGroup,
trio.Cancelled
) as _outer_err:
outer_err = _outer_err
an._scope_error = outer_err or inner_err
) as err:
# XXX: yet another guard before allowing the cancel
# sequence in case a (single) child is in debug.
await maybe_wait_for_debugger(
child_in_debug=an._at_least_one_child_in_debug
child_in_debug=anursery._at_least_one_child_in_debug
)
# If actor-local error was raised while waiting on
# ".run_in_actor()" actors then we also want to cancel all
# remaining sub-actors (due to our lone strategy:
# one-cancels-all).
if an._children:
log.cancel(
'Actor-nursery cancelling due error type:\n'
f'{outer_err}\n'
)
log.cancel(f"Nursery cancelling due to {err}")
if anursery._children:
with trio.CancelScope(shield=True):
await an.cancel()
await anursery.cancel()
raise
finally:
# No errors were raised while awaiting ".run_in_actor()"
# actors but those actors may have returned remote errors as
@ -538,9 +428,9 @@ async def _open_and_supervise_one_cancels_all_nursery(
# collected in ``errors`` so cancel all actors, summarize
# all errors and re-raise.
if errors:
if an._children:
if anursery._children:
with trio.CancelScope(shield=True):
await an.cancel()
await anursery.cancel()
# use `BaseExceptionGroup` as needed
if len(errors) > 1:
@ -551,19 +441,11 @@ async def _open_and_supervise_one_cancels_all_nursery(
else:
raise list(errors.values())[0]
# show frame on any (likely) internal error
if (
not an.cancelled
and an._scope_error
):
__tracebackhide__: bool = False
# da_nursery scope end - nursery checkpoint
# final exit
@acm
# @api_frame
async def open_nursery(
**kwargs,
@ -583,21 +465,19 @@ async def open_nursery(
which cancellation scopes correspond to each spawned subactor set.
'''
__tracebackhide__: bool = True
implicit_runtime: bool = False
actor: Actor = current_actor(err_on_no_runtime=False)
an: ActorNursery|None = None
implicit_runtime = False
actor = current_actor(err_on_no_runtime=False)
try:
if (
actor is None
and is_main_process()
):
if actor is None and is_main_process():
# if we are the parent process start the
# actor runtime implicitly
log.info("Starting actor runtime!")
# mark us for teardown on exit
implicit_runtime: bool = True
implicit_runtime = True
async with open_root_actor(**kwargs) as actor:
assert actor is current_actor()
@ -605,54 +485,24 @@ async def open_nursery(
try:
async with _open_and_supervise_one_cancels_all_nursery(
actor
) as an:
# NOTE: mark this nursery as having
# implicitly started the root actor so
# that `._runtime` machinery can avoid
# certain teardown synchronization
# blocking/waits and any associated (warn)
# logging when it's known that this
# nursery shouldn't be exited before the
# root actor is.
an._implicit_runtime_started = True
yield an
) as anursery:
yield anursery
finally:
# XXX: this event will be set after the root actor
# runtime is already torn down, so we want to
# avoid any blocking on it.
an.exited.set()
anursery.exited.set()
else: # sub-nursery case
try:
async with _open_and_supervise_one_cancels_all_nursery(
actor
) as an:
yield an
) as anursery:
yield anursery
finally:
an.exited.set()
anursery.exited.set()
finally:
# show frame on any internal runtime-scope error
if (
an
and not an.cancelled
and an._scope_error
):
__tracebackhide__: bool = False
msg: str = (
'Actor-nursery exited\n'
f'|_{an}\n'
)
log.debug("Nursery teardown complete")
# shutdown runtime if it was started
if implicit_runtime:
# shutdown runtime if it was started and report noisly
# that we're did so.
msg += '=> Shutting down actor runtime <=\n'
log.info(msg)
else:
# keep noise low during std operation.
log.runtime(msg)
log.info("Shutting down actor tree")


@ -1,96 +0,0 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Various helpers/utils for auditing your `tractor` app and/or the
core runtime.
'''
from contextlib import asynccontextmanager as acm
import pathlib
import tractor
from .pytest import (
tractor_test as tractor_test
)
from .fault_simulation import (
break_ipc as break_ipc,
)
def repodir() -> pathlib.Path:
'''
Return the abspath to the repo directory.
'''
# 3 parents up to step out of tractor/_testing/ into the repo dir
return pathlib.Path(
__file__
# 3 .parents bc:
# <._testing-pkg>.<tractor-pkg>.<git-repo-dir>
# /$HOME/../<tractor-repo-dir>/tractor/_testing/__init__.py
).parent.parent.parent.absolute()
def examples_dir() -> pathlib.Path:
'''
Return the abspath to the examples directory as `pathlib.Path`.
'''
return repodir() / 'examples'
def mk_cmd(
ex_name: str,
exs_subpath: str = 'debugging',
) -> str:
'''
Generate a shell command suitable to pass to ``pexpect.spawn()``.
'''
script_path: pathlib.Path = (
examples_dir()
/ exs_subpath
/ f'{ex_name}.py'
)
return ' '.join([
'python',
str(script_path)
])
@acm
async def expect_ctxc(
yay: bool,
reraise: bool = False,
) -> None:
'''
Small acm to catch `ContextCancelled` errors when expected
below it in an ``async with`` block.
'''
if yay:
try:
yield
raise RuntimeError('Never raised ctxc?')
except tractor.ContextCancelled:
if reraise:
raise
else:
return
else:
yield
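# A minimal usage sketch inside a test, assuming `ctx` is an already-open
# `tractor.Context` which the parent just cancelled such that retrieving its
# final result should raise `ContextCancelled`; the result-retrieval call
# shown is illustrative.
async def _example_expect_ctxc(ctx) -> None:
    async with expect_ctxc(yay=True, reraise=False):
        await ctx.result()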


@ -1,92 +0,0 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
`pytest` utils helpers and plugins for testing `tractor`'s runtime
and applications.
'''
from tractor import (
MsgStream,
)
async def break_ipc(
stream: MsgStream,
method: str|None = None,
pre_close: bool = False,
def_method: str = 'socket_close',
) -> None:
'''
XXX: close the channel right after an error is raised
purposely breaking the IPC transport to make sure the parent
doesn't get stuck in debug or hang on the connection join.
This more or less simulates an infinite msg-receive hang on
the other end.
'''
# close channel via IPC prot msging before
# any transport breakage
if pre_close:
await stream.aclose()
method: str = method or def_method
print(
'#################################\n'
'Simulating CHILD-side IPC BREAK!\n'
f'method: {method}\n'
f'pre `.aclose()`: {pre_close}\n'
'#################################\n'
)
match method:
case 'socket_close':
await stream._ctx.chan.transport.stream.aclose()
case 'socket_eof':
# NOTE: `trio` does the following underneath this
# call in `src/trio/_highlevel_socket.py`:
# `Stream.socket.shutdown(tsocket.SHUT_WR)`
await stream._ctx.chan.transport.stream.send_eof()
# TODO: remove since now this will be invalid with our
# new typed msg spec?
# case 'msg':
# await stream._ctx.chan.send(None)
# TODO: the actual real-world simulated cases like
# transport layer hangs and/or lower layer 2-gens type
# scenarios..
#
# -[ ] already have some issues for this general testing
# area:
# - https://github.com/goodboy/tractor/issues/97
# - https://github.com/goodboy/tractor/issues/124
# - PR from @guille:
# https://github.com/goodboy/tractor/pull/149
# case 'hang':
# TODO: framework research:
#
# - https://github.com/GuoTengda1993/pynetem
# - https://github.com/shopify/toxiproxy
# - https://manpages.ubuntu.com/manpages/trusty/man1/wirefilter.1.html
case _:
raise RuntimeError(
f'IPC break method unsupported: {method}'
)
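# A minimal child-side usage sketch, assuming `stream` is an open `MsgStream`
# from an active streaming block between a parent and child actor; first
# close the stream gracefully, then sever the raw transport socket.
async def _example_break_ipc(stream: MsgStream) -> None:
    await break_ipc(
        stream=stream,
        method='socket_close',
        pre_close=True,
    )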


@ -1,113 +0,0 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
`pytest` utils helpers and plugins for testing `tractor`'s runtime
and applications.
'''
from functools import (
partial,
wraps,
)
import inspect
import platform
import tractor
import trio
def tractor_test(fn):
'''
Decorator for async test funcs to present them as "native"
looking sync funcs runnable by `pytest` using `trio.run()`.
Use:
@tractor_test
async def test_whatever():
await ...
If fixtures:
- ``reg_addr`` (a socket addr tuple where arbiter is listening)
- ``loglevel`` (logging level passed to tractor internals)
- ``start_method`` (subprocess spawning backend)
are defined in the `pytest` fixture space they will be automatically
injected to tests declaring these funcargs.
'''
@wraps(fn)
def wrapper(
*args,
loglevel=None,
reg_addr=None,
start_method: str|None = None,
debug_mode: bool = False,
**kwargs
):
# __tracebackhide__ = True
# NOTE: inject any test func declared fixture
# names by manually checking!
if 'reg_addr' in inspect.signature(fn).parameters:
# injects test suite fixture value to test as well
# as `run()`
kwargs['reg_addr'] = reg_addr
if 'loglevel' in inspect.signature(fn).parameters:
# allows test suites to define a 'loglevel' fixture
# that activates the internal logging
kwargs['loglevel'] = loglevel
if start_method is None:
if platform.system() == "Windows":
start_method = 'trio'
if 'start_method' in inspect.signature(fn).parameters:
# set of subprocess spawning backends
kwargs['start_method'] = start_method
if 'debug_mode' in inspect.signature(fn).parameters:
# set of subprocess spawning backends
kwargs['debug_mode'] = debug_mode
if kwargs:
# use explicit root actor start
async def _main():
async with tractor.open_root_actor(
# **kwargs,
registry_addrs=[reg_addr] if reg_addr else None,
loglevel=loglevel,
start_method=start_method,
# TODO: only enable when pytest is passed --pdb
debug_mode=debug_mode,
):
await fn(*args, **kwargs)
main = _main
else:
# use implicit root actor start
main = partial(fn, *args, **kwargs)
return trio.run(main)
return wrapper
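# A minimal usage sketch: an async test presented to `pytest` as a sync
# callable; the `reg_addr`/`loglevel` funcargs are only injected when the
# matching fixtures are defined in the suite. Actor name and body below are
# illustrative.
@tractor_test
async def _example_spawn_and_cancel(
    reg_addr,
    loglevel,
):
    async with tractor.open_nursery() as an:
        portal = await an.start_actor(
            'sleeper',
            enable_modules=[__name__],
        )
        await portal.cancel_actor()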


@ -1,43 +0,0 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Runtime "developer experience" utils and addons to aid our
(advanced) users and core devs in building distributed applications
and working with/on the actor runtime.
"""
from ._debug import (
maybe_wait_for_debugger as maybe_wait_for_debugger,
acquire_debug_lock as acquire_debug_lock,
breakpoint as breakpoint,
pause as pause,
pause_from_sync as pause_from_sync,
sigint_shield as sigint_shield,
open_crash_handler as open_crash_handler,
maybe_open_crash_handler as maybe_open_crash_handler,
maybe_init_greenback as maybe_init_greenback,
post_mortem as post_mortem,
mk_pdb as mk_pdb,
)
from ._stackscope import (
enable_stack_on_sig as enable_stack_on_sig,
)
from .pformat import (
add_div as add_div,
pformat_caller_frame as pformat_caller_frame,
pformat_boxed_tb as pformat_boxed_tb,
)

File diff suppressed because it is too large


@ -1,288 +0,0 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Tools for code-object annotation, introspection and mutation
as it pertains to improving the grok-ability of our runtime!
'''
from __future__ import annotations
from functools import partial
import inspect
from types import (
FrameType,
FunctionType,
MethodType,
# CodeType,
)
from typing import (
Any,
Callable,
Type,
)
from tractor.msg import (
pretty_struct,
NamespacePath,
)
import wrapt
# TODO: yeah, i don't love this and we should prolly just
# write a decorator that actually keeps a stupid ref to the func
# obj..
def get_class_from_frame(fr: FrameType) -> (
FunctionType
|MethodType
):
'''
Attempt to get the function (or method) reference
from a given `FrameType`.
Verbatim from an SO:
https://stackoverflow.com/a/2220759
'''
args, _, _, value_dict = inspect.getargvalues(fr)
# we check the first parameter for the frame function is
# named 'self'
if (
len(args)
and
# TODO: other cases for `@classmethod` etc..?)
args[0] == 'self'
):
# in that case, 'self' will be referenced in value_dict
instance: object = value_dict.get('self')
if instance:
# return its class
return getattr(
instance,
'__class__',
None,
)
# return None otherwise
return None
def get_ns_and_func_from_frame(
frame: FrameType,
) -> Callable:
'''
Return the corresponding function object reference from
a `FrameType`, and return it and its parent namespace `dict`.
'''
ns: dict[str, Any]
# for a method, go up a frame and lookup the name in locals()
if '.' in (qualname := frame.f_code.co_qualname):
cls_name, _, func_name = qualname.partition('.')
ns = frame.f_back.f_locals[cls_name].__dict__
else:
func_name: str = frame.f_code.co_name
ns = frame.f_globals
return (
ns,
ns[func_name],
)
def func_ref_from_frame(
frame: FrameType,
) -> Callable:
func_name: str = frame.f_code.co_name
try:
return frame.f_globals[func_name]
except KeyError:
cls: Type|None = get_class_from_frame(frame)
if cls:
return getattr(
cls,
func_name,
)
class CallerInfo(pretty_struct.Struct):
# https://docs.python.org/dev/reference/datamodel.html#frame-objects
# https://docs.python.org/dev/library/inspect.html#the-interpreter-stack
_api_frame: FrameType
@property
def api_frame(self) -> FrameType:
try:
self._api_frame.clear()
except RuntimeError:
# log.warning(
print(
f'Frame {self._api_frame} for {self.api_func} is still active!'
)
return self._api_frame
_api_func: Callable
@property
def api_func(self) -> Callable:
return self._api_func
_caller_frames_up: int|None = 1
_caller_frame: FrameType|None = None # cached after first stack scan
@property
def api_nsp(self) -> NamespacePath|None:
func: FunctionType = self.api_func
if func:
return NamespacePath.from_ref(func)
return '<unknown>'
@property
def caller_frame(self) -> FrameType:
# if not already cached, scan up stack explicitly by
# configured count.
if not self._caller_frame:
if self._caller_frames_up:
for _ in range(self._caller_frames_up):
caller_frame: FrameType|None = self.api_frame.f_back
if not caller_frame:
raise ValueError(
f'No frame exists {self._caller_frames_up} up from\n'
f'{self.api_frame} @ {self.api_nsp}\n'
)
self._caller_frame = caller_frame
return self._caller_frame
@property
def caller_nsp(self) -> NamespacePath|None:
func: FunctionType = self.api_func
if func:
return NamespacePath.from_ref(func)
return '<unknown>'
def find_caller_info(
dunder_var: str = '__runtimeframe__',
iframes: int = 1,
check_frame_depth: bool = True,
) -> CallerInfo|None:
'''
Scan up the callstack for a frame with a `dunder_var: str` variable
and return the `iframes` frames above it.
By default we scan for a `__runtimeframe__` scope var which
denotes a `tractor` API above which (one frame up) is "user
app code" which "called into" the `tractor` method or func.
TODO: ex with `Portal.open_context()`
'''
# TODO: use this instead?
# https://docs.python.org/3/library/inspect.html#inspect.getouterframes
frames: list[inspect.FrameInfo] = inspect.stack()
for fi in frames:
assert (
fi.function
==
fi.frame.f_code.co_name
)
this_frame: FrameType = fi.frame
dunder_val: int|None = this_frame.f_locals.get(dunder_var)
if dunder_val:
go_up_iframes: int = (
dunder_val # could be 0 or `True` i guess?
or
iframes
)
rt_frame: FrameType = fi.frame
call_frame = rt_frame
for i in range(go_up_iframes):
call_frame = call_frame.f_back
return CallerInfo(
_api_frame=rt_frame,
_api_func=func_ref_from_frame(rt_frame),
_caller_frames_up=go_up_iframes,
)
return None
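# A usage sketch (assumption, not part of the runtime): a `tractor`
# API endpoint marks its own frame with the dunder so that
# `find_caller_info()` can report the "user code" frame which
# invoked it.
def _example_api_endpoint() -> None:
    __runtimeframe__: int = 1  # noqa: F841, the scan-target marker
    info: CallerInfo|None = find_caller_info()
    if info:
        # `.api_frame` is this func's frame; `.caller_frame` is one
        # frame up, i.e. whoever called `_example_api_endpoint()`.
        print(f'called from {info.caller_frame}')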
_frame2callerinfo_cache: dict[FrameType, CallerInfo] = {}
# TODO: -[x] move all this into new `.devx._code`!
# -[ ] consider rename to _callstack?
# -[ ] prolly create a `@runtime_api` dec?
# |_ @api_frame seems better?
# -[ ] ^- make it capture and/or accept buncha optional
# meta-data like a fancier version of `@pdbp.hideframe`.
#
def api_frame(
wrapped: Callable|None = None,
*,
caller_frames_up: int = 1,
) -> Callable:
# handle the decorator called WITHOUT () case,
# i.e. just @api_frame, NOT @api_frame(extra=<blah>)
if wrapped is None:
return partial(
api_frame,
caller_frames_up=caller_frames_up,
)
@wrapt.decorator
async def wrapper(
wrapped: Callable,
instance: object,
args: tuple,
kwargs: dict,
):
# maybe cache the API frame for this call
global _frame2callerinfo_cache
this_frame: FrameType = inspect.currentframe()
api_frame: FrameType = this_frame.f_back
if not _frame2callerinfo_cache.get(api_frame):
_frame2callerinfo_cache[api_frame] = CallerInfo(
_api_frame=api_frame,
_api_func=wrapped,
_caller_frames_up=caller_frames_up,
)
return wrapped(*args, **kwargs)
# annotate the function as an "api function", meaning it is
# a function for which the function above it in the call stack should be
# non-`tractor` code aka "user code".
#
# in the global frame cache for easy lookup from a given
# func-instance
wrapped._call_infos: dict[FrameType, CallerInfo] = _frame2callerinfo_cache
wrapped.__api_func__: bool = True
return wrapper(wrapped)

View File

@ -1,239 +0,0 @@
# tractor: structured concurrent "actors".
# Copyright eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
The fundamental cross process SC abstraction: an inter-actor,
cancel-scope linked task "context".
A ``Context`` is very similar to the ``trio.Nursery.cancel_scope`` built
into each ``trio.Nursery`` except it links the lifetimes of memory space
disjoint, parallel executing tasks in separate actors.
'''
from __future__ import annotations
# from functools import partial
from threading import (
current_thread,
Thread,
RLock,
)
import multiprocessing as mp
from signal import (
signal,
getsignal,
SIGUSR1,
)
# import traceback
from types import ModuleType
from typing import (
Callable,
TYPE_CHECKING,
)
import trio
from tractor import (
_state,
log as logmod,
)
log = logmod.get_logger(__name__)
if TYPE_CHECKING:
from tractor._spawn import ProcessType
from tractor import (
Actor,
ActorNursery,
)
@trio.lowlevel.disable_ki_protection
def dump_task_tree() -> None:
'''
Do a classic `stackscope.extract()` task-tree dump to console at
`.devx()` level.
'''
import stackscope
tree_str: str = str(
stackscope.extract(
trio.lowlevel.current_root_task(),
recurse_child_tasks=True
)
)
actor: Actor = _state.current_actor()
thr: Thread = current_thread()
log.devx(
f'Dumping `stackscope` tree for actor\n'
f'{actor.uid}:\n'
f'|_{mp.current_process()}\n'
f' |_{thr}\n'
f' |_{actor}\n\n'
# start-of-trace-tree delimiter (mostly for testing)
'------ - ------\n'
'\n'
+
f'{tree_str}\n'
+
# end-of-trace-tree delimiter (mostly for testing)
f'\n'
f'------ {actor.uid!r} ------\n'
)
# TODO: can remove this right?
# -[ ] was original code from author
#
# print(
# 'DUMPING FROM PRINT\n'
# +
# content
# )
# import logging
# try:
# with open("/dev/tty", "w") as tty:
# tty.write(tree_str)
# except BaseException:
# logging.getLogger(
# "task_tree"
# ).exception("Error printing task tree")
_handler_lock = RLock()
_tree_dumped: bool = False
def dump_tree_on_sig(
sig: int,
frame: object,
relay_to_subs: bool = True,
) -> None:
global _tree_dumped, _handler_lock
with _handler_lock:
if _tree_dumped:
log.warning(
'Already dumped for this actor...??'
)
return
_tree_dumped = True
# actor: Actor = _state.current_actor()
log.devx(
'Trying to dump `stackscope` tree..\n'
)
try:
dump_task_tree()
# await actor._service_n.start_soon(
# partial(
# trio.to_thread.run_sync,
# dump_task_tree,
# )
# )
# trio.lowlevel.current_trio_token().run_sync_soon(
# dump_task_tree
# )
except RuntimeError:
log.exception(
'Failed to dump `stackscope` tree..\n'
)
# not in async context -- print a normal traceback
# traceback.print_stack()
raise
except BaseException:
log.exception(
'Failed to dump `stackscope` tree..\n'
)
raise
log.devx(
'Supposedly we dumped just fine..?'
)
if not relay_to_subs:
return
an: ActorNursery
for an in _state.current_actor()._actoruid2nursery.values():
subproc: ProcessType
subactor: Actor
for subactor, subproc, _ in an._children.values():
log.warning(
f'Relaying `SIGUSR1`[{sig}] to sub-actor\n'
f'{subactor}\n'
f' |_{subproc}\n'
)
# bc of course stdlib can't have a std API.. XD
match subproc:
case trio.Process():
subproc.send_signal(sig)
case mp.Process():
subproc._send_signal(sig)
def enable_stack_on_sig(
sig: int = SIGUSR1,
) -> ModuleType:
'''
Enable `stackscope` tracing on reception of a signal; by
default this is SIGUSR1.
HOT TIP: a task/ctx-tree dump can be triggered from a shell with
fancy cmds.
For ex. from `bash` using `pgrep` and cmd-substitution
(https://www.gnu.org/software/bash/manual/bash.html#Command-Substitution)
you could use:
>> kill -SIGUSR1 $(pgrep -f '<cmd>')
Or with `xonsh` (which has diff capture-from-subproc syntax)
>> kill -SIGUSR1 @$(pgrep -f '<cmd>')
'''
try:
import stackscope
except ImportError:
log.warning(
'`stackscope` not installed for use in debug mode!'
)
return None
handler: Callable|int = getsignal(sig)
if handler is dump_tree_on_sig:
log.devx(
'A `SIGUSR1` handler already exists?\n'
f'|_ {handler!r}\n'
)
return
signal(
sig,
dump_tree_on_sig,
)
log.devx(
'Enabling trace-trees on `SIGUSR1` '
'since `stackscope` is installed @ \n'
f'{stackscope!r}\n\n'
f'With `SIGUSR1` handler\n'
f'|_{dump_tree_on_sig}\n'
)
return stackscope
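# A minimal usage sketch (assumption): opt-in from early actor/app
# startup code; returns the imported `stackscope` mod or `None` when
# it isn't installed (or a handler is already set).
def _example_enable(debug_mode: bool = False) -> ModuleType|None:
    if debug_mode:
        return enable_stack_on_sig()
    return None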

View File

@ -1,129 +0,0 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
CLI framework extensions for hacking on the actor runtime.
Currently popular frameworks supported are:
- `typer` via the `@callback` API
"""
from __future__ import annotations
from typing import (
Any,
Callable,
)
from typing_extensions import Annotated
import typer
_runtime_vars: dict[str, Any] = {}
def load_runtime_vars(
ctx: typer.Context,
callback: Callable,
pdb: bool = False, # --pdb
ll: Annotated[
str,
typer.Option(
'--loglevel',
'-l',
help='BigD logging level',
),
] = 'cancel', # -l info
):
'''
Maybe engage crash handling with `pdbp` when code inside
a `typer` CLI endpoint cmd raises.
To use this callback simply take your `app = typer.Typer()` instance
and decorate this function with it like so:
.. code:: python
from tractor.devx import cli
app = typer.Typer()
# manual decoration to hook into `click`'s context system!
cli.load_runtime_vars = app.callback(
invoke_without_command=True,
)
And then you can use the now augmented `click` CLI context as so,
.. code:: python
@app.command(
context_settings={
"allow_extra_args": True,
"ignore_unknown_options": True,
}
)
def my_cli_cmd(
ctx: typer.Context,
):
rtvars: dict = ctx.runtime_vars
pdb: bool = rtvars['pdb']
with tractor.devx.cli.maybe_open_crash_handler(pdb=pdb):
trio.run(
partial(
my_tractor_main_task_func,
debug_mode=pdb,
loglevel=rtvars['ll'],
)
)
which will enable log level and debug mode globally for the entire
`tractor` + `trio` runtime thereafter!
Bo
'''
global _runtime_vars
_runtime_vars |= {
'pdb': pdb,
'll': ll,
}
ctx.runtime_vars: dict[str, Any] = _runtime_vars
print(
f'`typer` sub-cmd: {ctx.invoked_subcommand}\n'
f'`tractor` runtime vars: {_runtime_vars}'
)
# XXX NOTE XXX: hackzone.. if no sub-cmd is specified (the
# default if the user just invokes `bigd`) then we simply
# invoke the sole `_bigd()` cmd passing in the "parent"
# typer.Context directly to that call since we're treating it
# as a "non sub-command" or wtv..
# TODO: ideally typer would have some kinda built-in way to get
# this behaviour without having to construct and manually
# invoke our own cmd..
if (
ctx.invoked_subcommand is None
or ctx.invoked_subcommand == callback.__name__
):
cmd: typer.core.TyperCommand = typer.core.TyperCommand(
name='bigd',
callback=callback,
)
ctx.params = {'ctx': ctx}
cmd.invoke(ctx)

View File

@ -1,168 +0,0 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Pretty formatters for use throughout the code base.
Mostly handy for logging and exception message content.
'''
import textwrap
import traceback
from trio import CancelScope
def add_div(
message: str,
div_str: str = '------ - ------',
) -> str:
'''
Add a "divider string" to the input `message` with
a little math to center it underneath.
'''
div_offset: int = (
round(len(message)/2)+1
-
round(len(div_str)/2)+1
)
div_str: str = (
'\n' + ' '*div_offset + f'{div_str}\n'
)
return div_str
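# A small usage sketch: append a (roughly) centered divider under
# a one-line log message.
def _example_div() -> str:
    msg: str = 'you might need to `pip install xyz`'
    return msg + add_div(message=msg)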
def pformat_boxed_tb(
tb_str: str,
fields_str: str|None = None,
field_prefix: str = ' |_',
tb_box_indent: int|None = None,
tb_body_indent: int = 1,
) -> str:
'''
Create a "boxed" looking traceback string.
Useful for emphasizing traceback text content as being an
embedded attribute of some other object (like
a `RemoteActorError` or other boxing remote error shuttle
container).
Any other parent/container "fields" can be passed in the
`fields_str` input along with other prefix/indent settings.
'''
if (
fields_str
and
field_prefix
):
fields: str = textwrap.indent(
fields_str,
prefix=field_prefix,
)
else:
fields = fields_str or ''
tb_body = tb_str
if tb_body_indent:
tb_body: str = textwrap.indent(
tb_str,
prefix=tb_body_indent * ' ',
)
tb_box: str = (
f'|\n'
f' ------ - ------\n'
f'{tb_body}'
f' ------ - ------\n'
f'_|\n'
)
tb_box_indent: str = (
tb_box_indent
or
1
# (len(field_prefix))
# ? ^-TODO-^ ? if you wanted another indent level
)
if tb_box_indent > 0:
tb_box: str = textwrap.indent(
tb_box,
prefix=tb_box_indent * ' ',
)
return (
fields
+
tb_box
)
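# A small usage sketch (not part of the original module): box a just
# captured traceback for embedding in some container object's repr.
def _example_boxed_tb() -> str:
    try:
        raise ValueError('boom')
    except ValueError:
        tb_str: str = traceback.format_exc()
    return pformat_boxed_tb(
        tb_str=tb_str,
        # hypothetical parent-object fields shown above the box
        fields_str='src_uid: ("example", "uuid")\n',
    )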
def pformat_caller_frame(
stack_limit: int = 1,
box_tb: bool = True,
) -> str:
'''
Capture and return the traceback text content from
`stack_limit` call frames up.
'''
tb_str: str = (
'\n'.join(
traceback.format_stack(limit=stack_limit)
)
)
if box_tb:
tb_str: str = pformat_boxed_tb(
tb_str=tb_str,
field_prefix=' ',
)
return tb_str
def pformat_cs(
cs: CancelScope,
var_name: str = 'cs',
field_prefix: str = ' |_',
) -> str:
'''
Pretty format info about a `trio.CancelScope` including most
of its public state and `._cancel_status`.
The output can be modified to show a "var name" for the
instance as a field prefix, just a simple str before each
line more or less.
'''
fields: str = textwrap.indent(
(
f'cancel_called = {cs.cancel_called}\n'
f'cancelled_caught = {cs.cancelled_caught}\n'
f'_cancel_status = {cs._cancel_status}\n'
f'shield = {cs.shield}\n'
),
prefix=field_prefix,
)
return (
f'{var_name}: {cs}\n'
+
fields
)
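# A small usage sketch: render a scope's state from inside a task;
# this must run under `trio` since entering a `CancelScope` requires
# an active run.
async def _example_pformat_cs() -> None:
    with CancelScope() as cs:
        print(pformat_cs(cs, var_name='cs'))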

View File

@ -31,13 +31,13 @@ from typing import (
Callable,
)
from functools import partial
from contextlib import aclosing
from async_generator import aclosing
import trio
import wrapt
from ..log import get_logger
from .._context import Context
from .._streaming import Context
__all__ = ['pub']
@ -148,8 +148,7 @@ def pub(
*,
tasks: set[str] = set(),
):
'''
Publisher async generator decorator.
"""Publisher async generator decorator.
A publisher can be called multiple times from different actors but
will only spawn a finite set of internal tasks to stream values to
@ -228,8 +227,7 @@ def pub(
running in a single actor to stream data to an arbitrary number of
subscribers. If you are ok to have a new task running for every call
to ``pub_service()`` then probably don't need this.
'''
"""
global _pubtask2lock
# handle the decorator not called with () case

View File

@ -21,11 +21,6 @@ Log like a forester!
from collections.abc import Mapping
import sys
import logging
from logging import (
LoggerAdapter,
Logger,
StreamHandler,
)
import colorlog # type: ignore
import trio
@ -53,20 +48,17 @@ LOG_FORMAT = (
DATE_FORMAT = '%b %d %H:%M:%S'
# FYI, ERROR is 40
# TODO: use a `bidict` to avoid the :155 check?
CUSTOM_LEVELS: dict[str, int] = {
LEVELS = {
'TRANSPORT': 5,
'RUNTIME': 15,
'DEVX': 17,
'CANCEL': 22,
'CANCEL': 16,
'PDB': 500,
}
STD_PALETTE = {
'CRITICAL': 'red',
'ERROR': 'red',
'PDB': 'white',
'DEVX': 'cyan',
'WARNING': 'yellow',
'INFO': 'green',
'CANCEL': 'yellow',
@ -83,18 +75,13 @@ BOLD_PALETTE = {
# TODO: this isn't showing the correct '{filename}'
# as it did before..
class StackLevelAdapter(LoggerAdapter):
class StackLevelAdapter(logging.LoggerAdapter):
def transport(
self,
msg: str,
) -> None:
'''
IPC transport level msg IO; generally anything below
`._ipc.Channel` and friends.
'''
return self.log(5, msg)
def runtime(
@ -107,68 +94,22 @@ class StackLevelAdapter(LoggerAdapter):
self,
msg: str,
) -> None:
'''
Cancellation sequencing, mostly for runtime reporting.
'''
return self.log(
level=22,
msg=msg,
# stacklevel=4,
)
return self.log(16, msg)
def pdb(
self,
msg: str,
) -> None:
'''
`pdb`-REPL (debugger) related statuses.
'''
return self.log(500, msg)
def devx(
self,
msg: str,
) -> None:
'''
"Developer experience" sub-sys statuses.
'''
return self.log(17, msg)
def log(
self,
level,
msg,
*args,
**kwargs,
):
'''
def log(self, level, msg, *args, **kwargs):
"""
Delegate a log call to the underlying logger, after adding
contextual information from this adapter instance.
NOTE: all custom level methods (above) delegate to this!
'''
"""
if self.isEnabledFor(level):
stacklevel: int = 3
if (
level in CUSTOM_LEVELS.values()
):
stacklevel: int = 4
# msg, kwargs = self.process(msg, kwargs)
self._log(
level=level,
msg=msg,
args=args,
# NOTE: not sure how this worked before but, it
# seems with our custom level methods defined above
# we do indeed (now) require another stack level??
stacklevel=stacklevel,
**kwargs,
)
self._log(level, msg, args, **kwargs)
# LOL, the stdlib doesn't allow passing through ``stacklevel``..
def _log(
@ -181,15 +122,12 @@ class StackLevelAdapter(LoggerAdapter):
stack_info=False,
# XXX: bit we added to show fileinfo from actual caller.
# - this level
# - then ``.log()``
# - then finally the caller's level..
stacklevel=4,
# this level then ``.log()`` then finally the caller's level..
stacklevel=3,
):
'''
"""
Low-level log implementation, proxied to allow nested logger adapters.
'''
"""
return self.logger._log(
level,
msg,
@ -201,30 +139,8 @@ class StackLevelAdapter(LoggerAdapter):
)
# TODO IDEAs:
# -[ ] move to `.devx.pformat`?
# -[ ] do per task-name and actor-name color coding
# -[ ] unique color per task-id and actor-uuid
def pformat_task_uid(
id_part: str = 'tail'
):
'''
Return a `str`-ified unique id for a `trio.Task` via a combo of its
`.name: str` and `id()` truncated output.
'''
task: trio.Task = trio.lowlevel.current_task()
tid: str = str(id(task))
if id_part == 'tail':
tid_part: str = tid[-6:]
else:
tid_part: str = tid[:6]
return f'{task.name}[{tid_part}]'
_conc_name_getters = {
'task': pformat_task_uid,
'task': lambda: trio.lowlevel.current_task().name,
'actor': lambda: current_actor(),
'actor_name': lambda: current_actor().name,
'actor_uid': lambda: current_actor().uid[1][:6],
@ -232,10 +148,7 @@ _conc_name_getters = {
class ActorContextInfo(Mapping):
'''
Dynamic lookup for local actor and task names.
'''
"Dyanmic lookup for local actor and task names"
_context_keys = (
'task',
'actor',
@ -266,42 +179,17 @@ def get_logger(
'''Return the package log or a sub-logger for ``name`` if provided.
'''
log: Logger
log = rlog = logging.getLogger(_root_name)
if (
name
and name != _proj_name
):
if name and name != _proj_name:
# NOTE: for handling for modules that use ``get_logger(__name__)``
# we make the following stylistic choice:
# - always avoid duplicate project-package token
# in msg output: i.e. tractor.tractor _ipc.py in header
# looks ridiculous XD
# - never show the leaf module name in the {name} part
# since in python the {filename} is always this same
# module-file.
sub_name: None | str = None
rname, _, sub_name = name.partition('.')
pkgpath, _, modfilename = sub_name.rpartition('.')
# NOTE: for tractor itself never include the last level
# module key in the name such that something like: eg.
# 'tractor.trionics._broadcast` only includes the first
# 2 tokens in the (coloured) name part.
if rname == 'tractor':
sub_name = pkgpath
if _root_name in sub_name:
duplicate, _, sub_name = sub_name.partition('.')
if not sub_name:
log = rlog
else:
log = rlog.getChild(sub_name)
# handling for modules that use ``get_logger(__name__)`` to
# avoid duplicate project-package token in msg output
rname, _, tail = name.partition('.')
if rname == _root_name:
name = tail
log = rlog.getChild(name)
log.level = rlog.level
# add our actor-task aware adapter which will dynamically look up
@ -309,7 +197,7 @@ def get_logger(
logger = StackLevelAdapter(log, ActorContextInfo())
# additional levels
for name, val in CUSTOM_LEVELS.items():
for name, val in LEVELS.items():
logging.addLevelName(val, name)
# ensure customs levels exist as methods
@ -321,7 +209,7 @@ def get_logger(
def get_console_log(
level: str | None = None,
**kwargs,
) -> LoggerAdapter:
) -> logging.LoggerAdapter:
'''Get the package logger and enable a handler which writes to stderr.
Yeah yeah, i know we can use ``DictConfig``. You do it.
@ -332,21 +220,13 @@ def get_console_log(
if not level:
return log
log.setLevel(
level.upper()
if not isinstance(level, int)
else level
)
log.setLevel(level.upper() if not isinstance(level, int) else level)
if not any(
handler.stream == sys.stderr # type: ignore
for handler in logger.handlers if getattr(
handler,
'stream',
None,
)
for handler in logger.handlers if getattr(handler, 'stream', None)
):
handler = StreamHandler()
handler = logging.StreamHandler()
formatter = colorlog.ColoredFormatter(
LOG_FORMAT,
datefmt=DATE_FORMAT,
@ -362,23 +242,3 @@ def get_console_log(
def get_loglevel() -> str:
return _default_loglevel
# global module logger for tractor itself
log = get_logger('tractor')
def at_least_level(
log: Logger|LoggerAdapter,
level: int|str,
) -> bool:
'''
Predicate to test if a given level is active.
'''
if isinstance(level, str):
level: int = CUSTOM_LEVELS[level.upper()]
if log.getEffectiveLevel() <= level:
return True
return False

80
tractor/msg.py 100644
View File

@ -0,0 +1,80 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Built-in messaging patterns, types, APIs and helpers.
'''
# TODO: integration with our ``enable_modules: list[str]`` caps sys.
# ``pkgutil.resolve_name()`` internally uses
# ``importlib.import_module()`` which can be filtered by inserting
# a ``MetaPathFinder`` into ``sys.meta_path`` (which we could do before
# entering the ``_runtime.process_messages()`` loop).
# - https://github.com/python/cpython/blob/main/Lib/pkgutil.py#L645
# - https://stackoverflow.com/questions/1350466/preventing-python-code-from-importing-certain-modules
# - https://stackoverflow.com/a/63320902
# - https://docs.python.org/3/library/sys.html#sys.meta_path
# the new "Implicit Namespace Packages" might be relevant?
# - https://www.python.org/dev/peps/pep-0420/
# add implicit serialized message type support so that paths can be
# handed directly to IPC primitives such as streams and `Portal.run()`
# calls:
# - via ``msgspec``:
# - https://jcristharif.com/msgspec/api.html#struct
# - https://jcristharif.com/msgspec/extending.html
# via ``msgpack-python``:
# - https://github.com/msgpack/msgpack-python#packingunpacking-of-custom-data-type
from __future__ import annotations
from pkgutil import resolve_name
class NamespacePath(str):
'''
A serializable description of a (function) Python object location
described by the target's module path and namespace key, meant as
a message-native "packet" that allows actors to point-and-load objects
by absolute reference.
'''
_ref: object = None
def load_ref(self) -> object:
if self._ref is None:
self._ref = resolve_name(self)
return self._ref
def to_tuple(
self,
) -> tuple[str, str]:
ref = self.load_ref()
return ref.__module__, getattr(ref, '__name__', '')
@classmethod
def from_ref(
cls,
ref,
) -> NamespacePath:
return cls(':'.join(
(ref.__module__,
getattr(ref, '__name__', ''))
))
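# A minimal usage sketch: round-trip an importable callable through
# its str-path form; `math.sqrt` is only a stand-in for any
# loadable reference.
def _example_nsp_roundtrip() -> None:
    import math
    nsp = NamespacePath.from_ref(math.sqrt)
    assert str(nsp) == 'math:sqrt'
    assert nsp.load_ref() is math.sqrt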

View File

@ -1,73 +0,0 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Built-in messaging patterns, types, APIs and helpers.
'''
from typing import (
TypeAlias,
)
from .ptr import (
NamespacePath as NamespacePath,
)
from .pretty_struct import (
Struct as Struct,
)
from ._codec import (
_def_msgspec_codec as _def_msgspec_codec,
_ctxvar_MsgCodec as _ctxvar_MsgCodec,
apply_codec as apply_codec,
mk_codec as mk_codec,
MsgCodec as MsgCodec,
MsgDec as MsgDec,
current_codec as current_codec,
)
# currently can't bc circular with `._context`
# from ._ops import (
# PldRx as PldRx,
# _drain_to_final_msg as _drain_to_final_msg,
# )
from .types import (
PayloadMsg as PayloadMsg,
Aid as Aid,
SpawnSpec as SpawnSpec,
Start as Start,
StartAck as StartAck,
Started as Started,
Yield as Yield,
Stop as Stop,
Return as Return,
CancelAck as CancelAck,
Error as Error,
# type-var for `.pld` field
PayloadT as PayloadT,
# full msg class set from above as list
__msg_types__ as __msg_types__,
# type-alias for union of all msgs
MsgType as MsgType,
)
__msg_spec__: TypeAlias = MsgType

View File

@ -1,660 +0,0 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
IPC msg interchange codec management.
Supported backend libs:
- `msgspec.msgpack`
ToDo: backends we prolly should offer:
- see project/lib list throughout GH issue discussion comments:
https://github.com/goodboy/tractor/issues/196
- `capnproto`: https://capnproto.org/rpc.html
- https://capnproto.org/language.html#language-reference
'''
from __future__ import annotations
from contextlib import (
contextmanager as cm,
)
from contextvars import (
ContextVar,
Token,
)
import textwrap
from typing import (
Any,
Callable,
Type,
TYPE_CHECKING,
Union,
)
from types import ModuleType
import msgspec
from msgspec import (
msgpack,
Raw,
)
# TODO: see notes below from @mikenerone..
# from tricycle import TreeVar
from tractor.msg.pretty_struct import Struct
from tractor.msg.types import (
mk_msg_spec,
MsgType,
)
from tractor.log import get_logger
if TYPE_CHECKING:
from tractor._context import Context
log = get_logger(__name__)
# TODO: unify with `MsgCodec` by making `._dec` part this?
class MsgDec(Struct):
'''
An IPC msg (payload) decoder.
Normally used to decode only a payload: `MsgType.pld:
PayloadT` field before delivery to IPC consumer code.
'''
_dec: msgpack.Decoder
@property
def dec(self) -> msgpack.Decoder:
return self._dec
def __repr__(self) -> str:
speclines: str = self.spec_str
# in multi-typed spec case we stick the list
# all on newlines after the |__pld_spec__:,
# OW it's prolly single type spec-value
# so just leave it on same line.
if '\n' in speclines:
speclines: str = '\n' + textwrap.indent(
speclines,
prefix=' '*3,
)
body: str = textwrap.indent(
f'|_dec_hook: {self.dec.dec_hook}\n'
f'|__pld_spec__: {speclines}\n',
prefix=' '*2,
)
return (
f'<{type(self).__name__}(\n'
f'{body}'
')>'
)
# struct type unions
# https://jcristharif.com/msgspec/structs.html#tagged-unions
#
# ^-TODO-^: make a wrapper type for this such that alt
# backends can be represented easily without a `Union` needed,
# AND so that we have better support for wire transport.
#
# -[ ] maybe `FieldSpec` is a good name since msg-spec
# better applies to a `MsgType[FieldSpec]`?
#
# -[ ] both as part of the `.open_context()` call AND as part of the
# immediate ack-reponse (see similar below)
# we should do spec matching and fail if anything is awry?
#
# -[ ] eventually spec should be generated/parsed from the
# type-annots as # desired in GH issue:
# https://github.com/goodboy/tractor/issues/365
#
# -[ ] semantics of the mismatch case
# - when caller-callee specs we should raise
# a `MsgTypeError` or `MsgSpecError` or similar?
#
# -[ ] wrapper types for both spec types such that we can easily
# IPC transport them?
# - `TypeSpec: Union[Type]`
# * also a `.__contains__()` for doing `None in
# TypeSpec[None|int]` since rn you need to do it on
# `.__args__` for unions..
# - `MsgSpec: Union[MsgType]
#
# -[ ] auto-genning this from new (in 3.12) type parameter lists Bo
# |_ https://docs.python.org/3/reference/compound_stmts.html#type-params
# |_ historical pep 695: https://peps.python.org/pep-0695/
# |_ full lang spec: https://typing.readthedocs.io/en/latest/spec/
# |_ on annotation scopes:
# https://docs.python.org/3/reference/executionmodel.html#annotation-scopes
# |_ 3.13 will have subscriptable funcs Bo
# https://peps.python.org/pep-0718/
@property
def spec(self) -> Union[Type[Struct]]:
# NOTE: defined and applied inside `mk_codec()`
return self._dec.type
# no difference, as compared to a `MsgCodec` which defines the
# `MsgType.pld: PayloadT` part of its spec separately
pld_spec = spec
# TODO: would get moved into `FieldSpec.__str__()` right?
@property
def spec_str(self) -> str:
return pformat_msgspec(
codec=self,
join_char='|',
)
pld_spec_str = spec_str
def decode(
self,
raw: Raw|bytes,
) -> Any:
return self._dec.decode(raw)
@property
def hook(self) -> Callable|None:
return self._dec.dec_hook
def mk_dec(
spec: Union[Type[Struct]]|Any = Any,
dec_hook: Callable|None = None,
) -> MsgDec:
return MsgDec(
_dec=msgpack.Decoder(
type=spec, # like `MsgType[Any]`
dec_hook=dec_hook,
)
)
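# A small usage sketch (not part of the original module): build
# a payload decoder limited to `int` values and round-trip one.
def _example_int_dec() -> None:
    int_dec: MsgDec = mk_dec(spec=int)
    wire: bytes = msgpack.Encoder().encode(42)
    assert int_dec.decode(wire) == 42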
def mk_msgspec_table(
dec: msgpack.Decoder,
msg: MsgType|None = None,
) -> dict[MsgType, str]:
'''
Fill out a `dict` mapping `MsgType`s to their `str` names
for a given input `msgspec.msgpack.Decoder`
as defined by its `.type: Union[Type]` setting.
If `msg` is provided, only deliver a `dict` with a single
entry for that type.
'''
msgspec: Union[Type]|Type = dec.type
if not (msgtypes := getattr(msgspec, '__args__', False)):
msgtypes = [msgspec]
msgt_table: dict[MsgType, str] = {
msgt: str(msgt.__name__)
for msgt in msgtypes
}
if msg:
msgt: MsgType = type(msg)
str_repr: str = msgt_table[msgt]
return {msgt: str_repr}
return msgt_table
def pformat_msgspec(
codec: MsgCodec|MsgDec,
msg: MsgType|None = None,
join_char: str = '\n',
) -> str:
dec: msgpack.Decoder = getattr(codec, 'dec', codec)
return join_char.join(
mk_msgspec_table(
dec=dec,
msg=msg,
).values()
)
# TODO: overall IPC msg-spec features (i.e. in this mod)!
#
# -[ ] API changes towards being interchange lib agnostic!
# -[ ] capnproto has pre-compiled schema for eg..
# * https://capnproto.org/language.html
# * http://capnproto.github.io/pycapnp/quickstart.html
# * https://github.com/capnproto/pycapnp/blob/master/examples/addressbook.capnp
#
# -[ ] struct aware messaging coders as per:
# -[x] https://github.com/goodboy/tractor/issues/36
# -[ ] https://github.com/goodboy/tractor/issues/196
# -[ ] https://github.com/goodboy/tractor/issues/365
#
class MsgCodec(Struct):
'''
A IPC msg interchange format lib's encoder + decoder pair.
Pretty much nothing more then delegation to underlying
`msgspec.<interchange-protocol>.Encoder/Decoder`s for now.
'''
_enc: msgpack.Encoder
_dec: msgpack.Decoder
_pld_spec: Type[Struct]|Raw|Any
def __repr__(self) -> str:
speclines: str = textwrap.indent(
pformat_msgspec(codec=self),
prefix=' '*3,
)
body: str = textwrap.indent(
f'|_lib = {self.lib.__name__!r}\n'
f'|_enc_hook: {self.enc.enc_hook}\n'
f'|_dec_hook: {self.dec.dec_hook}\n'
f'|_pld_spec: {self.pld_spec_str}\n'
# f'|\n'
f'|__msg_spec__:\n'
f'{speclines}\n',
prefix=' '*2,
)
return (
f'<{type(self).__name__}(\n'
f'{body}'
')>'
)
@property
def pld_spec(self) -> Type[Struct]|Raw|Any:
return self._pld_spec
@property
def pld_spec_str(self) -> str:
# TODO: could also use match: instead?
spec: Union[Type]|Type = self.pld_spec
# `typing.Union` case
if getattr(spec, '__args__', False):
return str(spec)
# just a single type
else:
return spec.__name__
# struct type unions
# https://jcristharif.com/msgspec/structs.html#tagged-unions
@property
def msg_spec(self) -> Union[Type[Struct]]:
# NOTE: defined and applied inside `mk_codec()`
return self._dec.type
# TODO: some way to make `pretty_struct.Struct` use this
# wrapped field over the `.msg_spec` one?
@property
def msg_spec_str(self) -> str:
return pformat_msgspec(self.msg_spec)
lib: ModuleType = msgspec
# TODO: use `functools.cached_property` for these ?
# https://docs.python.org/3/library/functools.html#functools.cached_property
@property
def enc(self) -> msgpack.Encoder:
return self._enc
# TODO: reusing encode buffer for perf?
# https://jcristharif.com/msgspec/perf-tips.html#reusing-an-output-buffer
_buf: bytearray = bytearray()
def encode(
self,
py_obj: Any,
use_buf: bool = False,
# ^-XXX-^ uhh why am i getting this?
# |_BufferError: Existing exports of data: object cannot be re-sized
) -> bytes:
'''
Encode input python objects to `msgpack` bytes for
transfer over a transport protocol connection.
When `use_buf == True` use the output buffer optimization:
https://jcristharif.com/msgspec/perf-tips.html#reusing-an-output-buffer
'''
if use_buf:
self._enc.encode_into(py_obj, self._buf)
return self._buf
else:
return self._enc.encode(py_obj)
@property
def dec(self) -> msgpack.Decoder:
return self._dec
def decode(
self,
msg: bytes,
) -> Any:
'''
Decode received `msgpack` bytes into a local python object
with special `msgspec.Struct` (or other type) handling
determined by the decoder's configured type spec.
'''
# https://jcristharif.com/msgspec/usage.html#typed-decoding
return self._dec.decode(msg)
# [x] TODO: a sub-decoder system as well? => No!
#
# -[x] do we still want to try and support the sub-decoder with
# `.Raw` technique in the case that the `Generic` approach gives
# future grief?
# => NO, since we went with the `PldRx` approach instead B)
#
# IF however you want to see the code that was staged for this
# from wayyy back, see the pure removal commit.
def mk_codec(
# struct type unions set for `Decoder`
# https://jcristharif.com/msgspec/structs.html#tagged-unions
ipc_pld_spec: Union[Type[Struct]]|Any = Any,
# TODO: offering a per-msg(-field) type-spec such that
# the fields can be dynamically NOT decoded and left as `Raw`
# values which are later loaded by a sub-decoder specified
# by `tag_field: str` value key?
# payload_msg_specs: dict[
# str, # tag_field value as sub-decoder key
# Union[Type[Struct]] # `MsgType.pld` type spec
# ]|None = None,
libname: str = 'msgspec',
# proxy as `Struct(**kwargs)` for ad-hoc type extensions
# https://jcristharif.com/msgspec/extending.html#mapping-to-from-native-types
# ------ - ------
dec_hook: Callable|None = None,
enc_hook: Callable|None = None,
# ------ - ------
#
# Encoder:
# write_buffer_size=write_buffer_size,
#
# Decoder:
# ext_hook: ext_hook_sig
) -> MsgCodec:
'''
Convenience factory for creating codecs eventually meant
to be interchange lib agnostic (i.e. once we support more than just
`msgspec` ;).
'''
# (manually) generate a msg-payload-spec for all relevant
# god-boxing-msg subtypes, parameterizing the `PayloadMsg.pld: PayloadT`
# for the decoder such that all sub-type msgs in our SCIPP
# will automatically decode to a type-"limited" payload (`Struct`)
# object (set).
(
ipc_msg_spec,
msg_types,
) = mk_msg_spec(
payload_type_union=ipc_pld_spec,
)
assert len(ipc_msg_spec.__args__) == len(msg_types)
assert ipc_msg_spec
# TODO: use this shim instead?
# bc.. unification, err somethin?
# dec: MsgDec = mk_dec(
# spec=ipc_msg_spec,
# dec_hook=dec_hook,
# )
dec = msgpack.Decoder(
type=ipc_msg_spec,
dec_hook=dec_hook,
)
enc = msgpack.Encoder(
enc_hook=enc_hook,
)
codec = MsgCodec(
_enc=enc,
_dec=dec,
_pld_spec=ipc_pld_spec,
)
# sanity on expected backend support
assert codec.lib.__name__ == libname
return codec
# instance of the default `msgspec.msgpack` codec settings, i.e.
# no custom structs, hooks or other special types.
_def_msgspec_codec: MsgCodec = mk_codec(ipc_pld_spec=Any)
# The built-in IPC `Msg` spec.
# Our composing "shuttle" protocol which allows `tractor`-app code
# to use any `msgspec` supported type as the `PayloadMsg.pld` payload,
# https://jcristharif.com/msgspec/supported-types.html
#
_def_tractor_codec: MsgCodec = mk_codec(
# TODO: use this for debug mode locking prot?
# ipc_pld_spec=Any,
ipc_pld_spec=Raw,
)
# TODO: IDEALLY provides for per-`trio.Task` specificity of the
# IPC msging codec used by the transport layer when doing
# `Channel.send()/.recv()` of wire data.
# ContextVar-TODO: DIDN'T WORK, kept resetting in every new task to default!?
# _ctxvar_MsgCodec: ContextVar[MsgCodec] = ContextVar(
# TreeVar-TODO: DIDN'T WORK, kept resetting in every new embedded nursery
# even though it's supposed to inherit from a parent context ???
#
# _ctxvar_MsgCodec: TreeVar[MsgCodec] = TreeVar(
#
# ^-NOTE-^: for this to work see the mods by @mikenerone from `trio` gitter:
#
# 22:02:54 <mikenerone> even for regular contextvars, all you have to do is:
# `task: Task = trio.lowlevel.current_task()`
# `task.parent_nursery.parent_task.context.run(my_ctx_var.set, new_value)`
#
# From a comment in his prop code he couldn't share outright:
# 1. For every TreeVar set in the current task (which covers what
# we need from SynchronizerFacade), walk up the tree until the
# root or finding one where the TreeVar is already set, setting
# it in all of the contexts along the way.
# 2. For each of those, we also forcibly set the values that are
# pending for child nurseries that have not yet accessed the
# TreeVar.
# 3. We similarly set the pending values for the child nurseries
# of the *current* task.
#
_ctxvar_MsgCodec: ContextVar[MsgCodec] = ContextVar(
'msgspec_codec',
default=_def_tractor_codec,
)
@cm
def apply_codec(
codec: MsgCodec,
ctx: Context|None = None,
) -> MsgCodec:
'''
Dynamically apply a `MsgCodec` to the current task's runtime
context such that all (of a certain class of payload
containing i.e. `MsgType.pld: PayloadT`) IPC msgs are
processed with it for that task.
Uses a `contextvars.ContextVar` to ensure the scope of any
codec setting matches the current `Context` or
`._rpc.process_messages()` feeder task's prior setting without
mutating any surrounding scope.
When a `ctx` is supplied, only mod its `Context.pld_codec`; the
applied setting matches the `@cm` block and DOES NOT change to the
original (default) value in new tasks (as it does for `ContextVar`).
'''
__tracebackhide__: bool = True
if ctx is not None:
var: ContextVar = ctx._var_pld_codec
else:
# use IPC channel-connection "global" codec
var: ContextVar = _ctxvar_MsgCodec
orig: MsgCodec = var.get()
assert orig is not codec
if codec.pld_spec is None:
breakpoint()
log.info(
'Applying new msg-spec codec\n\n'
f'{codec}\n'
)
token: Token = var.set(codec)
# ?TODO? for TreeVar approach which copies from the
# cancel-scope of the prior value, NOT the prior task
# See the docs:
# - https://tricycle.readthedocs.io/en/latest/reference.html#tree-variables
# - https://github.com/oremanj/tricycle/blob/master/tricycle/_tests/test_tree_var.py
# ^- see docs for @cm `.being()` API
# with _ctxvar_MsgCodec.being(codec):
# new = _ctxvar_MsgCodec.get()
# assert new is codec
# yield codec
try:
yield var.get()
finally:
var.reset(token)
log.info(
'Reverted to last msg-spec codec\n\n'
f'{orig}\n'
)
assert var.get() is orig
def current_codec() -> MsgCodec:
'''
Return the current `trio.Task.context`'s value
for `msgspec_codec` used by `Channel.send/.recv()`
for wire serialization.
'''
return _ctxvar_MsgCodec.get()
@cm
def limit_msg_spec(
payload_spec: Union[Type[Struct]],
# TODO: don't need this approach right?
# -> related to the `MsgCodec._payload_decs` stuff above..
# tagged_structs: list[Struct]|None = None,
**codec_kwargs,
) -> MsgCodec:
'''
Apply a `MsgCodec` that will natively decode the SC-msg set's
`PayloadMsg.pld: Union[Type[Struct]]` payload fields using
tagged-unions of `msgspec.Struct`s from the `payload_types`
for all IPC contexts in use by the current `trio.Task`.
'''
__tracebackhide__: bool = True
curr_codec: MsgCodec = current_codec()
msgspec_codec: MsgCodec = mk_codec(
ipc_pld_spec=payload_spec,
**codec_kwargs,
)
with apply_codec(msgspec_codec) as applied_codec:
assert applied_codec is msgspec_codec
yield msgspec_codec
assert curr_codec is current_codec()
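# A usage sketch (assumption): temporarily limit the current task's
# payload spec to a single custom struct type.
def _example_limit_to_point() -> None:
    class Point(Struct):
        x: int
        y: int

    with limit_msg_spec(Point) as codec:
        # any `PayloadMsg.pld` decoded in this scope must now
        # validate as a `Point`
        assert codec.pld_spec is Point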
# XXX: msgspec won't allow this with non-struct custom types
# like `NamespacePath`!@!
# @cm
# def extend_msg_spec(
# payload_spec: Union[Type[Struct]],
# ) -> MsgCodec:
# '''
# Extend the current `MsgCodec.pld_spec` (type set) by extending
# the payload spec to **include** the types specified by
# `payload_spec`.
# '''
# codec: MsgCodec = current_codec()
# pld_spec: Union[Type] = codec.pld_spec
# extended_spec: Union[Type] = pld_spec|payload_spec
# with limit_msg_spec(payload_types=extended_spec) as ext_codec:
# # import pdbp; pdbp.set_trace()
# assert ext_codec.pld_spec == extended_spec
# yield ext_codec
# TODO: make something similar to this inside `._codec` such that
# user can just pass a type table of some sort?
# -[ ] we would need to decode all msgs to `pretty_struct.Struct`
# and then call `.to_dict()` on them?
# -[x] we're going to need to re-impl all the stuff changed in the
# runtime port such that it can handle dicts or `Msg`s?
#
# def mk_dict_msg_codec_hooks() -> tuple[Callable, Callable]:
# '''
# Deliver a `enc_hook()`/`dec_hook()` pair which does
# manual convertion from our above native `Msg` set
# to `dict` equivalent (wire msgs) in order to keep legacy compat
# with the original runtime implementation.
#
# Note: this is is/was primarly used while moving the core
# runtime over to using native `Msg`-struct types wherein we
# start with the send side emitting without loading
# a typed-decoder and then later flipping the switch over to
# load to the native struct types once all runtime usage has
# been adjusted appropriately.
#
# '''
# return (
# # enc_to_dict,
# dec_from_dict,
# )

View File

@ -1,842 +0,0 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Near-application abstractions for `MsgType.pld: PayloadT|Raw`
delivery, filtering and type checking as well as generic
operational helpers for processing transaction flows.
'''
from __future__ import annotations
from contextlib import (
asynccontextmanager as acm,
contextmanager as cm,
)
from typing import (
Any,
Callable,
Type,
TYPE_CHECKING,
Union,
)
# ------ - ------
from msgspec import (
msgpack,
Raw,
Struct,
ValidationError,
)
import trio
# ------ - ------
from tractor.log import get_logger
from tractor._exceptions import (
MessagingError,
InternalError,
_raise_from_unexpected_msg,
MsgTypeError,
_mk_recv_mte,
pack_error,
)
from tractor._state import current_ipc_ctx
from ._codec import (
mk_dec,
MsgDec,
MsgCodec,
current_codec,
)
from .types import (
CancelAck,
Error,
MsgType,
PayloadT,
Return,
Started,
Stop,
Yield,
pretty_struct,
)
if TYPE_CHECKING:
from tractor._context import Context
from tractor._streaming import MsgStream
log = get_logger(__name__)
_def_any_pldec: MsgDec[Any] = mk_dec()
class PldRx(Struct):
'''
A "msg payload receiver".
The pairing of a "feeder" `trio.abc.ReceiveChannel` and an
interchange-specific (eg. msgpack) payload field decoder. The
validation/type-filtering rules are runtime mutable and allow
type constraining the set of `MsgType.pld: Raw|PayloadT`
values at runtime, per IPC task-context.
This abstraction, being just below "user application code",
allows for the equivalent of our `MsgCodec` (used for
type-filtering IPC dialog protocol msgs against a msg-spec)
but with granular control around payload delivery (i.e. the
data-values user code actually sees and uses, the blobs that
are "shuttled" by the wrapping dialog prot) such that invalid
`.pld: Raw` can be decoded and handled by IPC-primitive user
code (i.e. code that operates on the `Context` and `MsgStream` APIs)
without knowledge of the lower level `Channel`/`MsgTransport`
primitives nor the `MsgCodec` in use. Further, lazily decoding
payload blobs allows for topical (and maybe intentionally
"partial") encryption of msg field subsets.
'''
# TODO: better to bind it here?
# _rx_mc: trio.MemoryReceiveChannel
_pld_dec: MsgDec
_ctx: Context|None = None
_ipc: Context|MsgStream|None = None
@property
def pld_dec(self) -> MsgDec:
return self._pld_dec
# TODO: a better name?
# -[ ] when would this be used as it avoids needing to pass the
# ipc prim to every method
@cm
def wraps_ipc(
self,
ipc_prim: Context|MsgStream,
) -> PldRx:
'''
Apply this payload receiver to an IPC primitive type, one
of `Context` or `MsgStream`.
'''
self._ipc = ipc_prim
try:
yield self
finally:
self._ipc = None
@cm
def limit_plds(
self,
spec: Union[Type[Struct]],
**dec_kwargs,
) -> MsgDec:
'''
Type-limit the loadable msg payloads via an applied
`MsgDec` given an input spec, revert to prior decoder on
exit.
'''
orig_dec: MsgDec = self._pld_dec
limit_dec: MsgDec = mk_dec(
spec=spec,
**dec_kwargs,
)
try:
self._pld_dec = limit_dec
yield limit_dec
finally:
self._pld_dec = orig_dec
@property
def dec(self) -> msgpack.Decoder:
return self._pld_dec.dec
def recv_pld_nowait(
self,
# TODO: make this `MsgStream` compat as well, see above^
# ipc_prim: Context|MsgStream,
ipc: Context|MsgStream,
ipc_msg: MsgType|None = None,
expect_msg: Type[MsgType]|None = None,
hide_tb: bool = False,
**dec_pld_kwargs,
) -> Any|Raw:
__tracebackhide__: bool = hide_tb
msg: MsgType = (
ipc_msg
or
# sync-rx msg from underlying IPC feeder (mem-)chan
ipc._rx_chan.receive_nowait()
)
return self.decode_pld(
msg,
ipc=ipc,
expect_msg=expect_msg,
hide_tb=hide_tb,
**dec_pld_kwargs,
)
async def recv_pld(
self,
ipc: Context|MsgStream,
ipc_msg: MsgType|None = None,
expect_msg: Type[MsgType]|None = None,
hide_tb: bool = True,
**dec_pld_kwargs,
) -> Any|Raw:
'''
Receive a `MsgType`, then decode and return its `.pld` field.
'''
__tracebackhide__: bool = hide_tb
msg: MsgType = (
ipc_msg
or
# async-rx msg from underlying IPC feeder (mem-)chan
await ipc._rx_chan.receive()
)
return self.decode_pld(
msg=msg,
ipc=ipc,
expect_msg=expect_msg,
**dec_pld_kwargs,
)
def decode_pld(
self,
msg: MsgType,
ipc: Context|MsgStream,
expect_msg: Type[MsgType]|None,
raise_error: bool = True,
hide_tb: bool = True,
# XXX for special (default?) case of send side call with
# `Context.started(validate_pld_spec=True)`
is_started_send_side: bool = False,
) -> PayloadT|Raw:
'''
Decode a msg's payload field: `MsgType.pld: PayloadT|Raw` and
return the value or raise an appropriate error.
'''
__tracebackhide__: bool = hide_tb
src_err: BaseException|None = None
match msg:
# payload-data shuttle msg; deliver the `.pld` value
# directly to IPC (primitive) client-consumer code.
case (
Started(pld=pld) # sync phase
|Yield(pld=pld) # streaming phase
|Return(pld=pld) # termination phase
):
try:
pld: PayloadT = self._pld_dec.decode(pld)
log.runtime(
'Decoded msg payload\n\n'
f'{msg}\n'
f'where payload decoded as\n'
f'|_pld={pld!r}\n'
)
return pld
# XXX pld-value type failure
except ValidationError as valerr:
# pack the mte into an error-msg for
# reraise below; ensure remote-actor-err
# info is displayed nicely?
mte: MsgTypeError = _mk_recv_mte(
msg=msg,
codec=self.pld_dec,
src_validation_error=valerr,
is_invalid_payload=True,
expected_msg=expect_msg,
)
# NOTE: just raise the MTE inline instead of all
# the pack-unpack-repack non-sense when this is
# a "send side" validation error.
if is_started_send_side:
raise mte
# NOTE: the `.message` is automatically
# transferred into the message as long as we
# define it as a `Error.message` field.
err_msg: Error = pack_error(
exc=mte,
cid=msg.cid,
src_uid=(
ipc.chan.uid
if not is_started_send_side
else ipc._actor.uid
),
)
mte._ipc_msg = err_msg
# XXX override the `msg` passed to
# `_raise_from_unexpected_msg()` (below) so
# that we're effectively able to use that same
# func to unpack and raise an "emulated remote
# `Error`" of this local MTE.
msg = err_msg
# XXX NOTE: so when the `_raise_from_unexpected_msg()`
# raises the boxed `err_msg` from above it raises
# it from the above caught interchange-lib
# validation error.
src_err = valerr
# a runtime-internal RPC endpoint response.
# always passthrough since (internal) runtime
# responses are generally never exposed to consumer
# code.
case CancelAck(
pld=bool(cancelled)
):
return cancelled
case Error():
src_err = MessagingError(
'IPC ctx dialog terminated without `Return`-ing a result\n'
f'Instead it raised {msg.boxed_type_str!r}!'
)
# XXX NOTE XXX another super subtle runtime-y thing..
#
# - when user code (transitively) calls into this
# func (usually via a `Context/MsgStream` API) we
# generally want errors to propagate immediately
# and directly so that the user can define how it
# wants to handle them.
#
# HOWEVER,
#
# - for certain runtime calling cases, we don't want to
# directly raise since the calling code might have
# special logic around whether to raise the error
# or suppress it silently (eg. a `ContextCancelled`
# received from the far end which was requested by
# this side, aka a self-cancel).
#
# SO, we offer a flag to control this.
if not raise_error:
return src_err
case Stop(cid=cid):
ctx: Context = getattr(ipc, 'ctx', ipc)
message: str = (
f'{ctx.side!r}-side of ctx received stream-`Stop` from '
f'{ctx.peer_side!r} peer ?\n'
f'|_cid: {cid}\n\n'
f'{pretty_struct.pformat(msg)}\n'
)
if ctx._stream is None:
explain: str = (
f'BUT, no `MsgStream` (was) open(ed) on this '
f'{ctx.side!r}-side of the IPC ctx?\n'
f'Maybe check your code for streaming phase race conditions?\n'
)
log.warning(
message
+
explain
)
# let caller decide what to do when only one
# side opened a stream, don't raise.
return msg
else:
explain: str = (
'Received a `Stop` when it should NEVER be possible!?!?\n'
)
# TODO: this is constructed inside
# `_raise_from_unexpected_msg()` but maybe we
# should pass it in?
# src_err = trio.EndOfChannel(explain)
src_err = None
case _:
src_err = InternalError(
'Invalid IPC msg ??\n\n'
f'{msg}\n'
)
# TODO: maybe use the new `.add_note()` from 3.11?
# |_https://docs.python.org/3.11/library/exceptions.html#BaseException.add_note
#
# fallthrough and raise from `src_err`
try:
_raise_from_unexpected_msg(
ctx=getattr(ipc, 'ctx', ipc),
msg=msg,
src_err=src_err,
log=log,
expect_msg=expect_msg,
hide_tb=hide_tb,
)
except UnboundLocalError:
# XXX if there's an internal lookup error in the above
# code (prolly on `src_err`) we want to show this frame
# in the tb!
__tracebackhide__: bool = False
raise
dec_msg = decode_pld
async def recv_msg_w_pld(
self,
ipc: Context|MsgStream,
expect_msg: MsgType,
# NOTE: generally speaking only for handling `Stop`-msgs that
# arrive during a call to `drain_to_final_msg()` above!
passthrough_non_pld_msgs: bool = True,
hide_tb: bool = True,
**kwargs,
) -> tuple[MsgType, PayloadT]:
'''
Retrieve the next avail IPC msg, decode its payload, and return
the pair of refs.
'''
__tracebackhide__: bool = hide_tb
msg: MsgType = await ipc._rx_chan.receive()
if passthrough_non_pld_msgs:
match msg:
case Stop():
return msg, None
# TODO: is there some way we can inject the decoded
# payload into an existing output buffer for the original
# msg instance?
pld: PayloadT = self.decode_pld(
msg,
ipc=ipc,
expect_msg=expect_msg,
hide_tb=hide_tb,
**kwargs,
)
return msg, pld
@cm
def limit_plds(
spec: Union[Type[Struct]],
**dec_kwargs,
) -> MsgDec:
'''
Apply a `MsgDec` that will natively decode the SC-msg set's
`PayloadMsg.pld: Union[Type[Struct]]` payload fields using
tagged-unions of `msgspec.Struct`s from the input `spec`
for the current IPC `Context`'s `PldRx`.
'''
__tracebackhide__: bool = True
try:
curr_ctx: Context = current_ipc_ctx()
rx: PldRx = curr_ctx._pld_rx
orig_pldec: MsgDec = rx.pld_dec
with rx.limit_plds(
spec=spec,
**dec_kwargs,
) as pldec:
log.runtime(
'Applying payload-decoder\n\n'
f'{pldec}\n'
)
yield pldec
finally:
log.runtime(
'Reverted to previous payload-decoder\n\n'
f'{orig_pldec}\n'
)
# sanity on orig settings
assert rx.pld_dec is orig_pldec
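# A usage sketch (assumption, all names are hypothetical): from inside
# a streaming endpoint, limit the expected payload type for the
# current IPC ctx,
#
# @tractor.context
# async def my_endpoint(ctx: Context) -> None:
#     await ctx.started()
#     with limit_plds(MyStruct):
#         async with ctx.open_stream() as stream:
#             async for pld in stream:
#                 ...  # each `pld` now validates as a `MyStruct`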
@acm
async def maybe_limit_plds(
ctx: Context,
spec: Union[Type[Struct]]|None = None,
dec_hook: Callable|None = None,
**kwargs,
) -> MsgDec|None:
'''
Async compat maybe-payload type limiter.
Mostly for use inside other internal `@acm`s such that a separate
indent block isn't needed when an async one is already being
used.
'''
if (
spec is None
and
dec_hook is None
):
yield None
return
# sanity check on IPC scoping
curr_ctx: Context = current_ipc_ctx()
assert ctx is curr_ctx
with ctx._pld_rx.limit_plds(
spec=spec,
dec_hook=dec_hook,
**kwargs,
) as msgdec:
yield msgdec
# when the applied spec is unwound/removed, the same IPC-ctx
# should still be in scope.
curr_ctx: Context = current_ipc_ctx()
assert ctx is curr_ctx
async def drain_to_final_msg(
ctx: Context,
hide_tb: bool = True,
msg_limit: int = 6,
) -> tuple[
Return|None,
list[MsgType]
]:
'''
Drain IPC msgs delivered to the underlying IPC context's
rx-mem-chan (i.e. from `Context._rx_chan`) in search of a final
`Return` or `Error` msg.
Deliver the `Return` + preceding drained msgs (`list[MsgType]`)
as a pair unless an `Error` is found, in which case unpack and raise
it.
The motivation here is to always capture any remote error relayed
by the remote peer task during a ctxc condition.
For eg. a ctxc-request may be sent to the peer as part of the
local task's (request for) cancellation but then that same task
**also errors** before executing the teardown in the
`Portal.open_context().__aexit__()` block. In such error-on-exit
cases we want to always capture and raise any delivered remote
error (like an expected ctxc-ACK) as part of the final
`ctx.wait_for_result()` teardown sequence such that the
`Context.outcome` related state always reflect what transpired
even after ctx closure and the `.open_context()` block exit.
'''
__tracebackhide__: bool = hide_tb
raise_overrun: bool = not ctx._allow_overruns
# wait for a final context result by collecting (but
# basically ignoring) any bi-dir-stream msgs still in transit
# from the far end.
pre_result_drained: list[MsgType] = []
result_msg: Return|Error|None = None
while not (
ctx.maybe_error
and not ctx._final_result_is_set()
):
try:
# receive all msgs, scanning for either a final result
# or error; the underlying call should never raise any
# remote error directly!
msg, pld = await ctx._pld_rx.recv_msg_w_pld(
ipc=ctx,
expect_msg=Return,
raise_error=False,
hide_tb=hide_tb,
)
# ^-TODO-^ some bad ideas?
# -[ ] wrap final outcome .receive() in a scope so
# it can be cancelled out of band if needed?
# |_with trio.CancelScope() as res_cs:
# ctx._res_scope = res_cs
# msg: dict = await ctx._rx_chan.receive()
# if res_cs.cancelled_caught:
#
# -[ ] make sure pause points work here for REPLing
# the runtime itself; i.e. ensure there's no hangs!
# |_from tractor.devx._debug import pause
# await pause()
# NOTE: we get here if the far end was
# `ContextCancelled` in 2 cases:
# 1. we requested the cancellation and thus
# SHOULD NOT raise that far end error,
# 2. WE DID NOT REQUEST that cancel and thus
# SHOULD RAISE HERE!
except trio.Cancelled as _taskc:
taskc: trio.Cancelled = _taskc
# report when the cancellation wasn't (ostensibly) due to
# RPC operation, some surrounding parent cancel-scope.
if not ctx._scope.cancel_called:
task: trio.lowlevel.Task = trio.lowlevel.current_task()
rent_n: trio.Nursery = task.parent_nursery
if (
(local_cs := rent_n.cancel_scope).cancel_called
):
log.cancel(
'RPC-ctx cancelled by local-parent scope during drain!\n\n'
f'c}}>\n'
f' |_{rent_n}\n'
f' |_.cancel_scope = {local_cs}\n'
f' |_>c}}\n'
f' |_{ctx.pformat(indent=" "*9)}'
# ^TODO, some (other) simpler repr here?
)
__tracebackhide__: bool = False
# CASE 2: mask the local cancelled-error(s)
# only when we are sure the remote error is
# the source cause of this local task's
# cancellation.
ctx.maybe_raise(
hide_tb=hide_tb,
from_src_exc=taskc,
# ?TODO? when *should* we use this?
)
# CASE 1: we DID request the cancel we simply
# continue to bubble up as normal.
raise taskc
match msg:
# final result arrived!
case Return():
log.runtime(
'Context delivered final draining msg:\n'
f'{pretty_struct.pformat(msg)}'
)
ctx._result: Any = pld
result_msg = msg
break
# far end task is still streaming to us so discard
# and report depending on local ctx state.
case Yield():
pre_result_drained.append(msg)
if (
(ctx._stream.closed
and (reason := 'stream was already closed')
)
or (ctx.cancel_acked
and (reason := 'ctx cancelled other side')
)
or (ctx._cancel_called
and (reason := 'ctx called `.cancel()`')
)
or (len(pre_result_drained) > msg_limit
and (reason := f'"yield" limit={msg_limit}')
)
):
log.cancel(
'Cancelling `MsgStream` drain since '
f'{reason}\n\n'
f'<= {ctx.chan.uid}\n'
f' |_{ctx._nsf}()\n\n'
f'=> {ctx._task}\n'
f' |_{ctx._stream}\n\n'
f'{pretty_struct.pformat(msg)}\n'
)
break
# drain up to the `msg_limit` hoping to get
# a final result or error/ctxc.
else:
log.warning(
'Ignoring "yield" msg during `ctx.result()` drain..\n'
f'<= {ctx.chan.uid}\n'
f' |_{ctx._nsf}()\n\n'
f'=> {ctx._task}\n'
f' |_{ctx._stream}\n\n'
f'{pretty_struct.pformat(msg)}\n'
)
continue
# stream terminated, but no result yet..
#
# TODO: work out edge cases here where
# a stream is open but the task also calls
# this?
# -[ ] should be a runtime error if a stream is open right?
# Stop()
case Stop():
pre_result_drained.append(msg)
log.runtime( # normal/expected shutdown transaction
'Remote stream terminated due to "stop" msg:\n\n'
f'{pretty_struct.pformat(msg)}\n'
)
continue
# remote error msg, likely already handled inside
# `Context._deliver_msg()`
case Error():
# TODO: can we replace this with `ctx.maybe_raise()`?
# -[ ] would this be handier for this case maybe?
# |_async with maybe_raise_on_exit() as raises:
# if raises:
# log.error('some msg about raising..')
#
re: Exception|None = ctx._remote_error
if re:
assert msg is ctx._cancel_msg
# NOTE: this solved a super duper edge case XD
# this was THE super duper edge case of:
# - local task opens a remote task,
# - requests remote cancellation of far end
# ctx/tasks,
# - needs to wait for the cancel ack msg
# (ctxc) or some result in the race case
# where the other side's task returns
# before the cancel request msg is ever
# rxed and processed,
# - here this surrounding drain loop (which
# iterates all ipc msgs until the ack or
# an early result arrives) was NOT exiting
# since we are the edge case: local task
# does not re-raise any ctxc it receives
# IFF **it** was the cancellation
# requester..
#
# XXX will raise if necessary but otherwise break
# from loop presuming any suppressed error
# (ctxc) should terminate the context!
ctx._maybe_raise_remote_err(
re,
# NOTE: obvi we don't care if we
# overran the far end if we're already
# waiting on a final result (msg).
# raise_overrun_from_self=False,
raise_overrun_from_self=raise_overrun,
)
result_msg = msg
break # OOOOOF, yeah obvi we need this..
else:
# bubble the original src key error
raise
# XXX should pretty much never get here unless someone
# overrides the default `MsgType` spec.
case _:
pre_result_drained.append(msg)
# It's definitely an internal error if any other
# msg type without a `'cid'` field arrives here!
report: str = (
f'Invalid or unknown msg type {type(msg)!r}!?\n'
)
if not msg.cid:
report += (
'\nWhich also has no `.cid` field?\n'
)
raise MessagingError(
report
+
f'\n{msg}\n'
)
else:
log.cancel(
'Skipping `MsgStream` drain since final outcome is set\n\n'
f'{ctx.outcome}\n'
)
return (
result_msg,
pre_result_drained,
)
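For orientation, a purely illustrative sketch (not part of this diff) of the user-facing behaviour the drain loop above provides: even when the local task errors before leaving the `Portal.open_context()` block, the final drain still runs inside `__aexit__()` so the ctx's outcome state is settled by the time control returns. Here `portal` is a connected `Portal` and `echo` a `@tractor.context`-decorated endpoint, both hypothetical names for this example only.

async def _drain_example(portal) -> None:
    try:
        async with portal.open_context(echo) as (ctx, first):
            # error-on-exit case: the local task errors before
            # the block's normal teardown..
            raise RuntimeError('local failure before __aexit__')
    except RuntimeError:
        # ..but the drain loop above already ran during
        # `.open_context().__aexit__()`, so any delivered remote
        # error/ctxc-ACK is reflected in the ctx's outcome state.
        print(ctx.outcome)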
def validate_payload_msg(
pld_msg: Started|Yield|Return,
pld_value: PayloadT,
ipc: Context|MsgStream,
raise_mte: bool = True,
strict_pld_parity: bool = False,
hide_tb: bool = True,
) -> MsgTypeError|None:
'''
Validate a `PayloadMsg.pld` value with the current
IPC ctx's `PldRx` and raise an appropriate `MsgTypeError`
on failure.
'''
__tracebackhide__: bool = hide_tb
codec: MsgCodec = current_codec()
msg_bytes: bytes = codec.encode(pld_msg)
try:
roundtripped: Started = codec.decode(msg_bytes)
ctx: Context = getattr(ipc, 'ctx', ipc)
pld: PayloadT = ctx.pld_rx.decode_pld(
msg=roundtripped,
ipc=ipc,
expect_msg=Started,
hide_tb=hide_tb,
is_started_send_side=True,
)
if (
strict_pld_parity
and
pld != pld_value
):
# TODO: make that one a mod func too..
diff = pretty_struct.Struct.__sub__(
roundtripped,
pld_msg,
)
complaint: str = (
'Started value does not match after roundtrip?\n\n'
f'{diff}'
)
raise ValidationError(complaint)
# raise any msg type error NO MATTER WHAT!
except ValidationError as verr:
try:
mte: MsgTypeError = _mk_recv_mte(
msg=roundtripped,
codec=codec,
src_validation_error=verr,
verb_header='Trying to send ',
is_invalid_payload=True,
)
except BaseException:
__tracebackhide__: bool = False
raise
if not raise_mte:
return mte
raise mte from verr
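An illustrative usage sketch (not from the diff): pre-validate a would-be `Started` payload against the ctx's applied payload-spec before sending, receiving any `MsgTypeError` as a value instead of an exception. `ctx` and `value` are assumed to already exist for the example.

maybe_mte: MsgTypeError|None = validate_payload_msg(
    pld_msg=Started(cid=ctx.cid, pld=value),
    pld_value=value,
    ipc=ctx,
    raise_mte=False,  # hand back the error instead of raising
)
if maybe_mte is not None:
    ...  # report/handle locally rather than sending a bad `Started`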

View File

@ -1,292 +0,0 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Prettified version of `msgspec.Struct` for easier console grokking.
'''
from __future__ import annotations
from collections import UserList
from typing import (
Any,
Iterator,
)
from msgspec import (
msgpack,
Struct as _Struct,
structs,
)
from pprint import (
saferepr,
)
from tractor.log import get_logger
log = get_logger()
# TODO: auto-gen type sig for input func both for
# type-msgs and logging of RPC tasks?
# taken and modified from:
# https://stackoverflow.com/a/57110117
# import inspect
# from typing import List
# def my_function(input_1: str, input_2: int) -> list[int]:
# pass
# def types_of(func):
# specs = inspect.getfullargspec(func)
# return_type = specs.annotations['return']
# input_types = [t.__name__ for s, t in specs.annotations.items() if s != 'return']
# return f'{func.__name__}({": ".join(input_types)}) -> {return_type}'
# types_of(my_function)
class DiffDump(UserList):
'''
Very simple list delegator that repr() dumps (presumed) tuple
elements of the form `tuple[str, Any, Any]` in a nice
multi-line readable form for analyzing `Struct` diffs.
'''
def __repr__(self) -> str:
if not len(self):
return super().__repr__()
# format by displaying item pair's ``repr()`` on multiple,
# indented lines such that they are more easily visually
# comparable when printed to console.
repstr: str = '[\n'
for k, left, right in self:
repstr += (
f'({k},\n'
f'\t{repr(left)},\n'
f'\t{repr(right)},\n'
')\n'
)
repstr += ']\n'
return repstr
def iter_fields(struct: Struct) -> Iterator[
tuple[
structs.FieldInfo,
str,
Any,
]
]:
'''
Iterate over all non-@property fields of this struct.
'''
fi: structs.FieldInfo
for fi in structs.fields(struct):
key: str = fi.name
val: Any = getattr(struct, key)
yield (
fi,
key,
val,
)
def pformat(
struct: Struct,
field_indent: int = 2,
indent: int = 0,
) -> str:
'''
Recursion-safe `pprint.pformat()` style formatting of
a `msgspec.Struct` for sane reading by a human using a REPL.
'''
# global whitespace indent
ws: str = ' '*indent
# field whitespace indent
field_ws: str = ' '*(field_indent + indent)
# qtn: str = ws + struct.__class__.__qualname__
qtn: str = struct.__class__.__qualname__
obj_str: str = '' # accumulator
fi: structs.FieldInfo
k: str
v: Any
for fi, k, v in iter_fields(struct):
# TODO: how can we prefer `Literal['option1', 'option2',
# ..]` over .__name__ == `Literal` but still get only the
# latter for simple types like `str | int | None` etc..?
ft: type = fi.type
typ_name: str = getattr(ft, '__name__', str(ft))
# recurse to get sub-struct's `.pformat()` output Bo
if isinstance(v, Struct):
val_str: str = v.pformat(
indent=field_indent + indent,
field_indent=indent + field_indent,
)
else: # the `pprint` recursion-safe format:
# https://docs.python.org/3.11/library/pprint.html#pprint.saferepr
try:
val_str: str = saferepr(v)
except Exception:
log.exception(
f'Failed to `saferepr({type(struct)})` !?\n'
)
return _Struct.__repr__(struct)
# TODO: LOLOL use `textwrap.indent()` instead dawwwwwg!
obj_str += (field_ws + f'{k}: {typ_name} = {val_str},\n')
return (
f'{qtn}(\n'
f'{obj_str}'
f'{ws})'
)
class Struct(
_Struct,
# https://jcristharif.com/msgspec/structs.html#tagged-unions
# tag='pikerstruct',
# tag=True,
):
'''
A "human friendlier" (aka repl buddy) struct subtype.
'''
def to_dict(
self,
include_non_members: bool = True,
) -> dict:
'''
Like it sounds.. direct delegation to:
https://jcristharif.com/msgspec/api.html#msgspec.structs.asdict
BUT, by default we pop all non-member (aka not defined as
struct fields) fields.
'''
asdict: dict = structs.asdict(self)
if include_non_members:
return asdict
# only return a dict of the struct members
# which were provided as input, NOT anything
# added as type-defined `@property` methods!
sin_props: dict = {}
fi: structs.FieldInfo
for fi, k, v in iter_fields(self):
sin_props[k] = asdict[k]
return sin_props
pformat = pformat
# __repr__ = pformat
# __str__ = __repr__ = pformat
# TODO: use a pprint.PrettyPrinter instance around ONLY rendering
# inside a known tty?
# def __repr__(self) -> str:
# ...
def __repr__(self) -> str:
try:
return pformat(self)
except Exception:
log.exception(
f'Failed to `pformat({type(self)})` !?\n'
)
return _Struct.__repr__(self)
def copy(
self,
update: dict | None = None,
) -> Struct:
'''
Validate-typecast all self defined fields, return a copy of
us with all such fields.
NOTE: This is kinda like the default behaviour in
`pydantic.BaseModel` except a copy of the object is
returned making it compat with `frozen=True`.
'''
if update:
for k, v in update.items():
setattr(self, k, v)
# NOTE: roundtrip serialize to validate
# - encode to msgpack binary format,
# - decode that back to a struct.
return msgpack.Decoder(type=type(self)).decode(
msgpack.Encoder().encode(self)
)
def typecast(
self,
# TODO: allow only casting a named subset?
# fields: set[str] | None = None,
) -> None:
'''
Cast all fields using their declared type annotations
(kinda like what `pydantic` does by default).
NOTE: this of course won't work on frozen types, use
``.copy()`` above in such cases.
'''
# https://jcristharif.com/msgspec/api.html#msgspec.structs.fields
fi: structs.FieldInfo
for fi in structs.fields(self):
setattr(
self,
fi.name,
fi.type(getattr(self, fi.name)),
)
def __sub__(
self,
other: Struct,
) -> DiffDump[tuple[str, Any, Any]]:
'''
Compare fields/items key-wise and return a ``DiffDump``
for easy visual REPL comparison B)
'''
diffs: DiffDump[tuple[str, Any, Any]] = DiffDump()
for fi in structs.fields(self):
attr_name: str = fi.name
ours: Any = getattr(self, attr_name)
theirs: Any = getattr(other, attr_name)
if ours != theirs:
diffs.append((
attr_name,
ours,
theirs,
))
return diffs
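A small, purely illustrative sketch (not in the diff) of the repl-friendly behaviour: a subtype gets the multi-line `pformat()` output via `repr()` and field-wise diffs via `-`.

class Point(Struct):
    x: int
    y: int

p = Point(x=1, y=2)
q = Point(x=1, y=3)
print(p)        # multi-line `pformat()`-styled repr
print(p - q)    # `DiffDump` listing the differing 'y' field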

View File

@ -1,139 +0,0 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
IPC-compat cross-mem-boundary object pointer.
'''
# TODO: integration with our ``enable_modules: list[str]`` caps sys.
# ``pkgutil.resolve_name()`` internally uses
# ``importlib.import_module()`` which can be filtered by inserting
# a ``MetaPathFinder`` into ``sys.meta_path`` (which we could do before
# entering the ``_runtime.process_messages()`` loop).
# - https://github.com/python/cpython/blob/main/Lib/pkgutil.py#L645
# - https://stackoverflow.com/questions/1350466/preventing-python-code-from-importing-certain-modules
# - https://stackoverflow.com/a/63320902
# - https://docs.python.org/3/library/sys.html#sys.meta_path
# the new "Implicit Namespace Packages" might be relevant?
# - https://www.python.org/dev/peps/pep-0420/
# add implicit serialized message type support so that paths can be
# handed directly to IPC primitives such as streams and `Portal.run()`
# calls:
# - via ``msgspec``:
# - https://jcristharif.com/msgspec/api.html#struct
# - https://jcristharif.com/msgspec/extending.html
# via ``msgpack-python``:
# - https://github.com/msgpack/msgpack-python#packingunpacking-of-custom-data-type
from __future__ import annotations
from inspect import (
isfunction,
ismethod,
)
from pkgutil import resolve_name
class NamespacePath(str):
'''
A serializable `str`-subtype implementing a "namespace
pointer" to any Python object reference (like a function)
using the same format as the built-in `pkgutil.resolve_name()`
system.
A value describes a target's module-path and namespace-key
separated by a ':' and thus can be easily used as
an IPC-message-native reference-type allowing memory-isolated
actors to point-and-load objects via a minimal `str` value.
'''
_ref: object | type | None = None
# TODO: support providing the ns instance in
# order to support 'self.<meth>` style to make
# `Portal.run_from_ns()` work!
# _ns: ModuleType|type|None = None
def load_ref(self) -> object | type:
if self._ref is None:
self._ref = resolve_name(self)
return self._ref
@staticmethod
def _mk_fqnp(
ref: type|object,
) -> tuple[str, str]:
'''
Generate a minimal `str` pair which describes a Python
object's namespace path and object/type name.
In more precise terms something like:
- 'py.namespace.path:object_name',
- eg.'tractor.msg:NamespacePath' will be the ``str`` form
of THIS type XD
'''
if isfunction(ref):
name: str = getattr(ref, '__name__')
mod_name: str = ref.__module__
elif ismethod(ref):
# build out the path manually i guess..?
# TODO: better way?
name: str = '.'.join([
type(ref.__self__).__name__,
ref.__func__.__name__,
])
mod_name: str = ref.__self__.__module__
else: # object or other?
# isinstance(ref, object)
# and not isfunction(ref)
name: str = type(ref).__name__
mod_name: str = ref.__module__
# TODO: return static value directly?
#
# fully qualified namespace path, tuple.
fqnp: tuple[str, str] = (
mod_name,
name,
)
return fqnp
@classmethod
def from_ref(
cls,
ref: type|object,
) -> NamespacePath:
fqnp: tuple[str, str] = cls._mk_fqnp(ref)
return cls(':'.join(fqnp))
def to_tuple(
self,
# TODO: could this work re `self:<meth>` case from above?
# load_ref: bool = True,
) -> tuple[str, str]:
return self._mk_fqnp(
self.load_ref()
)
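An illustrative round-trip (not part of the diff), assuming a hypothetical module `mypkg.tasks` that exposes a function `fetch()`:

nsp = NamespacePath('mypkg.tasks:fetch')   # plain `str` subtype
fetch = nsp.load_ref()                     # lazily resolves via `pkgutil.resolve_name()`
assert NamespacePath.from_ref(fetch) == nsp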

View File

@ -1,730 +0,0 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Define our strictly typed IPC message spec for the SCIPP:
that is,
the "Structurred-Concurrency-Inter-Process-(dialog)-(un)Protocol".
'''
from __future__ import annotations
import types
from typing import (
Any,
Generic,
Literal,
Type,
TypeVar,
TypeAlias,
Union,
)
from msgspec import (
defstruct,
# field,
Raw,
Struct,
# UNSET,
# UnsetType,
)
from tractor.msg import (
pretty_struct,
)
from tractor.log import get_logger
log = get_logger('tractor.msgspec')
# type variable for the boxed payload field `.pld`
PayloadT = TypeVar('PayloadT')
class PayloadMsg(
Struct,
Generic[PayloadT],
# https://jcristharif.com/msgspec/structs.html#tagged-unions
tag=True,
tag_field='msg_type',
# https://jcristharif.com/msgspec/structs.html#field-ordering
# kw_only=True,
# https://jcristharif.com/msgspec/structs.html#equality-and-order
# order=True,
# https://jcristharif.com/msgspec/structs.html#encoding-decoding-as-arrays
# as_array=True,
):
'''
An abstract payload boxing/shuttling IPC msg type.
Boxes data-values passed to/from user code
(i.e. any values passed by `tractor` application code using any of
|_ `._streaming.MsgStream.send/receive()`
|_ `._context.Context.started/result()`
|_ `._ipc.Channel.send/recv()`
aka our "IPC primitive APIs")
as message "payloads" set to the `.pld` field and uses
`msgspec`'s "tagged unions" feature to support a subset of our
"SC-transitive shuttle protocol" specification with
a `msgspec.Struct` inheritance tree.
'''
cid: str # call/context-id
# ^-TODO-^: more explicit type?
# -[ ] use UNSET here?
# https://jcristharif.com/msgspec/supported-types.html#unset
#
# -[ ] `uuid.UUID` which has multi-protocol support
# https://jcristharif.com/msgspec/supported-types.html#uuid
# The msg's "payload" (spelled without vowels):
# https://en.wikipedia.org/wiki/Payload_(computing)
pld: Raw
# ^-NOTE-^ inherited from any `PayloadMsg` (and maybe type
# overridden via the `._ops.limit_plds()` API), but by default is
# parameterized to be `Any`.
#
# XXX this `Union` must strictly NOT contain `Any` if
# a limited msg-type-spec is intended, such that when
# creating and applying a new `MsgCodec` its
# `.decoder: Decoder` is configured with a `Union[Type[Struct]]` which
# restricts the allowed payload content (this `.pld` field)
# by type system defined loading constraints B)
#
# TODO: could also be set to `msgspec.Raw` if the sub-decoders
# approach is preferred over the generic parameterization
# approach as take by `mk_msg_spec()` below.
# TODO: complete rename
Msg = PayloadMsg
class Aid(
Struct,
tag=True,
tag_field='msg_type',
):
'''
Actor-identity msg.
Initial contact exchange enabling an actor "mailbox handshake"
delivering the peer identity (and maybe eventually contact)
info.
Used by discovery protocol to register actors as well as
conduct the initial comms (capability) filtering.
'''
name: str
uuid: str
# TODO: use built-in support for UUIDs?
# -[ ] `uuid.UUID` which has multi-protocol support
# https://jcristharif.com/msgspec/supported-types.html#uuid
class SpawnSpec(
pretty_struct.Struct,
tag=True,
tag_field='msg_type',
):
'''
Initial runtime spec handed down from a spawning parent to its
child subactor immediately following first contact via an
`Aid` msg.
'''
# TODO: similar to the `Start` kwargs spec needed below, we need
# a hard `Struct` def for all of these fields!
_parent_main_data: dict
_runtime_vars: dict[str, Any]
# module import capability
enable_modules: dict[str, str]
# TODO: not just sockaddr pairs?
# -[ ] abstract into a `TransportAddr` type?
reg_addrs: list[tuple[str, int]]
bind_addrs: list[tuple[str, int]]
# TODO: caps based RPC support in the payload?
#
# -[ ] integration with our ``enable_modules: list[str]`` caps sys.
# ``pkgutil.resolve_name()`` internally uses
# ``importlib.import_module()`` which can be filtered by
# inserting a ``MetaPathFinder`` into ``sys.meta_path`` (which
# we could do before entering the ``Actor._process_messages()``
# loop)?
# - https://github.com/python/cpython/blob/main/Lib/pkgutil.py#L645
# - https://stackoverflow.com/questions/1350466/preventing-python-code-from-importing-certain-modules
# - https://stackoverflow.com/a/63320902
# - https://docs.python.org/3/library/sys.html#sys.meta_path
#
# -[ ] can we combine .ns + .func into a native `NamespacePath` field?
#
# -[ ] better name, like `Call/TaskInput`?
#
# -[ ] XXX a debugger lock msg transaction with payloads like,
# child -> `.pld: DebugLock` -> root
# child <- `.pld: DebugLocked` <- root
# child -> `.pld: DebugRelease` -> root
#
# WHY => when a pld spec is provided it might not allow for
# debug mode msgs as they currently are (using plain old `pld.
# str` payloads) so we only when debug_mode=True we need to
# union in this debugger payload set?
#
# mk_msg_spec(
# MyPldSpec,
# debug_mode=True,
# ) -> (
# Union[MyPldSpec]
# | Union[DebugLock, DebugLocked, DebugRelease]
# )
# class Params(
# Struct,
# Generic[PayloadT],
# ):
# spec: PayloadT|ParamSpec
# inputs: InputsT|dict[str, Any]
# TODO: for eg. we could stringently check the target
# task-func's type sig and enforce it?
# as an example for an IPTC,
# @tractor.context
# async def send_back_nsp(
# ctx: Context,
# expect_debug: bool,
# pld_spec_str: str,
# add_hooks: bool,
# started_msg_dict: dict,
# ) -> <WhatHere!>:
# TODO: figure out which of the `typing` feats we want to
# support:
# - plain ol `ParamSpec`:
# https://docs.python.org/3/library/typing.html#typing.ParamSpec
# - new in 3.12 type parameter lists Bo
# |_ https://docs.python.org/3/reference/compound_stmts.html#type-params
# |_ historical pep 695: https://peps.python.org/pep-0695/
# |_ full lang spec: https://typing.readthedocs.io/en/latest/spec/
# |_ on annotation scopes:
# https://docs.python.org/3/reference/executionmodel.html#annotation-scopes
# spec: ParamSpec[
# expect_debug: bool,
# pld_spec_str: str,
# add_hooks: bool,
# started_msg_dict: dict,
# ]
# TODO: possibly sub-type for runtime method requests?
# -[ ] `Runtime(Start)` with a `.ns: str = 'self' or
# we can just enforce any such method as having a strict
# ns for calling funcs, namely the `Actor` instance?
class Start(
Struct,
tag=True,
tag_field='msg_type',
):
'''
Initial request to remotely schedule an RPC `trio.Task` via
`Actor.start_remote_task()`.
It is called by all the following public APIs:
- `ActorNursery.run_in_actor()`
- `Portal.run()`
`|_.run_from_ns()`
`|_.open_stream_from()`
`|_._submit_for_result()`
- `Context.open_context()`
'''
cid: str
ns: str
func: str
# TODO: make this a sub-struct which can be further
# type-limited, maybe `Inputs`?
# => SEE ABOVE <=
kwargs: dict[str, Any]
uid: tuple[str, str] # (calling) actor-id
# TODO: enforcing a msg-spec in terms `Msg.pld`
# parameterizable msgs to be used in the appls IPC dialog.
# => SEE `._codec.MsgDec` for more <=
pld_spec: str = str(Any)
class StartAck(
Struct,
tag=True,
tag_field='msg_type',
):
'''
Init response to a `Start` request indicating the far
end's RPC spec, namely its callable "type".
'''
cid: str
# TODO: maybe better names for all these?
# -[ ] obvi ^ would need sync with `._rpc`
functype: Literal[
'asyncfunc',
'asyncgen',
'context', # TODO: the only one eventually?
]
# import typing
# eval(str(Any), {}, {'typing': typing})
# started_spec: str = str(Any)
# return_spec
class Started(
PayloadMsg,
Generic[PayloadT],
):
'''
Packet to shuttle the "first value" delivered by
`Context.started(value: Any)` from a `@tractor.context`
decorated IPC endpoint.
'''
pld: PayloadT|Raw
# TODO: cancel request dedicated msg?
# -[ ] instead of using our existing `Start`?
#
# class Cancel:
# cid: str
class Yield(
PayloadMsg,
Generic[PayloadT],
):
'''
Per IPC transmission of a value from `await MsgStream.send(<value>)`.
'''
pld: PayloadT|Raw
class Stop(
Struct,
tag=True,
tag_field='msg_type',
):
'''
Stream termination signal much like an IPC version
of `StopAsyncIteration`.
'''
cid: str
# TODO: do we want to support a payload on stop?
# pld: UnsetType = UNSET
# TODO: is `Result` or `Out[come]` a better name?
class Return(
PayloadMsg,
Generic[PayloadT],
):
'''
Final `return <value>` from a remotely scheduled
func-as-`trio.Task`.
'''
pld: PayloadT|Raw
class CancelAck(
PayloadMsg,
Generic[PayloadT],
):
'''
Deliver the `bool` return-value from a cancellation `Actor`
method scheduled via a prior RPC request.
- `Actor.cancel()`
`|_.cancel_soon()`
`|_.cancel_rpc_tasks()`
`|_._cancel_task()`
`|_.cancel_server()`
RPCs to these methods must **always** be able to deliver a result
despite the currently configured IPC msg spec such that graceful
cancellation is always functional in the runtime.
'''
pld: bool
# TODO: unify this with `._exceptions.RemoteActorError`
# such that we can have a msg which is both raisable and
# IPC-wire ready?
# B~o
class Error(
Struct,
tag=True,
tag_field='msg_type',
# TODO may omit defaults?
# https://jcristharif.com/msgspec/structs.html#omitting-default-values
# omit_defaults=True,
):
'''
A pkt that wraps `RemoteActorError`s for relay and raising.
Fields are 1-to-1 meta-data as needed originally by
`RemoteActorError.msgdata: dict` but now are defined here.
Note: this msg shuttles `ContextCancelled` and `StreamOverrun`
as well as being used to rewrap any `MsgTypeError` for relay-response
to bad `Yield.pld` senders during an IPC ctx's streaming dialog
phase.
'''
src_uid: tuple[str, str]
src_type_str: str
boxed_type_str: str
relay_path: list[tuple[str, str]]
# normally either both are provided or just
# a message for certain special cases where
# we pack a message for a locally raised
# mte or ctxc.
message: str|None = None
tb_str: str = ''
# TODO: only optionally include sub-type specific fields?
# -[ ] use UNSET or don't include them via `omit_defaults` (see
# inheritance-line options above)
#
# `ContextCancelled` reports the src cancelling `Actor.uid`
canceller: tuple[str, str]|None = None
# `StreamOverrun`-specific src `Actor.uid`
sender: tuple[str, str]|None = None
# `MsgTypeError` meta-data
cid: str|None = None
# when the receiver side fails to decode a delivered
# `PayloadMsg`-subtype; one and/or both the msg-struct instance
# and `Any`-decoded to `dict` of the msg are set and relayed
# (back to the sender) for introspection.
_bad_msg: Started|Yield|Return|None = None
_bad_msg_as_dict: dict|None = None
def from_dict_msg(
dict_msg: dict,
msgT: MsgType|None = None,
tag_field: str = 'msg_type',
use_pretty: bool = False,
) -> MsgType:
'''
Helper to build a specific `MsgType` struct from a "vanilla"
decoded `dict`-ified equivalent of the msg: i.e. if the
`msgpack.Decoder.type == Any`, the default when using
`msgspec.msgpack` and not "typed decoding" using
`msgspec.Struct`.
'''
msg_type_tag_field: str = (
msgT.__struct_config__.tag_field
if msgT is not None
else tag_field
)
# XXX ensure tag field is removed
msgT_name: str = dict_msg.pop(msg_type_tag_field)
msgT: MsgType = _msg_table[msgT_name]
if use_pretty:
msgT = defstruct(
name=msgT_name,
fields=[
(key, fi.type)
for fi, key, _
in pretty_struct.iter_fields(msgT)
],
bases=(
pretty_struct.Struct,
msgT,
),
)
return msgT(**dict_msg)
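For illustration only (not in the diff): rebuild a typed msg from an untyped `dict` decode, e.g. as produced by an `Any`-typed msgpack decoder.

stop_dict: dict = {
    'msg_type': 'Stop',   # the tag field
    'cid': '42',
}
stop = from_dict_msg(stop_dict)
assert isinstance(stop, Stop)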
# TODO: should we make a set of cancel msgs?
# -[ ] a version of `ContextCancelled`?
# |_ and/or with a scope field?
# -[ ] or, a full `ActorCancelled`?
#
# class Cancelled(MsgType):
# cid: str
#
# -[ ] what about overruns?
#
# class Overrun(MsgType):
# cid: str
_runtime_msgs: list[Struct] = [
# identity handshake on first IPC `Channel` contact.
Aid,
# parent-to-child spawn specification passed as 2nd msg after
# handshake ONLY after child connects back to parent.
SpawnSpec,
# inter-actor RPC initiation
Start, # schedule remote task-as-func
StartAck, # ack the schedule request
# emission from `MsgStream.aclose()`
Stop,
# `Return` sub-type that we always accept from
# runtime-internal cancel endpoints
CancelAck,
# box remote errors, normally subtypes
# of `RemoteActorError`.
Error,
]
# the no-outcome-yet IAC (inter-actor-communication) sub-set which
# can be `PayloadMsg.pld` payload field type-limited by application code
# using `apply_codec()` and `limit_msg_spec()`.
_payload_msgs: list[PayloadMsg] = [
# first <value> from `Context.started(<value>)`
Started,
# any <value> sent via `MsgStream.send(<value>)`
Yield,
# the final value returned from a `@context` decorated
# IPC endpoint.
Return,
]
# built-in SC shuttle protocol msg type set in
# approx order of the IPC txn-state spaces.
__msg_types__: list[MsgType] = (
_runtime_msgs
+
_payload_msgs
)
_msg_table: dict[str, MsgType] = {
msgT.__name__: msgT
for msgT in __msg_types__
}
# TODO: use new type declaration syntax for msg-type-spec
# https://docs.python.org/3/library/typing.html#type-aliases
# https://docs.python.org/3/reference/simple_stmts.html#type
MsgType: TypeAlias = Union[*__msg_types__]
def mk_msg_spec(
payload_type_union: Union[Type] = Any,
spec_build_method: Literal[
'indexed_generics', # works
'defstruct',
'types_new_class',
] = 'indexed_generics',
) -> tuple[
Union[MsgType],
list[MsgType],
]:
'''
Create a payload-(data-)type-parameterized IPC message specification.
Allows generating IPC msg types from the above builtin set
with a payload (field) restricted data-type, the `Msg.pld: PayloadT`.
This allows runtime-task contexts to use the python type system
to limit/filter payload values as determined by the input
`payload_type_union: Union[Type]`.
Notes: originally multiple approaches for constructing the
type-union passed to `msgspec` were attempted as selected via the
`spec_build_method`, but it turns out only the default method
'indexed_generics' seems to work reliably in all use cases. As
such, the others will likely be removed in the near future.
'''
submsg_types: list[MsgType] = Msg.__subclasses__()
bases: tuple = (
# XXX NOTE XXX the below generic-parameterization seems to
# be THE ONLY way to get this to work correctly in terms
# of getting ValidationError on a roundtrip?
Msg[payload_type_union],
Generic[PayloadT],
)
defstruct_bases: tuple = (
Msg, # [payload_type_union],
# Generic[PayloadT],
# ^-XXX-^: not allowed? lul..
)
ipc_msg_types: list[Msg] = []
idx_msg_types: list[Msg] = []
defs_msg_types: list[Msg] = []
nc_msg_types: list[Msg] = []
for msgtype in __msg_types__:
# for the NON-payload (user api) type specify-able
# msgs types, we simply aggregate the def as is
# for inclusion in the output type `Union`.
if msgtype not in _payload_msgs:
ipc_msg_types.append(msgtype)
continue
# check inheritance sanity
assert msgtype in submsg_types
# TODO: wait why do we need the dynamic version here?
# XXX ANSWER XXX -> BC INHERITANCE.. don't work w generics..
#
# NOTE previously bc msgtypes WERE NOT inheriting
# directly the `Generic[PayloadT]` type, the manual method
# of generic-paraming with `.__class_getitem__()` wasn't
# working..
#
# XXX but bc i changed that to make every subtype inherit
# it, this manual "indexed parameterization" method seems
# to work?
#
# -[x] paraming the `PayloadT` values via `Generic[T]`
# does work it seems but WITHOUT inheritance of generics
#
# -[-] is there a way to get it to work at module level
# just using inheritance or maybe a metaclass?
# => thot that `defstruct` might work, but NOPE, see
# below..
#
idxed_msg_type: Msg = msgtype[payload_type_union]
idx_msg_types.append(idxed_msg_type)
# TODO: WHY do we need to dynamically generate the
# subtype-msgs here to ensure the `.pld` parameterization
# propagates as well as works at all in terms of the
# `msgpack.Decoder()`..?
#
# dynamically create the payload type-spec-limited msg set.
newclass_msgtype: Type = types.new_class(
name=msgtype.__name__,
bases=bases,
kwds={},
)
nc_msg_types.append(
newclass_msgtype[payload_type_union]
)
# with `msgspec.structs.defstruct`
# XXX ALSO DOESN'T WORK
defstruct_msgtype = defstruct(
name=msgtype.__name__,
fields=[
('cid', str),
# XXX doesn't seem to work..
# ('pld', PayloadT),
('pld', payload_type_union),
],
bases=defstruct_bases,
)
defs_msg_types.append(defstruct_msgtype)
# assert index_paramed_msg_type == manual_paramed_msg_subtype
# paramed_msg_type = manual_paramed_msg_subtype
# ipc_payload_msgs_type_union |= index_paramed_msg_type
idx_spec: Union[Type[Msg]] = Union[*idx_msg_types]
def_spec: Union[Type[Msg]] = Union[*defs_msg_types]
nc_spec: Union[Type[Msg]] = Union[*nc_msg_types]
specs: dict[str, Union[Type[Msg]]] = {
'indexed_generics': idx_spec,
'defstruct': def_spec,
'types_new_class': nc_spec,
}
msgtypes_table: dict[str, list[Msg]] = {
'indexed_generics': idx_msg_types,
'defstruct': defs_msg_types,
'types_new_class': nc_msg_types,
}
# XXX lol apparently type unions can't ever
# be equal eh?
# TODO: grok the diff here better..
#
# assert (
# idx_spec
# ==
# nc_spec
# ==
# def_spec
# )
# breakpoint()
pld_spec: Union[Type] = specs[spec_build_method]
runtime_spec: Union[Type] = Union[*ipc_msg_types]
ipc_spec = pld_spec | runtime_spec
log.runtime(
'Generating new IPC msg-spec\n'
f'{ipc_spec}\n'
)
assert (
ipc_spec
and
ipc_spec is not Any
)
return (
ipc_spec,
msgtypes_table[spec_build_method]
+
ipc_msg_types,
)
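A hedged usage sketch (not part of the diff): generate a spec whose payload msgs may only carry `int` payloads and build a strict decoder from it. The `msgpack` import is an addition for the example only.

from msgspec import msgpack

int_spec, int_msg_types = mk_msg_spec(
    payload_type_union=int,
)
dec = msgpack.Decoder(type=int_spec)   # rejects msgs whose `.pld` isn't an `int`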

View File

@ -18,44 +18,34 @@
Infection apis for ``asyncio`` loops running ``trio`` using guest mode.
'''
from __future__ import annotations
import asyncio
from asyncio.exceptions import CancelledError
from contextlib import asynccontextmanager as acm
from dataclasses import dataclass
import inspect
import traceback
from typing import (
Any,
Callable,
AsyncIterator,
Awaitable,
Optional,
)
import tractor
from tractor._exceptions import AsyncioCancelled
from tractor._state import (
debug_mode,
)
from tractor.devx import _debug
from tractor.log import get_logger
from tractor.trionics._broadcast import (
import trio
from outcome import Error
from .log import get_logger
from ._state import current_actor
from ._exceptions import AsyncioCancelled
from .trionics._broadcast import (
broadcast_receiver,
BroadcastReceiver,
)
import trio
from outcome import (
Error,
Outcome,
)
log = get_logger(__name__)
__all__ = [
'run_task',
'run_as_asyncio_guest',
]
__all__ = ['run_task', 'run_as_asyncio_guest']
@dataclass
@ -75,9 +65,9 @@ class LinkedTaskChannel(trio.abc.Channel):
_trio_exited: bool = False
# set after ``asyncio.create_task()``
_aio_task: asyncio.Task|None = None
_aio_err: BaseException|None = None
_broadcaster: BroadcastReceiver|None = None
_aio_task: Optional[asyncio.Task] = None
_aio_err: Optional[BaseException] = None
_broadcaster: Optional[BroadcastReceiver] = None
async def aclose(self) -> None:
await self._from_aio.aclose()
@ -159,20 +149,17 @@ def _run_asyncio_task(
*,
qsize: int = 1,
provide_channels: bool = False,
hide_tb: bool = False,
**kwargs,
) -> LinkedTaskChannel:
'''
Run an ``asyncio`` async function or generator in a task, return
or stream the result back to the caller `trio.lowlevel.Task`.
or stream the result back to ``trio``.
'''
__tracebackhide__: bool = hide_tb
if not tractor.current_actor().is_infected_aio():
raise RuntimeError(
"`infect_asyncio` mode is not enabled!?"
)
__tracebackhide__ = True
if not current_actor().is_infected_aio():
raise RuntimeError("`infect_asyncio` mode is not enabled!?")
# ITC (inter task comms), these channel/queue names are mostly from
# ``asyncio``'s perspective.
@ -180,6 +167,7 @@ def _run_asyncio_task(
to_trio, from_aio = trio.open_memory_channel(qsize) # type: ignore
args = tuple(inspect.getfullargspec(func).args)
if getattr(func, '_tractor_steam_function', None):
# the assumption is that the target async routine accepts the
# send channel then it intends to yield more then one return
@ -200,7 +188,7 @@ def _run_asyncio_task(
cancel_scope = trio.CancelScope()
aio_task_complete = trio.Event()
aio_err: BaseException|None = None
aio_err: Optional[BaseException] = None
chan = LinkedTaskChannel(
aio_q, # asyncio.Queue
@ -229,15 +217,8 @@ def _run_asyncio_task(
try:
result = await coro
except BaseException as aio_err:
log.exception('asyncio task errored')
chan._aio_err = aio_err
if isinstance(aio_err, CancelledError):
log.runtime(
'`asyncio` task was cancelled..\n'
)
else:
log.exception(
'`asyncio` task errored\n'
)
raise
else:
@ -266,69 +247,37 @@ def _run_asyncio_task(
if not inspect.isawaitable(coro):
raise TypeError(f"No support for invoking {coro}")
task: asyncio.Task = asyncio.create_task(
task = asyncio.create_task(
wait_on_coro_final_result(
to_trio,
coro,
aio_task_complete
)
)
chan._aio_task: asyncio.Task = task
# XXX TODO XXX get this actually workin.. XD
# -[ ] we need logic to setup `greenback` for `asyncio`-side task
# REPLing.. which should normally be nearly the same as for
# `trio`?
# -[ ] add to a new `.devx._greenback.maybe_init_for_asyncio()`?
if (
debug_mode()
and
(greenback := _debug.maybe_import_greenback(
force_reload=True,
raise_not_found=False,
))
):
greenback.bestow_portal(task)
chan._aio_task = task
def cancel_trio(task: asyncio.Task) -> None:
'''
Cancel the calling `trio` task on error.
Cancel the calling ``trio`` task on error.
'''
nonlocal chan
aio_err: BaseException|None = chan._aio_err
task_err: BaseException|None = None
aio_err = chan._aio_err
task_err: Optional[BaseException] = None
# only to avoid `asyncio` complaining about uncaptured
# only to avoid ``asyncio`` complaining about uncaptured
# task exceptions
try:
res: Any = task.result()
task.exception()
except BaseException as terr:
task_err: BaseException = terr
task_err = terr
msg: str = (
'Infected `asyncio` task {etype_str}\n'
)
if isinstance(terr, CancelledError):
msg += (
f'c)>\n'
f' |_{task}\n'
)
log.cancel(
msg.format(etype_str='cancelled')
)
log.cancel(f'`asyncio` task cancelled: {task.get_name()}')
else:
msg += (
f'x)>\n'
f' |_{task}\n'
)
log.exception(
msg.format(etype_str='errored')
)
log.exception(f'`asyncio` task: {task.get_name()} errored')
assert type(terr) is type(aio_err), (
'`asyncio` task error mismatch?!?'
)
assert type(terr) is type(aio_err), 'Asyncio task error mismatch?'
if aio_err is not None:
# XXX: uhh is this true?
@ -341,47 +290,27 @@ def _run_asyncio_task(
# We might want to change this in the future though.
from_aio.close()
if task_err is None:
if type(aio_err) is CancelledError:
log.cancel("infected task was cancelled")
# TODO: show that the cancellation originated
# from the ``trio`` side? right?
# if cancel_scope.cancelled:
# raise aio_err from err
elif task_err is None:
assert aio_err
# wait, wut?
# aio_err.with_traceback(aio_err.__traceback__)
aio_err.with_traceback(aio_err.__traceback__)
log.error('infected task errored')
# TODO: show when cancellation originated
# from each side more pedantically?
# elif (
# type(aio_err) is CancelledError
# and # trio was the cause?
# cancel_scope.cancel_called
# ):
# log.cancel(
# 'infected task was cancelled by `trio`-side'
# )
# raise aio_err from task_err
# XXX: if not already, alway cancel the scope
# on a task error in case the trio task is blocking on
# a checkpoint.
# XXX: alway cancel the scope on error
# in case the trio task is blocking
# on a checkpoint.
cancel_scope.cancel()
if (
task_err
and
aio_err is not task_err
):
raise aio_err from task_err
# raise any `asyncio` side error.
# raise any ``asyncio`` side error.
raise aio_err
log.info(
'`trio` received final result from {task}\n'
f'|_{res}\n'
)
# TODO: do we need this?
# if task_err:
# cancel_scope.cancel()
# raise task_err
task.add_done_callback(cancel_trio)
return chan
@ -400,17 +329,15 @@ async def translate_aio_errors(
'''
trio_task = trio.lowlevel.current_task()
aio_err: BaseException|None = None
aio_err: Optional[BaseException] = None
# TODO: make this a channel method?
def maybe_raise_aio_err(
err: Exception|None = None
err: Optional[Exception] = None
) -> None:
aio_err = chan._aio_err
if (
aio_err is not None
and
# not isinstance(aio_err, CancelledError)
aio_err is not None and
type(aio_err) != CancelledError
):
# always raise from any captured asyncio error
@ -442,17 +369,13 @@ async def translate_aio_errors(
):
aio_err = chan._aio_err
if (
task.cancelled()
and
task.cancelled() and
type(aio_err) is CancelledError
):
# if an underlying `asyncio.CancelledError` triggered this
# if an underlying ``asyncio.CancelledError`` triggered this
# channel close, raise our (non-``BaseException``) wrapper
# error: ``AsyncioCancelled`` from that source error.
raise AsyncioCancelled(
f'Task cancelled\n'
f'|_{task}\n'
) from aio_err
raise AsyncioCancelled from aio_err
else:
raise
@ -495,8 +418,8 @@ async def run_task(
) -> Any:
'''
Run an `asyncio` async function or generator in a task, return
or stream the result back to `trio`.
Run an ``asyncio`` async function or generator in a task, return
or stream the result back to ``trio``.
'''
# simple async func
@ -554,124 +477,11 @@ async def open_channel_from(
chan._to_trio.close()
class AsyncioRuntimeTranslationError(RuntimeError):
'''
We failed to correctly relay runtime semantics and/or maintain SC
supervision rules cross-event-loop.
'''
def run_trio_task_in_future(
async_fn,
*args,
) -> asyncio.Future:
'''
Run an async-func as a `trio` task from an `asyncio.Task` wrapped
in an `asyncio.Future` which is returned to the caller.
Another astounding feat by the great @oremanj !!
Bo
'''
result_future = asyncio.Future()
cancel_scope = trio.CancelScope()
finished: bool = False
# monkey-patch the future's `.cancel()` meth to
# allow cancellation relay to `trio`-task.
cancel_message: str|None = None
orig_cancel = result_future.cancel
def wrapped_cancel(
msg: str|None = None,
):
nonlocal cancel_message
if finished:
# We're being called back after the task completed
if msg is not None:
return orig_cancel(msg)
elif cancel_message is not None:
return orig_cancel(cancel_message)
else:
return orig_cancel()
if result_future.done():
return False
# Forward cancellation to the Trio task, don't mark
# future as cancelled until it completes
cancel_message = msg
cancel_scope.cancel()
return True
result_future.cancel = wrapped_cancel
async def trio_task() -> None:
nonlocal finished
try:
with cancel_scope:
try:
# TODO: type this with new tech in 3.13
result: Any = await async_fn(*args)
finally:
finished = True
# Propagate result or cancellation to the Future
if cancel_scope.cancelled_caught:
result_future.cancel()
elif not result_future.cancelled():
result_future.set_result(result)
except BaseException as exc:
# the result future gets all the non-Cancelled
# exceptions. Any Cancelled need to keep propagating
# out of this stack frame in order to reach the cancel
# scope for which they're intended.
cancelled: BaseException|None
rest: BaseException|None
if isinstance(exc, BaseExceptionGroup):
cancelled, rest = exc.split(trio.Cancelled)
elif isinstance(exc, trio.Cancelled):
cancelled, rest = exc, None
else:
cancelled, rest = None, exc
if not result_future.cancelled():
if rest:
result_future.set_exception(rest)
else:
result_future.cancel()
if cancelled:
raise cancelled
trio.lowlevel.spawn_system_task(
trio_task,
name=async_fn,
)
return result_future
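A rough, hedged sketch (not from the diff) of the intended call pattern per the docstring above: from `asyncio`-side code running alongside the same guest-run, schedule a `trio` async fn and await it as a future. `fetch_data` is a hypothetical name for illustration.

async def aio_side_caller() -> None:
    fut: asyncio.Future = run_trio_task_in_future(
        fetch_data,      # hypothetical `trio` async fn
        'some-key',      # positional args forwarded as *args
    )
    result = await fut   # cancelling `fut` relays to the `trio` task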
def run_as_asyncio_guest(
trio_main: Callable,
# ^-NOTE-^ when spawned with `infected_aio=True` this func is
# normally `Actor._async_main()` as is passed by some bootstrap
# entrypoint like `._entry._trio_main()`.
_sigint_loop_pump_delay: float = 0,
trio_main: Callable,
) -> None:
# ^-TODO-^ technically whatever `trio_main` returns.. we should
# try to use func-typevar-params at leaast by 3.13!
# -[ ] https://typing.readthedocs.io/en/latest/spec/callables.html#callback-protocols
# -[ ] https://peps.python.org/pep-0646/#using-type-variable-tuples-in-functions
# -[ ] https://typing.readthedocs.io/en/latest/spec/callables.html#unpack-for-keyword-arguments
# -[ ] https://peps.python.org/pep-0718/
'''
Entry for an "infected ``asyncio`` actor".
@ -697,213 +507,37 @@ def run_as_asyncio_guest(
# :)
async def aio_main(trio_main):
'''
Main `asyncio.Task` which calls
`trio.lowlevel.start_guest_run()` to "infect" the `asyncio`
event-loop by embedding the `trio` scheduler allowing us to
boot the `tractor` runtime and connect back to our parent.
'''
loop = asyncio.get_running_loop()
trio_done_fute = asyncio.Future()
startup_msg: str = (
'Starting `asyncio` guest-loop-run\n'
'-> got running loop\n'
'-> built a `trio`-done future\n'
)
# TODO: shouldn't this be done in the guest-run trio task?
# if debug_mode():
# # XXX make it obvi we know this isn't supported yet!
# log.error(
# 'Attempting to enter unsupported `greenback` init '
# 'from `asyncio` task..'
# )
# await _debug.maybe_init_greenback(
# force_reload=True,
# )
trio_done_fut = asyncio.Future()
def trio_done_callback(main_outcome):
log.runtime(
f'`trio` guest-run finishing with outcome\n'
f'>) {main_outcome}\n'
f'|_{trio_done_fute}\n'
)
if isinstance(main_outcome, Error):
error: BaseException = main_outcome.error
error = main_outcome.error
trio_done_fut.set_exception(error)
# show a dedicated `asyncio`-side tb from the error
tb_str: str = ''.join(traceback.format_exception(error))
log.exception(
'Guest-run errored!?\n\n'
f'{main_outcome}\n'
f'{error}\n\n'
f'{tb_str}\n'
)
trio_done_fute.set_exception(error)
# TODO: explicit asyncio tb?
# traceback.print_exception(error)
# XXX: do we need this?
# actor.cancel_soon()
# raise inline
main_outcome.unwrap()
else:
trio_done_fute.set_result(main_outcome)
log.info(
f'`trio` guest-run finished with outcome\n'
f')>\n'
f'|_{trio_done_fute}\n'
)
startup_msg += (
f'-> created {trio_done_callback!r}\n'
f'-> scheduling `trio_main`: {trio_main!r}\n'
)
trio_done_fut.set_result(main_outcome)
log.runtime(f"trio_main finished: {main_outcome!r}")
# start the infection: run trio on the asyncio loop in "guest mode"
log.runtime(
f'{startup_msg}\n\n'
+
'Infecting `asyncio`-process with a `trio` guest-run!\n'
)
log.info(f"Infecting asyncio process with {trio_main}")
trio.lowlevel.start_guest_run(
trio_main,
run_sync_soon_threadsafe=loop.call_soon_threadsafe,
done_callback=trio_done_callback,
)
fute_err: BaseException|None = None
try:
out: Outcome = await asyncio.shield(trio_done_fute)
# NOTE will raise (via `Error.unwrap()`) from any
# exception packed into the guest-run's `main_outcome`.
return out.unwrap()
except (
# XXX special SIGINT-handling is required since
# `asyncio.shield()`-ing seems to NOT handle that case as
# per recent changes in 3.11:
# https://docs.python.org/3/library/asyncio-runner.html#handling-keyboard-interruption
#
# NOTE: further, apparently ONLY need to handle this
# special SIGINT case since all other `asyncio`-side
# errors can be processed via our `chan._aio_err`
# relaying (right?); SIGINT seems to take a totally different
# error path in `asyncio`'s runtime..?
asyncio.CancelledError,
) as _fute_err:
fute_err = _fute_err
err_message: str = (
'main `asyncio` task '
)
if isinstance(fute_err, asyncio.CancelledError):
err_message += 'was cancelled!\n'
else:
err_message += f'errored with {out.error!r}\n'
actor: tractor.Actor = tractor.current_actor()
log.exception(
err_message
+
'Cancelling `trio`-side `tractor`-runtime..\n'
f'c)>\n'
f' |_{actor}.cancel_soon()\n'
)
# XXX WARNING XXX the next LOCs are super important, since
# without them, we can get guest-run abandonment cases
# where `asyncio` will not schedule or wait on the `trio`
# guest-run task before final shutdown! This is
# particularly true if the `trio` side has tasks doing
# shielded work when a SIGINT condition occurs.
#
# We now have the
# `test_infected_asyncio.test_sigint_closes_lifetime_stack()`
# suite to ensure we do not suffer these issues
# (hopefully) ever again.
#
# The original abandonment issue surfaced as 2 different
# race-condition dependent scenarios, all to do with
# `asyncio` handling SIGINT from the system:
#
# - "silent-abandon" (WORST CASE):
# `asyncio` abandons the `trio` guest-run task silently
# and no `trio`-guest-run or `tractor`-actor-runtime
# teardown happens whatsoever..
#
# - "loud-abandon" (BEST-ish CASE):
# the guest run gets abandoned "loudly" with `trio`
# reporting a console traceback and further tbs of all
# the (failed) GC-triggered shutdown routines which
# thankfully does get dumped to console..
#
# The abandonment is most easily reproduced if the `trio`
# side has tasks doing shielded work where those tasks
# ignore the normal `Cancelled` condition and continue to
# run, but obviously `asyncio` isn't aware of this and at
# some point bails on the guest-run unless we take manual
# intervention..
#
# To repeat, *WITHOUT THIS* stuff below the guest-run can
# get race-conditionally abandoned!!
#
# XXX SOLUTION XXX
# ------ - ------
# XXX FIRST PART:
# ------ - ------
# the obvious fix to the "silent-abandon" case is to
# explicitly cancel the actor runtime such that no
# runtime tasks are even left unaware that the guest-run
# should be terminated due to OS cancellation.
#
actor.cancel_soon()
# ------ - ------
# XXX SECOND PART:
# ------ - ------
# Pump the `asyncio` event-loop to allow
# `trio`-side to `trio`-guest-run to complete and
# teardown !!
#
# oh `asyncio`, how i don't miss you at all XD
while not trio_done_fute.done():
log.runtime(
'Waiting on main guest-run `asyncio` task to complete..\n'
f'|_trio_done_fut: {trio_done_fute}\n'
)
await asyncio.sleep(_sigint_loop_pump_delay)
# XXX is there any alt API/approach like the internal
# call below but that doesn't block indefinitely..?
# loop._run_once()
try:
return trio_done_fute.result()
except asyncio.exceptions.InvalidStateError as state_err:
# XXX be super duper noisy about abandonment issues!
aio_task: asyncio.Task = asyncio.current_task()
message: str = (
'The `asyncio`-side task likely exited before the '
'`trio`-side guest-run completed!\n\n'
)
if fute_err:
message += (
f'The main {aio_task}\n'
f'STOPPED due to {type(fute_err)}\n\n'
)
message += (
f'Likely something inside our guest-run-as-task impl is '
f'not effectively waiting on the `trio`-side to complete ?!\n'
f'This code -> {aio_main!r}\n\n'
'Below you will likely see a '
'"RuntimeWarning: Trio guest run got abandoned.." !!\n'
)
raise AsyncioRuntimeTranslationError(message) from state_err
# ``.unwrap()`` will raise here on error
return (await trio_done_fut).unwrap()
# might as well if it's installed.
try:
@ -911,8 +545,6 @@ def run_as_asyncio_guest(
loop = uvloop.new_event_loop()
asyncio.set_event_loop(loop)
except ImportError:
log.runtime('`uvloop` not available..')
pass
return asyncio.run(
aio_main(trio_main),
)
return asyncio.run(aio_main(trio_main))
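For orientation only (not part of this diff), a hedged sketch of the module's user-facing call pattern: from `trio`-side code in an actor spawned with `infect_asyncio=True`, run an `asyncio` coroutine via `run_task()` and get its result back; kwargs are forwarded to the target. The coroutine and its args are assumptions for illustration.

import asyncio

async def aio_sleep_and_echo(msg: str) -> str:
    await asyncio.sleep(0.1)
    return msg

async def trio_side() -> None:
    result = await run_task(aio_sleep_and_echo, msg='hi')
    assert result == 'hi'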

View File

@ -19,13 +19,22 @@ Sugary patterns for trio + tractor designs.
'''
from ._mngrs import (
gather_contexts as gather_contexts,
maybe_open_context as maybe_open_context,
maybe_open_nursery as maybe_open_nursery,
gather_contexts,
maybe_open_context,
maybe_open_nursery,
)
from ._broadcast import (
AsyncReceiver as AsyncReceiver,
broadcast_receiver as broadcast_receiver,
BroadcastReceiver as BroadcastReceiver,
Lagged as Lagged,
broadcast_receiver,
BroadcastReceiver,
Lagged,
)
__all__ = [
'gather_contexts',
'broadcast_receiver',
'BroadcastReceiver',
'Lagged',
'maybe_open_context',
'maybe_open_nursery',
]

View File

@ -25,15 +25,8 @@ from collections import deque
from contextlib import asynccontextmanager
from functools import partial
from operator import ne
from typing import (
Callable,
Awaitable,
Any,
AsyncIterator,
Protocol,
Generic,
TypeVar,
)
from typing import Optional, Callable, Awaitable, Any, AsyncIterator, Protocol
from typing import Generic, TypeVar
import trio
from trio._core._run import Task
@ -44,11 +37,6 @@ from tractor.log import get_logger
log = get_logger(__name__)
# TODO: use new type-vars syntax from 3.12
# https://realpython.com/python312-new-features/#dedicated-type-variable-syntax
# https://docs.python.org/3/whatsnew/3.12.html#whatsnew312-pep695
# https://docs.python.org/3/reference/simple_stmts.html#type
#
# A regular invariant generic type
T = TypeVar("T")
@ -114,7 +102,7 @@ class BroadcastState(Struct):
# broadcast event to wake up all sleeping consumer tasks
# on a newly produced value from the sender.
recv_ready: tuple[int, trio.Event]|None = None
recv_ready: Optional[tuple[int, trio.Event]] = None
# if a ``trio.EndOfChannel`` is received on any
# consumer all consumers should be placed in this state
@ -156,12 +144,11 @@ class BroadcastState(Struct):
class BroadcastReceiver(ReceiveChannel):
'''
A memory receive channel broadcaster which is non-lossy for
the fastest consumer.
A memory receive channel broadcaster which is non-lossy for the
fastest consumer.
Additional consumer tasks can receive all produced values by
registering with ``.subscribe()`` and receiving from the new
instance it delivers.
Additional consumer tasks can receive all produced values by registering
with ``.subscribe()`` and receiving from the new instance it delivers.
'''
def __init__(
@ -169,7 +156,7 @@ class BroadcastReceiver(ReceiveChannel):
rx_chan: AsyncReceiver,
state: BroadcastState,
receive_afunc: Callable[[], Awaitable[Any]]|None = None,
receive_afunc: Optional[Callable[[], Awaitable[Any]]] = None,
raise_on_lag: bool = True,
) -> None:
@ -457,7 +444,7 @@ def broadcast_receiver(
recv_chan: AsyncReceiver,
max_buffer_size: int,
receive_afunc: Callable[[], Awaitable[Any]]|None = None,
receive_afunc: Optional[Callable[[], Awaitable[Any]]] = None,
raise_on_lag: bool = True,
) -> BroadcastReceiver:
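A hedged usage sketch (not part of the diff): fan out one `trio` memory channel so two consumer tasks each see every produced value; exact teardown semantics may differ from this simplified form.

async def consumer(name: str, bcast: BroadcastReceiver) -> None:
    async with bcast.subscribe() as rx:
        async for value in rx:
            print(name, value)

async def main() -> None:
    tx, rx = trio.open_memory_channel(1)
    bcast = broadcast_receiver(rx, max_buffer_size=16)
    async with trio.open_nursery() as n:
        n.start_soon(consumer, 'a', bcast)
        n.start_soon(consumer, 'b', bcast)
        async with tx:
            for i in range(3):
                await tx.send(i)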

View File

@ -18,12 +18,8 @@
Async context manager primitives with hard ``trio``-aware semantics
'''
from __future__ import annotations
from contextlib import (
asynccontextmanager as acm,
)
from contextlib import asynccontextmanager as acm
import inspect
from types import ModuleType
from typing import (
Any,
AsyncContextManager,
@ -34,15 +30,13 @@ from typing import (
Optional,
Sequence,
TypeVar,
TYPE_CHECKING,
)
import trio
from tractor._state import current_actor
from tractor.log import get_logger
from trio_typing import TaskStatus
if TYPE_CHECKING:
from tractor import ActorNursery
from .._state import current_actor
from ..log import get_logger
log = get_logger(__name__)
@ -53,10 +47,8 @@ T = TypeVar("T")
@acm
async def maybe_open_nursery(
nursery: trio.Nursery|ActorNursery|None = None,
nursery: trio.Nursery | None = None,
shield: bool = False,
lib: ModuleType = trio,
) -> AsyncGenerator[trio.Nursery, Any]:
'''
Create a new nursery if None provided.
@ -67,17 +59,17 @@ async def maybe_open_nursery(
if nursery is not None:
yield nursery
else:
async with lib.open_nursery() as nursery:
async with trio.open_nursery() as nursery:
nursery.cancel_scope.shield = shield
yield nursery
async def _enter_and_wait(
mngr: AsyncContextManager[T],
unwrapped: dict[int, T],
all_entered: trio.Event,
parent_exit: trio.Event,
seed: int,
) -> None:
'''
@ -88,10 +80,7 @@ async def _enter_and_wait(
async with mngr as value:
unwrapped[id(mngr)] = value
if all(
val != seed
for val in unwrapped.values()
):
if all(unwrapped.values()):
all_entered.set()
await parent_exit.wait()
@ -99,34 +88,23 @@ async def _enter_and_wait(
@acm
async def gather_contexts(
mngrs: Sequence[AsyncContextManager[T]],
) -> AsyncGenerator[
tuple[
T | None,
...
],
None,
]:
) -> AsyncGenerator[tuple[Optional[T], ...], None]:
'''
Concurrently enter a sequence of async context managers (acms),
each from a separate `trio` task and deliver the unwrapped
`yield`-ed values in the same order once all managers have entered.
Concurrently enter a sequence of async context managers, each in
a separate ``trio`` task and deliver the unwrapped values in the
same order once all managers have entered. On exit all contexts are
subsequently and concurrently exited.
On exit, all acms are subsequently and concurrently exited.
This function is somewhat similar to a batch of non-blocking
calls to `contextlib.AsyncExitStack.enter_async_context()`
(inside a loop) *in combo with* an `asyncio.gather()` to get the
`.__aenter__()`-ed values, except the managers are both
concurrently entered and exited and *cancellation just works*(R).
This function is somewhat similar to common usage of
``contextlib.AsyncExitStack.enter_async_context()`` (in a loop) in
combo with ``asyncio.gather()`` except the managers are concurrently
entered and exited, and cancellation just works.
'''
seed: int = id(mngrs)
unwrapped: dict[int, T | None] = {}.fromkeys(
(id(mngr) for mngr in mngrs),
seed,
)
unwrapped: dict[int, Optional[T]] = {}.fromkeys(id(mngr) for mngr in mngrs)
all_entered = trio.Event()
parent_exit = trio.Event()
@ -138,9 +116,8 @@ async def gather_contexts(
if not mngrs:
raise ValueError(
'`.trionics.gather_contexts()` input mngrs is empty?\n'
'Did you try to use inline generator syntax?\n'
'Use a non-lazy iterator or sequence type instead!'
'input mngrs is empty?\n'
'Did try to use inline generator syntax?'
)
async with trio.open_nursery() as n:
@ -151,7 +128,6 @@ async def gather_contexts(
unwrapped,
all_entered,
parent_exit,
seed,
)
# deliver control once all managers have started up
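An illustrative sketch (not in this diff) of the `gather_contexts()` call pattern: enter several acms concurrently and receive their `yield`-ed values in input order; `open_conn()` is a hypothetical acm factory.

async with gather_contexts([
    open_conn('hostA'),
    open_conn('hostB'),
    open_conn('hostC'),
]) as (a, b, c):
    ...  # all three entered here; exited concurrently on block exit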
@ -192,7 +168,7 @@ class _Cache:
cls,
mng,
ctx_key: tuple,
task_status: trio.TaskStatus[T] = trio.TASK_STATUS_IGNORED,
task_status: TaskStatus[T] = trio.TASK_STATUS_IGNORED,
) -> None:
async with mng as value:
@ -219,10 +195,9 @@ async def maybe_open_context(
) -> AsyncIterator[tuple[bool, T]]:
'''
Maybe open an async-context-manager (acm) if there is not already
a `_Cached` version for the provided (input) `key` for *this* actor.
Return the `_Cached` instance on a _Cache hit.
Maybe open a context manager if there is not already a _Cached
version for the provided ``key`` for *this* actor. Return the
_Cached instance on a _Cache hit.
'''
fid = id(acm_func)
@ -234,7 +209,6 @@ async def maybe_open_context(
# yielded output
yielded: Any = None
lock_registered: bool = False
# Lock resource acquisition around task racing / ``trio``'s
# scheduler protocol.
@ -242,7 +216,6 @@ async def maybe_open_context(
# to allow re-entrant use cases where one `maybe_open_context()`
# wrapped factor may want to call into another.
lock = _Cache.locks.setdefault(fid, trio.Lock())
lock_registered: bool = True
await lock.acquire()
# XXX: one singleton nursery per actor and we want to
@ -264,7 +237,7 @@ async def maybe_open_context(
yielded = _Cache.values[ctx_key]
except KeyError:
log.debug(f'Allocating new {acm_func} for {ctx_key}')
log.info(f'Allocating new {acm_func} for {ctx_key}')
mngr = acm_func(**kwargs)
resources = _Cache.resources
assert not resources.get(ctx_key), f'Resource exists? {ctx_key}'
@ -281,16 +254,8 @@ async def maybe_open_context(
yield False, yielded
else:
log.info(f'Reusing _Cached resource for {ctx_key}')
_Cache.users += 1
log.runtime(
f'Re-using cached resource for user {_Cache.users}\n\n'
f'{ctx_key!r} -> {type(yielded)}\n'
# TODO: make this work with values but without
# `msgspec.Struct` causing frickin crashes on field-type
# lookups..
# f'{ctx_key!r} -> {yielded!r}\n'
)
lock.release()
yield True, yielded
@ -300,7 +265,7 @@ async def maybe_open_context(
if yielded is not None:
# if no more consumers, teardown the client
if _Cache.users <= 0:
log.debug(f'De-allocating resource for {ctx_key}')
log.info(f'De-allocating resource for {ctx_key}')
# XXX: if we're cancelled the entry may have never
# been entered since the nursery task was killed.
@ -310,9 +275,4 @@ async def maybe_open_context(
_, no_more_users = entry
no_more_users.set()
if lock_registered:
maybe_lock = _Cache.locks.pop(fid, None)
if maybe_lock is None:
log.error(
f'Resource lock for {fid} ALREADY POPPED?'
)
_Cache.locks.pop(fid)
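Finally, an illustrative sketch (not part of the diff) of `maybe_open_context()` usage: share a single per-actor resource across concurrent callers, keyed by the acm factory plus its kwargs; `open_conn` and its kwargs are hypothetical.

async with maybe_open_context(
    acm_func=open_conn,
    kwargs={'host': 'localhost', 'port': 8888},
) as (cache_hit, conn):
    if cache_hit:
        ...  # reusing an already-opened `conn` from the cache
    else:
        ...  # this task actually opened (and will teardown) the resource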